diff --git a/drivers/spi/Kconfig.andes_atcspi200 b/drivers/spi/Kconfig.andes_atcspi200
index 41b331023af..ad7dfca16cc 100644
--- a/drivers/spi/Kconfig.andes_atcspi200
+++ b/drivers/spi/Kconfig.andes_atcspi200
@@ -11,3 +11,12 @@ config SPI_ANDES_ATCSPI200
 	depends on DT_HAS_ANDESTECH_ATCSPI200_ENABLED
 	help
 	  Enable driver for Andes ATCSPI200 SPI controller
+
+if SPI_ANDES_ATCSPI200
+
+config ANDES_SPI_DMA_MODE
+	bool "Use DMA mode for SPI transfers"
+	default y
+	depends on DMA
+	help
+	  Offload SPI TX/RX data movement to the DMA engine instead of
+	  driving the FIFOs from interrupt context.
+
+endif # SPI_ANDES_ATCSPI200
diff --git a/drivers/spi/spi_andes_atcspi200.c b/drivers/spi/spi_andes_atcspi200.c
index 09a1325221a..adc7b2e9157 100644
--- a/drivers/spi/spi_andes_atcspi200.c
+++ b/drivers/spi/spi_andes_atcspi200.c
@@ -12,6 +12,27 @@
 
 typedef void (*atcspi200_cfg_func_t)(void);
 
+#ifdef CONFIG_ANDES_SPI_DMA_MODE
+
+#define ANDES_SPI_DMA_ERROR_FLAG	0x01
+#define ANDES_SPI_DMA_RX_DONE_FLAG	0x02
+#define ANDES_SPI_DMA_TX_DONE_FLAG	0x04
+#define ANDES_SPI_DMA_DONE_FLAG		\
+	(ANDES_SPI_DMA_RX_DONE_FLAG | ANDES_SPI_DMA_TX_DONE_FLAG)
+
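+/*
+ * Per-direction DMA bookkeeping: the driver keeps one stream for TX
+ * and one for RX. chain_block[] holds the extra block descriptors
+ * used when the DMA controller supports chained (linked-list)
+ * transfers.
+ */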
+struct stream {
+	const struct device *dma_dev;
+	uint32_t channel;
+	uint32_t block_idx;
+	struct dma_config dma_cfg;
+	struct dma_block_config dma_blk_cfg;
+	struct dma_block_config chain_block[MAX_CHAIN_SIZE];
+	uint8_t priority;
+	bool src_addr_increment;
+	bool dst_addr_increment;
+};
+#endif
+
 struct spi_atcspi200_data {
 	struct spi_context ctx;
 	uint32_t tx_fifo_size;
@@ -19,6 +40,10 @@ struct spi_atcspi200_data {
 	int tx_cnt;
 	size_t chunk_len;
 	bool busy;
+#ifdef CONFIG_ANDES_SPI_DMA_MODE
+	struct stream dma_rx;
+	struct stream dma_tx;
+#endif
 };
 
 struct spi_atcspi200_cfg {
@@ -85,6 +110,7 @@ static int spi_config(const struct device *dev,
 static int spi_transfer(const struct device *dev)
 {
 	struct spi_atcspi200_data * const data = dev->data;
+	const struct spi_atcspi200_cfg * const cfg = dev->config;
 	struct spi_context *ctx = &data->ctx;
 	uint32_t data_len, tctrl, int_msk;
 
@@ -94,7 +120,7 @@ static int spi_transfer(const struct device *dev)
 		data_len = 0;
 	}
 
-	if (len > MAX_TRANSFER_CNT) {
+	if (data_len > MAX_TRANSFER_CNT) {
 		return -EINVAL;
 	}
 
@@ -164,6 +190,412 @@ static int configure(const struct device *dev,
 	return 0;
 }
 
+#ifdef CONFIG_ANDES_SPI_DMA_MODE
+
+static int spi_dma_tx_load(const struct device *dev);
+static int spi_dma_rx_load(const struct device *dev);
+
+static inline void spi_tx_dma_enable(const struct device *dev)
+{
+	const struct spi_atcspi200_cfg * const cfg = dev->config;
+
+	/* Enable TX DMA */
+	sys_set_bits(SPI_CTRL(cfg->base), CTRL_TX_DMA_EN_MSK);
+}
+
+static inline void spi_tx_dma_disable(const struct device *dev)
+{
+	const struct spi_atcspi200_cfg * const cfg = dev->config;
+
+	/* Disable TX DMA */
+	sys_clear_bits(SPI_CTRL(cfg->base), CTRL_TX_DMA_EN_MSK);
+}
+
+static inline void spi_rx_dma_enable(const struct device *dev)
+{
+	const struct spi_atcspi200_cfg * const cfg = dev->config;
+
+	/* Enable RX DMA */
+	sys_set_bits(SPI_CTRL(cfg->base), CTRL_RX_DMA_EN_MSK);
+}
+
+static inline void spi_rx_dma_disable(const struct device *dev)
+{
+	const struct spi_atcspi200_cfg * const cfg = dev->config;
+
+	/* Disable RX DMA */
+	sys_clear_bits(SPI_CTRL(cfg->base), CTRL_RX_DMA_EN_MSK);
+}
+
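+/*
+ * Prepare the TX and RX DMA block descriptors for the buffers
+ * currently queued in the SPI context. A direction is skipped when
+ * it has no data left to move.
+ */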
+static int spi_dma_move_buffers(const struct device *dev)
+{
+	struct spi_atcspi200_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+	int error;
+
+	data->dma_rx.dma_blk_cfg.next_block = NULL;
+	data->dma_tx.dma_blk_cfg.next_block = NULL;
+
+	if (spi_context_tx_on(ctx)) {
+		error = spi_dma_tx_load(dev);
+		if (error != 0) {
+			return error;
+		}
+	}
+
+	if (spi_context_rx_on(ctx)) {
+		error = spi_dma_rx_load(dev);
+		if (error != 0) {
+			return error;
+		}
+	}
+
+	return 0;
+}
+
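+/*
+ * Completion callbacks used when the DMA controller cannot chain
+ * blocks: each finished buffer triggers a reload of the next queued
+ * buffer until the SPI context runs out of them.
+ */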
+static inline void dma_rx_callback(const struct device *dev, void *user_data,
+				   uint32_t channel, int status)
+{
+	const struct device *spi_dev = (struct device *)user_data;
+	struct spi_atcspi200_data *data = spi_dev->data;
+	struct spi_context *ctx = &data->ctx;
+	int error;
+
+	dma_stop(data->dma_rx.dma_dev, data->dma_rx.channel);
+	spi_rx_dma_disable(spi_dev);
+
+	if (spi_context_rx_on(ctx)) {
+		if (spi_dma_rx_load(spi_dev) != 0) {
+			return;
+		}
+		spi_rx_dma_enable(spi_dev);
+		error = dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
+		__ASSERT(error == 0, "dma_start failed in rx callback");
+	}
+}
+
+static inline void dma_tx_callback(const struct device *dev, void *user_data,
+				   uint32_t channel, int status)
+{
+	const struct device *spi_dev = (struct device *)user_data;
+	struct spi_atcspi200_data *data = spi_dev->data;
+	struct spi_context *ctx = &data->ctx;
+	int error;
+
+	dma_stop(data->dma_tx.dma_dev, data->dma_tx.channel);
+	spi_tx_dma_disable(spi_dev);
+
+	if (spi_context_tx_on(ctx)) {
+		if (spi_dma_tx_load(spi_dev) != 0) {
+			return;
+		}
+		spi_tx_dma_enable(spi_dev);
+		error = dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
+		__ASSERT(error == 0, "dma_start failed in tx callback");
+	}
+}
+
+/*
+ * Dummy word sent as a NOP when the TX buffer is NULL, and used as a
+ * dummy sink when the RX buffer is NULL.
+ */
+static uint32_t dummy_rx_tx_buffer;
+
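+/*
+ * Configure the TX channel for the current TX buffer. When chaining
+ * is supported, all remaining buffers of the chunk are linked into a
+ * single block list; otherwise one block is programmed and the next
+ * buffer is reloaded from dma_tx_callback(). spi_dma_rx_load() below
+ * mirrors this for the RX direction.
+ */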
+static int spi_dma_tx_load(const struct device *dev)
+{
+	const struct spi_atcspi200_cfg * const cfg = dev->config;
+	struct spi_atcspi200_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+	int remain_len, ret, dfs;
+
+	/* Prepare the block for this TX DMA channel */
+	memset(&data->dma_tx.dma_blk_cfg, 0, sizeof(struct dma_block_config));
+
+	if (ctx->current_tx->len > data->chunk_len) {
+		data->dma_tx.dma_blk_cfg.block_size = data->chunk_len /
+					data->dma_tx.dma_cfg.dest_data_size;
+	} else {
+		data->dma_tx.dma_blk_cfg.block_size = ctx->current_tx->len /
+					data->dma_tx.dma_cfg.dest_data_size;
+	}
+
+	/* TX direction has memory as source and periph as dest. */
+	if (ctx->current_tx->buf == NULL) {
+		dummy_rx_tx_buffer = 0;
+		/* If the TX buffer is NULL, send NOP on the line. */
+		data->dma_tx.dma_blk_cfg.source_address = (uintptr_t)&dummy_rx_tx_buffer;
+		data->dma_tx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+	} else {
+		data->dma_tx.dma_blk_cfg.source_address = (uintptr_t)ctx->current_tx->buf;
+		if (data->dma_tx.src_addr_increment) {
+			data->dma_tx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
+		} else {
+			data->dma_tx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+		}
+	}
+
+	dfs = SPI_WORD_SIZE_GET(ctx->config->operation) >> 3;
+	remain_len = data->chunk_len - ctx->current_tx->len;
+	spi_context_update_tx(ctx, dfs, ctx->current_tx->len);
+
+	data->dma_tx.dma_blk_cfg.dest_address = (uint32_t)SPI_DATA(cfg->base);
+	/* FIFO mode is not used here. */
+	if (data->dma_tx.dst_addr_increment) {
+		data->dma_tx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
+	} else {
+		data->dma_tx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+	}
+
+	/* The direction is given by the DT. */
+	data->dma_tx.dma_cfg.head_block = &data->dma_tx.dma_blk_cfg;
+	data->dma_tx.dma_cfg.head_block->next_block = NULL;
+	/* Pass the SPI device as user data, as the callback comes from the DMA driver. */
+	data->dma_tx.dma_cfg.user_data = (void *)dev;
+
+	if (data->dma_tx.dma_cfg.source_chaining_en) {
+		data->dma_tx.dma_cfg.block_count = ctx->tx_count;
+		data->dma_tx.dma_cfg.dma_callback = NULL;
+		data->dma_tx.block_idx = 0;
+		struct dma_block_config *blk_cfg = &data->dma_tx.dma_blk_cfg;
+		const struct spi_buf *current_tx = ctx->current_tx;
+
+		while (remain_len > 0) {
+			struct dma_block_config *next_blk_cfg;
+
+			next_blk_cfg = &data->dma_tx.chain_block[data->dma_tx.block_idx];
+			data->dma_tx.block_idx += 1;
+
+			blk_cfg->next_block = next_blk_cfg;
+			current_tx = ctx->current_tx;
+
+			next_blk_cfg->block_size = current_tx->len /
+					data->dma_tx.dma_cfg.dest_data_size;
+
+			/* TX direction has memory as source and periph as dest. */
+			if (current_tx->buf == NULL) {
+				dummy_rx_tx_buffer = 0;
+				/* If the TX buffer is NULL, send NOP on the line. */
+				next_blk_cfg->source_address = (uintptr_t)&dummy_rx_tx_buffer;
+				next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+			} else {
+				next_blk_cfg->source_address = (uintptr_t)current_tx->buf;
+				if (data->dma_tx.src_addr_increment) {
+					next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
+				} else {
+					next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+				}
+			}
+
+			next_blk_cfg->dest_address = (uint32_t)SPI_DATA(cfg->base);
+			/* FIFO mode is not used here. */
+			if (data->dma_tx.dst_addr_increment) {
+				next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
+			} else {
+				next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+			}
+
+			blk_cfg = next_blk_cfg;
+			next_blk_cfg->next_block = NULL;
+			remain_len -= ctx->current_tx->len;
+			spi_context_update_tx(ctx, dfs, ctx->current_tx->len);
+		}
+	} else {
+		data->dma_tx.dma_blk_cfg.next_block = NULL;
+		data->dma_tx.dma_cfg.block_count = 1;
+		data->dma_tx.dma_cfg.dma_callback = dma_tx_callback;
+	}
+
+	/* Pass our client origin to the DMA: data->dma_tx.channel */
+	ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.channel,
+			 &data->dma_tx.dma_cfg);
+	/* The channel is the actual stream, numbered from 0. */
+	if (ret != 0) {
+		data->dma_tx.block_idx = 0;
+		data->dma_tx.dma_blk_cfg.next_block = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+static int spi_dma_rx_load(const struct device *dev)
+{
+	const struct spi_atcspi200_cfg * const cfg = dev->config;
+	struct spi_atcspi200_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+	int remain_len, ret, dfs;
+
+	/* Prepare the block for this RX DMA channel */
+	memset(&data->dma_rx.dma_blk_cfg, 0, sizeof(struct dma_block_config));
+
+	if (ctx->current_rx->len > data->chunk_len) {
+		data->dma_rx.dma_blk_cfg.block_size = data->chunk_len /
+					data->dma_rx.dma_cfg.dest_data_size;
+	} else {
+		data->dma_rx.dma_blk_cfg.block_size = ctx->current_rx->len /
+					data->dma_rx.dma_cfg.dest_data_size;
+	}
+
+	/* RX direction has periph as source and mem as dest. */
+	if (ctx->current_rx->buf == NULL) {
+		/* If the RX buffer is NULL, write the data to the dummy address. */
+		data->dma_rx.dma_blk_cfg.dest_address = (uintptr_t)&dummy_rx_tx_buffer;
+		data->dma_rx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+	} else {
+		data->dma_rx.dma_blk_cfg.dest_address = (uintptr_t)ctx->current_rx->buf;
+		if (data->dma_rx.dst_addr_increment) {
+			data->dma_rx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
+		} else {
+			data->dma_rx.dma_blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+		}
+	}
+
+	dfs = SPI_WORD_SIZE_GET(ctx->config->operation) >> 3;
+	remain_len = data->chunk_len - ctx->current_rx->len;
+	spi_context_update_rx(ctx, dfs, ctx->current_rx->len);
+
+	data->dma_rx.dma_blk_cfg.source_address = (uint32_t)SPI_DATA(cfg->base);
+
+	if (data->dma_rx.src_addr_increment) {
+		data->dma_rx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
+	} else {
+		data->dma_rx.dma_blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+	}
+
+	data->dma_rx.dma_cfg.head_block = &data->dma_rx.dma_blk_cfg;
+	data->dma_rx.dma_cfg.head_block->next_block = NULL;
+	data->dma_rx.dma_cfg.user_data = (void *)dev;
+
+	if (data->dma_rx.dma_cfg.source_chaining_en) {
+		data->dma_rx.dma_cfg.block_count = ctx->rx_count;
+		data->dma_rx.dma_cfg.dma_callback = NULL;
+		data->dma_rx.block_idx = 0;
+		struct dma_block_config *blk_cfg = &data->dma_rx.dma_blk_cfg;
+		const struct spi_buf *current_rx = ctx->current_rx;
+
+		while (remain_len > 0) {
+			struct dma_block_config *next_blk_cfg;
+
+			next_blk_cfg = &data->dma_rx.chain_block[data->dma_rx.block_idx];
+			data->dma_rx.block_idx += 1;
+
+			blk_cfg->next_block = next_blk_cfg;
+			current_rx = ctx->current_rx;
+
+			next_blk_cfg->block_size = current_rx->len /
+					data->dma_rx.dma_cfg.dest_data_size;
+
+			/* RX direction has periph as source and mem as dest. */
+			if (current_rx->buf == NULL) {
+				/* If the RX buffer is NULL, write the data to the dummy address. */
+				next_blk_cfg->dest_address = (uintptr_t)&dummy_rx_tx_buffer;
+				next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+			} else {
+				next_blk_cfg->dest_address = (uintptr_t)current_rx->buf;
+				if (data->dma_rx.dst_addr_increment) {
+					next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
+				} else {
+					next_blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+				}
+			}
+
+			next_blk_cfg->source_address = (uint32_t)SPI_DATA(cfg->base);
+
+			if (data->dma_rx.src_addr_increment) {
+				next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
+			} else {
+				next_blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
+			}
+
+			blk_cfg = next_blk_cfg;
+			next_blk_cfg->next_block = NULL;
+			remain_len -= ctx->current_rx->len;
+			spi_context_update_rx(ctx, dfs, ctx->current_rx->len);
+		}
+	} else {
+		data->dma_rx.dma_blk_cfg.next_block = NULL;
+		data->dma_rx.dma_cfg.block_count = 1;
+		data->dma_rx.dma_cfg.dma_callback = dma_rx_callback;
+	}
+
+	/* Pass our client origin to the DMA: data->dma_rx.channel */
+	ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.channel,
+			 &data->dma_rx.dma_cfg);
+	/* The channel is the actual stream, numbered from 0. */
+	if (ret != 0) {
+		data->dma_rx.block_idx = 0;
+		data->dma_rx.dma_blk_cfg.next_block = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
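+/*
+ * Start one DMA-driven chunk. The transfer control register is set
+ * to write-only, read-only, or write-read depending on which
+ * directions still have buffers queued, and only the needed DMA
+ * channels are started.
+ */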
+static int spi_transfer_dma(const struct device *dev)
+{
+	const struct spi_atcspi200_cfg * const cfg = dev->config;
+	struct spi_atcspi200_data * const data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+	uint32_t data_len, tctrl;
+	bool dma_rx_enable, dma_tx_enable;
+	int error = 0;
+
+	data_len = data->chunk_len - 1;
+	if (data_len > MAX_TRANSFER_CNT) {
+		return -EINVAL;
+	}
+
+	if (!spi_context_rx_on(ctx)) {
+		tctrl = (TRNS_MODE_WRITE_ONLY << TCTRL_TRNS_MODE_OFFSET) |
+			(data_len << TCTRL_WR_TCNT_OFFSET);
+		dma_rx_enable = false;
+		dma_tx_enable = true;
+	} else if (!spi_context_tx_on(ctx)) {
+		tctrl = (TRNS_MODE_READ_ONLY << TCTRL_TRNS_MODE_OFFSET) |
+			(data_len << TCTRL_RD_TCNT_OFFSET);
+		dma_rx_enable = true;
+		dma_tx_enable = false;
+	} else {
+		tctrl = (TRNS_MODE_WRITE_READ << TCTRL_TRNS_MODE_OFFSET) |
+			(data_len << TCTRL_WR_TCNT_OFFSET) |
+			(data_len << TCTRL_RD_TCNT_OFFSET);
+		dma_rx_enable = true;
+		dma_tx_enable = true;
+	}
+
+	sys_write32(tctrl, SPI_TCTRL(cfg->base));
+
+	/* Set sclk_div to zero */
+	sys_clear_bits(SPI_TIMIN(cfg->base), 0xff);
+
+	/* Enable the END interrupt */
+	sys_write32(IEN_END_MSK, SPI_INTEN(cfg->base));
+
+	/* Set up the DMA configuration */
+	error = spi_dma_move_buffers(dev);
+	if (error != 0) {
+		return error;
+	}
+
+	/* Start transferring */
+	sys_write32(0, SPI_CMD(cfg->base));
+
+	if (dma_rx_enable) {
+		spi_rx_dma_enable(dev);
+		error = dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
+		if (error != 0) {
+			return error;
+		}
+	}
+
+	if (dma_tx_enable) {
+		spi_tx_dma_enable(dev);
+		error = dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
+		if (error != 0) {
+			return error;
+		}
+	}
+
+	return 0;
+}
+#endif
+
 static int transceive(const struct device *dev,
 		      const struct spi_config *config,
 		      const struct spi_buf_set *tx_bufs,
@@ -187,7 +619,6 @@ static int transceive(const struct device *dev,
 	spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, dfs);
 	spi_context_cs_control(ctx, true);
 
-	transfer_next_chunk(dev);
 	sys_set_bits(SPI_CTRL(cfg->base), CTRL_TX_FIFO_RST_MSK);
 	sys_set_bits(SPI_CTRL(cfg->base), CTRL_RX_FIFO_RST_MSK);
 
@@ -204,11 +635,23 @@ static int transceive(const struct device *dev,
 
 		data->chunk_len = chunk_len;
 
-		error = spi_transfer(dev);
-		if (error != 0) {
-			return error;
-		}
+#ifdef CONFIG_ANDES_SPI_DMA_MODE
+		if ((data->dma_tx.dma_dev != NULL) && (data->dma_rx.dma_dev != NULL)) {
+			error = spi_transfer_dma(dev);
+			if (error != 0) {
+				return error;
+			}
+		} else {
+#endif /* CONFIG_ANDES_SPI_DMA_MODE */
+			error = spi_transfer(dev);
+			if (error != 0) {
+				return error;
+			}
+
+#ifdef CONFIG_ANDES_SPI_DMA_MODE
+		}
+#endif /* CONFIG_ANDES_SPI_DMA_MODE */
 
 		error = spi_context_wait_for_completion(ctx);
 		spi_context_cs_control(ctx, false);
 	}
@@ -266,6 +709,18 @@ int spi_atcspi200_init(const struct device *dev)
 
 	spi_context_unlock_unconditionally(&data->ctx);
 
+#ifdef CONFIG_ANDES_SPI_DMA_MODE
+	if (!data->dma_tx.dma_dev) {
+		LOG_ERR("DMA TX device not found");
+		return -ENODEV;
+	}
+
+	if (!data->dma_rx.dma_dev) {
+		LOG_ERR("DMA RX device not found");
+		return -ENODEV;
+	}
+#endif
+
 	/* Get the TX/RX FIFO size of this device */
 	data->tx_fifo_size = TX_FIFO_SIZE(cfg->base);
 	data->rx_fifo_size = RX_FIFO_SIZE(cfg->base);
@@ -385,6 +840,24 @@ static void spi_atcspi200_irq_handler(void *arg)
 		/* Disable all SPI interrupts */
 		sys_write32(0, SPI_INTEN(cfg->base));
 
+#ifdef CONFIG_ANDES_SPI_DMA_MODE
+		if ((data->dma_tx.dma_dev != NULL) && data->dma_tx.dma_cfg.source_chaining_en) {
+			spi_tx_dma_disable(dev);
+			dma_stop(data->dma_tx.dma_dev, data->dma_tx.channel);
+			data->dma_tx.block_idx = 0;
+			data->dma_tx.dma_blk_cfg.next_block = NULL;
+		}
+
+		if ((data->dma_rx.dma_dev != NULL) && data->dma_rx.dma_cfg.source_chaining_en) {
+			spi_rx_dma_disable(dev);
+			dma_stop(data->dma_rx.dma_dev, data->dma_rx.channel);
+			data->dma_rx.block_idx = 0;
+			data->dma_rx.dma_blk_cfg.next_block = NULL;
+		}
+#endif /* CONFIG_ANDES_SPI_DMA_MODE */
+
 		data->busy = false;
 
 		spi_context_complete(ctx, dev, error);
 	}
 }
 
@@ -392,7 +865,67 @@ static void spi_atcspi200_irq_handler(void *arg)
 	}
 }
 
+#ifdef CONFIG_ANDES_SPI_DMA_MODE
+
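+/*
+ * Layout of the devicetree channel-config cell decoded below:
+ * bits [1:0] channel direction, bit [2] peripheral address increment,
+ * bit [3] memory address increment, bits [6:4] log2 of the peripheral
+ * data size, bits [9:7] log2 of the memory data size, bit [10] priority.
+ */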
+#define ANDES_DMA_CONFIG_DIRECTION(config)		(FIELD_GET(GENMASK(1, 0), config))
+#define ANDES_DMA_CONFIG_PERIPHERAL_ADDR_INC(config)	(FIELD_GET(BIT(2), config))
+#define ANDES_DMA_CONFIG_MEMORY_ADDR_INC(config)	(FIELD_GET(BIT(3), config))
+#define ANDES_DMA_CONFIG_PERIPHERAL_DATA_SIZE(config)	(1 << (FIELD_GET(GENMASK(6, 4), config)))
+#define ANDES_DMA_CONFIG_MEMORY_DATA_SIZE(config)	(1 << (FIELD_GET(GENMASK(9, 7), config)))
+#define ANDES_DMA_CONFIG_PRIORITY(config)		(FIELD_GET(BIT(10), config))
+
+#define DMA_CHANNEL_CONFIG(id, dir)					\
+	DT_INST_DMAS_CELL_BY_NAME(id, dir, channel_config)
+
+#define SPI_DMA_CHANNEL_INIT(index, dir, dir_cap, src_dev, dest_dev)	\
+	.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(index, dir)), \
+	.channel =							\
+		DT_INST_DMAS_CELL_BY_NAME(index, dir, channel),		\
+	.dma_cfg = {							\
+		.dma_slot =						\
+			DT_INST_DMAS_CELL_BY_NAME(index, dir, slot),	\
+		.channel_direction = ANDES_DMA_CONFIG_DIRECTION(	\
+			DMA_CHANNEL_CONFIG(index, dir)),		\
+		.complete_callback_en = 0,				\
+		.error_callback_en = 0,					\
+		.source_data_size =					\
+			ANDES_DMA_CONFIG_##src_dev##_DATA_SIZE(		\
+				DMA_CHANNEL_CONFIG(index, dir)		\
+			),						\
+		.dest_data_size =					\
+			ANDES_DMA_CONFIG_##dest_dev##_DATA_SIZE(	\
+				DMA_CHANNEL_CONFIG(index, dir)		\
+			),						\
+		.source_burst_length = 1, /* SINGLE transfer */		\
+		.dest_burst_length = 1, /* SINGLE transfer */		\
+		.channel_priority = ANDES_DMA_CONFIG_PRIORITY(		\
+			DMA_CHANNEL_CONFIG(index, dir)			\
+		),							\
+		.source_chaining_en = DT_PROP(DT_INST_DMAS_CTLR_BY_NAME( \
+			index, dir), chain_transfer),			\
+		.dest_chaining_en = DT_PROP(DT_INST_DMAS_CTLR_BY_NAME(	\
+			index, dir), chain_transfer),			\
+	},								\
+	.src_addr_increment =						\
+		ANDES_DMA_CONFIG_##src_dev##_ADDR_INC(			\
+			DMA_CHANNEL_CONFIG(index, dir)			\
+		),							\
+	.dst_addr_increment =						\
+		ANDES_DMA_CONFIG_##dest_dev##_ADDR_INC(			\
+			DMA_CHANNEL_CONFIG(index, dir)			\
+		)
+
+#define SPI_DMA_CHANNEL(id, dir, DIR, src, dest)			\
+	.dma_##dir = {							\
+		COND_CODE_1(DT_INST_DMAS_HAS_NAME(id, dir),		\
+			    (SPI_DMA_CHANNEL_INIT(id, dir, DIR, src, dest)), \
+			    (NULL))					\
+	},
+
+#else
 #define SPI_DMA_CHANNEL(id, dir, DIR, src, dest)
+#endif
+
 #define SPI_BUSY_INIT .busy = false,
 
 #if (CONFIG_XIP)
@@ -402,8 +935,7 @@ static void spi_atcspi200_irq_handler(void *arg)
 #endif
 
 #define SPI_INIT(n)							\
-	static struct spi_atcspi200_data spi_atcspi200_dev_data_##n = {\
-		.self_dev = DEVICE_DT_INST_GET(n),			\
+	static struct spi_atcspi200_data spi_atcspi200_dev_data_##n = { \
 		SPI_CONTEXT_INIT_LOCK(spi_atcspi200_dev_data_##n, ctx),	\
 		SPI_CONTEXT_INIT_SYNC(spi_atcspi200_dev_data_##n, ctx),	\
 		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)	\
diff --git a/drivers/spi/spi_andes_atcspi200.h b/drivers/spi/spi_andes_atcspi200.h
index 3eb9a53202b..e498105f3e5 100644
--- a/drivers/spi/spi_andes_atcspi200.h
+++ b/drivers/spi/spi_andes_atcspi200.h
@@ -11,6 +11,10 @@ LOG_MODULE_REGISTER(spi_atcspi200);
 #include
 #include
 
+#ifdef CONFIG_ANDES_SPI_DMA_MODE
+#include <zephyr/drivers/dma.h>
+#endif
+
 #define REG_TFMAT	0x10
 #define REG_TCTRL	0x20
 #define REG_CMD		0x24
@@ -22,16 +26,16 @@ LOG_MODULE_REGISTER(spi_atcspi200);
 #define REG_TIMIN	0x40
 #define REG_CONFIG	0x7c
 
-#define SPI_TFMAT(base)		(base + 0x10)
-#define SPI_TCTRL(base)		(base + 0x20)
-#define SPI_CMD(base)		(base + 0x24)
-#define SPI_DATA(base)		(base + 0x2c)
-#define SPI_CTRL(base)		(base + 0x30)
-#define SPI_STAT(base)		(base + 0x34)
-#define SPI_INTEN(base)		(base + 0x38)
-#define SPI_INTST(base)		(base + 0x3c)
-#define SPI_TIMIN(base)		(base + 0x40)
-#define SPI_CONFIG(base)	(base + 0x7c)
+#define SPI_TFMAT(base)		(base + REG_TFMAT)
+#define SPI_TCTRL(base)		(base + REG_TCTRL)
+#define SPI_CMD(base)		(base + REG_CMD)
+#define SPI_DATA(base)		(base + REG_DATA)
+#define SPI_CTRL(base)		(base + REG_CTRL)
+#define SPI_STAT(base)		(base + REG_STAT)
+#define SPI_INTEN(base)		(base + REG_INTEN)
+#define SPI_INTST(base)		(base + REG_INTST)
+#define SPI_TIMIN(base)		(base + REG_TIMIN)
+#define SPI_CONFIG(base)	(base + REG_CONFIG)
 
 /* Field mask of SPI transfer format register */
 #define TFMAT_DATA_LEN_OFFSET	(8)
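
DMA mode is transparent to callers of the Zephyr SPI API: when the SPI node's devicetree supplies both DMA channels (looked up by name through DT_INST_DMAS_HAS_NAME() in the macros above), transceive() routes each chunk through spi_transfer_dma(); otherwise it falls back to the FIFO interrupt path. A minimal caller sketch for reference; the spi0 node label, clock frequency, and data below are illustrative assumptions, not part of this patch:

#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/spi.h>

/* Illustrative only: the node label and settings depend on the board. */
#define SPI_NODE DT_NODELABEL(spi0)

static const struct spi_config spi_cfg = {
	.frequency = 1000000U,
	.operation = SPI_OP_MODE_MASTER | SPI_WORD_SET(8) | SPI_TRANSFER_MSB,
};

int sample_transfer(void)
{
	const struct device *spi_dev = DEVICE_DT_GET(SPI_NODE);
	uint8_t tx_data[4] = { 0x9f, 0x00, 0x00, 0x00 };
	uint8_t rx_data[4];
	const struct spi_buf tx_buf = { .buf = tx_data, .len = sizeof(tx_data) };
	const struct spi_buf rx_buf = { .buf = rx_data, .len = sizeof(rx_data) };
	const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
	const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 };

	if (!device_is_ready(spi_dev)) {
		return -ENODEV;
	}

	/* The same call is used whether the driver takes the DMA or FIFO path. */
	return spi_transceive(spi_dev, &spi_cfg, &tx, &rx);
}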