spi_rtio: fix transactions for default handler

This patch fixes transactions whose op items were not performed within a
single SPI transfer. This matters for Write + Read commands, which depend
on CS being kept asserted until the end of the transaction; otherwise the
context is lost. A similar fix was applied to i2c_rtio_default in #79890.
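
For context, such a Write + Read register access is typically submitted as two
chained SQEs. The sketch below illustrates this; the "sensor0" node label and the
"spi_iodev" / "read_reg" names are made up for the example, while the RTIO helpers
(rtio_sqe_prep_tiny_write, rtio_sqe_prep_read, RTIO_SQE_TRANSACTION) are the
standard ones:

#include <errno.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/rtio/rtio.h>

/* Hypothetical example: read a device register as one chained RTIO
 * transaction (write the register address, then read the value, with
 * CS kept asserted across both ops).
 */
RTIO_DEFINE(r, 2, 2);
SPI_DT_IODEV_DEFINE(spi_iodev, DT_NODELABEL(sensor0),
		    SPI_OP_MODE_MASTER | SPI_WORD_SET(8), 0);

static int read_reg(uint8_t reg_addr, uint8_t *rx_data, uint32_t rx_len)
{
	struct rtio_sqe *wr = rtio_sqe_acquire(&r);
	struct rtio_sqe *rd = rtio_sqe_acquire(&r);
	struct rtio_cqe *cqe;
	int err;

	if (wr == NULL || rd == NULL) {
		return -ENOMEM;
	}

	/* Write the register address... */
	rtio_sqe_prep_tiny_write(wr, &spi_iodev, RTIO_PRIO_NORM, &reg_addr, 1, NULL);
	/* ...and chain the read so both ops run under one CS assertion */
	wr->flags |= RTIO_SQE_TRANSACTION;
	rtio_sqe_prep_read(rd, &spi_iodev, RTIO_PRIO_NORM, rx_data, rx_len, NULL);

	err = rtio_submit(&r, 2);

	/* Collect the two completions */
	while ((cqe = rtio_cqe_consume(&r)) != NULL) {
		err = (err == 0 && cqe->result < 0) ? cqe->result : err;
		rtio_cqe_release(&r, cqe);
	}

	return err;
}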

Signed-off-by: Luis Ubieda <luisf@croxel.com>
Luis Ubieda 2025-02-17 14:14:42 -05:00 committed by Benjamin Cabé
parent 15c5541324
commit b5ca5b7b77
2 changed files with 95 additions and 40 deletions

drivers/spi/Kconfig

@@ -37,6 +37,24 @@ config SPI_RTIO
	  This option enables the RTIO API calls. RTIO support is
	  experimental as the API itself is unstable.

if SPI_RTIO

config SPI_RTIO_FALLBACK_MSGS
	int "Number of available spi_buf structs for the default handler to use"
	default 4
	help
	  When RTIO is used with a driver that does not yet implement the submit API
	  natively, the submissions are converted back to struct spi_buf values that
	  are handed to spi_transceive. This requires a number of msgs to be available
	  on the stack to convert the submissions into; MISRA rules dictate that this
	  number must be known in advance.

	  In all likelihood 4 will work for everyone, but if you are using RTIO with a
	  driver that does not implement submit natively and you hit an error about not
	  having enough SPI msgs, this is the Kconfig option to adjust.

endif # SPI_RTIO

config SPI_SLAVE
	bool "Slave support [EXPERIMENTAL]"
	select EXPERIMENTAL
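
For reference, an application that hits the limit above (a transaction with more
than four submissions handled by the default handler) can raise it from its own
configuration. A hypothetical prj.conf fragment:

# Hypothetical prj.conf fragment: allow up to 8 spi_buf entries in the
# default (fallback) RTIO handler.
CONFIG_SPI_RTIO=y
CONFIG_SPI_RTIO_FALLBACK_MSGS=8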

drivers/spi/spi_rtio.c

@@ -22,6 +22,7 @@ static void spi_rtio_iodev_default_submit_sync(struct rtio_iodev_sqe *iodev_sqe)
{
	struct spi_dt_spec *dt_spec = iodev_sqe->sqe.iodev->data;
	const struct device *dev = dt_spec->bus;
	uint8_t num_msgs = 0;
	int err = 0;

	LOG_DBG("Sync RTIO work item for: %p", (void *)dev);
@@ -33,67 +34,103 @@ static void spi_rtio_iodev_default_submit_sync(struct rtio_iodev_sqe *iodev_sqe)
	struct rtio_iodev_sqe *txn_head = iodev_sqe;
	struct rtio_iodev_sqe *txn_curr = iodev_sqe;

	/* We allocate the spi_buf's on the stack, to do so
	 * the count of messages needs to be determined to
	 * ensure we don't go over the statically sized array.
	 */
	do {
		switch (txn_curr->sqe.op) {
		case RTIO_OP_RX:
		case RTIO_OP_TX:
		case RTIO_OP_TINY_TX:
		case RTIO_OP_TXRX:
			num_msgs++;
			break;
		default:
			LOG_ERR("Invalid op code %d for submission %p", txn_curr->sqe.op,
				(void *)&txn_curr->sqe);
			err = -EIO;
			break;
		}
		txn_curr = rtio_txn_next(txn_curr);
	} while (err == 0 && txn_curr != NULL);

	if (err != 0) {
		rtio_iodev_sqe_err(txn_head, err);
		return;
	}

	/* Allocate msgs on the stack, MISRA doesn't like VLAs so we need a statically
	 * sized array here. It's pretty unlikely we have more than 4 spi messages
	 * in a transaction as we typically would only have 2, one to write a
	 * register address, and another to read/write the register into an array
	 */
	if (num_msgs > CONFIG_SPI_RTIO_FALLBACK_MSGS) {
		LOG_ERR("At most CONFIG_SPI_RTIO_FALLBACK_MSGS"
			" submissions in a transaction are"
			" allowed in the default handler");
		rtio_iodev_sqe_err(txn_head, -ENOMEM);
		return;
	}

	struct spi_buf tx_bufs[CONFIG_SPI_RTIO_FALLBACK_MSGS];
	struct spi_buf rx_bufs[CONFIG_SPI_RTIO_FALLBACK_MSGS];
	struct spi_buf_set tx_buf_set = {
		.buffers = tx_bufs,
		.count = num_msgs,
	};
	struct spi_buf_set rx_buf_set = {
		.buffers = rx_bufs,
		.count = num_msgs,
	};
	txn_curr = txn_head;

	for (size_t i = 0 ; i < num_msgs ; i++) {
		struct rtio_sqe *sqe = &txn_curr->sqe;

		LOG_DBG("Preparing transfer: %p", txn_curr);

		switch (sqe->op) {
		case RTIO_OP_RX:
			rx_bufs[i].buf = sqe->rx.buf;
			rx_bufs[i].len = sqe->rx.buf_len;
			tx_bufs[i].buf = NULL;
			tx_bufs[i].len = sqe->rx.buf_len;
			break;
		case RTIO_OP_TX:
			rx_bufs[i].buf = NULL;
			rx_bufs[i].len = sqe->tx.buf_len;
			tx_bufs[i].buf = (uint8_t *)sqe->tx.buf;
			tx_bufs[i].len = sqe->tx.buf_len;
			break;
		case RTIO_OP_TINY_TX:
			rx_bufs[i].buf = NULL;
			rx_bufs[i].len = sqe->tiny_tx.buf_len;
			tx_bufs[i].buf = (uint8_t *)sqe->tiny_tx.buf;
			tx_bufs[i].len = sqe->tiny_tx.buf_len;
			break;
		case RTIO_OP_TXRX:
			rx_bufs[i].buf = sqe->txrx.rx_buf;
			rx_bufs[i].len = sqe->txrx.buf_len;
			tx_bufs[i].buf = (uint8_t *)sqe->txrx.tx_buf;
			tx_bufs[i].len = sqe->txrx.buf_len;
			break;
		default:
			LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe);
			err = -EIO;
			break;
		}

		txn_curr = rtio_txn_next(txn_curr);
	}

	if (err == 0) {
		__ASSERT_NO_MSG(num_msgs > 0);

		err = spi_transceive_dt(dt_spec, &tx_buf_set, &rx_buf_set);
	}

	if (err != 0) {
		LOG_ERR("Transfer failed: %d", err);
		rtio_iodev_sqe_err(txn_head, err);
	} else {
		LOG_DBG("Transfer OK: %d", err);
		rtio_iodev_sqe_ok(txn_head, 0);
	}
}
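
With this change, the chained submissions are collapsed into a single
spi_transceive_dt() call over spi_buf arrays, which is what keeps CS asserted for
the whole transaction. Conceptually, for the Write + Read case the fallback now
issues the equivalent of the following sketch (the "read_reg_blocking", "reg_addr"
and "rx_data" names are illustrative only):

#include <zephyr/drivers/spi.h>

/* Conceptual equivalent of what the fallback handler now issues for a
 * write-then-read transaction.
 */
static int read_reg_blocking(const struct spi_dt_spec *dt_spec,
			     uint8_t reg_addr, uint8_t *rx_data, size_t rx_len)
{
	const struct spi_buf tx_bufs[] = {
		{ .buf = &reg_addr, .len = 1 },    /* from the TINY_TX submission */
		{ .buf = NULL, .len = rx_len },    /* RX phase: nothing clocked out */
	};
	const struct spi_buf rx_bufs[] = {
		{ .buf = NULL, .len = 1 },         /* TX phase: nothing captured */
		{ .buf = rx_data, .len = rx_len }, /* from the RX submission */
	};
	const struct spi_buf_set tx_set = { .buffers = tx_bufs, .count = 2 };
	const struct spi_buf_set rx_set = { .buffers = rx_bufs, .count = 2 };

	/* One call, one CS assertion spanning both buffers */
	return spi_transceive_dt(dt_spec, &tx_set, &rx_set);
}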