shell: backends: uart: Rework and add support for async API

Rework the UART backend to use a clear single-instance approach.
Previously the implementation was a bit messy, with some parts
suggesting multi-instance support and others a single instance. The
backend has been around for years and a multi-instance requirement
never materialized.

Add support for the UART asynchronous API, which is more efficient in
terms of power consumption and performance. The asynchronous mode uses
the uart_async_rx helper module to handle data received asynchronously.

Signed-off-by: Krzysztof Chruściński <krzysztof.chruscinski@nordicsemi.no>
Author: Krzysztof Chruściński, 2023-10-16 07:49:32 +02:00
Committed by: Carles Cufí
Parent: 8ba4249ef7
Commit: 5e4e944cc0
3 changed files with 400 additions and 171 deletions
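
The following is an editor's sketch, not part of the commit: a condensed view of the receive flow the new asynchronous mode follows, based on the uart_async_rx helper calls visible in the diff below. Buffer-exhaustion handling, TX and the MCUmgr path are omitted; the buffer count, size and timeout values are illustrative only.

#include <string.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/drivers/serial/uart_async_rx.h>

static struct uart_async_rx async_rx;
/* 4 buffers of 16 bytes each, plus the helper's per-buffer overhead. */
static uint8_t async_rx_buf[4 * (16 + UART_ASYNC_RX_BUF_OVERHEAD)];

static void uart_cb(const struct device *dev, struct uart_event *evt, void *user_data)
{
	ARG_UNUSED(user_data);

	switch (evt->type) {
	case UART_RX_RDY:
		/* Record the newly received bytes; a reader thread claims them later. */
		uart_async_rx_on_rdy(&async_rx, evt->data.rx.buf, evt->data.rx.len);
		break;
	case UART_RX_BUF_REQUEST: {
		/* Hand the driver the next helper-managed buffer, if one is free. */
		uint8_t *buf = uart_async_rx_buf_req(&async_rx);

		if (buf) {
			(void)uart_rx_buf_rsp(dev, buf, uart_async_rx_get_buf_len(&async_rx));
		}
		break;
	}
	case UART_RX_BUF_RELEASED:
		/* Driver is done with this buffer; return it to the helper. */
		uart_async_rx_on_buf_rel(&async_rx, evt->data.rx_buf.buf);
		break;
	default:
		break;
	}
}

static int rx_init(const struct device *dev)
{
	static const struct uart_async_rx_config cfg = {
		.buffer = async_rx_buf,
		.length = sizeof(async_rx_buf),
		.buf_cnt = 4,
	};
	int err;

	err = uart_async_rx_init(&async_rx, &cfg);
	if (err) {
		return err;
	}

	err = uart_callback_set(dev, uart_cb, NULL);
	if (err) {
		return err;
	}

	/* Start reception with the first helper buffer and a 10 ms (10000 us) timeout. */
	return uart_rx_enable(dev, uart_async_rx_buf_req(&async_rx),
			      uart_async_rx_get_buf_len(&async_rx), 10000);
}

static size_t read_rx(void *data, size_t length)
{
	uint8_t *buf;
	/* Claim contiguous received data, copy it out, then release it to the pool. */
	size_t len = uart_async_rx_data_claim(&async_rx, &buf, length);

	memcpy(data, buf, len);
	uart_async_rx_data_consume(&async_rx, len);

	return len;
}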

View File

@@ -8,81 +8,15 @@
#define SHELL_UART_H__
#include <zephyr/shell/shell.h>
#include <zephyr/sys/ring_buffer.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/mgmt/mcumgr/transport/smp_shell.h>
#ifdef __cplusplus
extern "C" {
#endif
extern const struct shell_transport_api shell_uart_transport_api;
/** @brief Shell UART transport instance control block (RW data). */
struct shell_uart_ctrl_blk {
const struct device *dev;
shell_transport_handler_t handler;
void *context;
atomic_t tx_busy;
bool blocking_tx;
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
struct smp_shell_data smp;
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
};
#ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
#define Z_UART_SHELL_TX_RINGBUF_DECLARE(_name, _size) \
RING_BUF_DECLARE(_name##_tx_ringbuf, _size)
#define Z_UART_SHELL_RX_TIMER_DECLARE(_name) /* Empty */
#define Z_UART_SHELL_TX_RINGBUF_PTR(_name) (&_name##_tx_ringbuf)
#define Z_UART_SHELL_RX_TIMER_PTR(_name) NULL
#define Z_UART_SHELL_DTR_TIMER_DECLARE(_name) static struct k_timer _name##_dtr_timer
#define Z_UART_SHELL_DTR_TIMER_PTR(_name) (&_name##_dtr_timer)
#else /* CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN */
#define Z_UART_SHELL_TX_RINGBUF_DECLARE(_name, _size) /* Empty */
#define Z_UART_SHELL_RX_TIMER_DECLARE(_name) static struct k_timer _name##_timer
#define Z_UART_SHELL_TX_RINGBUF_PTR(_name) NULL
#define Z_UART_SHELL_RX_TIMER_PTR(_name) (&_name##_timer)
#define Z_UART_SHELL_DTR_TIMER_DECLARE(_name) /* Empty */
#define Z_UART_SHELL_DTR_TIMER_PTR(_name) NULL
#endif /* CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN */
/** @brief Shell UART transport instance structure. */
struct shell_uart {
struct shell_uart_ctrl_blk *ctrl_blk;
struct k_timer *timer;
struct k_timer *dtr_timer;
struct ring_buf *tx_ringbuf;
struct ring_buf *rx_ringbuf;
};
/** @brief Macro for creating shell UART transport instance. */
#define SHELL_UART_DEFINE(_name, _tx_ringbuf_size, _rx_ringbuf_size) \
static struct shell_uart_ctrl_blk _name##_ctrl_blk; \
Z_UART_SHELL_RX_TIMER_DECLARE(_name); \
Z_UART_SHELL_DTR_TIMER_DECLARE(_name); \
Z_UART_SHELL_TX_RINGBUF_DECLARE(_name, _tx_ringbuf_size); \
RING_BUF_DECLARE(_name##_rx_ringbuf, _rx_ringbuf_size); \
static const struct shell_uart _name##_shell_uart = { \
.ctrl_blk = &_name##_ctrl_blk, \
.timer = Z_UART_SHELL_RX_TIMER_PTR(_name), \
.dtr_timer = Z_UART_SHELL_DTR_TIMER_PTR(_name), \
.tx_ringbuf = Z_UART_SHELL_TX_RINGBUF_PTR(_name), \
.rx_ringbuf = &_name##_rx_ringbuf, \
}; \
struct shell_transport _name = { \
.api = &shell_uart_transport_api, \
.ctx = (struct shell_uart *)&_name##_shell_uart \
}
/**
* @brief This function provides pointer to shell uart backend instance.
* @brief This function provides pointer to the shell UART backend instance.
*
* Function returns pointer to the shell uart instance. This instance can be
* Function returns pointer to the shell UART instance. This instance can be
* next used with shell_execute_cmd function in order to test commands behavior.
*
* @returns Pointer to the shell instance.
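
For reference, a minimal usage sketch of the accessor this doc comment describes; it assumes the declaration following the comment is the shell_backend_uart_get_ptr() function (the name is not visible in this hunk):

#include <zephyr/shell/shell.h>
#include <zephyr/shell/shell_uart.h>

/* Execute a command against the UART backend instance, e.g. from a test. */
void uart_backend_run_help(void)
{
	/* shell_backend_uart_get_ptr() is assumed to be the accessor declared below. */
	const struct shell *sh = shell_backend_uart_get_ptr();

	shell_execute_cmd(sh, "help");
}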

View File

@@ -42,23 +42,44 @@ config SHELL_PROMPT_UART
Displayed prompt name for UART backend. If prompt is set, the shell will
send two newlines during initialization.
# Internal config to enable UART interrupts if supported.
config SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
bool "Interrupt driven"
default y
depends on SERIAL_SUPPORT_INTERRUPT
choice SHELL_BACKEND_SERIAL_API
prompt "Mode"
default SHELL_BACKEND_SERIAL_API_ASYNC if SERIAL_SUPPORT_ASYNC
default SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN if SERIAL_SUPPORT_INTERRUPT
default SHELL_BACKEND_SERIAL_API_POLLING
config SHELL_BACKEND_SERIAL_API_POLLING
prompt "Polling"
config SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN
bool "Interrupt driven"
depends on SERIAL_SUPPORT_INTERRUPT
select UART_INTERRUPT_DRIVEN
config SHELL_BACKEND_SERIAL_API_ASYNC
bool "Asynchronous"
depends on SERIAL_SUPPORT_ASYNC
select UART_ASYNC_RX_HELPER
select UART_ASYNC_API
endchoice
config SHELL_BACKEND_SERIAL_TX_RING_BUFFER_SIZE
int "Set TX ring buffer size"
default 8
depends on SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
depends on SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN
help
If the UART is using DMA transfers, then increasing the ring buffer size
increases the transfer length and reduces the number of interrupts.
config SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE
int "Set RX ring buffer size"
depends on SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN || SHELL_BACKEND_SERIAL_API_POLLING
default 256 if MCUMGR_TRANSPORT_SHELL
default 64
help
@@ -68,16 +89,45 @@ config SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE
escape sequences). However, if bulk data is transferred it may be
required to increase it.
if SHELL_BACKEND_SERIAL_API_ASYNC
config SHELL_BACKEND_SERIAL_ASYNC_RX_TIMEOUT
int "RX inactivity timeout (in microseconds)"
default 10000
help
Inactivity timeout after which received data is reported.
config SHELL_BACKEND_SERIAL_ASYNC_RX_BUFFER_COUNT
int "Number of RX buffers"
default 4
range 2 64
help
Number of RX buffers. Some UART driver implementations switch buffers
on timeout, so this number should be big enough to allow incoming data
to be handled in time. 4 should be enough for almost all cases, unless
CPU load is high and shell thread latency is very high.
config SHELL_BACKEND_SERIAL_ASYNC_RX_BUFFER_SIZE
int "Size of the RX buffer"
default 16
help
Size of a single RX buffer. Together with buffer count it defines the
space that can hold RX data. It may be decreased if shell input is
slow and may need to be increased if long messages are pasted directly
to the shell prompt.
endif # SHELL_BACKEND_SERIAL_API_ASYNC
config SHELL_BACKEND_SERIAL_RX_POLL_PERIOD
int "RX polling period (in milliseconds)"
default 10
depends on !SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
depends on SHELL_BACKEND_SERIAL_API_POLLING
help
Determines how often the UART is polled for RX bytes.
config SHELL_BACKEND_SERIAL_CHECK_DTR
bool "Check DTR signal before TX"
depends on SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
depends on SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN
depends on UART_LINE_CTRL
help
Check DTR signal before TX.
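
A hypothetical application snippet (prj.conf style) exercising the options added above, assuming the existing SHELL and SHELL_BACKEND_SERIAL symbols. The async mode is already the default when SERIAL_SUPPORT_ASYNC is set, so selecting it explicitly is optional, and the values shown simply restate the defaults:

CONFIG_SHELL=y
CONFIG_SHELL_BACKEND_SERIAL=y
CONFIG_SHELL_BACKEND_SERIAL_API_ASYNC=y
CONFIG_SHELL_BACKEND_SERIAL_ASYNC_RX_TIMEOUT=10000
CONFIG_SHELL_BACKEND_SERIAL_ASYNC_RX_BUFFER_COUNT=4
CONFIG_SHELL_BACKEND_SERIAL_ASYNC_RX_BUFFER_SIZE=16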

View File

@@ -4,8 +4,12 @@
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/sys/ring_buffer.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/mgmt/mcumgr/transport/smp_shell.h>
#include <zephyr/shell/shell_uart.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/drivers/serial/uart_async_rx.h>
#include <zephyr/init.h>
#include <zephyr/logging/log.h>
#include <zephyr/net/buf.h>
@@ -19,34 +23,122 @@ LOG_MODULE_REGISTER(shell_uart);
#define RX_POLL_PERIOD K_NO_WAIT
#endif
#ifndef CONFIG_SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE
#define CONFIG_SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE 0
#endif
#ifndef CONFIG_SHELL_BACKEND_SERIAL_TX_RING_BUFFER_SIZE
#define CONFIG_SHELL_BACKEND_SERIAL_TX_RING_BUFFER_SIZE 0
#endif
#ifndef CONFIG_SHELL_BACKEND_SERIAL_ASYNC_RX_BUFFER_COUNT
#define CONFIG_SHELL_BACKEND_SERIAL_ASYNC_RX_BUFFER_COUNT 0
#endif
#ifndef CONFIG_SHELL_BACKEND_SERIAL_ASYNC_RX_BUFFER_SIZE
#define CONFIG_SHELL_BACKEND_SERIAL_ASYNC_RX_BUFFER_SIZE 0
#endif
#define ASYNC_RX_BUF_SIZE (CONFIG_SHELL_BACKEND_SERIAL_ASYNC_RX_BUFFER_COUNT * \
(CONFIG_SHELL_BACKEND_SERIAL_ASYNC_RX_BUFFER_SIZE + \
UART_ASYNC_RX_BUF_OVERHEAD))
struct shell_uart_common {
const struct device *dev;
shell_transport_handler_t handler;
void *context;
bool blocking_tx;
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
struct smp_shell_data smp;
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
};
struct shell_uart_int_driven {
struct shell_uart_common common;
struct ring_buf tx_ringbuf;
struct ring_buf rx_ringbuf;
struct k_timer dtr_timer;
atomic_t tx_busy;
};
struct shell_uart_async {
struct shell_uart_common common;
struct k_sem tx_sem;
struct uart_async_rx async_rx;
atomic_t pending_rx_req;
};
struct shell_uart_polling {
struct shell_uart_common common;
struct ring_buf rx_ringbuf;
struct k_timer rx_timer;
};
static uint8_t __noinit async_rx_data[ASYNC_RX_BUF_SIZE];
static uint8_t __noinit rx_ringbuf_data[CONFIG_SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE];
static uint8_t __noinit tx_ringbuf_data[CONFIG_SHELL_BACKEND_SERIAL_TX_RING_BUFFER_SIZE];
static struct shell_uart_int_driven shell_uart_i;
static struct shell_uart_async shell_uart_a;
static struct shell_uart_polling shell_uart_p;
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
NET_BUF_POOL_DEFINE(smp_shell_rx_pool, CONFIG_MCUMGR_TRANSPORT_SHELL_RX_BUF_COUNT,
SMP_SHELL_RX_BUF_SIZE, 0, NULL);
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
SHELL_UART_DEFINE(shell_transport_uart,
CONFIG_SHELL_BACKEND_SERIAL_TX_RING_BUFFER_SIZE,
CONFIG_SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE);
SHELL_DEFINE(shell_uart, CONFIG_SHELL_PROMPT_UART, &shell_transport_uart,
CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_SIZE,
CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_TIMEOUT,
SHELL_FLAG_OLF_CRLF);
static void async_callback(const struct device *dev, struct uart_event *evt, void *user_data)
{
struct shell_uart_async *sh_uart = (struct shell_uart_async *)user_data;
#ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
static void uart_rx_handle(const struct device *dev,
const struct shell_uart *sh_uart)
switch (evt->type) {
case UART_TX_DONE:
k_sem_give(&sh_uart->tx_sem);
break;
case UART_RX_RDY:
uart_async_rx_on_rdy(&sh_uart->async_rx, evt->data.rx.buf, evt->data.rx.len);
sh_uart->common.handler(SHELL_TRANSPORT_EVT_RX_RDY, sh_uart->common.context);
break;
case UART_RX_BUF_REQUEST:
{
uint8_t *buf = uart_async_rx_buf_req(&sh_uart->async_rx);
size_t len = uart_async_rx_get_buf_len(&sh_uart->async_rx);
if (buf) {
int err = uart_rx_buf_rsp(dev, buf, len);
if (err < 0) {
uart_async_rx_on_buf_rel(&sh_uart->async_rx, buf);
}
} else {
atomic_inc(&sh_uart->pending_rx_req);
}
break;
}
case UART_RX_BUF_RELEASED:
uart_async_rx_on_buf_rel(&sh_uart->async_rx, evt->data.rx_buf.buf);
break;
case UART_RX_DISABLED:
break;
default:
break;
};
}
static void uart_rx_handle(const struct device *dev, struct shell_uart_int_driven *sh_uart)
{
uint8_t *data;
uint32_t len;
uint32_t rd_len;
bool new_data = false;
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
struct smp_shell_data *const smp = &sh_uart->ctrl_blk->smp;
struct smp_shell_data *const smp = &sh_uart->common.smp;
#endif
do {
len = ring_buf_put_claim(sh_uart->rx_ringbuf, &data,
sh_uart->rx_ringbuf->size);
len = ring_buf_put_claim(&sh_uart->rx_ringbuf, &data,
sh_uart->rx_ringbuf.size);
if (len > 0) {
rd_len = uart_fifo_read(dev, data, len);
@@ -72,8 +164,7 @@ static void uart_rx_handle(const struct device *dev,
}
}
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
int err = ring_buf_put_finish(sh_uart->rx_ringbuf,
rd_len);
int err = ring_buf_put_finish(&sh_uart->rx_ringbuf, rd_len);
(void)err;
__ASSERT_NO_MSG(err == 0);
} else {
@@ -87,8 +178,7 @@ static void uart_rx_handle(const struct device *dev,
/* If successful in getting byte from the fifo, try
* feeding it to SMP as a part of mcumgr frame.
*/
if ((rd_len != 0) &&
(smp_shell_rx_bytes(smp, &dummy, 1) == 1)) {
if ((rd_len != 0) && (smp_shell_rx_bytes(smp, &dummy, 1) == 1)) {
new_data = true;
}
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
@@ -96,8 +186,7 @@ static void uart_rx_handle(const struct device *dev,
} while (rd_len && (rd_len == len));
if (new_data) {
sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_RX_RDY,
sh_uart->ctrl_blk->context);
sh_uart->common.handler(SHELL_TRANSPORT_EVT_RX_RDY, sh_uart->common.context);
}
}
@@ -123,19 +212,18 @@ static bool uart_dtr_check(const struct device *dev)
static void dtr_timer_handler(struct k_timer *timer)
{
const struct shell_uart *sh_uart = k_timer_user_data_get(timer);
struct shell_uart_int_driven *sh_uart = k_timer_user_data_get(timer);
if (!uart_dtr_check(sh_uart->ctrl_blk->dev)) {
if (!uart_dtr_check(sh_uart->common.dev)) {
return;
}
/* DTR is active, stop timer and start TX */
k_timer_stop(timer);
uart_irq_tx_enable(sh_uart->ctrl_blk->dev);
uart_irq_tx_enable(sh_uart->common.dev);
}
static void uart_tx_handle(const struct device *dev,
const struct shell_uart *sh_uart)
static void uart_tx_handle(const struct device *dev, struct shell_uart_int_driven *sh_uart)
{
uint32_t len;
const uint8_t *data;
@@ -143,31 +231,30 @@ static void uart_tx_handle(const struct device *dev,
if (!uart_dtr_check(dev)) {
/* Wait for DTR signal before sending anything to output. */
uart_irq_tx_disable(dev);
k_timer_start(sh_uart->dtr_timer, K_MSEC(100), K_MSEC(100));
k_timer_start(&sh_uart->dtr_timer, K_MSEC(100), K_MSEC(100));
return;
}
len = ring_buf_get_claim(sh_uart->tx_ringbuf, (uint8_t **)&data,
sh_uart->tx_ringbuf->size);
len = ring_buf_get_claim(&sh_uart->tx_ringbuf, (uint8_t **)&data,
sh_uart->tx_ringbuf.size);
if (len) {
int err;
len = uart_fifo_fill(dev, data, len);
err = ring_buf_get_finish(sh_uart->tx_ringbuf, len);
err = ring_buf_get_finish(&sh_uart->tx_ringbuf, len);
__ASSERT_NO_MSG(err == 0);
ARG_UNUSED(err);
} else {
uart_irq_tx_disable(dev);
sh_uart->ctrl_blk->tx_busy = 0;
sh_uart->tx_busy = 0;
}
sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_TX_RDY,
sh_uart->ctrl_blk->context);
sh_uart->common.handler(SHELL_TRANSPORT_EVT_TX_RDY, sh_uart->common.context);
}
static void uart_callback(const struct device *dev, void *user_data)
{
const struct shell_uart *sh_uart = (struct shell_uart *)user_data;
struct shell_uart_int_driven *sh_uart = (struct shell_uart_int_driven *)user_data;
uart_irq_update(dev);
@@ -179,80 +266,135 @@ static void uart_callback(const struct device *dev, void *user_data)
uart_tx_handle(dev, sh_uart);
}
}
#endif /* CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN */
static void uart_irq_init(const struct shell_uart *sh_uart)
static void irq_init(struct shell_uart_int_driven *sh_uart)
{
#ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
const struct device *dev = sh_uart->ctrl_blk->dev;
const struct device *dev = sh_uart->common.dev;
ring_buf_reset(sh_uart->tx_ringbuf);
ring_buf_reset(sh_uart->rx_ringbuf);
sh_uart->ctrl_blk->tx_busy = 0;
ring_buf_init(&sh_uart->rx_ringbuf, CONFIG_SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE,
rx_ringbuf_data);
ring_buf_init(&sh_uart->tx_ringbuf, CONFIG_SHELL_BACKEND_SERIAL_TX_RING_BUFFER_SIZE,
tx_ringbuf_data);
sh_uart->tx_busy = 0;
uart_irq_callback_user_data_set(dev, uart_callback, (void *)sh_uart);
uart_irq_rx_enable(dev);
if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR)) {
k_timer_init(sh_uart->dtr_timer, dtr_timer_handler, NULL);
k_timer_user_data_set(sh_uart->dtr_timer, (void *)sh_uart);
k_timer_init(&sh_uart->dtr_timer, dtr_timer_handler, NULL);
k_timer_user_data_set(&sh_uart->dtr_timer, (void *)sh_uart);
}
#endif
}
static void timer_handler(struct k_timer *timer)
static int rx_enable(const struct device *dev, uint8_t *buf, size_t len)
{
return uart_rx_enable(dev, buf, len, 10000);
}
static void async_init(struct shell_uart_async *sh_uart)
{
static const struct uart_async_rx_config async_rx_config = {
.buffer = async_rx_data,
.length = sizeof(async_rx_data),
.buf_cnt = CONFIG_SHELL_BACKEND_SERIAL_ASYNC_RX_BUFFER_COUNT
};
const struct device *dev = sh_uart->common.dev;
struct uart_async_rx *async_rx = &sh_uart->async_rx;
int err;
k_sem_init(&sh_uart->tx_sem, 0, 1);
err = uart_async_rx_init(async_rx, &async_rx_config);
(void)err;
__ASSERT_NO_MSG(err == 0);
uint8_t *buf = uart_async_rx_buf_req(async_rx);
err = uart_callback_set(dev, async_callback, (void *)sh_uart);
(void)err;
__ASSERT_NO_MSG(err == 0);
err = rx_enable(dev, buf, uart_async_rx_get_buf_len(async_rx));
(void)err;
__ASSERT_NO_MSG(err == 0);
}
static void polling_rx_timeout_handler(struct k_timer *timer)
{
uint8_t c;
const struct shell_uart *sh_uart = k_timer_user_data_get(timer);
struct shell_uart_polling *sh_uart = k_timer_user_data_get(timer);
while (uart_poll_in(sh_uart->ctrl_blk->dev, &c) == 0) {
if (ring_buf_put(sh_uart->rx_ringbuf, &c, 1) == 0U) {
while (uart_poll_in(sh_uart->common.dev, &c) == 0) {
if (ring_buf_put(&sh_uart->rx_ringbuf, &c, 1) == 0U) {
/* ring buffer full. */
LOG_WRN("RX ring buffer full.");
}
sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_RX_RDY,
sh_uart->ctrl_blk->context);
sh_uart->common.handler(SHELL_TRANSPORT_EVT_RX_RDY, sh_uart->common.context);
}
}
static void polling_init(struct shell_uart_polling *sh_uart)
{
k_timer_init(&sh_uart->rx_timer, polling_rx_timeout_handler, NULL);
k_timer_user_data_set(&sh_uart->rx_timer, (void *)sh_uart);
k_timer_start(&sh_uart->rx_timer, RX_POLL_PERIOD, RX_POLL_PERIOD);
ring_buf_init(&sh_uart->rx_ringbuf, CONFIG_SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE,
rx_ringbuf_data);
}
static int init(const struct shell_transport *transport,
const void *config,
shell_transport_handler_t evt_handler,
void *context)
{
const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
struct shell_uart_common *common = (struct shell_uart_common *)transport->ctx;
sh_uart->ctrl_blk->dev = (const struct device *)config;
sh_uart->ctrl_blk->handler = evt_handler;
sh_uart->ctrl_blk->context = context;
common->dev = (const struct device *)config;
common->handler = evt_handler;
common->context = context;
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
sh_uart->ctrl_blk->smp.buf_pool = &smp_shell_rx_pool;
k_fifo_init(&sh_uart->ctrl_blk->smp.buf_ready);
common->smp.buf_pool = &smp_shell_rx_pool;
k_fifo_init(&common->smp.buf_ready);
#endif
if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN)) {
uart_irq_init(sh_uart);
if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_ASYNC)) {
async_init((struct shell_uart_async *)transport->ctx);
} else if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN)) {
irq_init((struct shell_uart_int_driven *)transport->ctx);
} else {
k_timer_init(sh_uart->timer, timer_handler, NULL);
k_timer_user_data_set(sh_uart->timer, (void *)sh_uart);
k_timer_start(sh_uart->timer, RX_POLL_PERIOD, RX_POLL_PERIOD);
polling_init((struct shell_uart_polling *)transport->ctx);
}
return 0;
}
static void irq_uninit(struct shell_uart_int_driven *sh_uart)
{
const struct device *dev = sh_uart->common.dev;
k_timer_stop(&sh_uart->dtr_timer);
uart_irq_tx_disable(dev);
uart_irq_rx_disable(dev);
}
static void async_uninit(struct shell_uart_async *sh_uart)
{
}
static void polling_uninit(struct shell_uart_polling *sh_uart)
{
k_timer_stop(&sh_uart->rx_timer);
}
static int uninit(const struct shell_transport *transport)
{
const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN)) {
const struct device *dev = sh_uart->ctrl_blk->dev;
k_timer_stop(sh_uart->dtr_timer);
uart_irq_tx_disable(dev);
uart_irq_rx_disable(dev);
if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_ASYNC)) {
async_uninit((struct shell_uart_async *)transport->ctx);
} else if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN)) {
irq_uninit((struct shell_uart_int_driven *)transport->ctx);
} else {
k_timer_stop(sh_uart->timer);
polling_uninit((struct shell_uart_polling *)transport->ctx);
}
return 0;
@@ -260,49 +402,138 @@ static int uninit(const struct shell_transport *transport)
static int enable(const struct shell_transport *transport, bool blocking_tx)
{
const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
struct shell_uart_common *sh_uart = (struct shell_uart_common *)transport->ctx;
sh_uart->ctrl_blk->blocking_tx = blocking_tx;
sh_uart->blocking_tx = blocking_tx;
if (blocking_tx) {
#ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
uart_irq_tx_disable(sh_uart->ctrl_blk->dev);
#endif
if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN) && blocking_tx) {
uart_irq_tx_disable(sh_uart->dev);
}
return 0;
}
static void irq_write(const struct shell_uart *sh_uart, const void *data,
size_t length, size_t *cnt)
static int polling_write(struct shell_uart_common *sh_uart,
const void *data, size_t length, size_t *cnt)
{
*cnt = ring_buf_put(sh_uart->tx_ringbuf, data, length);
const uint8_t *data8 = (const uint8_t *)data;
if (atomic_set(&sh_uart->ctrl_blk->tx_busy, 1) == 0) {
#ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
uart_irq_tx_enable(sh_uart->ctrl_blk->dev);
#endif
for (size_t i = 0; i < length; i++) {
uart_poll_out(sh_uart->dev, data8[i]);
}
*cnt = length;
sh_uart->handler(SHELL_TRANSPORT_EVT_TX_RDY, sh_uart->context);
return 0;
}
static int irq_write(struct shell_uart_int_driven *sh_uart,
const void *data, size_t length, size_t *cnt)
{
*cnt = ring_buf_put(&sh_uart->tx_ringbuf, data, length);
if (atomic_set(&sh_uart->tx_busy, 1) == 0) {
uart_irq_tx_enable(sh_uart->common.dev);
}
return 0;
}
static int async_write(struct shell_uart_async *sh_uart,
const void *data, size_t length, size_t *cnt)
{
int err;
err = uart_tx(sh_uart->common.dev, data, length, SYS_FOREVER_US);
if (err < 0) {
*cnt = 0;
return err;
}
err = k_sem_take(&sh_uart->tx_sem, K_FOREVER);
*cnt = length;
sh_uart->common.handler(SHELL_TRANSPORT_EVT_TX_RDY, sh_uart->common.context);
return err;
}
static int write(const struct shell_transport *transport,
const void *data, size_t length, size_t *cnt)
{
const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
const uint8_t *data8 = (const uint8_t *)data;
struct shell_uart_common *sh_uart = (struct shell_uart_common *)transport->ctx;
if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN) &&
!sh_uart->ctrl_blk->blocking_tx) {
irq_write(sh_uart, data, length, cnt);
if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_POLLING) || sh_uart->blocking_tx) {
return polling_write(sh_uart, data, length, cnt);
} else if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN)) {
return irq_write((struct shell_uart_int_driven *)transport->ctx, data, length, cnt);
} else {
for (size_t i = 0; i < length; i++) {
uart_poll_out(sh_uart->ctrl_blk->dev, data8[i]);
return async_write((struct shell_uart_async *)transport->ctx, data, length, cnt);
}
}
static int irq_read(struct shell_uart_int_driven *sh_uart,
void *data, size_t length, size_t *cnt)
{
*cnt = ring_buf_get(&sh_uart->rx_ringbuf, data, length);
return 0;
}
static int polling_read(struct shell_uart_polling *sh_uart,
void *data, size_t length, size_t *cnt)
{
*cnt = ring_buf_get(&sh_uart->rx_ringbuf, data, length);
return 0;
}
static int async_read(struct shell_uart_async *sh_uart,
void *data, size_t length, size_t *cnt)
{
uint8_t *buf;
size_t blen;
struct uart_async_rx *async_rx = &sh_uart->async_rx;
blen = uart_async_rx_data_claim(async_rx, &buf, length);
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
struct smp_shell_data *const smp = &sh_uart->common.smp;
size_t sh_cnt = 0;
for (size_t i = 0; i < blen; i++) {
if (smp_shell_rx_bytes(smp, &buf[i], 1) == 0) {
((uint8_t *)data)[sh_cnt++] = buf[i];
}
}
#else
size_t sh_cnt = blen;
*cnt = length;
memcpy(data, buf, blen);
#endif
uart_async_rx_data_consume(async_rx, sh_cnt);
*cnt = sh_cnt;
sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_TX_RDY,
sh_uart->ctrl_blk->context);
if (sh_uart->pending_rx_req) {
uint8_t *buf = uart_async_rx_buf_req(async_rx);
if (buf) {
int err;
size_t len = uart_async_rx_get_buf_len(async_rx);
atomic_dec(&sh_uart->pending_rx_req);
err = uart_rx_buf_rsp(sh_uart->common.dev, buf, len);
/* If it is too late and RX is disabled then re-enable it. */
if (err < 0) {
if (err == -EACCES) {
sh_uart->pending_rx_req = 0;
err = rx_enable(sh_uart->common.dev, buf, len);
} else {
return err;
}
}
}
}
return 0;
@@ -311,19 +542,21 @@ static int write(const struct shell_transport *transport,
static int read(const struct shell_transport *transport,
void *data, size_t length, size_t *cnt)
{
struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
*cnt = ring_buf_get(sh_uart->rx_ringbuf, data, length);
return 0;
if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN)) {
return irq_read((struct shell_uart_int_driven *)transport->ctx, data, length, cnt);
} else if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_ASYNC)) {
return async_read((struct shell_uart_async *)transport->ctx, data, length, cnt);
} else {
return polling_read((struct shell_uart_polling *)transport->ctx, data, length, cnt);
}
}
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
static void update(const struct shell_transport *transport)
{
struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
struct shell_uart_common *sh_uart = (struct shell_uart_common *)transport->ctx;
smp_shell_process(&sh_uart->ctrl_blk->smp);
smp_shell_process(&sh_uart->smp);
}
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
@@ -338,6 +571,18 @@ const struct shell_transport_api shell_uart_transport_api = {
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
};
struct shell_transport shell_transport_uart = {
.api = &shell_uart_transport_api,
.ctx = IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_POLLING) ? (void *)&shell_uart_p :
(IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_ASYNC) ? (void *)&shell_uart_a :
(void *)&shell_uart_i)
};
SHELL_DEFINE(shell_uart, CONFIG_SHELL_PROMPT_UART, &shell_transport_uart,
CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_SIZE,
CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_TIMEOUT,
SHELL_FLAG_OLF_CRLF);
static int enable_shell_uart(void)
{
const struct device *const dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_shell_uart));