ipc_service: icmsg: Add "unbound" functionality

In some cases, CPUs may need to reset or
temporarily stop communication. This commit adds "unbound"
functionality that provides a callback to IPC service user
when connection was interrupted for some reason, e.g.
expected or unexpected CPU reset, closing the
endpoint. The "unbound" callback is optional to implement
by endpoints. This commit implements it in the ICMsg
backend.

Signed-off-by: Dominik Kilian <Dominik.Kilian@nordicsemi.no>
This commit is contained in:
Dominik Kilian 2025-01-24 12:26:45 +01:00 committed by Benjamin Cabé
parent 5155cb673f
commit 84a215aff8
13 changed files with 638 additions and 242 deletions

View File

@ -83,8 +83,7 @@ connected through the IPC instance:
memory.
#. It then sends a signal to the other domain or CPU, informing that the data
has been written. Sending the signal to the other domain or CPU is repeated
with timeout specified by
:kconfig:option:`CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS` option.
with timeout.
#. When the signal from the other domain or CPU is received, the magic number
is read from ``rx-region``. If it is correct, the bonding process is finished
and the backend informs the application by calling

View File

@ -21,6 +21,21 @@ properties:
required: true
type: phandle
unbound:
type: string
enum:
- disable
- enable
- detect
default: disable
description: |
Select unbound() callback mode. The callback can be enabled or disabled with the
"enable" and "disable" options respectively. This functionality requires a session
number handshake procedure on both sides, so you cannot set "enable" on one side
and "disable" on the other. The "detect" mode detects whether the remote side
supports the handshake procedure and adjusts its behavior accordingly. The
"detect" mode is possible only if "dcache-alignment" is at least 8 bytes.
dcache-alignment:
type: int
description: |

View File

@ -27,14 +27,58 @@ extern "C" {
*/
enum icmsg_state {
/** Instance is not initialized yet. In this state: sending will fail, opening allowed.
*/
ICMSG_STATE_OFF,
ICMSG_STATE_BUSY,
ICMSG_STATE_READY,
/** Instance is initializing without session handshake. In this state: sending will fail,
* opening will fail.
*/
ICMSG_STATE_INITIALIZING_SID_DISABLED,
/** Instance is initializing with session handshake. It is waiting for remote to acknowledge
* local session id. In this state: sending will fail, opening is allowed (local session id
* will change, so the remote may get unbound() callback).
*/
ICMSG_STATE_INITIALIZING_SID_ENABLED,
/** Instance is initializing with detection of session handshake support on remote side.
* It is waiting for remote to acknowledge local session id or to send magic bytes.
* In this state: sending will fail, opening is allowed (local session id
* will change, so the remote may get unbound() callback if it supports it).
*/
ICMSG_STATE_INITIALIZING_SID_DETECT,
/** Instance was closed on remote side. The unbound() callback was sent on the local side.
* In this state: sending will be silently discarded (there may be outdated sends),
* opening is allowed.
*/
ICMSG_STATE_DISCONNECTED,
/* Connected states must be at the end. */
/** Instance is connected without session handshake support. In this state: sending will be
* successful, opening will fail.
*/
ICMSG_STATE_CONNECTED_SID_DISABLED,
/** Instance is connected with session handshake support. In this state: sending will be
* successful, opening is allowed (session will change and remote will get unbound()
* callback).
*/
ICMSG_STATE_CONNECTED_SID_ENABLED,
};
enum icmsg_unbound_mode {
ICMSG_UNBOUND_MODE_DISABLE = ICMSG_STATE_INITIALIZING_SID_DISABLED,
ICMSG_UNBOUND_MODE_ENABLE = ICMSG_STATE_INITIALIZING_SID_ENABLED,
ICMSG_UNBOUND_MODE_DETECT = ICMSG_STATE_INITIALIZING_SID_DETECT,
};
struct icmsg_config_t {
struct mbox_dt_spec mbox_tx;
struct mbox_dt_spec mbox_rx;
enum icmsg_unbound_mode unbound_mode;
};
struct icmsg_data_t {
@ -52,9 +96,10 @@ struct icmsg_data_t {
/* General */
const struct icmsg_config_t *cfg;
#ifdef CONFIG_MULTITHREADING
struct k_work_delayable notify_work;
struct k_work mbox_work;
#endif
uint16_t remote_sid;
uint16_t local_sid;
atomic_t state;
};

View File

@ -151,6 +151,21 @@ struct ipc_service_cb {
*/
void (*bound)(void *priv);
/** @brief The endpoint unbound by the remote.
*
* This callback is called when the endpoint binding is removed. It may happen for
* different reasons, e.g. when the remote deregistered the endpoint, the connection
* was lost, or the remote CPU was reset.
*
You may want to do some cleanup, resetting, etc., and after that, if you want to bind
again, you can register the endpoint. When the remote becomes available again and it
* also registers the endpoint, the binding will be reestablished and the `bound()`
* callback will be called.
*
* @param[in] priv Private user data.
*/
void (*unbound)(void *priv);
/** @brief New packet arrived.
*
* This callback is called when new data is received.

View File

@ -47,20 +47,23 @@ extern "C" {
* The structure contains configuration data.
*/
struct pbuf_cfg {
volatile uint32_t *rd_idx_loc; /* Address of the variable holding
* index value of the first valid byte
* in data[].
*/
volatile uint32_t *wr_idx_loc; /* Address of the variable holding
* index value of the first free byte
* in data[].
*/
uint32_t dcache_alignment; /* CPU data cache line size in bytes.
* Used for validation - TODO: To be
* replaced by flags.
*/
uint32_t len; /* Length of data[] in bytes. */
uint8_t *data_loc; /* Location of the data[]. */
volatile uint32_t *rd_idx_loc; /* Address of the variable holding
* index value of the first valid byte
* in data[].
*/
volatile uint32_t *handshake_loc;/* Address of the variable holding
* handshake information.
*/
volatile uint32_t *wr_idx_loc; /* Address of the variable holding
* index value of the first free byte
* in data[].
*/
uint32_t dcache_alignment; /* CPU data cache line size in bytes.
* Used for validation - TODO: To be
* replaced by flags.
*/
uint32_t len; /* Length of data[] in bytes. */
uint8_t *data_loc; /* Location of the data[]. */
};
/**
@ -111,16 +114,21 @@ struct pbuf {
* @param mem_addr Memory address for pbuf.
* @param size Size of the memory.
* @param dcache_align Data cache alignment.
* @param use_handshake Add handshake word inside shared memory that can be accessed with
* @ref pbuf_handshake_read and @ref pbuf_handshake_write.
*/
#define PBUF_CFG_INIT(mem_addr, size, dcache_align) \
#define PBUF_CFG_INIT(mem_addr, size, dcache_align, use_handshake) \
{ \
.rd_idx_loc = (uint32_t *)(mem_addr), \
.wr_idx_loc = (uint32_t *)((uint8_t *)(mem_addr) + \
MAX(dcache_align, _PBUF_IDX_SIZE)), \
.handshake_loc = use_handshake ? (uint32_t *)((uint8_t *)(mem_addr) + \
_PBUF_IDX_SIZE) : NULL, \
.wr_idx_loc = (uint32_t *)((uint8_t *)(mem_addr) + MAX(dcache_align, \
(use_handshake ? 2 : 1) * _PBUF_IDX_SIZE)), \
.data_loc = (uint8_t *)((uint8_t *)(mem_addr) + \
MAX(dcache_align, _PBUF_IDX_SIZE) + _PBUF_IDX_SIZE), \
.len = (uint32_t)((uint32_t)(size) - MAX(dcache_align, _PBUF_IDX_SIZE) - \
_PBUF_IDX_SIZE), \
MAX(dcache_align, (use_handshake ? 2 : 1) * \
_PBUF_IDX_SIZE) + _PBUF_IDX_SIZE), \
.len = (uint32_t)((uint32_t)(size) - MAX(dcache_align, \
(use_handshake ? 2 : 1) * _PBUF_IDX_SIZE) - _PBUF_IDX_SIZE), \
.dcache_alignment = (dcache_align), \
}
@ -140,9 +148,11 @@ struct pbuf {
* @param name Name of the pbuf.
* @param mem_addr Memory address for pbuf.
* @param size Size of the memory.
* @param dcache_align Data cache line size.
* @param dcache_align Data cache line size.
* @param use_handshake Add handshake word inside shared memory that can be accessed with
* @ref pbuf_handshake_read and @ref pbuf_handshake_write.
*/
#define PBUF_DEFINE(name, mem_addr, size, dcache_align) \
#define PBUF_DEFINE(name, mem_addr, size, dcache_align, use_handshake, compatibility) \
BUILD_ASSERT(dcache_align >= 0, \
"Cache line size must be non negative."); \
BUILD_ASSERT((size) > 0 && IS_PTR_ALIGNED_BYTES(size, _PBUF_IDX_SIZE), \
@ -151,8 +161,10 @@ struct pbuf {
"Misaligned memory."); \
BUILD_ASSERT(size >= (MAX(dcache_align, _PBUF_IDX_SIZE) + _PBUF_IDX_SIZE + \
_PBUF_MIN_DATA_LEN), "Insufficient size."); \
BUILD_ASSERT(!(compatibility) || (dcache_align) >= 8, \
"Data cache alignment must be at least 8 if compatibility is enabled.");\
static PBUF_MAYBE_CONST struct pbuf_cfg cfg_##name = \
PBUF_CFG_INIT(mem_addr, size, dcache_align); \
PBUF_CFG_INIT(mem_addr, size, dcache_align, use_handshake); \
static struct pbuf name = { \
.cfg = &cfg_##name, \
}
@ -223,6 +235,40 @@ int pbuf_write(struct pbuf *pb, const char *buf, uint16_t len);
*/
int pbuf_read(struct pbuf *pb, char *buf, uint16_t len);
/**
* @brief Read handshake word from pbuf.
*
* The pb must be defined with "PBUF_DEFINE" with "use_handshake" set.
*
* @param pb A buffer from which data will be read.
* @retval uint32_t The handshake word value.
*/
uint32_t pbuf_handshake_read(struct pbuf *pb);
/**
* @brief Write handshake word to pbuf.
*
* The pb must be defined with "PBUF_DEFINE" with "use_handshake" set.
*
* @param pb A buffer to which data will be written.
* @param value A handshake value.
*/
void pbuf_handshake_write(struct pbuf *pb, uint32_t value);
/**
* @brief Get first buffer from pbuf.
*
* This function retrieves the buffer located at the beginning of the queue.
* It will be a contiguous block since it is the first buffer.
*
* @param pb A buffer from which data will be read.
* @param[out] buf A pointer to an output pointer to the data of the first buffer.
* @param[out] len A pointer to the output length of the first buffer.
* @retval 0 on success.
* -EINVAL when there is no buffer at the beginning of queue.
*/
int pbuf_get_initial_buf(struct pbuf *pb, volatile char **buf, uint16_t *len);
/**
* @}
*/

View File

@ -40,6 +40,11 @@ static void ep_bound(void *priv)
LOG_INF("Ep bounded");
}
static void ep_unbound(void *priv)
{
LOG_INF("Ep unbounded");
}
static void ep_recv(const void *data, size_t len, void *priv)
{
#if defined(CONFIG_ASSERT)
@ -68,6 +73,11 @@ static void ep_recv(const void *data, size_t len, void *priv)
}
}
static void ep_error(const char *message, void *priv)
{
LOG_ERR("ICMsg error: %s", message);
}
static int send_for_time(struct ipc_ept *ep, const int64_t sending_time_ms)
{
struct data_packet msg = {.data[0] = 'a'};
@ -123,7 +133,9 @@ static int send_for_time(struct ipc_ept *ep, const int64_t sending_time_ms)
static struct ipc_ept_cfg ep_cfg = {
.cb = {
.bound = ep_bound,
.unbound = ep_unbound,
.received = ep_recv,
.error = ep_error,
},
};

View File

@ -1408,11 +1408,11 @@ const static struct ipc_service_backend backend_ops = {
PBUF_DEFINE(tx_icbmsg_pb_##i, \
GET_MEM_ADDR_INST(i, tx), \
GET_ICMSG_SIZE_INST(i, tx, rx), \
GET_CACHE_ALIGNMENT(i)); \
GET_CACHE_ALIGNMENT(i), 0, 0); \
PBUF_DEFINE(rx_icbmsg_pb_##i, \
GET_MEM_ADDR_INST(i, rx), \
GET_ICMSG_SIZE_INST(i, rx, tx), \
GET_CACHE_ALIGNMENT(i)); \
GET_CACHE_ALIGNMENT(i), 0, 0); \
static struct backend_data backend_data_##i = { \
.control_data = { \
.tx_pb = &tx_icbmsg_pb_##i, \
@ -1424,6 +1424,7 @@ const static struct ipc_service_backend backend_ops = {
.control_config = { \
.mbox_tx = MBOX_DT_SPEC_INST_GET(i, tx), \
.mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx), \
.unbound_mode = ICMSG_UNBOUND_MODE_DISABLE, \
}, \
.tx = { \
.blocks_ptr = (uint8_t *)GET_BLOCKS_ADDR_INST(i, tx, rx), \

View File

@ -54,33 +54,52 @@ static int backend_init(const struct device *instance)
return 0;
}
#define DEFINE_BACKEND_DEVICE(i) \
static const struct icmsg_config_t backend_config_##i = { \
.mbox_tx = MBOX_DT_SPEC_INST_GET(i, tx), \
.mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx), \
}; \
\
PBUF_DEFINE(tx_pb_##i, \
DT_REG_ADDR(DT_INST_PHANDLE(i, tx_region)), \
DT_REG_SIZE(DT_INST_PHANDLE(i, tx_region)), \
DT_INST_PROP_OR(i, dcache_alignment, 0)); \
PBUF_DEFINE(rx_pb_##i, \
DT_REG_ADDR(DT_INST_PHANDLE(i, rx_region)), \
DT_REG_SIZE(DT_INST_PHANDLE(i, rx_region)), \
DT_INST_PROP_OR(i, dcache_alignment, 0)); \
\
static struct icmsg_data_t backend_data_##i = { \
.tx_pb = &tx_pb_##i, \
.rx_pb = &rx_pb_##i, \
}; \
\
DEVICE_DT_INST_DEFINE(i, \
&backend_init, \
NULL, \
&backend_data_##i, \
&backend_config_##i, \
POST_KERNEL, \
CONFIG_IPC_SERVICE_REG_BACKEND_PRIORITY, \
#define UNBOUND_MODE(i) CONCAT(ICMSG_UNBOUND_MODE_, DT_INST_STRING_UPPER_TOKEN(i, unbound))
#define DEFINE_BACKEND_DEVICE(i) \
static const struct icmsg_config_t backend_config_##i = { \
.mbox_tx = MBOX_DT_SPEC_INST_GET(i, tx), \
.mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx), \
.unbound_mode = UNBOUND_MODE(i), \
}; \
\
PBUF_DEFINE(tx_pb_##i, \
DT_REG_ADDR(DT_INST_PHANDLE(i, tx_region)), \
DT_REG_SIZE(DT_INST_PHANDLE(i, tx_region)), \
DT_INST_PROP_OR(i, dcache_alignment, 0), \
UNBOUND_MODE(i) != ICMSG_UNBOUND_MODE_DISABLE, \
UNBOUND_MODE(i) == ICMSG_UNBOUND_MODE_DETECT); \
PBUF_DEFINE(rx_pb_##i, \
DT_REG_ADDR(DT_INST_PHANDLE(i, rx_region)), \
DT_REG_SIZE(DT_INST_PHANDLE(i, rx_region)), \
DT_INST_PROP_OR(i, dcache_alignment, 0), \
UNBOUND_MODE(i) != ICMSG_UNBOUND_MODE_DISABLE, \
UNBOUND_MODE(i) == ICMSG_UNBOUND_MODE_DETECT); \
\
BUILD_ASSERT(UNBOUND_MODE(i) != ICMSG_UNBOUND_MODE_DISABLE || \
IS_ENABLED(CONFIG_IPC_SERVICE_ICMSG_UNBOUND_DISABLED_ALLOWED), \
"Unbound mode \"disabled\" is was forbidden in Kconfig."); \
\
BUILD_ASSERT(UNBOUND_MODE(i) != ICMSG_UNBOUND_MODE_ENABLE || \
IS_ENABLED(CONFIG_IPC_SERVICE_ICMSG_UNBOUND_ENABLED_ALLOWED), \
"Unbound mode \"enabled\" is was forbidden in Kconfig."); \
\
BUILD_ASSERT(UNBOUND_MODE(i) != ICMSG_UNBOUND_MODE_DETECT || \
IS_ENABLED(CONFIG_IPC_SERVICE_ICMSG_UNBOUND_DETECT_ALLOWED), \
"Unbound mode \"detect\" is was forbidden in Kconfig."); \
\
static struct icmsg_data_t backend_data_##i = { \
.tx_pb = &tx_pb_##i, \
.rx_pb = &rx_pb_##i, \
}; \
\
DEVICE_DT_INST_DEFINE(i, \
&backend_init, \
NULL, \
&backend_data_##i, \
&backend_config_##i, \
POST_KERNEL, \
CONFIG_IPC_SERVICE_REG_BACKEND_PRIORITY, \
&backend_ops);
DT_INST_FOREACH_STATUS_OKAY(DEFINE_BACKEND_DEVICE)

View File

@ -280,16 +280,17 @@ static int backend_init(const struct device *instance)
static const struct icmsg_config_t backend_config_##i = { \
.mbox_tx = MBOX_DT_SPEC_INST_GET(i, tx), \
.mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx), \
.unbound_mode = ICMSG_UNBOUND_MODE_DISABLE, \
}; \
\
PBUF_DEFINE(tx_pb_##i, \
DT_REG_ADDR(DT_INST_PHANDLE(i, tx_region)), \
DT_REG_SIZE(DT_INST_PHANDLE(i, tx_region)), \
DT_INST_PROP_OR(i, dcache_alignment, 0)); \
DT_INST_PROP_OR(i, dcache_alignment, 0), 0, 0); \
PBUF_DEFINE(rx_pb_##i, \
DT_REG_ADDR(DT_INST_PHANDLE(i, rx_region)), \
DT_REG_SIZE(DT_INST_PHANDLE(i, rx_region)), \
DT_INST_PROP_OR(i, dcache_alignment, 0)); \
DT_INST_PROP_OR(i, dcache_alignment, 0), 0, 0); \
\
static struct backend_data_t backend_data_##i = { \
.icmsg_me_data = { \

View File

@ -186,16 +186,17 @@ static int backend_init(const struct device *instance)
static const struct icmsg_config_t backend_config_##i = { \
.mbox_tx = MBOX_DT_SPEC_INST_GET(i, tx), \
.mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx), \
.unbound_mode = ICMSG_UNBOUND_MODE_DISABLE, \
}; \
\
PBUF_DEFINE(tx_pb_##i, \
DT_REG_ADDR(DT_INST_PHANDLE(i, tx_region)), \
DT_REG_SIZE(DT_INST_PHANDLE(i, tx_region)), \
DT_INST_PROP_OR(i, dcache_alignment, 0)); \
DT_INST_PROP_OR(i, dcache_alignment, 0), 0, 0); \
PBUF_DEFINE(rx_pb_##i, \
DT_REG_ADDR(DT_INST_PHANDLE(i, rx_region)), \
DT_REG_SIZE(DT_INST_PHANDLE(i, rx_region)), \
DT_INST_PROP_OR(i, dcache_alignment, 0)); \
DT_INST_PROP_OR(i, dcache_alignment, 0), 0, 0); \
\
static struct backend_data_t backend_data_##i = { \
.icmsg_me_data = { \

View File

@ -21,14 +21,6 @@ config IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS
Maximum time to wait, in milliseconds, for access to send data with
backends basing on icmsg library. This time should be relatively low.
config IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS
int "Bond notification timeout in miliseconds"
range 1 100
default 1
help
Time to wait for remote bonding notification before the
notification is repeated.
config IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE
bool "Use dedicated workqueue"
depends on MULTITHREADING
@ -68,6 +60,33 @@ config IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY
endif
config IPC_SERVICE_ICMSG_UNBOUND_ENABLED_ALLOWED
bool "Instance is allowed to set unbound to enabled"
default y
help
Controls whether the "enabled" value of the "unbound"
property is allowed to be set in any of the ICMsg instances. You
can set this option to "n", if you want to reduce code size and
no instance of ICMsg is using unbound functionality.
config IPC_SERVICE_ICMSG_UNBOUND_DISABLED_ALLOWED
bool "Instance is allowed to set unbound to disabled"
default y
help
Controls whether the "disabled" value of the "unbound"
property is allowed to be set in any of the ICMsg instances. You
can set this option to "n", if you want to reduce code size and
all instances of ICMsg are using unbound functionality.
config IPC_SERVICE_ICMSG_UNBOUND_DETECT_ALLOWED
bool "Instance is allowed to set unbound to detect"
default y
help
Controls whether the "detect" value of the "unbound"
property is allowed to be set in any of the ICMsg instances. You
can set this option to "n", if you want to reduce code size and
no instance of ICMsg is using unbound detection functionality.
# The Icmsg library in its simplicity requires the system workqueue to execute
# at a cooperative priority.
config SYSTEM_WORKQUEUE_PRIORITY

View File

@ -12,7 +12,39 @@
#include <zephyr/ipc/pbuf.h>
#include <zephyr/init.h>
#define BOND_NOTIFY_REPEAT_TO K_MSEC(CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS)
#define UNBOUND_DISABLED IS_ENABLED(CONFIG_IPC_SERVICE_ICMSG_UNBOUND_DISABLED_ALLOWED)
#define UNBOUND_ENABLED IS_ENABLED(CONFIG_IPC_SERVICE_ICMSG_UNBOUND_ENABLED_ALLOWED)
#define UNBOUND_DETECT IS_ENABLED(CONFIG_IPC_SERVICE_ICMSG_UNBOUND_DETECT_ALLOWED)
/** Get local session id request from RX handshake word.
*/
#define LOCAL_SID_REQ_FROM_RX(rx_handshake) ((rx_handshake) & 0xFFFF)
/** Get remote session id request from TX handshake word.
*/
#define REMOTE_SID_REQ_FROM_TX(tx_handshake) ((tx_handshake) & 0xFFFF)
/** Get local session id acknowledge from TX handshake word.
*/
#define LOCAL_SID_ACK_FROM_TX(tx_handshake) ((tx_handshake) >> 16)
/** Create RX handshake word from local session id request
* and remote session id acknowledge.
*/
#define MAKE_RX_HANDSHAKE(local_sid_req, remote_sid_ack) \
((local_sid_req) | ((remote_sid_ack) << 16))
/** Create TX handshake word from remote session id request
* and local session id acknowledge.
*/
#define MAKE_TX_HANDSHAKE(remote_sid_req, local_sid_ack) \
((remote_sid_req) | ((local_sid_ack) << 16))
/** Special session id indicating that peers are disconnected.
*/
#define SID_DISCONNECTED 0
#define SHMEM_ACCESS_TO K_MSEC(CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS)
static const uint8_t magic[] = {0x45, 0x6d, 0x31, 0x6c, 0x31, 0x4b,
@ -23,13 +55,10 @@ static const uint8_t magic[] = {0x45, 0x6d, 0x31, 0x6c, 0x31, 0x4b,
static K_THREAD_STACK_DEFINE(icmsg_stack, CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE);
static struct k_work_q icmsg_workq;
static struct k_work_q *const workq = &icmsg_workq;
#else
#else /* defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE) */
static struct k_work_q *const workq = &k_sys_work_q;
#endif
static void mbox_callback_process(struct k_work *item);
#else
static void mbox_callback_process(struct icmsg_data_t *dev_data);
#endif
#endif /* defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE) */
#endif /* def CONFIG_MULTITHREADING */
static int mbox_deinit(const struct icmsg_config_t *conf,
struct icmsg_data_t *dev_data)
@ -48,76 +77,33 @@ static int mbox_deinit(const struct icmsg_config_t *conf,
#ifdef CONFIG_MULTITHREADING
(void)k_work_cancel(&dev_data->mbox_work);
(void)k_work_cancel_delayable(&dev_data->notify_work);
#endif
return 0;
}
static bool is_endpoint_ready(struct icmsg_data_t *dev_data)
static bool is_endpoint_ready(atomic_t state)
{
return atomic_get(&dev_data->state) == ICMSG_STATE_READY;
return state >= MIN(ICMSG_STATE_CONNECTED_SID_DISABLED, ICMSG_STATE_CONNECTED_SID_ENABLED);
}
#ifdef CONFIG_MULTITHREADING
static void notify_process(struct k_work *item)
static inline int reserve_tx_buffer_if_unused(struct icmsg_data_t *dev_data)
{
struct k_work_delayable *dwork = k_work_delayable_from_work(item);
struct icmsg_data_t *dev_data =
CONTAINER_OF(dwork, struct icmsg_data_t, notify_work);
(void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);
atomic_t state = atomic_get(&dev_data->state);
if (state != ICMSG_STATE_READY) {
int ret;
ret = k_work_reschedule_for_queue(workq, dwork, BOND_NOTIFY_REPEAT_TO);
__ASSERT_NO_MSG(ret >= 0);
(void)ret;
}
}
#else
static void notify_process(struct icmsg_data_t *dev_data)
{
(void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);
#if defined(CONFIG_SYS_CLOCK_EXISTS)
int64_t start = k_uptime_get();
#endif
while (false == is_endpoint_ready(dev_data)) {
mbox_callback_process(dev_data);
#if defined(CONFIG_SYS_CLOCK_EXISTS)
if ((k_uptime_get() - start) > CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS) {
#endif
(void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);
#if defined(CONFIG_SYS_CLOCK_EXISTS)
start = k_uptime_get();
};
#endif
}
}
#endif
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
static int reserve_tx_buffer_if_unused(struct icmsg_data_t *dev_data)
{
int ret = k_mutex_lock(&dev_data->tx_lock, SHMEM_ACCESS_TO);
if (ret < 0) {
return ret;
}
return k_mutex_lock(&dev_data->tx_lock, SHMEM_ACCESS_TO);
#else
return 0;
#endif
}
static int release_tx_buffer(struct icmsg_data_t *dev_data)
static inline int release_tx_buffer(struct icmsg_data_t *dev_data)
{
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
return k_mutex_unlock(&dev_data->tx_lock);
}
#else
return 0;
#endif
}
static uint32_t data_available(struct icmsg_data_t *dev_data)
{
@ -135,103 +121,242 @@ static void submit_mbox_work(struct icmsg_data_t *dev_data)
}
}
static void submit_work_if_buffer_free(struct icmsg_data_t *dev_data)
{
submit_mbox_work(dev_data);
}
#endif
static void submit_work_if_buffer_free_and_data_available(
struct icmsg_data_t *dev_data)
static int initialize_tx_with_sid_disabled(struct icmsg_data_t *dev_data)
{
if (!data_available(dev_data)) {
return;
int ret;
ret = pbuf_tx_init(dev_data->tx_pb);
if (ret < 0) {
__ASSERT(false, "Incorrect Tx configuration");
return ret;
}
submit_mbox_work(dev_data);
}
#else
static void submit_if_buffer_free(struct icmsg_data_t *dev_data)
{
mbox_callback_process(dev_data);
}
ret = pbuf_write(dev_data->tx_pb, magic, sizeof(magic));
static void submit_if_buffer_free_and_data_available(
struct icmsg_data_t *dev_data)
{
if (!data_available(dev_data)) {
return;
if (ret < 0) {
__ASSERT_NO_MSG(false);
return ret;
}
mbox_callback_process(dev_data);
}
#endif
if (ret < (int)sizeof(magic)) {
__ASSERT_NO_MSG(ret == sizeof(magic));
return -EINVAL;
}
#ifdef CONFIG_MULTITHREADING
static void mbox_callback_process(struct k_work *item)
#else
static void mbox_callback_process(struct icmsg_data_t *dev_data)
#endif
return 0;
}
static bool callback_process(struct icmsg_data_t *dev_data)
{
#ifdef CONFIG_MULTITHREADING
struct icmsg_data_t *dev_data = CONTAINER_OF(item, struct icmsg_data_t, mbox_work);
#endif
int ret;
uint8_t rx_buffer[CONFIG_PBUF_RX_READ_BUF_SIZE] __aligned(4);
uint32_t len = 0;
uint32_t len_available;
bool rerun = false;
bool notify_remote = false;
atomic_t state = atomic_get(&dev_data->state);
uint32_t len = data_available(dev_data);
switch (state) {
if (len == 0) {
/* Unlikely, no data in buffer. */
return;
}
#if UNBOUND_DETECT
case ICMSG_STATE_INITIALIZING_SID_DETECT: {
/* Initialization with detection of remote session awareness */
volatile char *magic_buf;
uint16_t magic_len;
__ASSERT_NO_MSG(len <= sizeof(rx_buffer));
ret = pbuf_get_initial_buf(dev_data->rx_pb, &magic_buf, &magic_len);
if (sizeof(rx_buffer) < len) {
return;
}
len = pbuf_read(dev_data->rx_pb, rx_buffer, sizeof(rx_buffer));
if (state == ICMSG_STATE_READY) {
if (dev_data->cb->received) {
dev_data->cb->received(rx_buffer, len, dev_data->ctx);
if (ret == 0 && magic_len == sizeof(magic) &&
memcmp((void *)magic_buf, magic, sizeof(magic)) == 0) {
/* Remote initialized in session-unaware mode, so we do old way of
* initialization.
*/
ret = initialize_tx_with_sid_disabled(dev_data);
if (ret < 0) {
if (dev_data->cb->error) {
dev_data->cb->error("Incorrect Tx configuration",
dev_data->ctx);
}
__ASSERT(false, "Incorrect Tx configuration");
atomic_set(&dev_data->state, ICMSG_STATE_OFF);
return false;
}
/* We got magic data, so we can handle it later. */
notify_remote = true;
rerun = true;
atomic_set(&dev_data->state, ICMSG_STATE_INITIALIZING_SID_DISABLED);
break;
}
} else {
__ASSERT_NO_MSG(state == ICMSG_STATE_BUSY);
/* If remote did not initialize the RX in session-unaware mode, we can try
* with session-aware initialization.
*/
__fallthrough;
}
#endif /* UNBOUND_DETECT */
/* Allow magic number longer than sizeof(magic) for future protocol version. */
bool endpoint_invalid = (len < sizeof(magic) ||
memcmp(magic, rx_buffer, sizeof(magic)));
#if UNBOUND_ENABLED || UNBOUND_DETECT
case ICMSG_STATE_INITIALIZING_SID_ENABLED: {
uint32_t tx_handshake = pbuf_handshake_read(dev_data->tx_pb);
uint32_t remote_sid_req = REMOTE_SID_REQ_FROM_TX(tx_handshake);
uint32_t local_sid_ack = LOCAL_SID_ACK_FROM_TX(tx_handshake);
if (endpoint_invalid) {
__ASSERT_NO_MSG(false);
return;
if (remote_sid_req != dev_data->remote_sid && remote_sid_req != SID_DISCONNECTED) {
/* We can now initialize TX, since we know that remote, during receiving,
* will first read FIFO indexes and later, it will check if session has
* changed before using indexes to receive the message. Additionally,
* we know that the remote, after a session request change, will not try to receive
* more data.
*/
ret = pbuf_tx_init(dev_data->tx_pb);
if (ret < 0) {
if (dev_data->cb->error) {
dev_data->cb->error("Incorrect Tx configuration",
dev_data->ctx);
}
__ASSERT(false, "Incorrect Tx configuration");
atomic_set(&dev_data->state, ICMSG_STATE_DISCONNECTED);
return false;
}
/* Acknowledge the remote session. */
dev_data->remote_sid = remote_sid_req;
pbuf_handshake_write(dev_data->rx_pb,
MAKE_RX_HANDSHAKE(dev_data->local_sid, dev_data->remote_sid));
notify_remote = true;
}
if (dev_data->cb->bound) {
dev_data->cb->bound(dev_data->ctx);
if (local_sid_ack == dev_data->local_sid &&
dev_data->remote_sid != SID_DISCONNECTED) {
/* We send acknowledge to remote, receive acknowledge from remote,
* so we are ready.
*/
atomic_set(&dev_data->state, ICMSG_STATE_CONNECTED_SID_ENABLED);
if (dev_data->cb->bound) {
dev_data->cb->bound(dev_data->ctx);
}
/* Re-run this handler, because remote may already send something. */
rerun = true;
notify_remote = true;
}
atomic_set(&dev_data->state, ICMSG_STATE_READY);
break;
}
#ifdef CONFIG_MULTITHREADING
submit_work_if_buffer_free_and_data_available(dev_data);
#else
submit_if_buffer_free_and_data_available(dev_data);
#endif /* UNBOUND_ENABLED || UNBOUND_DETECT */
#if UNBOUND_ENABLED || UNBOUND_DETECT
case ICMSG_STATE_CONNECTED_SID_ENABLED:
#endif
#if UNBOUND_DISABLED || UNBOUND_DETECT
case ICMSG_STATE_CONNECTED_SID_DISABLED:
#endif
#if UNBOUND_DISABLED
case ICMSG_STATE_INITIALIZING_SID_DISABLED:
#endif
len_available = data_available(dev_data);
if (len_available > 0 && sizeof(rx_buffer) >= len_available) {
len = pbuf_read(dev_data->rx_pb, rx_buffer, sizeof(rx_buffer));
}
if (state == ICMSG_STATE_CONNECTED_SID_ENABLED &&
(UNBOUND_ENABLED || UNBOUND_DETECT)) {
/* The incoming message is valid only if remote session is as expected,
* so we need to check remote session now.
*/
uint32_t remote_sid_req = REMOTE_SID_REQ_FROM_TX(
pbuf_handshake_read(dev_data->tx_pb));
if (remote_sid_req != dev_data->remote_sid) {
atomic_set(&dev_data->state, ICMSG_STATE_DISCONNECTED);
if (dev_data->cb->unbound) {
dev_data->cb->unbound(dev_data->ctx);
}
return false;
}
}
if (len_available == 0) {
/* Unlikely, no data in buffer. */
return false;
}
__ASSERT_NO_MSG(len_available <= sizeof(rx_buffer));
if (sizeof(rx_buffer) < len_available) {
return false;
}
if (state != ICMSG_STATE_INITIALIZING_SID_DISABLED || !UNBOUND_DISABLED) {
if (dev_data->cb->received) {
dev_data->cb->received(rx_buffer, len, dev_data->ctx);
}
} else {
/* Allow magic number longer than sizeof(magic) for future protocol
* version.
*/
bool endpoint_invalid = (len < sizeof(magic) ||
memcmp(magic, rx_buffer, sizeof(magic)));
if (endpoint_invalid) {
__ASSERT_NO_MSG(false);
return false;
}
if (dev_data->cb->bound) {
dev_data->cb->bound(dev_data->ctx);
}
atomic_set(&dev_data->state, ICMSG_STATE_CONNECTED_SID_DISABLED);
notify_remote = true;
}
rerun = (data_available(dev_data) > 0);
break;
case ICMSG_STATE_OFF:
case ICMSG_STATE_DISCONNECTED:
default:
/* Nothing to do in this state. */
return false;
}
if (notify_remote) {
(void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);
}
return rerun;
}
#ifdef CONFIG_MULTITHREADING
static void workq_callback_process(struct k_work *item)
{
bool rerun;
struct icmsg_data_t *dev_data = CONTAINER_OF(item, struct icmsg_data_t, mbox_work);
rerun = callback_process(dev_data);
if (rerun) {
submit_mbox_work(dev_data);
}
}
#endif /* def CONFIG_MULTITHREADING */
static void mbox_callback(const struct device *instance, uint32_t channel,
void *user_data, struct mbox_msg *msg_data)
{
bool rerun;
struct icmsg_data_t *dev_data = user_data;
#ifdef CONFIG_MULTITHREADING
submit_work_if_buffer_free(dev_data);
ARG_UNUSED(rerun);
submit_mbox_work(dev_data);
#else
submit_if_buffer_free(dev_data);
do {
rerun = callback_process(dev_data);
} while (rerun);
#endif
}
@ -241,8 +366,7 @@ static int mbox_init(const struct icmsg_config_t *conf,
int err;
#ifdef CONFIG_MULTITHREADING
k_work_init(&dev_data->mbox_work, mbox_callback_process);
k_work_init_delayable(&dev_data->notify_work, notify_process);
k_work_init(&dev_data->mbox_work, workq_callback_process);
#endif
err = mbox_register_callback_dt(&conf->mbox_rx, mbox_callback, dev_data);
@ -257,9 +381,27 @@ int icmsg_open(const struct icmsg_config_t *conf,
struct icmsg_data_t *dev_data,
const struct ipc_service_cb *cb, void *ctx)
{
if (!atomic_cas(&dev_data->state, ICMSG_STATE_OFF, ICMSG_STATE_BUSY)) {
/* Already opened. */
return -EALREADY;
int ret;
enum icmsg_state old_state;
__ASSERT(conf->unbound_mode != ICMSG_UNBOUND_MODE_DISABLE || UNBOUND_DISABLED,
"Unbound mode \"disabled\" was forbidden in Kconfig.");
__ASSERT(conf->unbound_mode != ICMSG_UNBOUND_MODE_ENABLE || UNBOUND_ENABLED,
"Unbound mode \"enabled\" was forbidden in Kconfig.");
__ASSERT(conf->unbound_mode != ICMSG_UNBOUND_MODE_DETECT || UNBOUND_DETECT,
"Unbound mode \"detect\" was forbidden in Kconfig.");
if (conf->unbound_mode == ICMSG_UNBOUND_MODE_DISABLE ||
!(UNBOUND_ENABLED || UNBOUND_DETECT)) {
if (!atomic_cas(&dev_data->state, ICMSG_STATE_OFF,
ICMSG_STATE_INITIALIZING_SID_DISABLED)) {
/* Already opened. */
return -EALREADY;
}
old_state = ICMSG_STATE_OFF;
} else {
/* Unbound mode has the same values as ICMSG_STATE_INITIALIZING_* */
old_state = atomic_set(&dev_data->state, conf->unbound_mode);
}
dev_data->cb = cb;
@ -270,60 +412,82 @@ int icmsg_open(const struct icmsg_config_t *conf,
k_mutex_init(&dev_data->tx_lock);
#endif
int ret = pbuf_tx_init(dev_data->tx_pb);
if (ret < 0) {
__ASSERT(false, "Incorrect Tx configuration");
return ret;
}
ret = pbuf_rx_init(dev_data->rx_pb);
if (ret < 0) {
__ASSERT(false, "Incorrect Rx configuration");
return ret;
goto cleanup_and_exit;
}
ret = pbuf_write(dev_data->tx_pb, magic, sizeof(magic));
if (conf->unbound_mode != ICMSG_UNBOUND_MODE_DISABLE &&
(UNBOUND_ENABLED || UNBOUND_DETECT)) {
/* Increment local session id without conflicts with forbidden values. */
uint32_t local_sid_ack =
LOCAL_SID_ACK_FROM_TX(pbuf_handshake_read(dev_data->tx_pb));
dev_data->local_sid =
LOCAL_SID_REQ_FROM_RX(pbuf_handshake_read(dev_data->tx_pb));
dev_data->remote_sid = SID_DISCONNECTED;
do {
dev_data->local_sid = (dev_data->local_sid + 1) & 0xFFFF;
} while (dev_data->local_sid == local_sid_ack ||
dev_data->local_sid == SID_DISCONNECTED);
/* Write local session id request without remote acknowledge */
pbuf_handshake_write(dev_data->rx_pb,
MAKE_RX_HANDSHAKE(dev_data->local_sid, SID_DISCONNECTED));
} else if (UNBOUND_DISABLED) {
ret = initialize_tx_with_sid_disabled(dev_data);
}
if (old_state == ICMSG_STATE_OFF && (UNBOUND_ENABLED || UNBOUND_DETECT)) {
/* Initialize mbox only if we are doing first-time open (not re-open
* after unbound)
*/
ret = mbox_init(conf, dev_data);
if (ret) {
goto cleanup_and_exit;
}
}
/* We need to send a notification to remote, it may not be delivered
* since it may be uninitialized yet, but when it finishes the initialization
* we get a notification from it. We need to send this notification in callback
* again to make sure that it arrived.
*/
ret = mbox_send_dt(&conf->mbox_tx, NULL);
if (ret < 0) {
__ASSERT_NO_MSG(false);
return ret;
__ASSERT(false, "Cannot send mbox notification");
goto cleanup_and_exit;
}
if (ret < (int)sizeof(magic)) {
__ASSERT_NO_MSG(ret == sizeof(magic));
return ret;
}
return ret;
ret = mbox_init(conf, dev_data);
if (ret) {
return ret;
}
#ifdef CONFIG_MULTITHREADING
ret = k_work_schedule_for_queue(workq, &dev_data->notify_work, K_NO_WAIT);
if (ret < 0) {
return ret;
}
#else
notify_process(dev_data);
#endif
return 0;
cleanup_and_exit:
atomic_set(&dev_data->state, ICMSG_STATE_OFF);
return ret;
}
int icmsg_close(const struct icmsg_config_t *conf,
struct icmsg_data_t *dev_data)
{
int ret;
int ret = 0;
enum icmsg_state old_state;
ret = mbox_deinit(conf, dev_data);
if (ret) {
return ret;
if (conf->unbound_mode != ICMSG_UNBOUND_MODE_DISABLE &&
(UNBOUND_ENABLED || UNBOUND_DETECT)) {
pbuf_handshake_write(dev_data->rx_pb,
MAKE_RX_HANDSHAKE(SID_DISCONNECTED, SID_DISCONNECTED));
}
atomic_set(&dev_data->state, ICMSG_STATE_OFF);
(void)mbox_send_dt(&conf->mbox_tx, NULL);
return 0;
old_state = atomic_set(&dev_data->state, ICMSG_STATE_OFF);
if (old_state != ICMSG_STATE_OFF) {
ret = mbox_deinit(conf, dev_data);
}
return ret;
}
int icmsg_send(const struct icmsg_config_t *conf,
@ -332,13 +496,15 @@ int icmsg_send(const struct icmsg_config_t *conf,
{
int ret;
int write_ret;
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
int release_ret;
#endif
int sent_bytes;
uint32_t state = atomic_get(&dev_data->state);
if (!is_endpoint_ready(dev_data)) {
return -EBUSY;
if (!is_endpoint_ready(state)) {
/* If instance was disconnected on the remote side, some threads may still
* don't know it yet and still may try to send messages.
*/
return (state == ICMSG_STATE_DISCONNECTED) ? len : -EBUSY;
}
/* Empty message is not allowed */
@ -346,19 +512,15 @@ int icmsg_send(const struct icmsg_config_t *conf,
return -ENODATA;
}
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
ret = reserve_tx_buffer_if_unused(dev_data);
if (ret < 0) {
return -ENOBUFS;
}
#endif
write_ret = pbuf_write(dev_data->tx_pb, msg, len);
#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
release_ret = release_tx_buffer(dev_data);
__ASSERT_NO_MSG(!release_ret);
#endif
if (write_ret < 0) {
return write_ret;

View File

@ -38,6 +38,7 @@ static int validate_cfg(const struct pbuf_cfg *cfg)
/* Validate pointer alignment. */
if (!IS_PTR_ALIGNED_BYTES(cfg->rd_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) ||
!IS_PTR_ALIGNED_BYTES(cfg->wr_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) ||
!IS_PTR_ALIGNED_BYTES(cfg->handshake_loc, _PBUF_IDX_SIZE) ||
!IS_PTR_ALIGNED_BYTES(cfg->data_loc, _PBUF_IDX_SIZE)) {
return -EINVAL;
}
@ -49,6 +50,8 @@ static int validate_cfg(const struct pbuf_cfg *cfg)
/* Validate pointer values. */
if (!(cfg->rd_idx_loc < cfg->wr_idx_loc) ||
(cfg->handshake_loc && !(cfg->rd_idx_loc < cfg->handshake_loc)) ||
!(cfg->handshake_loc < cfg->wr_idx_loc) ||
!((uint8_t *)cfg->wr_idx_loc < cfg->data_loc) ||
!(((uint8_t *)cfg->rd_idx_loc + MAX(_PBUF_IDX_SIZE, cfg->dcache_alignment)) ==
(uint8_t *)cfg->wr_idx_loc)) {
@ -176,6 +179,44 @@ int pbuf_write(struct pbuf *pb, const char *data, uint16_t len)
return len;
}
/* Peek at the first packet in the buffer without consuming it.
 *
 * Valid only before any pbuf_read() has been done on this instance
 * (rd_idx must still be 0). Used to inspect the initial message (e.g. the
 * magic/handshake data) written by the remote side.
 *
 * Cache maintenance ordering matters: each shared location is invalidated
 * and a full barrier issued *before* it is loaded, since the remote CPU is
 * the writer.
 *
 * @param pb  Packet buffer to inspect.
 * @param buf Output: pointer to the packet payload inside the shared data
 *            area (not copied; memory stays owned by the pbuf).
 * @param len Output: payload length in bytes.
 *
 * @retval 0       Success; *buf and *len are valid.
 * @retval -EINVAL pb is NULL, buffer was already read from, or the shared
 *                 indices/length look uninitialized or corrupted.
 */
int pbuf_get_initial_buf(struct pbuf *pb, volatile char **buf, uint16_t *len)
{
	uint32_t wr_idx;
	uint16_t plen;
	if (pb == NULL || pb->data.rd_idx != 0) {
		/* Incorrect call: only allowed before the first read. */
		return -EINVAL;
	}
	/* Fetch the remote writer's index from shared memory. */
	sys_cache_data_invd_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc)));
	__sync_synchronize();
	wr_idx = *(pb->cfg->wr_idx_loc);
	if (wr_idx >= pb->cfg->len || wr_idx > 0xFFFF || wr_idx == 0) {
		/* Wrong index - probably pbuf was not initialized or the message
		 * was not sent yet.
		 */
		return -EINVAL;
	}
	/* Packet header: big-endian 16-bit payload length at the start of the
	 * data area.
	 */
	sys_cache_data_invd_range((void *)(pb->cfg->data_loc), PBUF_PACKET_LEN_SZ);
	__sync_synchronize();
	plen = sys_get_be16(&pb->cfg->data_loc[0]);
	/* NOTE(review): the 4 presumably covers the length header plus write
	 * padding/alignment - confirm against pbuf_write()'s layout.
	 */
	if (plen + 4 > wr_idx) {
		/* Wrong length - probably pbuf was not initialized or the message
		 * was not sent yet.
		 */
		return -EINVAL;
	}
	*buf = &pb->cfg->data_loc[PBUF_PACKET_LEN_SZ];
	*len = plen;
	/* Make the payload itself coherent before the caller dereferences it. */
	sys_cache_data_invd_range((void *)*buf, plen);
	__sync_synchronize();
	return 0;
}
int pbuf_read(struct pbuf *pb, char *buf, uint16_t len)
{
if (pb == NULL) {
@ -253,3 +294,23 @@ int pbuf_read(struct pbuf *pb, char *buf, uint16_t len)
return len;
}
/* Read the 32-bit handshake word shared with the remote CPU.
 *
 * The remote side is the writer of this word, so its cache line is
 * invalidated and a full barrier issued before the load to guarantee a
 * fresh value.
 *
 * @param pb Packet buffer whose configuration provides the handshake
 *           location (must be non-NULL and configured with handshake_loc).
 *
 * @return Current handshake value as written by the remote side.
 */
uint32_t pbuf_handshake_read(struct pbuf *pb)
{
	volatile uint32_t *handshake = pb->cfg->handshake_loc;

	__ASSERT_NO_MSG(handshake);

	sys_cache_data_invd_range((void *)handshake, sizeof(*handshake));
	__sync_synchronize();

	return *handshake;
}
/* Publish a new 32-bit handshake value for the remote CPU to observe.
 *
 * Mirror image of pbuf_handshake_read(): the store happens first, then a
 * full barrier, then the cache line is flushed so the write becomes
 * visible in shared memory.
 *
 * @param pb    Packet buffer whose configuration provides the handshake
 *              location (must be non-NULL and configured with handshake_loc).
 * @param value Handshake word to store.
 */
void pbuf_handshake_write(struct pbuf *pb, uint32_t value)
{
	volatile uint32_t *handshake = pb->cfg->handshake_loc;

	__ASSERT_NO_MSG(handshake);

	*handshake = value;
	__sync_synchronize();

	sys_cache_data_flush_range((void *)handshake, sizeof(*handshake));
}