usb: device_next: introduce UDC_BUF_POOL_*_DEFINE macros

Introduce UDC_BUF_POOL_*_DEFINE macros based on the NET_BUF_POOL_*_DEFINE
macros, but use our own versions of the alloc and unref callbacks to get
buffers with specific alignment and granularity. Do not use a ref
callback, because it would break the alignment.
Also introduce helper macros for defining and checking UDC
driver-compliant static buffers.

Signed-off-by: Johann Fischer <johann.fischer@nordicsemi.no>
Johann Fischer 2024-06-07 19:35:45 +02:00 committed by Fabio Baltieri
parent bfc0cdc905
commit 67caae3aca
16 changed files with 222 additions and 23 deletions
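For orientation, here is a minimal usage sketch of the new pool macro (not part of this change): the pool name, buffer count, size and endpoint address are made up, and it assumes the existing udc_get_buf_info() accessor from udc.h and the ep field of struct udc_buf_info for the per-buffer user data.

#include <zephyr/net/buf.h>
#include <zephyr/drivers/usb/udc.h>
#include <zephyr/drivers/usb/udc_buf.h>

/* Hypothetical pool: two 512-byte buffers with UDC-compliant alignment and
 * granularity, reserving struct udc_buf_info as user data per buffer.
 */
UDC_BUF_POOL_DEFINE(example_ep_pool, 2, 512,
                    sizeof(struct udc_buf_info), NULL);

static struct net_buf *example_buf_alloc(void)
{
        struct udc_buf_info *bi;
        struct net_buf *buf;

        buf = net_buf_alloc(&example_ep_pool, K_NO_WAIT);
        if (buf == NULL) {
                return NULL;
        }

        /* Assumed accessor for the udc_buf_info user data area. */
        bi = udc_get_buf_info(buf);
        bi->ep = 0x81; /* example IN endpoint address */

        return buf;
}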

View File

@@ -16,3 +16,5 @@ API reference
*************
.. doxygengroup:: udc_api
.. doxygengroup:: udc_buf

View File

@@ -25,6 +25,13 @@ config UDC_BUF_POOL_SIZE
help
Total amount of memory available for UDC requests.
config UDC_BUF_FORCE_NOCACHE
bool "Place the buffer pools in the nocache memory region"
depends on NOCACHE_MEMORY && DCACHE
help
Place the buffer pools in the nocache memory region if the driver
cannot handle buffers in cached memory.
config UDC_WORKQUEUE
bool "Use a dedicate work queue for UDC drivers"
help

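If a controller driver cannot deal with buffers in cached memory, the option added above can be enabled together with its dependencies, for example from an application configuration (hypothetical prj.conf fragment, assuming the platform supports a nocache memory region):

# Hypothetical prj.conf fragment
CONFIG_NOCACHE_MEMORY=y
CONFIG_UDC_BUF_FORCE_NOCACHE=y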
View File

@@ -10,6 +10,7 @@
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/usb/usb_ch9.h>
#include <zephyr/drivers/usb/udc_buf.h>
#include "udc_common.h"
#include <zephyr/logging/log.h>
@@ -20,9 +21,39 @@
#endif
LOG_MODULE_REGISTER(udc, CONFIG_UDC_DRIVER_LOG_LEVEL);
static inline uint8_t *udc_pool_data_alloc(struct net_buf *const buf,
size_t *const size, k_timeout_t timeout)
{
struct net_buf_pool *const buf_pool = net_buf_pool_get(buf->pool_id);
struct k_heap *const pool = buf_pool->alloc->alloc_data;
void *b;
*size = ROUND_UP(*size, UDC_BUF_GRANULARITY);
b = k_heap_aligned_alloc(pool, UDC_BUF_ALIGN, *size, timeout);
if (b == NULL) {
*size = 0;
return NULL;
}
return b;
}
static inline void udc_pool_data_unref(struct net_buf *buf, uint8_t *const data)
{
struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
struct k_heap *pool = buf_pool->alloc->alloc_data;
k_heap_free(pool, data);
}
const struct net_buf_data_cb net_buf_dma_cb = {
.alloc = udc_pool_data_alloc,
.unref = udc_pool_data_unref,
};
static inline void udc_buf_destroy(struct net_buf *buf);
NET_BUF_POOL_VAR_DEFINE(udc_ep_pool,
UDC_BUF_POOL_VAR_DEFINE(udc_ep_pool,
CONFIG_UDC_BUF_COUNT, CONFIG_UDC_BUF_POOL_SIZE,
sizeof(struct udc_buf_info), udc_buf_destroy);

View File

@@ -14,7 +14,7 @@
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/net/buf.h>
#include <zephyr/drivers/usb/udc_buf.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/usb/usb_ch9.h>

View File

@@ -0,0 +1,154 @@
/*
* Copyright (c) 2024 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Buffers for USB device support
*/
#ifndef ZEPHYR_INCLUDE_UDC_BUF_H
#define ZEPHYR_INCLUDE_UDC_BUF_H
#include <zephyr/kernel.h>
#include <zephyr/net/buf.h>
#if defined(CONFIG_DCACHE) && !defined(CONFIG_UDC_BUF_FORCE_NOCACHE)
/*
* Here we try to get DMA-safe buffers, but we lack a consistent source of
* information about data cache properties, such as cache line size, and a
* consistent source of information about what part of memory is DMA'able.
* For now, we simply assume that all available memory is DMA'able and use
* Kconfig option DCACHE_LINE_SIZE for alignment and granularity.
*/
#define Z_UDC_BUF_ALIGN CONFIG_DCACHE_LINE_SIZE
#define Z_UDC_BUF_GRANULARITY CONFIG_DCACHE_LINE_SIZE
#else
/*
* Default alignment and granularity to the pointer size if the platform does
* not have a data cache or the buffers are placed in the nocache memory region.
*/
#define Z_UDC_BUF_ALIGN sizeof(void *)
#define Z_UDC_BUF_GRANULARITY sizeof(void *)
#endif
/**
* @brief Buffer macros and definitions used in USB device support
* @defgroup udc_buf Buffer macros and definitions used in USB device support
* @ingroup usb
* @{
*/
/** Buffer alignment required by the UDC driver */
#define UDC_BUF_ALIGN Z_UDC_BUF_ALIGN
/** Buffer granularity required by the UDC driver */
#define UDC_BUF_GRANULARITY Z_UDC_BUF_GRANULARITY
/**
* @brief Define a UDC driver-compliant static buffer
*
* This macro should be used if the application defines its own buffers to be
* used for USB transfers.
*
* @param name Buffer name
* @param size Buffer size
*/
#define UDC_STATIC_BUF_DEFINE(name, size) \
static uint8_t __aligned(UDC_BUF_ALIGN) name[ROUND_UP(size, UDC_BUF_GRANULARITY)];
/**
* @brief Verify that the buffer is aligned as required by the UDC driver
*
* @see IS_ALIGNED
*
* @param buf Buffer pointer
*/
#define IS_UDC_ALIGNED(buf) IS_ALIGNED(buf, UDC_BUF_ALIGN)
/**
* @cond INTERNAL_HIDDEN
*/
#define UDC_HEAP_DEFINE(name, bytes, in_section) \
uint8_t in_section __aligned(UDC_BUF_ALIGN) \
kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
STRUCT_SECTION_ITERABLE(k_heap, name) = { \
.heap = { \
.init_mem = kheap_##name, \
.init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
}, \
}
#define UDC_K_HEAP_DEFINE(name, size) \
COND_CODE_1(CONFIG_UDC_BUF_FORCE_NOCACHE, \
(UDC_HEAP_DEFINE(name, size, __nocache)), \
(UDC_HEAP_DEFINE(name, size, __noinit)))
extern const struct net_buf_data_cb net_buf_dma_cb;
/** @endcond */
/**
* @brief Define a new pool for UDC buffers with variable-size payloads
*
* This macro is similar to `NET_BUF_POOL_VAR_DEFINE`, but provides buffers
* with alignment and granularity suitable for use by the UDC driver.
*
* @see NET_BUF_POOL_VAR_DEFINE
*
* @param pname Name of the pool variable.
* @param count Number of buffers in the pool.
* @param size Maximum data payload per buffer.
* @param ud_size User data space to reserve per buffer.
* @param fdestroy Optional destroy callback when buffer is freed.
*/
#define UDC_BUF_POOL_VAR_DEFINE(pname, count, size, ud_size, fdestroy) \
_NET_BUF_ARRAY_DEFINE(pname, count, ud_size); \
UDC_K_HEAP_DEFINE(net_buf_mem_pool_##pname, size); \
static const struct net_buf_data_alloc net_buf_data_alloc_##pname = { \
.cb = &net_buf_dma_cb, \
.alloc_data = &net_buf_mem_pool_##pname, \
.max_alloc_size = 0, \
}; \
static STRUCT_SECTION_ITERABLE(net_buf_pool, pname) = \
NET_BUF_POOL_INITIALIZER(pname, &net_buf_data_alloc_##pname, \
_net_buf_##pname, count, ud_size, \
fdestroy)
/**
* @brief Define a new pool for UDC buffers based on fixed-size data
*
* This macro is similar to `NET_BUF_POOL_DEFINE`, but provides buffers
* with alignment and granularity suitable for use by the UDC driver.
*
* @see NET_BUF_POOL_DEFINE
* @param pname Name of the pool variable.
* @param count Number of buffers in the pool.
* @param size Maximum data payload per buffer.
* @param ud_size User data space to reserve per buffer.
* @param fdestroy Optional destroy callback when buffer is freed.
*/
#define UDC_BUF_POOL_DEFINE(pname, count, size, ud_size, fdestroy) \
_NET_BUF_ARRAY_DEFINE(pname, count, ud_size); \
static uint8_t __nocache __aligned(UDC_BUF_ALIGN) \
net_buf_data_##pname[count][size]; \
static const struct net_buf_pool_fixed net_buf_fixed_##pname = { \
.data_pool = (uint8_t *)net_buf_data_##pname, \
}; \
static const struct net_buf_data_alloc net_buf_fixed_alloc_##pname = { \
.cb = &net_buf_fixed_cb, \
.alloc_data = (void *)&net_buf_fixed_##pname, \
.max_alloc_size = size, \
}; \
static STRUCT_SECTION_ITERABLE(net_buf_pool, pname) = \
NET_BUF_POOL_INITIALIZER(pname, &net_buf_fixed_alloc_##pname, \
_net_buf_##pname, count, ud_size, \
fdestroy)
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_UDC_BUF_H */

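A short sketch of how an application might use the static-buffer helpers declared above (the buffer name and size are made up for illustration); the HID keyboard and mouse sample changes further down use the same pattern.

#include <zephyr/sys/__assert.h>
#include <zephyr/drivers/usb/udc_buf.h>

/* Hypothetical transfer buffer with UDC-compliant alignment and a size
 * rounded up to the required granularity.
 */
#define EXAMPLE_REPORT_SIZE 8
UDC_STATIC_BUF_DEFINE(example_report, EXAMPLE_REPORT_SIZE);

static void example_verify_alignment(const uint8_t *data)
{
        /* IS_UDC_ALIGNED() can also check buffers that were not defined with
         * UDC_STATIC_BUF_DEFINE(), e.g. memory handed in by a caller.
         */
        __ASSERT(IS_UDC_ALIGNED(data), "Application provided unaligned buffer");
}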
View File

@@ -18,7 +18,7 @@
#include <zephyr/usb/bos.h>
#include <zephyr/usb/usb_ch9.h>
#include <zephyr/usb/usbd_msg.h>
#include <zephyr/net/buf.h>
#include <zephyr/drivers/usb/udc_buf.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/slist.h>
#include <zephyr/logging/log.h>

View File

@@ -51,7 +51,7 @@ struct kb_event {
K_MSGQ_DEFINE(kb_msgq, sizeof(struct kb_event), 2, 1);
static uint8_t __aligned(sizeof(void *)) report[KB_REPORT_COUNT];
UDC_STATIC_BUF_DEFINE(report, KB_REPORT_COUNT);
static uint32_t kb_duration;
static bool kb_ready;
@@ -273,7 +273,7 @@ int main(void)
continue;
}
ret = hid_device_submit_report(hid_dev, sizeof(report), report);
ret = hid_device_submit_report(hid_dev, KB_REPORT_COUNT, report);
if (ret) {
LOG_ERR("HID submit report error, %d", ret);
}

View File

@@ -174,11 +174,11 @@ int main(void)
}
while (true) {
uint8_t __aligned(sizeof(void *)) report[MOUSE_REPORT_COUNT];
UDC_STATIC_BUF_DEFINE(report, MOUSE_REPORT_COUNT);
k_msgq_get(&mouse_msgq, &report, K_FOREVER);
ret = hid_int_ep_write(hid_dev, report, sizeof(report), NULL);
ret = hid_int_ep_write(hid_dev, report, MOUSE_REPORT_COUNT, NULL);
if (ret) {
LOG_ERR("HID write error, %d", ret);
} else {

View File

@@ -36,7 +36,8 @@ LOG_MODULE_REGISTER(uac2_sample, LOG_LEVEL_INF);
* when USB host decides to perform rapid terminal enable/disable cycles.
*/
#define I2S_BUFFERS_COUNT 7
K_MEM_SLAB_DEFINE_STATIC(i2s_tx_slab, MAX_BLOCK_SIZE, I2S_BUFFERS_COUNT, 4);
K_MEM_SLAB_DEFINE_STATIC(i2s_tx_slab, ROUND_UP(MAX_BLOCK_SIZE, UDC_BUF_GRANULARITY),
I2S_BUFFERS_COUNT, UDC_BUF_ALIGN);
struct usb_i2s_ctx {
const struct device *i2s_dev;

View File

@@ -73,9 +73,9 @@ static K_FIFO_DEFINE(bt_hci_tx_queue);
* REVISE: global (bulk, interrupt, iso) specific pools would be more
* RAM usage efficient.
*/
NET_BUF_POOL_FIXED_DEFINE(bt_hci_ep_pool,
3, 512,
sizeof(struct udc_buf_info), NULL);
UDC_BUF_POOL_DEFINE(bt_hci_ep_pool,
3, 512,
sizeof(struct udc_buf_info), NULL);
/* HCI RX/TX threads */
static K_KERNEL_STACK_DEFINE(rx_thread_stack, CONFIG_BT_HCI_TX_STACK_SIZE);
static struct k_thread rx_thread_data;

View File

@@ -32,9 +32,9 @@
#endif
LOG_MODULE_REGISTER(usbd_cdc_acm, CONFIG_USBD_CDC_ACM_LOG_LEVEL);
NET_BUF_POOL_FIXED_DEFINE(cdc_acm_ep_pool,
DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) * 2,
512, sizeof(struct udc_buf_info), NULL);
UDC_BUF_POOL_DEFINE(cdc_acm_ep_pool,
DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) * 2,
512, sizeof(struct udc_buf_info), NULL);
#define CDC_ACM_DEFAULT_LINECODING {sys_cpu_to_le32(115200), 0, 0, 8}
#define CDC_ACM_DEFAULT_INT_EP_MPS 16

View File

@@ -36,10 +36,10 @@ enum {
* Transfers through two endpoints proceed in a synchronous manner,
* with maximum block of NET_ETH_MAX_FRAME_SIZE.
*/
NET_BUF_POOL_FIXED_DEFINE(cdc_ecm_ep_pool,
DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) * 2,
NET_ETH_MAX_FRAME_SIZE,
sizeof(struct udc_buf_info), NULL);
UDC_BUF_POOL_DEFINE(cdc_ecm_ep_pool,
DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) * 2,
NET_ETH_MAX_FRAME_SIZE,
sizeof(struct udc_buf_info), NULL);
struct cdc_ecm_notification {
union {

View File

@@ -496,6 +496,8 @@ static struct net_buf *hid_buf_alloc_ext(struct hid_device_data *const ddata,
struct net_buf *buf = NULL;
struct udc_buf_info *bi;
__ASSERT(IS_UDC_ALIGNED(data), "Application provided unaligned buffer");
buf = net_buf_alloc_with_data(ddata->pool_in, data, size, K_NO_WAIT);
if (!buf) {
return NULL;

View File

@@ -1156,7 +1156,7 @@
#define HID_OUT_POOL_DEFINE(n) \
COND_CODE_1(DT_INST_NODE_HAS_PROP(n, out_report_size), \
(NET_BUF_POOL_DEFINE(hid_buf_pool_out_##n, \
(UDC_BUF_POOL_DEFINE(hid_buf_pool_out_##n, \
CONFIG_USBD_HID_OUT_BUF_COUNT, \
DT_INST_PROP(n, out_report_size), \
sizeof(struct udc_buf_info), NULL)), \

View File

@@ -63,9 +63,9 @@ struct CSW {
/* Can be 64 if device is not High-Speed capable */
#define MSC_BUF_SIZE 512
NET_BUF_POOL_FIXED_DEFINE(msc_ep_pool,
MSC_NUM_INSTANCES * 2, MSC_BUF_SIZE,
sizeof(struct udc_buf_info), NULL);
UDC_BUF_POOL_DEFINE(msc_ep_pool,
MSC_NUM_INSTANCES * 2, MSC_BUF_SIZE,
sizeof(struct udc_buf_info), NULL);
struct msc_event {
struct usbd_class_data *c_data;

View File

@@ -40,7 +40,7 @@ LOG_MODULE_REGISTER(usbd_uac2, CONFIG_USBD_UAC2_LOG_LEVEL);
* "wasted memory" here is likely to be smaller than the memory overhead for
* more complex "only as much as needed" schemes (e.g. heap).
*/
NET_BUF_POOL_DEFINE(uac2_pool, UAC2_NUM_ENDPOINTS, 6,
UDC_BUF_POOL_DEFINE(uac2_pool, UAC2_NUM_ENDPOINTS, 6,
sizeof(struct udc_buf_info), NULL);
/* 5.2.2 Control Request Layout */
@@ -205,6 +205,8 @@ uac2_buf_alloc(const uint8_t ep, void *data, uint16_t size)
struct net_buf *buf = NULL;
struct udc_buf_info *bi;
__ASSERT(IS_UDC_ALIGNED(data), "Application provided unaligned buffer");
buf = net_buf_alloc_with_data(&uac2_pool, data, size, K_NO_WAIT);
if (!buf) {
return NULL;