Add a net_buf_id() API which translates a buffer into a zero-based
index, based on its placement in the buffer pool. This can be useful
if you want to associate an external array of meta-data contexts with
the buffers of a pool.

The added value of this API is slightly limited at the moment, since
the net_buf API allows custom user-data sizes for each pool (i.e. the
user data can be used instead of a separately allocated meta-data
array). However, there's some refactoring coming soon which will
unify all net_buf structs to have the same fixed (and typically
small) amount of user data. In such cases it may be desirable to have
external user data in order not to inflate all buffers in the system
because of a single pool needing the extra memory.

Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
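
As a sketch of the intended use (the pool name, sizes and meta-data
struct below are hypothetical, not part of this patch), an external
array can be indexed by the value net_buf_id() returns:

    /* Hypothetical pool of 10 buffers with 128-byte data areas and
     * no per-buffer user data.
     */
    NET_BUF_POOL_DEFINE(my_pool, 10, 128, 0, NULL);

    /* Separately allocated meta-data, one entry per buffer */
    static struct my_meta {
            u32_t timestamp;
    } meta[10];

    static struct my_meta *buf_meta(struct net_buf *buf)
    {
            /* net_buf_id() yields 0..9 for buffers of my_pool */
            return &meta[net_buf_id(buf)];
    }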
/* buf.c - Buffer management */

/*
 * Copyright (c) 2015 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdio.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <misc/byteorder.h>

#include <net/buf.h>

#if defined(CONFIG_NET_BUF_LOG)
#define SYS_LOG_DOMAIN "net/buf"
#define SYS_LOG_LEVEL CONFIG_SYS_LOG_NET_BUF_LEVEL
#include <logging/sys_log.h>

#define NET_BUF_DBG(fmt, ...) SYS_LOG_DBG("(%p) " fmt, k_current_get(), \
				##__VA_ARGS__)
#define NET_BUF_ERR(fmt, ...) SYS_LOG_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_WARN(fmt, ...) SYS_LOG_WRN(fmt, ##__VA_ARGS__)
#define NET_BUF_INFO(fmt, ...) SYS_LOG_INF(fmt, ##__VA_ARGS__)
#define NET_BUF_ASSERT(cond) do { if (!(cond)) {			   \
			NET_BUF_ERR("assert: '" #cond "' failed"); \
		} } while (0)
#else

#define NET_BUF_DBG(fmt, ...)
#define NET_BUF_ERR(fmt, ...)
#define NET_BUF_WARN(fmt, ...)
#define NET_BUF_INFO(fmt, ...)
#define NET_BUF_ASSERT(cond)
#endif /* CONFIG_NET_BUF_LOG */

#if CONFIG_NET_BUF_WARN_ALLOC_INTERVAL > 0
#define WARN_ALLOC_INTERVAL K_SECONDS(CONFIG_NET_BUF_WARN_ALLOC_INTERVAL)
#else
#define WARN_ALLOC_INTERVAL K_FOREVER
#endif

/* Linker-defined symbol bound to the static pool structs */
extern struct net_buf_pool _net_buf_pool_list[];

struct net_buf_pool *net_buf_pool_get(int id)
{
	return &_net_buf_pool_list[id];
}

static int pool_id(struct net_buf_pool *pool)
{
	return pool - _net_buf_pool_list;
}

/* Helpers to access the storage array, since we don't have access to its
 * type at this point anymore.
 */
#define BUF_SIZE(pool) (sizeof(struct net_buf) + \
			ROUND_UP(pool->buf_size, 4) + \
			ROUND_UP(pool->user_data_size, 4))
#define UNINIT_BUF(pool, n) (struct net_buf *)(((u8_t *)(pool->__bufs)) + \
			     ((n) * BUF_SIZE(pool)))
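
/* Translate a buffer into a zero-based index within its pool, derived
 * from the buffer's offset into the pool's storage array.
 */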
int net_buf_id(struct net_buf *buf)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
	u8_t *pool_start = (u8_t *)pool->__bufs;
	u8_t *buf_ptr = (u8_t *)buf;

	return (buf_ptr - pool_start) / BUF_SIZE(pool);
}
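
/* Fetch an uninitialized buffer from the pool's storage array (slot
 * buf_count - uninit_count) and set up its pool id and data size. The
 * caller is expected to have already claimed the slot by decrementing
 * pool->uninit_count.
 */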
static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
					      u16_t uninit_count)
{
	struct net_buf *buf;

	buf = UNINIT_BUF(pool, pool->buf_count - uninit_count);

	buf->pool_id = pool_id(pool);
	buf->size = pool->buf_size;

	return buf;
}

void net_buf_reset(struct net_buf *buf)
{
	NET_BUF_ASSERT(buf->flags == 0);
	NET_BUF_ASSERT(buf->frags == NULL);

	buf->len = 0;
	buf->data = buf->__buf;
}
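
/* Allocate a buffer from the pool, preferring a previously used buffer
 * from the free LIFO and falling back to the pool's uninitialized
 * storage. With a finite timeout the call may block until a buffer is
 * freed; with K_NO_WAIT it returns NULL immediately when none is
 * available.
 */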
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_debug(struct net_buf_pool *pool, s32_t timeout,
				    const char *func, int line)
#else
struct net_buf *net_buf_alloc(struct net_buf_pool *pool, s32_t timeout)
#endif
{
	struct net_buf *buf;
	unsigned int key;

	NET_BUF_ASSERT(pool);

	NET_BUF_DBG("%s():%d: pool %p timeout %d", func, line, pool, timeout);

	/* We need to lock interrupts temporarily to prevent race conditions
	 * when accessing pool->uninit_count.
	 */
	key = irq_lock();

	/* If there are uninitialized buffers we're guaranteed to succeed
	 * with the allocation one way or another.
	 */
	if (pool->uninit_count) {
		u16_t uninit_count;

		/* If this is not the first access to the pool, we can
		 * be opportunistic and try to fetch a previously used
		 * buffer from the LIFO with K_NO_WAIT.
		 */
		if (pool->uninit_count < pool->buf_count) {
			buf = k_lifo_get(&pool->free, K_NO_WAIT);
			if (buf) {
				irq_unlock(key);
				goto success;
			}
		}

		uninit_count = pool->uninit_count--;
		irq_unlock(key);

		buf = pool_get_uninit(pool, uninit_count);
		goto success;
	}

	irq_unlock(key);

#if defined(CONFIG_NET_BUF_LOG) && SYS_LOG_LEVEL >= SYS_LOG_LEVEL_WARNING
	if (timeout == K_FOREVER) {
		u32_t ref = k_uptime_get_32();
		buf = k_lifo_get(&pool->free, K_NO_WAIT);
		while (!buf) {
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s low on buffers.",
				     func, line, pool->name);
#else
			NET_BUF_WARN("%s():%d: Pool %p low on buffers.",
				     func, line, pool);
#endif
			buf = k_lifo_get(&pool->free, WARN_ALLOC_INTERVAL);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s blocked for %u secs",
				     func, line, pool->name,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#else
			NET_BUF_WARN("%s():%d: Pool %p blocked for %u secs",
				     func, line, pool,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#endif
		}
	} else {
		buf = k_lifo_get(&pool->free, timeout);
	}
#else
	buf = k_lifo_get(&pool->free, timeout);
#endif
	if (!buf) {
		NET_BUF_ERR("%s():%d: Failed to get free buffer", func, line);
		return NULL;
	}

success:
	NET_BUF_DBG("allocated buf %p", buf);

	buf->ref = 1;
	buf->flags = 0;
	buf->frags = NULL;
	net_buf_reset(buf);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
	pool->avail_count--;
	NET_BUF_ASSERT(pool->avail_count >= 0);
#endif

	return buf;
}
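
/* Get a buffer (and any chained fragments) from a FIFO. Fragments are
 * linked back onto buf->frags based on the NET_BUF_FRAGS flag that
 * net_buf_put() sets when queuing a chain.
 */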
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_get_debug(struct k_fifo *fifo, s32_t timeout,
				  const char *func, int line)
#else
struct net_buf *net_buf_get(struct k_fifo *fifo, s32_t timeout)
#endif
{
	struct net_buf *buf, *frag;

	NET_BUF_DBG("%s():%d: fifo %p timeout %d", func, line, fifo, timeout);

	buf = k_fifo_get(fifo, timeout);
	if (!buf) {
		return NULL;
	}

	NET_BUF_DBG("%s():%d: buf %p fifo %p", func, line, buf, fifo);

	/* Get any fragments belonging to this buffer */
	for (frag = buf; (frag->flags & NET_BUF_FRAGS); frag = frag->frags) {
		frag->frags = k_fifo_get(fifo, K_NO_WAIT);
		NET_BUF_ASSERT(frag->frags);

		/* The fragments flag is only for FIFO-internal usage */
		frag->flags &= ~NET_BUF_FRAGS;
	}

	/* Mark the end of the fragment list */
	frag->frags = NULL;

	return buf;
}

void net_buf_reserve(struct net_buf *buf, size_t reserve)
{
	NET_BUF_ASSERT(buf);
	NET_BUF_ASSERT(buf->len == 0);
	NET_BUF_DBG("buf %p reserve %zu", buf, reserve);

	buf->data = buf->__buf + reserve;
}
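
/* Queue a buffer (together with its fragment chain, if any) into a
 * FIFO. Each non-terminal fragment is tagged with NET_BUF_FRAGS so
 * that net_buf_get() can reassemble the chain on the receiving side.
 */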
void net_buf_put(struct k_fifo *fifo, struct net_buf *buf)
{
	struct net_buf *tail;

	NET_BUF_ASSERT(fifo);
	NET_BUF_ASSERT(buf);

	for (tail = buf; tail->frags; tail = tail->frags) {
		tail->flags |= NET_BUF_FRAGS;
	}

	k_fifo_put_list(fifo, buf, tail);
}
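
/* Drop a reference to a buffer; when the count hits zero the buffer is
 * returned to its pool (or handed to the pool's destroy callback) and
 * the walk continues with its fragment chain.
 */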
#if defined(CONFIG_NET_BUF_LOG)
void net_buf_unref_debug(struct net_buf *buf, const char *func, int line)
#else
void net_buf_unref(struct net_buf *buf)
#endif
{
	NET_BUF_ASSERT(buf);

	while (buf) {
		struct net_buf *frags = buf->frags;
		struct net_buf_pool *pool;

#if defined(CONFIG_NET_BUF_LOG)
		if (!buf->ref) {
			NET_BUF_ERR("%s():%d: buf %p double free", func, line,
				    buf);
			return;
		}
#endif
		NET_BUF_DBG("buf %p ref %u pool_id %u frags %p", buf, buf->ref,
			    buf->pool_id, buf->frags);

		if (--buf->ref > 0) {
			return;
		}

		buf->frags = NULL;

		pool = net_buf_pool_get(buf->pool_id);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
		pool->avail_count++;
		NET_BUF_ASSERT(pool->avail_count <= pool->buf_count);
#endif

		if (pool->destroy) {
			pool->destroy(buf);
		} else {
			net_buf_destroy(buf);
		}

		buf = frags;
	}
}

struct net_buf *net_buf_ref(struct net_buf *buf)
{
	NET_BUF_ASSERT(buf);

	NET_BUF_DBG("buf %p (old) ref %u pool_id %u",
		    buf, buf->ref, buf->pool_id);
	buf->ref++;
	return buf;
}
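
/* Duplicate a buffer by allocating a new one from the same pool and
 * copying both the headroom reservation and the data. May block for up
 * to 'timeout' waiting for a free buffer.
 */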
struct net_buf *net_buf_clone(struct net_buf *buf, s32_t timeout)
{
	struct net_buf_pool *pool;
	struct net_buf *clone;

	NET_BUF_ASSERT(buf);

	pool = net_buf_pool_get(buf->pool_id);

	clone = net_buf_alloc(pool, timeout);
	if (!clone) {
		return NULL;
	}

	net_buf_reserve(clone, net_buf_headroom(buf));

	/* TODO: Add reference to the original buffer instead of copying it. */
	memcpy(net_buf_add(clone, buf->len), buf->data, buf->len);

	return clone;
}

struct net_buf *net_buf_frag_last(struct net_buf *buf)
{
	NET_BUF_ASSERT(buf);

	while (buf->frags) {
		buf = buf->frags;
	}

	return buf;
}
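
/* Insert 'frag' (and any fragments chained to it) right after 'parent',
 * preserving the rest of parent's existing fragment list. The parent
 * takes over the caller's reference to the fragment.
 */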
void net_buf_frag_insert(struct net_buf *parent, struct net_buf *frag)
{
	NET_BUF_ASSERT(parent);
	NET_BUF_ASSERT(frag);

	if (parent->frags) {
		net_buf_frag_last(frag)->frags = parent->frags;
	}
	/* Take ownership of the fragment reference */
	parent->frags = frag;
}
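
/* Append 'frag' to the end of the fragment list headed by 'head'. If
 * 'head' is NULL, the fragment itself (with an added reference)
 * becomes the head of the list.
 */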
struct net_buf *net_buf_frag_add(struct net_buf *head, struct net_buf *frag)
{
	NET_BUF_ASSERT(frag);

	if (!head) {
		return net_buf_ref(frag);
	}

	net_buf_frag_insert(net_buf_frag_last(head), frag);

	return head;
}
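
/* Remove 'frag', which must be the fragment directly chained to
 * 'parent' (if a parent is given), drop the list's reference to it and
 * return the next fragment in the chain.
 */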
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
				       struct net_buf *frag,
				       const char *func, int line)
#else
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag)
#endif
{
	struct net_buf *next_frag;

	NET_BUF_ASSERT(frag);

	if (parent) {
		NET_BUF_ASSERT(parent->frags);
		NET_BUF_ASSERT(parent->frags == frag);
		parent->frags = frag->frags;
	}

	next_frag = frag->frags;

	frag->frags = NULL;

#if defined(CONFIG_NET_BUF_LOG)
	net_buf_unref_debug(frag, func, line);
#else
	net_buf_unref(frag);
#endif

	return next_frag;
}

#if defined(CONFIG_NET_BUF_SIMPLE_LOG)
#define NET_BUF_SIMPLE_DBG(fmt, ...) NET_BUF_DBG(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_ERR(fmt, ...) NET_BUF_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_WARN(fmt, ...) NET_BUF_WARN(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_INFO(fmt, ...) NET_BUF_INFO(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_ASSERT(cond) NET_BUF_ASSERT(cond)
#else
#define NET_BUF_SIMPLE_DBG(fmt, ...)
#define NET_BUF_SIMPLE_ERR(fmt, ...)
#define NET_BUF_SIMPLE_WARN(fmt, ...)
#define NET_BUF_SIMPLE_INFO(fmt, ...)
#define NET_BUF_SIMPLE_ASSERT(cond)
#endif /* CONFIG_NET_BUF_SIMPLE_LOG */
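
/* Reserve 'len' bytes at the tail of the buffer, extending buf->len,
 * and return a pointer to the start of the reserved area. The _mem,
 * _u8, _le16/_be16 and _le32/_be32 variants below build on this to
 * append specific values in the requested byte order.
 */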
void *net_buf_simple_add(struct net_buf_simple *buf, size_t len)
{
	u8_t *tail = net_buf_simple_tail(buf);

	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	NET_BUF_SIMPLE_ASSERT(net_buf_simple_tailroom(buf) >= len);

	buf->len += len;
	return tail;
}

void *net_buf_simple_add_mem(struct net_buf_simple *buf, const void *mem,
			     size_t len)
{
	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	return memcpy(net_buf_simple_add(buf, len), mem, len);
}

u8_t *net_buf_simple_add_u8(struct net_buf_simple *buf, u8_t val)
{
	u8_t *u8;

	NET_BUF_SIMPLE_DBG("buf %p val 0x%02x", buf, val);

	u8 = net_buf_simple_add(buf, 1);
	*u8 = val;

	return u8;
}

void net_buf_simple_add_le16(struct net_buf_simple *buf, u16_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	val = sys_cpu_to_le16(val);
	memcpy(net_buf_simple_add(buf, sizeof(val)), &val, sizeof(val));
}

void net_buf_simple_add_be16(struct net_buf_simple *buf, u16_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	val = sys_cpu_to_be16(val);
	memcpy(net_buf_simple_add(buf, sizeof(val)), &val, sizeof(val));
}

void net_buf_simple_add_le32(struct net_buf_simple *buf, u32_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	val = sys_cpu_to_le32(val);
	memcpy(net_buf_simple_add(buf, sizeof(val)), &val, sizeof(val));
}

void net_buf_simple_add_be32(struct net_buf_simple *buf, u32_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	val = sys_cpu_to_be32(val);
	memcpy(net_buf_simple_add(buf, sizeof(val)), &val, sizeof(val));
}
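
/* Prepend data by pulling buf->data back into the headroom that was
 * reserved earlier; the assertion verifies that enough headroom
 * exists.
 */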
void *net_buf_simple_push(struct net_buf_simple *buf, size_t len)
{
	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	NET_BUF_SIMPLE_ASSERT(net_buf_simple_headroom(buf) >= len);

	buf->data -= len;
	buf->len += len;
	return buf->data;
}

void net_buf_simple_push_le16(struct net_buf_simple *buf, u16_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	val = sys_cpu_to_le16(val);
	memcpy(net_buf_simple_push(buf, sizeof(val)), &val, sizeof(val));
}

void net_buf_simple_push_be16(struct net_buf_simple *buf, u16_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	val = sys_cpu_to_be16(val);
	memcpy(net_buf_simple_push(buf, sizeof(val)), &val, sizeof(val));
}

void net_buf_simple_push_u8(struct net_buf_simple *buf, u8_t val)
{
	u8_t *data = net_buf_simple_push(buf, 1);

	*data = val;
}
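
/* Consume 'len' bytes from the start of the buffer and return the new
 * start of data. The _u8/_le16/_be16/_le32/_be32 variants below read a
 * value in the given byte order before consuming it.
 */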
void *net_buf_simple_pull(struct net_buf_simple *buf, size_t len)
{
	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	NET_BUF_SIMPLE_ASSERT(buf->len >= len);

	buf->len -= len;
	return buf->data += len;
}

u8_t net_buf_simple_pull_u8(struct net_buf_simple *buf)
{
	u8_t val;

	val = buf->data[0];
	net_buf_simple_pull(buf, 1);

	return val;
}

u16_t net_buf_simple_pull_le16(struct net_buf_simple *buf)
{
	u16_t val;

	val = UNALIGNED_GET((u16_t *)buf->data);
	net_buf_simple_pull(buf, sizeof(val));

	return sys_le16_to_cpu(val);
}

u16_t net_buf_simple_pull_be16(struct net_buf_simple *buf)
{
	u16_t val;

	val = UNALIGNED_GET((u16_t *)buf->data);
	net_buf_simple_pull(buf, sizeof(val));

	return sys_be16_to_cpu(val);
}

u32_t net_buf_simple_pull_le32(struct net_buf_simple *buf)
{
	u32_t val;

	val = UNALIGNED_GET((u32_t *)buf->data);
	net_buf_simple_pull(buf, sizeof(val));

	return sys_le32_to_cpu(val);
}

u32_t net_buf_simple_pull_be32(struct net_buf_simple *buf)
{
	u32_t val;

	val = UNALIGNED_GET((u32_t *)buf->data);
	net_buf_simple_pull(buf, sizeof(val));

	return sys_be32_to_cpu(val);
}

size_t net_buf_simple_headroom(struct net_buf_simple *buf)
{
	return buf->data - buf->__buf;
}

size_t net_buf_simple_tailroom(struct net_buf_simple *buf)
{
	return buf->size - net_buf_simple_headroom(buf) - buf->len;
}