The init infrastructure, found in `init.h`, is currently used by:
- `SYS_INIT`: to call functions before `main`
- `DEVICE_*`: to initialize devices
They are all sorted according to an initialization level + a priority.
`SYS_INIT` calls are really orthogonal to devices, however, the required
function signature requires a `const struct device *dev` as a first
argument. The only reason for that is because the same init machinery is
used by devices, so we have something like:
```c
struct init_entry {
int (*init)(const struct device *dev);
/* only set by DEVICE_*, otherwise NULL */
const struct device *dev;
}
```
As a result, we end up with such weird/ugly pattern:
```c
static int my_init(const struct device *dev)
{
/* always NULL! add ARG_UNUSED to avoid compiler warning */
ARG_UNUSED(dev);
...
}
```
This is really a result of poor internals isolation. This patch proposes
to make init entries more flexible so that they can accept system
initialization calls like this:
```c
static int my_init(void)
{
...
}
```
This is achieved using a union:
```c
union init_function {
/* for SYS_INIT, used when init_entry.dev == NULL */
int (*sys)(void);
/* for DEVICE*, used when init_entry.dev != NULL */
int (*dev)(const struct device *dev);
};
struct init_entry {
/* stores init function (either for SYS_INIT or DEVICE*) */
union init_function init_fn;
/* stores device pointer for DEVICE*, NULL for SYS_INIT. Allows
* to know which union entry to call.
*/
const struct device *dev;
}
```
This solution **does not increase ROM usage**, and makes it possible to
offer clean public APIs for both SYS_INIT and DEVICE*. Note, however,
that the init machinery keeps a coupling with devices.
**NOTE**: This is a breaking change! All `SYS_INIT` functions will need
to be converted to the new signature. See the script offered in the
following commit.
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
init: convert SYS_INIT functions to the new signature
Conversion scripted using scripts/utils/migrate_sys_init.py.
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
manifest: update projects for SYS_INIT changes
Update modules with updated SYS_INIT calls:
- hal_ti
- lvgl
- sof
- TraceRecorderSource
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
tests: devicetree: devices: adjust test
Adjust test according to the recently introduced SYS_INIT
infrastructure.
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
tests: kernel: threads: adjust SYS_INIT call
Adjust to the new signature: int (*init_fn)(void);
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
178 lines
3.6 KiB
C
178 lines
3.6 KiB
C
/*
|
|
* Copyright (c) 2018 Intel Corporation
|
|
*
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
*/
|
|
|
|
#include <stdlib.h>
|
|
#include <zephyr/kernel.h>
|
|
#include <zephyr/init.h>
|
|
#include <errno.h>
|
|
#include <zephyr/sys/math_extras.h>
|
|
#include <string.h>
|
|
#include <zephyr/app_memory/app_memdomain.h>
|
|
#include <zephyr/sys/mutex.h>
|
|
#include <zephyr/sys/sys_heap.h>
|
|
#include <zephyr/types.h>
|
|
|
|
#define LOG_LEVEL CONFIG_KERNEL_LOG_LEVEL
|
|
#include <zephyr/logging/log.h>
|
|
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
|
|
|
|
#ifdef CONFIG_MINIMAL_LIBC_MALLOC
|
|
|
|
#if (CONFIG_MINIMAL_LIBC_MALLOC_ARENA_SIZE > 0)
|
|
#ifdef CONFIG_USERSPACE
|
|
K_APPMEM_PARTITION_DEFINE(z_malloc_partition);
|
|
#define POOL_SECTION K_APP_DMEM_SECTION(z_malloc_partition)
|
|
#else
|
|
#define POOL_SECTION .bss
|
|
#endif /* CONFIG_USERSPACE */
|
|
|
|
#define HEAP_BYTES CONFIG_MINIMAL_LIBC_MALLOC_ARENA_SIZE
|
|
|
|
Z_GENERIC_SECTION(POOL_SECTION) static struct sys_heap z_malloc_heap;
|
|
Z_GENERIC_SECTION(POOL_SECTION) struct sys_mutex z_malloc_heap_mutex;
|
|
Z_GENERIC_SECTION(POOL_SECTION) static char z_malloc_heap_mem[HEAP_BYTES];
|
|
|
|
void *malloc(size_t size)
|
|
{
|
|
int lock_ret;
|
|
|
|
lock_ret = sys_mutex_lock(&z_malloc_heap_mutex, K_FOREVER);
|
|
__ASSERT_NO_MSG(lock_ret == 0);
|
|
|
|
void *ret = sys_heap_aligned_alloc(&z_malloc_heap,
|
|
__alignof__(z_max_align_t),
|
|
size);
|
|
if (ret == NULL && size != 0) {
|
|
errno = ENOMEM;
|
|
}
|
|
|
|
(void) sys_mutex_unlock(&z_malloc_heap_mutex);
|
|
|
|
return ret;
|
|
}
|
|
|
|
/* Compile in when C11 */
|
|
#if __STDC_VERSION__ >= 201112L
|
|
void *aligned_alloc(size_t alignment, size_t size)
|
|
{
|
|
int lock_ret;
|
|
|
|
lock_ret = sys_mutex_lock(&z_malloc_heap_mutex, K_FOREVER);
|
|
__ASSERT_NO_MSG(lock_ret == 0);
|
|
|
|
void *ret = sys_heap_aligned_alloc(&z_malloc_heap,
|
|
alignment,
|
|
size);
|
|
if (ret == NULL && size != 0) {
|
|
errno = ENOMEM;
|
|
}
|
|
|
|
(void) sys_mutex_unlock(&z_malloc_heap_mutex);
|
|
|
|
return ret;
|
|
}
|
|
#endif /* __STDC_VERSION__ >= 201112L */
|
|
|
|
static int malloc_prepare(void)
|
|
{
|
|
|
|
sys_heap_init(&z_malloc_heap, z_malloc_heap_mem, HEAP_BYTES);
|
|
sys_mutex_init(&z_malloc_heap_mutex);
|
|
|
|
return 0;
|
|
}
|
|
|
|
void *realloc(void *ptr, size_t requested_size)
|
|
{
|
|
int lock_ret;
|
|
|
|
lock_ret = sys_mutex_lock(&z_malloc_heap_mutex, K_FOREVER);
|
|
__ASSERT_NO_MSG(lock_ret == 0);
|
|
|
|
void *ret = sys_heap_aligned_realloc(&z_malloc_heap, ptr,
|
|
__alignof__(z_max_align_t),
|
|
requested_size);
|
|
|
|
if (ret == NULL && requested_size != 0) {
|
|
errno = ENOMEM;
|
|
}
|
|
|
|
(void) sys_mutex_unlock(&z_malloc_heap_mutex);
|
|
|
|
return ret;
|
|
}
|
|
|
|
void free(void *ptr)
|
|
{
|
|
int lock_ret;
|
|
|
|
lock_ret = sys_mutex_lock(&z_malloc_heap_mutex, K_FOREVER);
|
|
__ASSERT_NO_MSG(lock_ret == 0);
|
|
sys_heap_free(&z_malloc_heap, ptr);
|
|
(void) sys_mutex_unlock(&z_malloc_heap_mutex);
|
|
}
|
|
|
|
SYS_INIT(malloc_prepare, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
|
|
#else /* No malloc arena */
|
|
/*
 * Stub used when CONFIG_MINIMAL_LIBC_MALLOC_ARENA_SIZE is 0: no heap
 * exists, so every allocation request fails with ENOMEM after logging
 * an error to help diagnose the misconfiguration.
 */
void *malloc(size_t size)
{
	ARG_UNUSED(size);

	LOG_ERR("CONFIG_MINIMAL_LIBC_MALLOC_ARENA_SIZE is 0");
	errno = ENOMEM;

	return NULL;
}
|
|
|
|
/*
 * Stub used when no malloc arena is configured: nothing was ever
 * allocated, so there is nothing to release.
 */
void free(void *ptr)
{
	ARG_UNUSED(ptr);
}
|
|
|
|
/*
 * Stub used when no malloc arena is configured: the old pointer is
 * irrelevant, and the delegated malloc() call always fails with ENOMEM
 * in this configuration.
 */
void *realloc(void *ptr, size_t size)
{
	ARG_UNUSED(ptr);
	return malloc(size);
}
|
|
#endif
|
|
|
|
#endif /* CONFIG_MINIMAL_LIBC_MALLOC */
|
|
|
|
#ifdef CONFIG_MINIMAL_LIBC_CALLOC
|
|
/*
 * Allocate a zero-initialized array of @a nmemb elements of @a size
 * bytes each. The element-count multiplication is checked for overflow;
 * on overflow or allocation failure, returns NULL with errno = ENOMEM
 * (allocation failure errno is set by malloc()).
 */
void *calloc(size_t nmemb, size_t size)
{
	size_t total;
	void *mem;

	if (size_mul_overflow(nmemb, size, &total)) {
		errno = ENOMEM;
		return NULL;
	}

	mem = malloc(total);
	if (mem != NULL) {
		(void)memset(mem, 0, total);
	}

	return mem;
}
|
|
#endif /* CONFIG_MINIMAL_LIBC_CALLOC */
|
|
|
|
#ifdef CONFIG_MINIMAL_LIBC_REALLOCARRAY
|
|
/*
 * POSIX reallocarray(): resize @a ptr to hold @a nmemb elements of
 * @a size bytes each, with overflow checking on the multiplication.
 * Returns NULL with errno = ENOMEM on overflow, allocation failure, or
 * when no malloc arena is configured.
 */
void *reallocarray(void *ptr, size_t nmemb, size_t size)
{
#if (CONFIG_MINIMAL_LIBC_MALLOC_ARENA_SIZE > 0)
	if (size_mul_overflow(nmemb, size, &size)) {
		errno = ENOMEM;
		return NULL;
	}
	return realloc(ptr, size);
#else
	/* No arena: parameters are unused, and POSIX requires errno to
	 * be set when the call fails (the original code returned NULL
	 * silently and left the parameters unused, causing warnings).
	 */
	ARG_UNUSED(ptr);
	ARG_UNUSED(nmemb);
	ARG_UNUSED(size);

	errno = ENOMEM;
	return NULL;
#endif
}
|
|
#endif /* CONFIG_MINIMAL_LIBC_REALLOCARRAY */
|