zephyr/tests/kernel/threads/no-multithreading/src/main.c
Gerard Marull-Paretas a5fd0d184a init: remove the need for a dummy device pointer in SYS_INIT functions
The init infrastructure, found in `init.h`, is currently used by:

- `SYS_INIT`: to call functions before `main`
- `DEVICE_*`: to initialize devices

They are all sorted according to an initialization level + a priority.
`SYS_INIT` calls are really orthogonal to devices; however, their
function signature takes a `const struct device *dev` as the first
argument. The only reason for that is that the same init machinery is
used by devices, so we have something like:

```c
struct init_entry {
	int (*init)(const struct device *dev);
	/* only set by DEVICE_*, otherwise NULL */
	const struct device *dev;
};
```

As a result, we end up with a weird/ugly pattern like this:

```c
static int my_init(const struct device *dev)
{
	/* always NULL! add ARG_UNUSED to avoid compiler warning */
	ARG_UNUSED(dev);
	...
}
```

This is really a result of poor internals isolation. This patch proposes
to make init entries more flexible so that they can accept system
initialization calls like this:

```c
static int my_init(void)
{
	...
}
```
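
With this change, a typical `SYS_INIT` registration would look as follows (a minimal sketch; the function name `my_init` and the chosen level/priority are illustrative only):

```c
#include <zephyr/init.h>

/* illustrative init function using the new, device-free signature */
static int my_init(void)
{
	/* one-time system initialization goes here */
	return 0;
}

/* run after the kernel is up, at the default init priority */
SYS_INIT(my_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
```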

This is achieved using a union:

```c
union init_function {
	/* for SYS_INIT, used when init_entry.dev == NULL */
	int (*sys)(void);
	/* for DEVICE*, used when init_entry.dev != NULL */
	int (*dev)(const struct device *dev);
};

struct init_entry {
	/* stores the init function (either for SYS_INIT or DEVICE*) */
	union init_function init_fn;
	/* stores the device pointer for DEVICE*, NULL for SYS_INIT. Used
	 * to tell which union entry to call.
	 */
	const struct device *dev;
};
```
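
For illustration, the init loop can then pick the right union member based on `dev` (a hedged sketch only; `run_init_level` and the entry iteration are hypothetical and do not reflect the actual Zephyr internals):

```c
/* Sketch: walk the init entries of one level, using the init_entry and
 * init_function definitions shown above.
 */
static void run_init_level(const struct init_entry *start,
			   const struct init_entry *end)
{
	for (const struct init_entry *entry = start; entry < end; entry++) {
		int rc;

		if (entry->dev != NULL) {
			/* DEVICE_*: call the device-style init function */
			rc = entry->init_fn.dev(entry->dev);
		} else {
			/* SYS_INIT: call the plain init function */
			rc = entry->init_fn.sys();
		}

		(void)rc; /* error handling omitted in this sketch */
	}
}
```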

This solution **does not increase ROM usage** and allows offering clean
public APIs for both `SYS_INIT` and `DEVICE_*`. Note, however, that the
init machinery keeps some coupling with devices.

**NOTE**: This is a breaking change! All `SYS_INIT` functions will need
to be converted to the new signature. See the script offered in the
following commit.

Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>

init: convert SYS_INIT functions to the new signature

Conversion scripted using scripts/utils/migrate_sys_init.py.

Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>

manifest: update projects for SYS_INIT changes

Update modules with updated SYS_INIT calls:

- hal_ti
- lvgl
- sof
- TraceRecorderSource

Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>

tests: devicetree: devices: adjust test

Adjust test according to the recently introduced SYS_INIT
infrastructure.

Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>

tests: kernel: threads: adjust SYS_INIT call

Adjust to the new signature: `int (*init_fn)(void);`

Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
2023-04-12 14:28:07 +00:00

/*
 * Copyright (c) 2018 Intel Corporation
 * Copyright (c) 2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/ztest.h>

ZTEST(no_multithreading, test_k_busy_wait)
{
	int64_t now = k_uptime_get();
	uint32_t watchdog = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

	while (k_uptime_get() == now) {
		/* Wait until uptime progresses */
		watchdog--;
		if (watchdog == 0) {
			zassert_false(true, "No progress in uptime");
		}
	}

	now = k_uptime_get();

	/* Check that k_busy_wait is working as expected. */
	k_busy_wait(10000);

	int64_t diff = k_uptime_get() - now;

	zassert_within(diff, 10, 2);
}

static void timeout_handler(struct k_timer *timer)
{
	bool *flag = k_timer_user_data_get(timer);

	*flag = true;
}

K_TIMER_DEFINE(timer, timeout_handler, NULL);

ZTEST(no_multithreading, test_irq_locking)
{
	volatile bool timeout_run = false;

	k_timer_user_data_set(&timer, (void *)&timeout_run);
	k_timer_start(&timer, K_MSEC(10), K_NO_WAIT);

	unsigned int key = irq_lock();

	k_busy_wait(15000);
	zassert_false(timeout_run, "Timeout should not expire because irq is locked");

	irq_unlock(key);

	zassert_true(timeout_run, "Timeout should expire because irq got unlocked");
}

ZTEST(no_multithreading, test_cpu_idle)
{
	volatile bool timeout_run = false;
	int64_t now, diff;

	k_timer_user_data_set(&timer, (void *)&timeout_run);

	now = k_uptime_get();
	/* Start the timer and go to idle; the CPU should sleep until it is
	 * woken up by the system clock interrupt.
	 */
	k_timer_start(&timer, K_MSEC(10), K_NO_WAIT);
	k_cpu_idle();

	diff = k_uptime_get() - now;
	zassert_true(timeout_run, "Timeout should expire");
	zassert_within(diff, 10, 2, "Unexpected time passed: %d ms", (int)diff);
}
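
/* Register one SYS_INIT function per init level (PRE_KERNEL_1,
 * PRE_KERNEL_2, POST_KERNEL). Each function checks that it runs in the
 * expected order; test_sys_init then verifies that all three ran.
 */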
#define IDX_PRE_KERNEL_1 0
#define IDX_PRE_KERNEL_2 1
#define IDX_POST_KERNEL 2

#define SYS_INIT_CREATE(level) \
	static int pre_kernel_##level##_init_func(void) \
	{ \
		if (init_order != IDX_##level && sys_init_result == 0) { \
			sys_init_result = -1; \
			return -EIO; \
		} \
		init_order++; \
		return 0; \
	} \
	SYS_INIT(pre_kernel_##level##_init_func, level, 0)

static int init_order;
static int sys_init_result;

FOR_EACH(SYS_INIT_CREATE, (;), PRE_KERNEL_1, PRE_KERNEL_2, POST_KERNEL);

ZTEST(no_multithreading, test_sys_init)
{
	zassert_equal(init_order, 3, "SYS_INIT failed: %d", init_order);
}

ZTEST_SUITE(no_multithreading, NULL, NULL, NULL, NULL, NULL);