diff --git a/include/zephyr/kernel/smp.h b/include/zephyr/kernel/smp.h
index 1672c58bfa0..883f4820a74 100644
--- a/include/zephyr/kernel/smp.h
+++ b/include/zephyr/kernel/smp.h
@@ -7,6 +7,8 @@
 #ifndef ZEPHYR_INCLUDE_KERNEL_SMP_H_
 #define ZEPHYR_INCLUDE_KERNEL_SMP_H_
 
+#include <stdbool.h>
+
 typedef void (*smp_init_fn)(void *arg);
 
 /**
@@ -25,6 +27,11 @@ typedef void (*smp_init_fn)(void *arg);
  * Detection of such state(s) must be provided by
  * the platform layers.
  *
+ * @note This initializes per-CPU kernel structs and also
+ *       initializes timers needed for MP operations.
+ *       Use @ref k_smp_cpu_resume if these are not
+ *       desired.
+ *
  * @param id ID of target CPU.
  * @param fn Function to be called before letting scheduler
  *           run.
@@ -32,4 +39,33 @@ typedef void (*smp_init_fn)(void *arg);
  */
 void k_smp_cpu_start(int id, smp_init_fn fn, void *arg);
 
+/**
+ * @brief Resume a previously suspended CPU.
+ *
+ * This function works like @ref k_smp_cpu_start, but does not
+ * re-initialize the kernel's internal tracking data for
+ * the target CPU. Therefore, @ref k_smp_cpu_start must have
+ * previously been called for the target CPU, and it must have
+ * verifiably reached an idle/off state (detection of which
+ * must be provided by the platform layers). It may be used
+ * in cases where platform layers require, for example, that
+ * data on the interrupt or idle stack be preserved.
+ *
+ * @note This function must not be used on the currently
+ *       running CPU. The target CPU must be in a suspended
+ *       state, or in certain architectural state(s) where
+ *       the CPU is permitted to go through the resume
+ *       process. Detection of such state(s) must be
+ *       provided by the platform layers.
+ *
+ * @param id ID of target CPU.
+ * @param fn Function to be called before resuming context.
+ * @param arg Argument to @a fn.
+ * @param reinit_timer True if the timer needs to be re-initialized.
+ * @param invoke_sched True if the scheduler is to be invoked
+ *                     after the CPU has started.
+ */
+void k_smp_cpu_resume(int id, smp_init_fn fn, void *arg,
+		      bool reinit_timer, bool invoke_sched);
+
 #endif /* ZEPHYR_INCLUDE_KERNEL_SMP_H_ */
diff --git a/include/zephyr/sys/arch_interface.h b/include/zephyr/sys/arch_interface.h
index dd5755c1e23..0ffc95c663b 100644
--- a/include/zephyr/sys/arch_interface.h
+++ b/include/zephyr/sys/arch_interface.h
@@ -216,7 +216,7 @@ void arch_cpu_atomic_idle(unsigned int key);
  *
  * @param data context parameter, implementation specific
  */
-typedef FUNC_NORETURN void (*arch_cpustart_t)(void *data);
+typedef void (*arch_cpustart_t)(void *data);
 
 /**
  * @brief Start a numbered CPU on a MP-capable system
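The header changes above define the caller-facing contract: k_smp_cpu_start for a first power-up (per-CPU structs and timer initialized), k_smp_cpu_resume for a later wake-up that must leave kernel tracking data intact. A minimal caller-side sketch of how the two calls are intended to pair up; the CPU id, the cpu_prep hook, and secondary_cpu_lifecycle are illustrative, not part of this patch, and the suspend/off detection itself remains a platform-layer responsibility:

	#include <zephyr/kernel.h>
	#include <zephyr/kernel/smp.h>

	/* Hypothetical per-CPU setup hook, run on the target CPU
	 * before the scheduler takes over.
	 */
	static void cpu_prep(void *arg)
	{
		ARG_UNUSED(arg);
	}

	void secondary_cpu_lifecycle(void)
	{
		/* First power-up: initializes per-CPU kernel structs
		 * and the per-CPU timer.
		 */
		k_smp_cpu_start(1, cpu_prep, NULL);

		/* ... platform layer suspends CPU 1 and verifies it
		 * has reached an idle/off state ...
		 */

		/* Wake it back up: keep kernel tracking data intact,
		 * re-initialize the timer, and hand off to the
		 * scheduler.
		 */
		k_smp_cpu_resume(1, cpu_prep, NULL, true, true);
	}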
diff --git a/kernel/smp.c b/kernel/smp.c
index 7fe4f8503e7..ff231710e53 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -42,6 +42,14 @@ static struct cpu_start_cb {
 
 	/** Argument to @ref cpu_start_fn.fn. */
 	void *arg;
+
+	/** Invoke scheduler after CPU has started if true. */
+	bool invoke_sched;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	/** True if smp_timer_init() needs to be called. */
+	bool reinit_timer;
+#endif
 } cpu_start_fn;
 
 static struct k_spinlock cpu_start_lock;
@@ -111,7 +119,7 @@ void z_smp_thread_swap(void)
 }
 #endif
 
-static inline FUNC_NORETURN void smp_init_top(void *arg)
+static inline void smp_init_top(void *arg)
 {
 	struct k_thread dummy_thread;
 	struct cpu_start_cb *csc = arg;
@@ -124,13 +132,10 @@ static inline void smp_init_top(void *arg)
 	 */
 	wait_for_start_signal(&cpu_start_flag);
 
-	/* Initialize the dummy thread struct so that
-	 * the scheduler can schedule actual threads to run.
-	 */
-	z_dummy_thread_init(&dummy_thread);
-
 #ifdef CONFIG_SYS_CLOCK_EXISTS
-	smp_timer_init();
+	if ((csc == NULL) || csc->reinit_timer) {
+		smp_timer_init();
+	}
 #endif
 
 	/* Do additional initialization steps if needed. */
@@ -138,6 +143,16 @@
 		csc->fn(csc->arg);
 	}
 
+	if ((csc != NULL) && !csc->invoke_sched) {
+		/* Don't invoke scheduler. */
+		return;
+	}
+
+	/* Initialize the dummy thread struct so that
+	 * the scheduler can schedule actual threads to run.
+	 */
+	z_dummy_thread_init(&dummy_thread);
+
 	/* Let scheduler decide what thread to run next. */
 	z_swap_unlocked();
 
@@ -146,9 +161,6 @@
 
 static void start_cpu(int id, struct cpu_start_cb *csc)
 {
-	/* Initialize various CPU structs related to this CPU. */
-	z_init_cpu(id);
-
 	/* Clear the ready flag so the newly powered up CPU can
 	 * signal that it has powered up.
 	 */
@@ -172,6 +184,40 @@ void k_smp_cpu_start(int id, smp_init_fn fn, void *arg)
 	cpu_start_fn.fn = fn;
 	cpu_start_fn.arg = arg;
+	cpu_start_fn.invoke_sched = true;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	cpu_start_fn.reinit_timer = true;
+#endif
+
+	/* We are only starting one CPU so we do not need to synchronize
+	 * across all CPUs using the start_flag. So just set it to 1.
+	 */
+	(void)atomic_set(&cpu_start_flag, 1); /* async, don't care */
+
+	/* Initialize various CPU structs related to this CPU. */
+	z_init_cpu(id);
+
+	/* Start the CPU! */
+	start_cpu(id, &cpu_start_fn);
+
+	k_spin_unlock(&cpu_start_lock, key);
+}
+
+void k_smp_cpu_resume(int id, smp_init_fn fn, void *arg,
+		      bool reinit_timer, bool invoke_sched)
+{
+	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);
+
+	cpu_start_fn.fn = fn;
+	cpu_start_fn.arg = arg;
+	cpu_start_fn.invoke_sched = invoke_sched;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	cpu_start_fn.reinit_timer = reinit_timer;
+#else
+	ARG_UNUSED(reinit_timer);
+#endif
 
 	/* We are only starting one CPU so we do not need to synchronize
 	 * across all CPUs using the start_flag. So just set it to 1.
 	 */
@@ -195,6 +241,7 @@ void z_smp_init(void)
 	unsigned int num_cpus = arch_num_cpus();
 
 	for (int i = 1; i < num_cpus; i++) {
+		z_init_cpu(i);
 		start_cpu(i, NULL);
 	}
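The invoke_sched and reinit_timer flags exist for the resume path that motivates this patch: a CPU whose interrupt/idle stack contents were preserved across suspend should not re-enter the scheduler through a fresh dummy thread, and a timer that survived suspend should not be re-initialized. With arch_cpustart_t no longer FUNC_NORETURN, smp_init_top() can simply return to the arch layer in that case. A hedged sketch of such a caller; pm_resume_secondary_cpu is hypothetical, and whether the timer and stacks actually survive suspend is platform-specific:

	#include <zephyr/kernel.h>
	#include <zephyr/kernel/smp.h>

	/* Hypothetical PM resume path for CPU 1, assuming the
	 * platform kept the per-CPU timer alive across suspend and
	 * preserved the interrupt/idle stack contents.
	 */
	void pm_resume_secondary_cpu(void)
	{
		/* No callback needed; skip smp_timer_init() and do
		 * not invoke the scheduler: smp_init_top() returns
		 * to the arch layer, which resumes the preserved
		 * CPU context.
		 */
		k_smp_cpu_resume(1, NULL, NULL, false, false);
	}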