diff --git a/include/zephyr/kernel.h b/include/zephyr/kernel.h
index 24f88da2885..a42c72aee66 100644
--- a/include/zephyr/kernel.h
+++ b/include/zephyr/kernel.h
@@ -946,6 +946,26 @@ __syscall void k_thread_priority_set(k_tid_t thread, int prio);
 __syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
 #endif
 
+/**
+ * @brief Invoke the scheduler
+ *
+ * This routine invokes the scheduler to force a schedule point on the current
+ * CPU. If invoked from within a thread, the scheduler will be invoked
+ * immediately (provided interrupts were not locked when invoked). If invoked
+ * from within an ISR, the scheduler will be invoked upon exiting the ISR.
+ *
+ * Invoking the scheduler allows the kernel to make an immediate determination
+ * as to what the next thread to execute should be. Unlike yielding, this
+ * routine is not guaranteed to switch to a thread of equal or higher priority
+ * if any are available. For example, if the current thread is cooperative and
+ * there is a still-higher-priority cooperative thread that is ready, then
+ * yielding will switch to that higher priority thread whereas this routine
+ * will not.
+ *
+ * Most applications will never use this routine.
+ */
+__syscall void k_reschedule(void);
+
 #ifdef CONFIG_SCHED_CPU_MASK
 /**
  * @brief Sets all CPU enable masks to zero
diff --git a/kernel/sched.c b/kernel/sched.c
index c7b835faec8..76a2113b230 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1050,6 +1050,24 @@ static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
 #endif /* CONFIG_USERSPACE */
 #endif /* CONFIG_SCHED_DEADLINE */
 
+void z_impl_k_reschedule(void)
+{
+	k_spinlock_key_t key;
+
+	key = k_spin_lock(&_sched_spinlock);
+
+	update_cache(0);
+
+	z_reschedule(&_sched_spinlock, key);
+}
+
+#ifdef CONFIG_USERSPACE
+static inline void z_vrfy_k_reschedule(void)
+{
+	z_impl_k_reschedule();
+}
+#endif /* CONFIG_USERSPACE */
+
 bool k_can_yield(void)
 {
 	return !(k_is_pre_kernel() || k_is_in_isr() ||