From 2b5012a5d9f0faa7d323bd64f2b412289046bfb0 Mon Sep 17 00:00:00 2001 From: Tom Burdick Date: Fri, 25 Oct 2024 12:45:38 -0500 Subject: [PATCH] kernel: Move run queue initialization Move the initialization of the priority q for running out of sched.c to remove one more ifdef from sched.c. No change in functionality but better matches the rest of sched.c and priority_q.h such that the ifdefry needed is done in priority_q.h. Signed-off-by: Tom Burdick --- kernel/include/priority_q.h | 25 ++++++++++++++++++++++++- kernel/sched.c | 14 +------------- 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/kernel/include/priority_q.h b/kernel/include/priority_q.h index a2edc26088f..679e3f9dbdc 100644 --- a/kernel/include/priority_q.h +++ b/kernel/include/priority_q.h @@ -17,6 +17,7 @@ bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b); /* Dumb Scheduling */ #if defined(CONFIG_SCHED_DUMB) +#define _priq_run_init z_priq_dumb_init #define _priq_run_add z_priq_dumb_add #define _priq_run_remove z_priq_dumb_remove # if defined(CONFIG_SCHED_CPU_MASK) @@ -26,6 +27,7 @@ bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b); # endif /* CONFIG_SCHED_CPU_MASK */ /* Scalable Scheduling */ #elif defined(CONFIG_SCHED_SCALABLE) +#define _priq_run_init z_priq_rb_init #define _priq_run_add z_priq_rb_add #define _priq_run_remove z_priq_rb_remove #define _priq_run_best z_priq_rb_best @@ -37,7 +39,7 @@ bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b); #else #define NBITS 32 #endif /* CONFIG_64BIT */ - +#define _priq_run_init z_priq_mq_init #define _priq_run_add z_priq_mq_add #define _priq_run_remove z_priq_mq_remove #define _priq_run_best z_priq_mq_best @@ -57,6 +59,11 @@ static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq, struct k_thread #define _priq_wait_best z_priq_dumb_best #endif +static ALWAYS_INLINE void z_priq_dumb_init(sys_dlist_t *pq) +{ + sys_dlist_init(pq); +} + static ALWAYS_INLINE void z_priq_dumb_remove(sys_dlist_t *pq, struct 
k_thread *thread) { ARG_UNUSED(pq); @@ -75,6 +82,15 @@ static ALWAYS_INLINE struct k_thread *z_priq_dumb_best(sys_dlist_t *pq) return thread; } +static ALWAYS_INLINE void z_priq_rb_init(struct _priq_rb *pq) +{ + *pq = (struct _priq_rb) { + .tree = { + .lessthan_fn = z_priq_rb_lessthan, + } + }; +} + static ALWAYS_INLINE void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread) { struct k_thread *t; @@ -163,6 +179,13 @@ static ALWAYS_INLINE struct prio_info get_prio_info(int8_t old_prio) return ret; } +static ALWAYS_INLINE void z_priq_mq_init(struct _priq_mq *q) +{ + for (int i = 0; i < ARRAY_SIZE(q->queues); i++) { + sys_dlist_init(&q->queues[i]); + } +} + static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq, struct k_thread *thread) { diff --git a/kernel/sched.c b/kernel/sched.c index 7db9c1b59fd..eda1a3e0908 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -968,19 +968,7 @@ int z_unpend_all(_wait_q_t *wait_q) void init_ready_q(struct _ready_q *ready_q) { -#if defined(CONFIG_SCHED_SCALABLE) - ready_q->runq = (struct _priq_rb) { - .tree = { - .lessthan_fn = z_priq_rb_lessthan, - } - }; -#elif defined(CONFIG_SCHED_MULTIQ) - for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) { - sys_dlist_init(&ready_q->runq.queues[i]); - } -#else - sys_dlist_init(&ready_q->runq); -#endif + _priq_run_init(&ready_q->runq); } void z_sched_init(void)