From ec4df64dc70bbcdb96cbdf288defd5e8fef31aea Mon Sep 17 00:00:00 2001 From: Peter Mitsis Date: Wed, 5 Mar 2025 11:37:44 -0800 Subject: [PATCH] kernel: make order_key field in thread conditional The 'order_key' field in the thread structure '_thread_base' is only required when CONFIG_SCHED_SCALABLE and/or CONFIG_WAITQ_SCALABLE are enabled (neither of which is a default setting). Making the existence of this field conditional slightly reduces the size of the k_thread structure when neither of those Kconfig options is selected. Signed-off-by: Peter Mitsis --- include/zephyr/kernel/thread.h | 2 ++ kernel/CMakeLists.txt | 6 +++++- kernel/include/priority_q.h | 6 ++++-- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/include/zephyr/kernel/thread.h b/include/zephyr/kernel/thread.h index fd8e4c02f23..e8c062e4498 100644 --- a/include/zephyr/kernel/thread.h +++ b/include/zephyr/kernel/thread.h @@ -105,7 +105,9 @@ struct _thread_base { int prio_deadline; #endif /* CONFIG_SCHED_DEADLINE */ +#if defined(CONFIG_SCHED_SCALABLE) || defined(CONFIG_WAITQ_SCALABLE) uint32_t order_key; +#endif #ifdef CONFIG_SMP /* True for the per-CPU idle threads */ diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt index 8ba95f6c570..539d94028df 100644 --- a/kernel/CMakeLists.txt +++ b/kernel/CMakeLists.txt @@ -80,10 +80,14 @@ list(APPEND kernel_files system_work_q.c work.c condvar.c - priority_queues.c thread.c sched.c ) + +if (CONFIG_SCHED_SCALABLE OR CONFIG_WAITQ_SCALABLE) +list(APPEND kernel_files priority_queues.c) +endif() + # FIXME: Once the prior pipe implementation is removed, this should be included in the above list if(NOT CONFIG_PIPES) list(APPEND kernel_files pipe.c) diff --git a/kernel/include/priority_q.h b/kernel/include/priority_q.h index 259d689dda7..98c1584d990 100644 --- a/kernel/include/priority_q.h +++ b/kernel/include/priority_q.h @@ -10,8 +10,6 @@ #include #include -bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b); - /* Dumb Scheduling */
#if defined(CONFIG_SCHED_DUMB) #define _priq_run_init z_priq_dumb_init @@ -185,8 +183,11 @@ static ALWAYS_INLINE struct k_thread *z_priq_dumb_mask_best(sys_dlist_t *pq) } #endif /* CONFIG_SCHED_CPU_MASK */ +#if defined(CONFIG_SCHED_SCALABLE) || defined(CONFIG_WAITQ_SCALABLE) static ALWAYS_INLINE void z_priq_rb_init(struct _priq_rb *pq) { + bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b); + *pq = (struct _priq_rb) { .tree = { .lessthan_fn = z_priq_rb_lessthan, @@ -244,6 +245,7 @@ static ALWAYS_INLINE struct k_thread *z_priq_rb_best(struct _priq_rb *pq) } return thread; } +#endif struct prio_info { uint8_t offset_prio;