From 9bedfd82a2ea8e9c8bdce625a58e18aa4df788d4 Mon Sep 17 00:00:00 2001
From: Peter Mitsis
Date: Tue, 23 May 2023 18:36:04 -0400
Subject: [PATCH] kernel: Refactor CPU usage

Refactors CPU usage (thread runtime stats) to make it easier to
integrate with the object core statistics framework.

Signed-off-by: Peter Mitsis
---
 include/zephyr/kernel_structs.h |  5 ++++-
 kernel/init.c                   |  3 ++-
 kernel/usage.c                  | 36 ++++++++++++++++-----------------
 3 files changed, 24 insertions(+), 20 deletions(-)

diff --git a/include/zephyr/kernel_structs.h b/include/zephyr/kernel_structs.h
index 728986686aa..4ead72479f8 100644
--- a/include/zephyr/kernel_structs.h
+++ b/include/zephyr/kernel_structs.h
@@ -141,7 +141,7 @@ struct _cpu {
 	uint32_t usage0;

 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
-	struct k_cycle_stats usage;
+	struct k_cycle_stats *usage;
 #endif
 #endif

@@ -183,6 +183,9 @@ struct z_kernel {
 #if defined(CONFIG_THREAD_MONITOR)
 	struct k_thread *threads; /* singly linked list of ALL threads */
 #endif
+#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
+	struct k_cycle_stats usage[CONFIG_MP_MAX_NUM_CPUS];
+#endif

 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
 	/* Need to signal an IPI at the next scheduling point */
diff --git a/kernel/init.c b/kernel/init.c
index 3d5991f1fbe..dccbad6cb41 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -399,7 +399,8 @@ void z_init_cpu(int id)
 		(Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[id]) +
 		 K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[id]));
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
-	_kernel.cpus[id].usage.track_usage =
+	_kernel.cpus[id].usage = &_kernel.usage[id];
+	_kernel.cpus[id].usage->track_usage =
 		CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
 #endif

diff --git a/kernel/usage.c b/kernel/usage.c
index a44c7552f5c..57a55a75759 100644
--- a/kernel/usage.c
+++ b/kernel/usage.c
@@ -35,22 +35,22 @@ static uint32_t usage_now(void)
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
 static void sched_cpu_update_usage(struct _cpu *cpu, uint32_t cycles)
 {
-	if (!cpu->usage.track_usage) {
+	if (!cpu->usage->track_usage) {
 		return;
 	}

 	if (cpu->current != cpu->idle_thread) {
-		cpu->usage.total += cycles;
+		cpu->usage->total += cycles;

 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
-		cpu->usage.current += cycles;
+		cpu->usage->current += cycles;

-		if (cpu->usage.longest < cpu->usage.current) {
-			cpu->usage.longest = cpu->usage.current;
+		if (cpu->usage->longest < cpu->usage->current) {
+			cpu->usage->longest = cpu->usage->current;
 		}
 	} else {
-		cpu->usage.current = 0;
-		cpu->usage.num_windows++;
+		cpu->usage->current = 0;
+		cpu->usage->num_windows++;
 #endif
 	}
 }
@@ -148,16 +148,16 @@ void z_sched_cpu_usage(uint8_t cpu_id, struct k_thread_runtime_stats *stats)
 		cpu->usage0 = now;
 	}

-	stats->total_cycles = cpu->usage.total;
+	stats->total_cycles = cpu->usage->total;
 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
-	stats->current_cycles = cpu->usage.current;
-	stats->peak_cycles = cpu->usage.longest;
+	stats->current_cycles = cpu->usage->current;
+	stats->peak_cycles = cpu->usage->longest;

-	if (cpu->usage.num_windows == 0) {
+	if (cpu->usage->num_windows == 0) {
 		stats->average_cycles = 0;
 	} else {
 		stats->average_cycles = stats->total_cycles /
-					cpu->usage.num_windows;
+					cpu->usage->num_windows;
 	}
 #endif

@@ -282,7 +282,7 @@ void k_sys_runtime_stats_enable(void)

 	key = k_spin_lock(&usage_lock);

-	if (_current_cpu->usage.track_usage) {
+	if (_current_cpu->usage->track_usage) {

 		/*
 		 * Usage tracking is already enabled on the current CPU
@@ -299,10 +299,10 @@ void k_sys_runtime_stats_enable(void)
 	unsigned int num_cpus = arch_num_cpus();

 	for (uint8_t i = 0; i < num_cpus; i++) {
-		_kernel.cpus[i].usage.track_usage = true;
+		_kernel.cpus[i].usage->track_usage = true;
 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
-		_kernel.cpus[i].usage.num_windows++;
-		_kernel.cpus[i].usage.current = 0;
+		_kernel.cpus[i].usage->num_windows++;
+		_kernel.cpus[i].usage->current = 0;
 #endif
 	}

@@ -316,7 +316,7 @@ void k_sys_runtime_stats_disable(void)

 	key = k_spin_lock(&usage_lock);

-	if (!_current_cpu->usage.track_usage) {
+	if (!_current_cpu->usage->track_usage) {

 		/*
 		 * Usage tracking is already disabled on the current CPU
@@ -337,7 +337,7 @@ void k_sys_runtime_stats_disable(void)
 		if (cpu->usage0 != 0) {
 			sched_cpu_update_usage(cpu, now - cpu->usage0);
 		}
-		cpu->usage.track_usage = false;
+		cpu->usage->track_usage = false;
 	}

 	k_spin_unlock(&usage_lock, key);