From 5deaffb2eee2e39618bc3c50ce2f428c6581477a Mon Sep 17 00:00:00 2001
From: Peter Mitsis
Date: Tue, 14 Dec 2021 10:56:14 -0500
Subject: [PATCH] kernel: update z_sched_thread_usage()

This commit does two things to z_sched_thread_usage(). First, it
updates the API so that it accepts a pointer to the runtime stats
instead of simply returning the usage cycles. This gives it the
flexibility to retrieve additional statistics in the future.

Second, the runtime stats are only updated if the specified thread is
the current thread running on the current core.

Signed-off-by: Peter Mitsis
---
 kernel/include/ksched.h |  6 +++++-
 kernel/thread.c         |  6 +++---
 kernel/usage.c          | 33 +++++++++++++++++++++++----------
 3 files changed, 31 insertions(+), 14 deletions(-)

diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 9127113555c..71faef73abc 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -385,7 +385,11 @@ void z_sched_usage_stop(void);
 
 void z_sched_usage_start(struct k_thread *thread);
 
-uint64_t z_sched_thread_usage(struct k_thread *thread);
+/**
+ * @brief Retrieves thread cycle usage data for specified thread
+ */
+void z_sched_thread_usage(struct k_thread *thread,
+			  struct k_thread_runtime_stats *stats);
 
 static inline void z_sched_usage_switch(struct k_thread *thread)
 {
diff --git a/kernel/thread.c b/kernel/thread.c
index fa45f9c4c43..526eb604c36 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -1026,10 +1026,10 @@ int k_thread_runtime_stats_get(k_tid_t thread,
 		return -EINVAL;
 	}
 
-	*stats = (k_thread_runtime_stats_t) {};
-
 #ifdef CONFIG_SCHED_THREAD_USAGE
-	stats->execution_cycles = z_sched_thread_usage(thread);
+	z_sched_thread_usage(thread, stats);
+#else
+	*stats = (k_thread_runtime_stats_t) {};
 #endif
 
 	return 0;
diff --git a/kernel/usage.c b/kernel/usage.c
index d695bf51ec8..fae72ce1dcf 100644
--- a/kernel/usage.c
+++ b/kernel/usage.c
@@ -63,15 +63,28 @@ void z_sched_usage_stop(void)
 	k_spin_unlock(&usage_lock, k);
 }
 
-uint64_t z_sched_thread_usage(struct k_thread *thread)
+void z_sched_thread_usage(struct k_thread *thread,
+			  struct k_thread_runtime_stats *stats)
 {
-	k_spinlock_key_t k = k_spin_lock(&usage_lock);
-	uint32_t u0 = _current_cpu->usage0, now = usage_now();
-	uint64_t ret = thread->base.usage;
+	uint32_t u0;
+	uint32_t now;
+	struct _cpu *cpu;
+	k_spinlock_key_t key;
 
-	if (u0 != 0) {
+	cpu = _current_cpu;
+	key = k_spin_lock(&usage_lock);
+
+	u0 = cpu->usage0;
+	now = usage_now();
+
+	if ((u0 != 0) && (thread == cpu->current)) {
 		uint32_t dt = now - u0;
 
+		/*
+		 * Update the thread's usage stats if it is the current thread
+		 * running on the current core.
+		 */
+
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
 		if (z_is_idle_thread_object(thread)) {
 			_kernel.idle_thread_usage += dt;
@@ -80,11 +93,11 @@ uint64_t z_sched_thread_usage(struct k_thread *thread)
 		}
 #endif
 
-		ret += dt;
-		thread->base.usage = ret;
-		_current_cpu->usage0 = now;
+		thread->base.usage += dt;
+		cpu->usage0 = now;
 	}
 
-	k_spin_unlock(&usage_lock, k);
-	return ret;
+	stats->execution_cycles = thread->base.usage;
+
+	k_spin_unlock(&usage_lock, key);
 }
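
For context, a minimal caller-side sketch of the public wrapper that this patch
reroutes through z_sched_thread_usage(). It assumes CONFIG_SCHED_THREAD_USAGE=y
and a recent Zephyr header layout (older trees include <kernel.h> rather than
<zephyr/kernel.h>); the query_own_cycles() helper name is hypothetical and not
part of the patch.

#include <zephyr/kernel.h>

/* Hypothetical helper: returns the calling thread's accumulated usage in
 * cycles, or 0 if the stats could not be retrieved. With
 * CONFIG_SCHED_THREAD_USAGE=y, k_thread_runtime_stats_get() fills
 * stats.execution_cycles via z_sched_thread_usage() as shown in
 * kernel/thread.c above.
 */
static uint64_t query_own_cycles(void)
{
	k_thread_runtime_stats_t stats;

	if (k_thread_runtime_stats_get(k_current_get(), &stats) != 0) {
		return 0ULL;
	}

	return stats.execution_cycles;
}

With this patch applied, the cycle count returned for the current thread also
includes the in-progress time slice (dt), since the stats are folded in before
execution_cycles is read under the usage_lock.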