kernel: events: fix waitq timeout race condition
Updates events to prevent a timeout from corrupting the list of threads that
need to be woken up.

Signed-off-by: Aastha Grover <aastha.grover@intel.com>
parent 5537776898
commit 877fc3d508
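For context (not part of the patch), here is a minimal sketch of the kind of usage that can hit the race: a waiter whose k_event_wait() timeout expires at nearly the same instant another thread posts the event. Before this fix, z_thread_timeout() could wake the waiter while event_walk_op() had already linked it into the wake-up list, corrupting that list. The identifiers (data_event, EV_DATA_READY), the 10 ms period, and stack sizes are illustrative; CONFIG_EVENTS=y is assumed.

/* Illustrative sketch only; identifiers below are hypothetical. */
#include <zephyr/kernel.h>

#define EV_DATA_READY BIT(0)

K_EVENT_DEFINE(data_event);

static void waiter(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1); ARG_UNUSED(p2); ARG_UNUSED(p3);

	while (1) {
		/* Timeout deliberately close to the poster's period, so the
		 * timeout expiry races with the k_event_post() below.
		 */
		uint32_t ev = k_event_wait(&data_event, EV_DATA_READY,
					   true, K_MSEC(10));

		if (ev == 0) {
			/* Timed out without seeing the event. */
		}
	}
}

static void poster(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1); ARG_UNUSED(p2); ARG_UNUSED(p3);

	while (1) {
		k_sleep(K_MSEC(10));
		k_event_post(&data_event, EV_DATA_READY);
	}
}

K_THREAD_DEFINE(waiter_tid, 1024, waiter, NULL, NULL, NULL, 5, 0, 0);
K_THREAD_DEFINE(poster_tid, 1024, poster, NULL, NULL, NULL, 5, 0, 0);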
@@ -264,6 +264,9 @@ struct k_thread {
 	uint32_t events;
 	uint32_t event_options;
+
+	/** true if timeout should not wake the thread */
+	bool no_wake_on_timeout;
 #endif
 
 #if defined(CONFIG_THREAD_MONITOR)
@@ -98,13 +98,22 @@ static int event_walk_op(struct k_thread *thread, void *data)
 	if (are_wait_conditions_met(thread->events, event_data->events,
 				    wait_condition)) {
+
+		/*
+		 * Events create a list of threads to wake up. We do
+		 * not want z_thread_timeout to wake these threads; they
+		 * will be woken up by k_event_post_internal once they
+		 * have been processed.
+		 */
+		thread->no_wake_on_timeout = true;
+
 		/*
 		 * The wait conditions have been satisfied. Add this
 		 * thread to the list of threads to unpend.
 		 */
 
 		thread->next_event_link = event_data->head;
 		event_data->head = thread;
 		z_abort_timeout(&thread->base.timeout);
 	}
 
 	return 0;
@@ -129,11 +138,10 @@ static void k_event_post_internal(struct k_event *event, uint32_t events,
 	data.events = events;
 	/*
 	 * Posting an event has the potential to wake multiple pended threads.
-	 * It is desirable to unpend all affected threads simultaneously. To
-	 * do so, this must be done in three steps as it is unsafe to unpend
-	 * threads from within the _WAIT_Q_FOR_EACH() loop.
+	 * It is desirable to unpend all affected threads simultaneously. This
+	 * is done in three steps:
 	 *
-	 * 1. Create a linked list of threads to unpend.
+	 * 1. Walk the waitq and create a linked list of threads to unpend.
 	 * 2. Unpend each of the threads in the linked list
 	 * 3. Ready each of the threads in the linked list
 	 */
@@ -142,12 +150,13 @@ static void k_event_post_internal(struct k_event *event, uint32_t events,
 
 	if (data.head != NULL) {
 		thread = data.head;
+		struct k_thread *next;
 		do {
-			z_unpend_thread(thread);
 			arch_thread_return_value_set(thread, 0);
 			thread->events = events;
-			z_ready_thread(thread);
-			thread = thread->next_event_link;
+			next = thread->next_event_link;
+			z_sched_wake_thread(thread, false);
+			thread = next;
 		} while (thread != NULL);
 	}
 
@@ -805,6 +805,16 @@ void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
 		bool killed = ((thread->base.thread_state & _THREAD_DEAD) ||
 			       (thread->base.thread_state & _THREAD_ABORTING));
 
+#ifdef CONFIG_EVENTS
+		bool do_nothing = thread->no_wake_on_timeout && is_timeout;
+
+		thread->no_wake_on_timeout = false;
+
+		if (do_nothing) {
+			continue;
+		}
+#endif
+
 		if (!killed) {
 			/* The thread is not being killed */
 			if (thread->base.pended_on != NULL) {
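Not shown in this diff is the timeout side that the new no_wake_on_timeout/is_timeout check guards against. Assuming the thread-timeout handler is the other caller of z_sched_wake_thread() (with is_timeout = true), it would look roughly like the sketch below. The continue in the hunk above also relies on z_sched_wake_thread()'s body running inside a loop-style scheduler-lock macro (not visible in the hunk), so it skips the wake-up rather than leaving the function.

/* Rough sketch of the assumed timeout-side caller; not part of this patch. */
void z_thread_timeout(struct _timeout *timeout)
{
	struct k_thread *thread = CONTAINER_OF(timeout, struct k_thread,
					       base.timeout);

	/* is_timeout = true: if event_walk_op() already claimed this thread
	 * (no_wake_on_timeout set), z_sched_wake_thread() does nothing and
	 * leaves the wake-up to k_event_post_internal().
	 */
	z_sched_wake_thread(thread, true);
}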
@@ -578,6 +578,9 @@ char *z_setup_new_thread(struct k_thread *new_thread,
 	/* Initialize custom data field (value is opaque to kernel) */
 	new_thread->custom_data = NULL;
 #endif
+#ifdef CONFIG_EVENTS
+	new_thread->no_wake_on_timeout = false;
+#endif
 #ifdef CONFIG_THREAD_MONITOR
 	new_thread->entry.pEntry = entry;
 	new_thread->entry.parameter1 = p1;