diff --git a/include/zephyr/kernel.h b/include/zephyr/kernel.h index adf121f72ee..9f2b750d166 100644 --- a/include/zephyr/kernel.h +++ b/include/zephyr/kernel.h @@ -3289,7 +3289,7 @@ void k_work_init(struct k_work *work, * @param work pointer to the work item. * * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, - * K_WORK_RUNNING, and K_WORK_CANCELING. + * K_WORK_RUNNING, K_WORK_CANCELING, and K_WORK_FLUSHING. */ int k_work_busy_get(const struct k_work *work); @@ -3545,9 +3545,9 @@ k_work_delayable_from_work(struct k_work *work); * * @param dwork pointer to the delayable work item. * - * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING, and - * K_WORK_CANCELING. A zero return value indicates the work item appears to - * be idle. + * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING, + * K_WORK_CANCELING, and K_WORK_FLUSHING. A zero return value indicates the + * work item appears to be idle. */ int k_work_delayable_busy_get(const struct k_work_delayable *dwork); @@ -3795,9 +3795,10 @@ enum { K_WORK_CANCELING_BIT = 1, K_WORK_QUEUED_BIT = 2, K_WORK_DELAYED_BIT = 3, + K_WORK_FLUSHING_BIT = 4, K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT) - | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT), + | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT) | BIT(K_WORK_FLUSHING_BIT), /* Static work flags */ K_WORK_DELAYABLE_BIT = 8, @@ -3848,6 +3849,12 @@ enum { * Accessed via k_work_busy_get(). May co-occur with other flags. */ K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT), + + /** @brief Flag indicating a synced work item that is being flushed. + * + * Accessed via k_work_busy_get(). May co-occur with other flags. + */ + K_WORK_FLUSHING = BIT(K_WORK_FLUSHING_BIT), }; /** @brief A structure used to submit work. 
*/ diff --git a/kernel/work.c b/kernel/work.c index c5d7c9b5bb8..a56aa67829c 100644 --- a/kernel/work.c +++ b/kernel/work.c @@ -63,18 +63,14 @@ static inline uint32_t flags_get(const uint32_t *flagp) static struct k_spinlock lock; /* Invoked by work thread */ -static void handle_flush(struct k_work *work) -{ - struct z_work_flusher *flusher - = CONTAINER_OF(work, struct z_work_flusher, work); - - k_sem_give(&flusher->sem); -} +static void handle_flush(struct k_work *work) { } static inline void init_flusher(struct z_work_flusher *flusher) { + struct k_work *work = &flusher->work; k_sem_init(&flusher->sem, 0, 1); k_work_init(&flusher->work, handle_flush); + flag_set(&work->flags, K_WORK_FLUSHING_BIT); } /* List of pending cancellations. */ @@ -96,6 +92,26 @@ static inline void init_work_cancel(struct z_work_canceller *canceler, sys_slist_append(&pending_cancels, &canceler->node); } +/* Complete flushing of a work item. + * + * Invoked with work lock held. + * + * Invoked from a work queue thread. + * + * Reschedules. + * + * @param work the work structure that has completed flushing. + */ +static void finalize_flush_locked(struct k_work *work) +{ + struct z_work_flusher *flusher + = CONTAINER_OF(work, struct z_work_flusher, work); + + flag_clear(&work->flags, K_WORK_FLUSHING_BIT); + + k_sem_give(&flusher->sem); +} + /* Complete cancellation of a work item and unlock held lock. * * Invoked with work lock held. @@ -672,13 +688,16 @@ static void work_queue_main(void *workq_ptr, void *p2, void *p3) handler(work); /* Mark the work item as no longer running and deal - * with any cancellation issued while it was running. - * Clear the BUSY flag and optionally yield to prevent - * starving other threads. + * with any cancellation and flushing issued while it + * was running. Clear the BUSY flag and optionally + * yield to prevent starving other threads. 
*/ key = k_spin_lock(&lock); flag_clear(&work->flags, K_WORK_RUNNING_BIT); + if (flag_test(&work->flags, K_WORK_FLUSHING_BIT)) { + finalize_flush_locked(work); + } if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) { finalize_cancel_locked(work); }