From 84db641de66463550fb41f083dbdf0b02fc78284 Mon Sep 17 00:00:00 2001
From: Luiz Augusto von Dentz
Date: Thu, 13 Jul 2017 12:43:59 +0300
Subject: [PATCH] queue: Use k_poll if enabled

This makes use of POLL_EVENT when k_poll is enabled, which is
preferable over wait_q as it allows objects to be removed from the
data_q at any time.

Signed-off-by: Luiz Augusto von Dentz
---
 include/kernel.h                 |  7 ++-
 kernel/queue.c                   | 88 +++++++++++++++++++++++---------
 tests/kernel/queue/prj_poll.conf |  3 ++
 tests/kernel/queue/testcase.yaml |  3 ++
 4 files changed, 74 insertions(+), 27 deletions(-)
 create mode 100644 tests/kernel/queue/prj_poll.conf

diff --git a/include/kernel.h b/include/kernel.h
index a51b59737a7..3aa5aa508de 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -1313,9 +1313,12 @@ extern u32_t k_uptime_delta_32(s64_t *reftime);
  */
 
 struct k_queue {
-	_wait_q_t wait_q;
 	sys_slist_t data_q;
-	_POLL_EVENT;
+	union {
+		_wait_q_t wait_q;
+
+		_POLL_EVENT;
+	};
 
 	_OBJECT_TRACING_NEXT_PTR(k_queue);
 };
diff --git a/kernel/queue.c b/kernel/queue.c
index 1ff44677df1..0aa70c54aba 100644
--- a/kernel/queue.c
+++ b/kernel/queue.c
@@ -57,12 +57,14 @@ void k_queue_init(struct k_queue *queue)
 	SYS_TRACING_OBJ_INIT(k_queue, queue);
 }
 
+#if !defined(CONFIG_POLL)
 static void prepare_thread_to_run(struct k_thread *thread, void *data)
 {
 	_abort_thread_timeout(thread);
 	_ready_thread(thread);
 	_set_thread_return_value_with_data(thread, 0, data);
 }
+#endif /* !CONFIG_POLL */
 
 /* returns 1 if a reschedule must take place, 0 otherwise */
 static inline int handle_poll_event(struct k_queue *queue)
@@ -79,10 +81,9 @@ static inline int handle_poll_event(struct k_queue *queue)
 
 void k_queue_cancel_wait(struct k_queue *queue)
 {
+	unsigned int key = irq_lock();
+#if !defined(CONFIG_POLL)
 	struct k_thread *first_pending_thread;
-	unsigned int key;
-
-	key = irq_lock();
 
 	first_pending_thread = _unpend_first_thread(&queue->wait_q);
 
@@ -92,22 +93,22 @@ void k_queue_cancel_wait(struct k_queue *queue)
 			(void)_Swap(key);
 			return;
 		}
-	} else {
-		if (handle_poll_event(queue)) {
-			(void)_Swap(key);
-			return;
-		}
 	}
+#else
+	if (handle_poll_event(queue)) {
+		(void)_Swap(key);
+		return;
+	}
+#endif /* !CONFIG_POLL */
 
 	irq_unlock(key);
 }
 
 void k_queue_insert(struct k_queue *queue, void *prev, void *data)
 {
+	unsigned int key = irq_lock();
+#if !defined(CONFIG_POLL)
 	struct k_thread *first_pending_thread;
-	unsigned int key;
-
-	key = irq_lock();
 
 	first_pending_thread = _unpend_first_thread(&queue->wait_q);
 
@@ -117,13 +118,19 @@ void k_queue_insert(struct k_queue *queue, void *prev, void *data)
 			(void)_Swap(key);
 			return;
 		}
-	} else {
-		sys_slist_insert(&queue->data_q, prev, data);
-		if (handle_poll_event(queue)) {
-			(void)_Swap(key);
-			return;
-		}
+		irq_unlock(key);
+		return;
 	}
+#endif /* !CONFIG_POLL */
+
+	sys_slist_insert(&queue->data_q, prev, data);
+
+#if defined(CONFIG_POLL)
+	if (handle_poll_event(queue)) {
+		(void)_Swap(key);
+		return;
+	}
+#endif /* CONFIG_POLL */
 
 	irq_unlock(key);
 }
@@ -142,10 +149,9 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 {
 	__ASSERT(head && tail, "invalid head or tail");
 
+	unsigned int key = irq_lock();
+#if !defined(CONFIG_POLL)
 	struct k_thread *first_thread, *thread;
-	unsigned int key;
-
-	key = irq_lock();
 
 	first_thread = _peek_first_pending_thread(&queue->wait_q);
 	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
@@ -162,12 +168,14 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 			(void)_Swap(key);
 			return;
 		}
-	} else {
-		if (handle_poll_event(queue)) {
-			(void)_Swap(key);
-			return;
-		}
 	}
+#else
+	sys_slist_append_list(&queue->data_q, head, tail);
+	if (handle_poll_event(queue)) {
+		(void)_Swap(key);
+		return;
+	}
+#endif /* !CONFIG_POLL */
 
 	irq_unlock(key);
 }
@@ -186,6 +194,29 @@
 	sys_slist_init(list);
 }
 
+#if defined(CONFIG_POLL)
+static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
+{
+	struct k_poll_event event;
+	int err;
+
+	k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
+			  K_POLL_MODE_NOTIFY_ONLY, queue);
+
+	event.state = K_POLL_STATE_NOT_READY;
+
+	err = k_poll(&event, 1, timeout);
+	if (err == -EAGAIN) {
+		return NULL;
+	}
+
+	__ASSERT_NO_MSG(err == 0);
+	__ASSERT_NO_MSG(event.state == K_POLL_STATE_FIFO_DATA_AVAILABLE);
+
+	return sys_slist_get(&queue->data_q);
+}
+#endif /* CONFIG_POLL */
+
 void *k_queue_get(struct k_queue *queue, s32_t timeout)
 {
 	unsigned int key;
@@ -204,7 +235,14 @@ void *k_queue_get(struct k_queue *queue, s32_t timeout)
 		return NULL;
 	}
 
+#if defined(CONFIG_POLL)
+	irq_unlock(key);
+
+	return k_queue_poll(queue, timeout);
+
+#else
 	_pend_current_thread(&queue->wait_q, timeout);
 
 	return _Swap(key) ? NULL : _current->base.swap_data;
+#endif /* CONFIG_POLL */
 }
diff --git a/tests/kernel/queue/prj_poll.conf b/tests/kernel/queue/prj_poll.conf
new file mode 100644
index 00000000000..ff5574c46a5
--- /dev/null
+++ b/tests/kernel/queue/prj_poll.conf
@@ -0,0 +1,3 @@
+CONFIG_ZTEST=y
+CONFIG_IRQ_OFFLOAD=y
+CONFIG_POLL=y
diff --git a/tests/kernel/queue/testcase.yaml b/tests/kernel/queue/testcase.yaml
index 76dee5ef4ea..b229f6fb9dc 100644
--- a/tests/kernel/queue/testcase.yaml
+++ b/tests/kernel/queue/testcase.yaml
@@ -1,3 +1,6 @@
 tests:
 - test:
     tags: kernel
+- test_poll:
+    extra_args: CONF_FILE="prj_poll.conf"
+    tags: kernel
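
Reviewer note, not part of the patch: the practical win from routing queue
wakeups through _POLL_EVENT is that a consumer is no longer tied to a single
wait_q and can block on several queues at once. A minimal sketch using the
same API the patch itself calls (the function name and both queue pointers
are hypothetical; the k_poll calls and constants appear in the diff above):

#include <kernel.h>

/* Block until either queue has data, then fetch from whichever queue is
 * ready. Mirrors what the new k_queue_poll() helper does for one queue,
 * extended to two poll events. */
static void *get_from_either(struct k_queue *q1, struct k_queue *q2)
{
	struct k_poll_event events[2];
	int err;

	k_poll_event_init(&events[0], K_POLL_TYPE_FIFO_DATA_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, q1);
	k_poll_event_init(&events[1], K_POLL_TYPE_FIFO_DATA_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, q2);

	err = k_poll(events, 2, K_FOREVER);
	if (err != 0) {
		return NULL;
	}

	/* Another consumer may empty a queue between k_poll() returning and
	 * the get, so use K_NO_WAIT and tolerate NULL rather than asserting
	 * on the event state. */
	if (events[0].state == K_POLL_STATE_FIFO_DATA_AVAILABLE) {
		return k_queue_get(q1, K_NO_WAIT);
	}
	return k_queue_get(q2, K_NO_WAIT);
}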
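
A second note on the data structure: folding wait_q and _POLL_EVENT into a
union looks safe because the two members are mutually exclusive (wait_q is
only touched in the !CONFIG_POLL paths, the poll event only under
CONFIG_POLL), so struct k_queue does not grow. Callers also see the same
k_queue_get() contract in both builds; a sketch, where my_queue and
process() are hypothetical:

K_QUEUE_DEFINE(my_queue);

extern void process(void *item); /* hypothetical consumer hook */

void consumer(void)
{
	void *item;

	for (;;) {
		/* Timeout in ms; NULL on timeout in both builds: the
		 * -EAGAIN path from k_poll() under CONFIG_POLL, a non-zero
		 * _Swap() return otherwise. */
		item = k_queue_get(&my_queue, 100);
		if (item) {
			process(item);
		}
	}
}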