Add a way of finding if a microkernel task is the idle task, instead of
duplicating:

    task_priority_get() == (CONFIG_NUM_TASK_PRIORITIES - 1)

which could be subject to change. Only available for the microkernel, since
there is no such concept in the nanokernel.

Change-Id: Ie8930981f1a2ac5ff16f905f4eb4e333c8b59c5d
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
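For reference, a minimal sketch of what such a helper could look like (an assumption reconstructed from the commit message; only the _IS_IDLE_TASK() name is confirmed, by its use in _NANO_OBJECT_WAIT() below — its actual definition lives elsewhere in the tree):

/* assumed definition: the idle task is the one running in the lowest
 * (numerically highest) priority slot
 */
#define _IS_IDLE_TASK() \
	(task_priority_get() == (CONFIG_NUM_TASK_PRIORITIES - 1))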
/* wait queue for multiple fibers on nanokernel objects */

/*
 * Copyright (c) 2015 Wind River Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _kernel_nanokernel_include_wait_q__h_
#define _kernel_nanokernel_include_wait_q__h_

#include <nano_private.h>

#ifdef __cplusplus
extern "C" {
#endif

/* reset a wait queue, call during operation */
static inline void _nano_wait_q_reset(struct _nano_queue *wait_q)
{
	wait_q->head = (void *)0;
	wait_q->tail = (void *)&(wait_q->head);
}
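/*
 * Added commentary: an empty queue is represented by tail pointing at the
 * queue's own head field rather than at a tcs. _nano_wait_q_put() below can
 * then unconditionally write through ((struct tcs *)tail)->link and have the
 * first fiber land in head; this assumes 'link' is the first member of
 * struct tcs.
 */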
/* initialize a wait queue: call only during object initialization */
static inline void _nano_wait_q_init(struct _nano_queue *wait_q)
{
	_nano_wait_q_reset(wait_q);
}

/*
 * Remove first fiber from a wait queue and put it on the ready queue, knowing
 * that the wait queue is not empty.
 */
static inline
struct tcs *_nano_wait_q_remove_no_check(struct _nano_queue *wait_q)
{
	struct tcs *tcs = wait_q->head;

	if (wait_q->tail == wait_q->head) {
		_nano_wait_q_reset(wait_q);
	} else {
		wait_q->head = tcs->link;
	}
	tcs->link = 0;

	_nano_fiber_ready(tcs);
	return tcs;
}

/*
 * Remove first fiber from a wait queue and put it on the ready queue.
 * Abort and return NULL if the wait queue is empty.
 */
static inline struct tcs *_nano_wait_q_remove(struct _nano_queue *wait_q)
{
	return wait_q->head ? _nano_wait_q_remove_no_check(wait_q) : NULL;
}

/* put current fiber on specified wait queue */
static inline void _nano_wait_q_put(struct _nano_queue *wait_q)
{
	((struct tcs *)wait_q->tail)->link = _nanokernel.current;
	wait_q->tail = _nanokernel.current;
}
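/*
 * Illustrative usage (added commentary; 'obj', 'wait_q' and 'data_available'
 * are hypothetical names): a nanokernel object pends and wakes fibers with
 * the helpers above, with interrupts locked around each operation:
 *
 *	int key = irq_lock();
 *
 *	if (!obj->data_available) {
 *		_nano_wait_q_put(&obj->wait_q);
 *		// ...swap out until a signaller readies this fiber...
 *	}
 *
 *	// signalling side: wake one waiter, if any
 *	struct tcs *tcs = _nano_wait_q_remove(&obj->wait_q);
 *	if (tcs != NULL) {
 *		// tcs is already on the ready queue; hand it the data
 *	}
 *	irq_unlock(key);
 */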
#if defined(CONFIG_NANO_TIMEOUTS)
static inline void _nano_timeout_remove_tcs_from_wait_q(
	struct tcs *tcs, struct _nano_queue *wait_q)
{
	if (wait_q->head == tcs) {
		if (wait_q->tail == wait_q->head) {
			_nano_wait_q_reset(wait_q);
		} else {
			wait_q->head = tcs->link;
		}
	} else {
		struct tcs *prev = wait_q->head;

		while (prev->link != tcs) {
			prev = prev->link;
		}
		prev->link = tcs->link;
		if (wait_q->tail == tcs) {
			wait_q->tail = prev;
		}
	}

	tcs->nano_timeout.wait_q = NULL;
}
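/*
 * Added commentary: the function above is the timeout path. The wait queue
 * is singly linked, so unlinking a fiber whose timeout expired while it was
 * still blocked requires an O(n) walk to find its predecessor.
 */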
#include <timeout_q.h>

#define _NANO_TIMEOUT_TICK_GET() sys_tick_get()

#define _NANO_TIMEOUT_ADD(pq, ticks) \
	do { \
		if ((ticks) != TICKS_UNLIMITED) { \
			_nano_timeout_add(_nanokernel.current, (pq), (ticks)); \
		} \
	} while (0)
#define _NANO_TIMEOUT_SET_TASK_TIMEOUT(ticks) \
	_nanokernel.task_timeout = (ticks)

#define _NANO_TIMEOUT_UPDATE(timeout, limit, cur_ticks) \
	do { \
		if ((timeout) != TICKS_UNLIMITED) { \
			(timeout) = (int32_t)((limit) - (cur_ticks)); \
		} \
	} while (0)

#elif defined(CONFIG_NANO_TIMERS)
#include <timeout_q.h>
#define _nano_timeout_tcs_init(tcs) do { } while (0)
#define _nano_timeout_abort(tcs) do { } while (0)

#define _NANO_TIMEOUT_TICK_GET() 0
#define _NANO_TIMEOUT_ADD(pq, ticks) do { } while (0)
#define _NANO_TIMEOUT_SET_TASK_TIMEOUT(ticks) do { } while (0)
#define _NANO_TIMEOUT_UPDATE(timeout, limit, cur_ticks) do { } while (0)
#else
#define _nano_timeout_tcs_init(tcs) do { } while (0)
#define _nano_timeout_abort(tcs) do { } while (0)
#define _nano_get_earliest_timeouts_deadline() ((uint32_t)TICKS_UNLIMITED)

#define _NANO_TIMEOUT_TICK_GET() 0
#define _NANO_TIMEOUT_ADD(pq, ticks) do { } while (0)
#define _NANO_TIMEOUT_SET_TASK_TIMEOUT(ticks) do { } while (0)
#define _NANO_TIMEOUT_UPDATE(timeout, limit, cur_ticks) do { } while (0)
#endif
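/*
 * Added commentary: the hooks above cover three configurations. With
 * CONFIG_NANO_TIMEOUTS, full timeout bookkeeping is compiled in. With only
 * CONFIG_NANO_TIMERS, timeout_q.h is still pulled in for timers, but the
 * wait-queue timeout hooks are no-ops. With neither, every hook is a no-op
 * and the earliest deadline is reported as TICKS_UNLIMITED.
 */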
#ifdef CONFIG_MICROKERNEL
extern void _task_nano_pend_task(struct _nano_queue *, int32_t);
extern uint32_t task_priority_get(void);

#define _NANO_OBJECT_WAIT(queue, data, timeout, key) \
	do { \
		if (_IS_IDLE_TASK()) { \
			_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout); \
			nano_cpu_atomic_idle(key); \
			key = irq_lock(); \
		} else { \
			_task_nano_pend_task(queue, timeout); \
		} \
	} while (0)
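/*
 * Added commentary: under the microkernel, an ordinary task that must wait
 * on a nanokernel object is pended via _task_nano_pend_task(), but the idle
 * task cannot pend, so it atomically idles the CPU and re-locks interrupts
 * on wakeup. _IS_IDLE_TASK() is the helper this commit introduces in place
 * of the open-coded priority comparison.
 */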
#else
#define _NANO_OBJECT_WAIT(queue, data, timeout, key) \
	do { \
		_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout); \
		nano_cpu_atomic_idle(key); \
		key = irq_lock(); \
	} while (0)

#endif

#ifdef __cplusplus
}
#endif

#endif /* _kernel_nanokernel_include_wait_q__h_ */