zephyr/kernel/unified/include/timeout_q.h
Benjamin Walsh 456c6daa9f unified: initial unified kernel implementation
Summary of what this includes:

    initialization:

    Copy from nano_init.c, with the following changes:

    - the main thread is the continuation of the init thread, but an idle
      thread is created as well

    - _main() initializes threads in groups and starts the EXE group

    - the ready queues are initialized

    - the main thread is marked as non-essential once the system init is
      done

    - a weak main() symbol is provided if the application does not provide a
      main() function

    scheduler:

    Not an exhaustive list, but basically provide primitives for:

    - adding/removing a thread to/from a wait queue
    - adding/removing a thread to/from the ready queue
    - marking thread as ready
    - locking/unlocking the scheduler
      - instead of locking interrupts
    - getting/setting thread priority
      - checking what state (coop/preempt) a thread is currently running in
    - rescheduling threads
    - finding what thread is the next to run
    - yielding/sleeping/aborting sleep
    - finding the current thread

    threads:

    - Add operations on threads, such as creating and starting them.

    standardized handling of kernel object return codes:

    - Kernel objects now cause _Swap() to return the following values:
         0      => operation successful
        -EAGAIN => operation timed out
        -Exxxxx => operation failed for another reason

    - The thread's swap_data field can be used to return any additional
    information required to complete the operation, such as the actual
    result of a successful operation.

    timeouts:

    - same as nano timeouts, renamed to simply 'timeouts'
    - the kernel is still tick-based, but objects take timeout values in
      ms for forward compatibility with a tickless kernel.

    semaphores:

      - Port of the nanokernel semaphores, which have the same basic behaviour
      as the microkernel ones. Semaphore groups are not yet implemented.

      - These semaphores are enhanced in that they accept an initial count and a
      count limit. This allows configuring them as binary semaphores, and also
      provisioning them without having to "give" the semaphore multiple times
      before using them.

    mutexes:

    - Straight port of the microkernel mutexes. An init function is added to
    allow defining them at runtime.

    pipes:

    - straight port

    timers:

    - amalgamation of nano and micro timers, with all functionalities
      intact.

    events:

    - re-implementation, using semaphores and workqueues.

    mailboxes:

    - straight port

    message queues:

    - straight port of microkernel FIFOs

    memory maps:

    - straight port

    workqueues:

    - Basically, have all APIs follow the k_ naming rule, and use the _timeout
    subsystem from the unified kernel directory, and not the _nano_timeout
    one.

    stacks:

    - Port of the nanokernel stacks. They can now have multiple threads
    pending on them and threads can wait with a timeout.

    LIFOs:

    - Straight port of the nanokernel LIFOs.

    FIFOs:

    - Straight port of the nanokernel FIFOs.

Work by: Dmitriy Korovkin <dmitriy.korovkin@windriver.com>
         Peter Mitsis <peter.mitsis@windriver.com>
         Allan Stephens <allan.stephens@windriver.com>
         Benjamin Walsh <benjamin.walsh@windriver.com>

Change-Id: Id3cadb3694484ab2ca467889cfb029be3cd3a7d6
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
2016-09-13 17:12:55 -04:00

296 lines
7.7 KiB
C

/** @file
* @brief timeout queue for fibers on nanokernel objects
*
* This file is meant to be included by nanokernel/include/wait_q.h only
*/
/*
* Copyright (c) 2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _kernel_nanokernel_include_timeout_q__h_
#define _kernel_nanokernel_include_timeout_q__h_
#include <misc/dlist.h>
#ifdef __cplusplus
extern "C" {
#endif
static inline int _do_timeout_abort(struct _timeout *t);
static inline void _do_timeout_add(struct tcs *tcs,
struct _timeout *t,
_wait_q_t *wait_q,
int32_t timeout);
#if defined(CONFIG_NANO_TIMEOUTS)
/* initialize the nano timeouts part of TCS when enabled in the kernel */
static inline void _timeout_init(struct _timeout *t, _timeout_func_t func)
{
	/*
	 * A delta_ticks_from_prev of -1 marks the timeout as inactive. It
	 * is set both here and on dequeue so code paths that never arm a
	 * timeout (e.g. waiting forever on a semaphore) need no special
	 * handling.
	 */
	t->delta_ticks_from_prev = -1;

	/*
	 * A NULL wait_q lets the _fiber_wakeup family of APIs verify the
	 * fiber is not pending on a wait queue before aborting a timeout.
	 */
	t->wait_q = NULL;

	/*
	 * A NULL tcs tells _timeout_handle_one_timeout() that no fiber is
	 * waiting on this timeout.
	 */
	t->tcs = NULL;

	/* install the expiry callback before the timeout can ever fire */
	t->func = func;

	/*
	 * tcs->timeout.node.next and tcs->timeout.node.prev are only
	 * initialized when enqueueing on the timeout queue.
	 */
}
/* Initialize the timeout record embedded in a thread's TCS (no callback). */
static inline void _timeout_tcs_init(struct tcs *tcs)
{
	_timeout_init(&tcs->timeout, NULL);
}
/*
 * Deprecated alias for _timeout_tcs_init().
 *
 * XXX - kept only for backwards compatibility until the arch code is
 * updated to call _timeout_tcs_init() directly.
 */
static inline void _nano_timeout_tcs_init(struct tcs *tcs)
{
	_timeout_tcs_init(tcs);
}
/**
* @brief Remove the thread from nanokernel object wait queue
*
* If a thread waits on a nanokernel object with timeout,
* remove the thread from the wait queue
*
* @param tcs Waiting thread
* @param t nano timer
*
* @return N/A
*/
static inline void _timeout_object_dequeue(struct tcs *tcs, struct _timeout *t)
{
if (t->wait_q) {
_timeout_remove_tcs_from_wait_q(tcs);
}
}
/*
 * Abort the timeout embedded in the given fiber's TCS.
 *
 * Returns 0 on success, -1 if the timeout already expired (see
 * _do_timeout_abort()).
 */
static inline int _timeout_abort(struct tcs *tcs)
{
	return _do_timeout_abort(&tcs->timeout);
}
/*
 * Put a fiber on the timeout queue and record the wait queue it is
 * pending on, so that expiry can also dequeue it from that object
 * (see _timeout_object_dequeue()).
 */
static inline void _timeout_add(struct tcs *tcs, _wait_q_t *wait_q,
				int32_t timeout)
{
	_do_timeout_add(tcs, &tcs->timeout, wait_q, timeout);
}
#else
#define _timeout_object_dequeue(tcs, t) do { } while (0)
#endif /* CONFIG_NANO_TIMEOUTS */
/*
 * Handle one expired timeout.
 *
 * Removes the timeout at the head of the timeout queue. If a thread was
 * waiting on a kernel object with this timeout, it is also removed from
 * that object's wait queue and made ready to run; otherwise the
 * timeout's expiry callback, if any, is invoked.
 *
 * NOTE(review): an earlier comment claimed the woken thread's return
 * value is set to 0/NULL here, but no code in this function does so --
 * presumably that happens elsewhere on the wakeup path; confirm against
 * callers.
 *
 * The caller must guarantee the queue is non-empty: the head returned
 * by sys_dlist_get() is dereferenced unconditionally (see
 * _timeout_handle_timeouts()).
 *
 * Must be called with interrupts locked.
 *
 * Returns the new head of the timeout queue (NULL if it became empty).
 */
static inline struct _timeout *_timeout_handle_one_timeout(
	sys_dlist_t *timeout_q)
{
	struct _timeout *t = (void *)sys_dlist_get(timeout_q);
	struct tcs *tcs = t->tcs;

	K_DEBUG("timeout %p\n", t);
	if (tcs != NULL) {
		_timeout_object_dequeue(tcs, t);
		_ready_thread(tcs);
	} else if (t->func) {
		t->func(t);
	}
	/*
	 * Note: t->func() may add the timeout again. Make sure that
	 * delta_ticks_from_prev is set to -1 (inactive) only if the
	 * timeout is still expired (delta_ticks_from_prev == 0).
	 */
	if (t->delta_ticks_from_prev == 0) {
		t->delta_ticks_from_prev = -1;
	}

	return (struct _timeout *)sys_dlist_peek_head(timeout_q);
}
/* loop over all expired timeouts and handle them one by one */
/* must be called with interrupts locked */
static inline void _timeout_handle_timeouts(void)
{
sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
struct _timeout *next;
next = (struct _timeout *)sys_dlist_peek_head(timeout_q);
while (next && next->delta_ticks_from_prev == 0) {
next = _timeout_handle_one_timeout(timeout_q);
}
}
/**
 * @brief Abort a timeout
 *
 * Unlinks @a t from the timeout queue. Because the queue stores delta
 * ticks, the remaining tick budget of @a t is folded into its
 * successor, if any, before the node is removed.
 *
 * @param t Timeout to abort
 *
 * @return 0 on success, -1 if the timeout has already expired
 */
static inline int _do_timeout_abort(struct _timeout *t)
{
	sys_dlist_t *timeout_q = &_nanokernel.timeout_q;

	if (t->delta_ticks_from_prev == -1) {
		/* inactive: already expired or never queued */
		return -1;
	}

	if (!sys_dlist_is_tail(timeout_q, &t->node)) {
		struct _timeout *successor = (struct _timeout *)
			sys_dlist_peek_next(timeout_q, &t->node);

		successor->delta_ticks_from_prev += t->delta_ticks_from_prev;
	}

	sys_dlist_remove(&t->node);
	t->delta_ticks_from_prev = -1;

	return 0;
}
/*
 * Abort a standalone (nano timer) timeout; thin wrapper around
 * _do_timeout_abort(). Returns 0 on success, -1 if already expired.
 */
static inline int _nano_timer_timeout_abort(struct _timeout *t)
{
	return _do_timeout_abort(t);
}
/*
 * Callback for sys_dlist_insert_at():
 *
 * Returns 1 if the timeout to insert is lower than or equal to the next
 * timeout in the queue, signifying that it should be inserted before
 * the next. Returns 0 if it is greater.
 *
 * Because the timeout queue is a delta queue, one side is adjusted on
 * each comparison: if the timeout to insert is greater, it is
 * decremented by the tested node's delta before moving on; if it is
 * lower or equal, the tested node's delta is decremented instead, since
 * the new timeout will be inserted in front of it.
 *
 * Declared 'static inline' like every other function in this header:
 * a plain 'static' function in a header emits an unused-function
 * warning (and a dead copy) in every translation unit that includes it
 * without calling it. Taking its address for the callback still forces
 * an out-of-line instance where needed.
 */
static inline int _timeout_insert_point_test(sys_dnode_t *test, void *timeout)
{
	struct _timeout *t = (void *)test;
	int32_t *timeout_to_insert = timeout;

	if (*timeout_to_insert > t->delta_ticks_from_prev) {
		*timeout_to_insert -= t->delta_ticks_from_prev;
		return 0;
	}

	t->delta_ticks_from_prev -= *timeout_to_insert;
	return 1;
}
/**
 *
 * @brief Put a timeout on the timeout queue, record waiting fiber and
 *        wait queue
 *
 * Inserts @a t into the delta-ordered timeout queue via
 * sys_dlist_insert_at(), recording the waiting fiber (may be NULL for a
 * standalone timer) and the kernel object wait queue it pends on.
 *
 * @param tcs Fiber waiting on a timeout, or NULL
 * @param t Timeout structure to be added to the nanokernel queue
 * @param wait_q Nanokernel object wait queue
 * @param timeout Timeout in ticks
 *
 * @return N/A
 */
static inline void _do_timeout_add(struct tcs *tcs, struct _timeout *t,
				   _wait_q_t *wait_q, int32_t timeout)
{
	K_DEBUG("thread %p on wait_q %p, for timeout: %d\n",
		tcs, wait_q, timeout);

	sys_dlist_t *timeout_q = &_nanokernel.timeout_q;

	K_DEBUG("timeout_q %p before: head: %p, tail: %p\n",
		&_nanokernel.timeout_q,
		sys_dlist_peek_head(&_nanokernel.timeout_q),
		_nanokernel.timeout_q.tail);

	K_DEBUG("timeout %p before: next: %p, prev: %p\n",
		t, t->node.next, t->node.prev);

	t->tcs = tcs;
	t->delta_ticks_from_prev = timeout;
	t->wait_q = (sys_dlist_t *)wait_q;
	/*
	 * The insert-point callback consumes delta_ticks_from_prev,
	 * rewriting it into the delta from the predecessor node.
	 */
	sys_dlist_insert_at(timeout_q, (void *)t,
			    _timeout_insert_point_test,
			    &t->delta_ticks_from_prev);

	K_DEBUG("timeout_q %p after: head: %p, tail: %p\n",
		&_nanokernel.timeout_q,
		sys_dlist_peek_head(&_nanokernel.timeout_q),
		_nanokernel.timeout_q.tail);
	K_DEBUG("timeout %p after: next: %p, prev: %p\n",
		t, t->node.next, t->node.prev);
}
/*
 * Queue a standalone (nano timer) timeout: tcs is NULL, so on expiry
 * _timeout_handle_one_timeout() invokes t->func instead of readying a
 * thread.
 */
static inline void _nano_timer_timeout_add(struct _timeout *t,
					   _wait_q_t *wait_q,
					   int32_t timeout)
{
	_do_timeout_add(NULL, t, wait_q, timeout);
}
/* find the closest deadline in the timeout queue */
static inline uint32_t _nano_get_earliest_timeouts_deadline(void)
{
sys_dlist_t *q = &_nanokernel.timeout_q;
struct _timeout *t =
(struct _timeout *)sys_dlist_peek_head(q);
return t ? min((uint32_t)t->delta_ticks_from_prev,
(uint32_t)_nanokernel.task_timeout)
: (uint32_t)_nanokernel.task_timeout;
}
#ifdef __cplusplus
}
#endif
#endif /* _kernel_nanokernel_include_timeout_q__h_ */