Summary of what this includes:
initialization:
Copy from nano_init.c, with the following changes:
- the main thread is the continuation of the init thread, but an idle
thread is created as well
- _main() initializes threads in groups and starts the EXE group
- the ready queues are initialized
- the main thread is marked as non-essential once the system init is
done
- a weak main() symbol is provided if the application does not provide a
main() function
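For illustration, the weak fallback can be provided with a GCC-style weak
symbol, roughly as sketched below (the exact signature and attribute spelling
are assumptions, not necessarily the code as merged):

    /* default main(), used only if the application does not provide one */
    void __attribute__((weak)) main(void)
    {
        /* nothing to do; the idle thread keeps the system running */
    }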
scheduler:
Not an exhaustive list, but it basically provides primitives for:
- adding/removing a thread to/from a wait queue
- adding/removing a thread to/from the ready queue
- marking thread as ready
- locking/unlocking the scheduler
- instead of locking interrupts
- getting/setting thread priority
- checking what state (coop/preempt) a thread is currently running in
- rescheduling threads
- finding what thread is the next to run
- yielding/sleeping/aborting sleep
- finding the current thread
threads:
- Add operations on threads, such as creating and starting them.
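For illustration only, runtime thread creation might look roughly like the
sketch below; the k_thread_spawn() signature, the __stack alignment macro and
all names here are assumptions about the API of this era, not a definitive
reference:

    #define MY_STACK_SIZE 512
    static char __stack my_thread_stack[MY_STACK_SIZE];

    static void my_thread_entry(void *p1, void *p2, void *p3)
    {
        /* thread body */
    }

    static void start_my_thread(void)
    {
        /* priority 5, no options, start immediately */
        k_thread_spawn(my_thread_stack, MY_STACK_SIZE, my_thread_entry,
                       NULL, NULL, NULL, 5, 0, K_NO_WAIT);
    }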
standardized handling of kernel object return codes:
- Kernel objects now cause _Swap() to return the following values:
0 => operation successful
-EAGAIN => operation timed out
-Exxxxx => operation failed for another reason
- The thread's swap_data field can be used to return any additional
information required to complete the operation, such as the actual
result of a successful operation.
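As an illustration of the convention, a blocking "take"-style operation pends
the current thread and simply returns whatever _Swap() reports; the sketch
below is hypothetical (struct k_obj and _pend_current_thread() are placeholder
names, not the actual kernel internals):

    int k_obj_take(struct k_obj *obj, int32_t timeout_ms)
    {
        unsigned int key = irq_lock();

        if (obj->available) {
            obj->available--;
            irq_unlock(key);
            return 0;
        }
        if (timeout_ms == K_NO_WAIT) {
            irq_unlock(key);
            return -EBUSY;
        }

        /* pend this thread; _Swap() then reports how the wait ended:
         * 0 on success, -EAGAIN on timeout, other -Exxxxx on failure
         */
        _pend_current_thread(&obj->wait_q, timeout_ms);
        return _Swap(key);
    }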
timeouts:
- same as nano timeouts, renamed to simply 'timeouts'
- the kernel is still tick-based, but objects take timeout values in
ms for forward compatibility with a tickless kernel.
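In practice, blocking calls therefore take a duration in milliseconds (or
K_NO_WAIT / K_FOREVER), for example:

    static struct k_sem my_sem;   /* placeholder semaphore */

    static void timeout_example(void)
    {
        k_sleep(100);                    /* sleep 100 ms, whatever the tick rate */
        k_sem_take(&my_sem, 500);        /* wait up to 500 ms */
        k_sem_take(&my_sem, K_FOREVER);  /* wait with no timeout */
    }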
semaphores:
- Port of the nanokernel semaphores, which have the same basic behaviour
as the microkernel ones. Semaphore groups are not yet implemented.
- These semaphores are enhanced in that they accept an initial count and a
count limit. This allows configuring them as binary semaphores, and also
provisioning them without having to "give" the semaphore multiple times
before using them.
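For example, the initial count / count limit pair allows a binary semaphore or
a pre-provisioned counting semaphore to be set up directly (names below are
placeholders):

    static struct k_sem binary_sem;
    static struct k_sem resource_sem;

    static void sem_example(void)
    {
        k_sem_init(&binary_sem, 0, 1);    /* binary: starts unavailable, max count 1 */
        k_sem_init(&resource_sem, 4, 4);  /* 4 resources available up front, no "give" loop */

        if (k_sem_take(&resource_sem, 100) == -EAGAIN) {
            /* timed out after 100 ms */
        }
    }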
mutexes:
- Straight port of the microkernel mutexes. An init function is added to
allow defining them at runtime.
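A runtime-defined mutex can then be used as sketched below (my_mutex is a
placeholder):

    static struct k_mutex my_mutex;

    static void mutex_example(void)
    {
        k_mutex_init(&my_mutex);                  /* runtime definition */

        if (k_mutex_lock(&my_mutex, 50) == 0) {   /* wait up to 50 ms */
            /* ... critical section ... */
            k_mutex_unlock(&my_mutex);
        }
    }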
pipes:
- straight port
timers:
- amalgamation of nano and micro timers, with all functionality intact.
events:
- re-implementation, using semaphores and workqueues.
mailboxes:
- straight port
message queues:
- straight port of microkernel FIFOs
memory maps:
- straight port
workqueues:
- All APIs now follow the k_ naming rule and use the _timeout subsystem
  from the unified kernel directory rather than the _nano_timeout one.
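Usage under the k_ naming then looks roughly like the sketch below (work item
and handler names are placeholders):

    static struct k_work my_work;

    static void my_work_handler(struct k_work *work)
    {
        /* runs in the workqueue thread's context */
    }

    static void work_example(void)
    {
        k_work_init(&my_work, my_work_handler);
        k_work_submit(&my_work);   /* queue on the system workqueue */
    }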
stacks:
- Port of the nanokernel stacks. They can now have multiple threads
pending on them and threads can wait with a timeout.
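For example, a thread can now wait on an empty stack with a timeout (a sketch
only; the k_stack_init()/k_stack_pop() prototypes shown are assumptions):

    static uint32_t stack_buf[8];
    static struct k_stack my_kstack;

    static void stack_example(void)
    {
        uint32_t data;

        k_stack_init(&my_kstack, stack_buf, 8);
        k_stack_push(&my_kstack, 0x1234);

        /* several threads may pend here; -EAGAIN signals a timeout */
        if (k_stack_pop(&my_kstack, &data, 200) == -EAGAIN) {
            /* nothing was pushed within 200 ms */
        }
    }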
LIFOs:
- Straight port of the nanokernel LIFOs.
FIFOs:
- Straight port of the nanokernel FIFOs.
Work by: Dmitriy Korovkin <dmitriy.korovkin@windriver.com>
Peter Mitsis <peter.mitsis@windriver.com>
Allan Stephens <allan.stephens@windriver.com>
Benjamin Walsh <benjamin.walsh@windriver.com>
Change-Id: Id3cadb3694484ab2ca467889cfb029be3cd3a7d6
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
/* system clock support for nanokernel-only systems */

/*
 * Copyright (c) 1997-2015 Wind River Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <nano_private.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <drivers/system_timer.h>

#ifdef CONFIG_SYS_CLOCK_EXISTS
int sys_clock_us_per_tick = 1000000 / sys_clock_ticks_per_sec;
int sys_clock_hw_cycles_per_tick =
	CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / sys_clock_ticks_per_sec;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif
#else
/* don't initialize to avoid division-by-zero error */
int sys_clock_us_per_tick;
int sys_clock_hw_cycles_per_tick;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec;
#endif
#endif

/* updated by timer driver for tickless, stays at 1 for non-tickless */
int32_t _sys_idle_elapsed_ticks = 1;

int64_t _sys_clock_tick_count;

/**
 *
 * @brief Return the lower part of the current system tick count
 *
 * @return the current system tick count
 *
 */
uint32_t sys_tick_get_32(void)
{
	return (uint32_t)_sys_clock_tick_count;
}

/**
 *
 * @brief Return the current system tick count
 *
 * @return the current system tick count
 *
 */
int64_t sys_tick_get(void)
{
	int64_t tmp_sys_clock_tick_count;
	/*
	 * Lock the interrupts when reading _sys_clock_tick_count 64-bit
	 * variable. Some architectures (x86) do not handle 64-bit atomically,
	 * so we have to lock the timer interrupt that causes change of
	 * _sys_clock_tick_count
	 */
	unsigned int imask = irq_lock();

	tmp_sys_clock_tick_count = _sys_clock_tick_count;
	irq_unlock(imask);
	return tmp_sys_clock_tick_count;
}

/**
 *
 * @brief Return number of ticks since a reference time
 *
 * This function is meant to be used in contained fragments of code. The first
 * call to it in a particular code fragment fills in a reference time variable
 * which then gets passed and updated every time the function is called. From
 * the second call on, the delta between the value passed to it and the current
 * tick count is the return value. Since the first call is meant to only fill in
 * the reference time, its return value should be discarded.
 *
 * Since a code fragment that wants to use sys_tick_delta() passes in its
 * own reference time variable, multiple code fragments can make use of this
 * function concurrently.
 *
 * e.g.
 * int64_t reftime;
 * (void) sys_tick_delta(&reftime);  /# prime it #/
 * [do stuff]
 * x = sys_tick_delta(&reftime);     /# how long since priming #/
 * [do more stuff]
 * y = sys_tick_delta(&reftime);     /# how long since [do stuff] #/
 *
 * @return tick count since reference time; undefined for first invocation
 *
 * NOTE: We use inline function for both 64-bit and 32-bit functions.
 * Compiler optimizes out 64-bit result handling in 32-bit version.
 */
static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime)
{
	int64_t delta;
	int64_t saved;

	/*
	 * Lock the interrupts when reading _sys_clock_tick_count 64-bit
	 * variable. Some architectures (x86) do not handle 64-bit atomically,
	 * so we have to lock the timer interrupt that causes change of
	 * _sys_clock_tick_count
	 */
	unsigned int imask = irq_lock();

	saved = _sys_clock_tick_count;
	irq_unlock(imask);
	delta = saved - (*reftime);
	*reftime = saved;

	return delta;
}

/**
 *
 * @brief Return number of ticks since a reference time
 *
 * @return tick count since reference time; undefined for first invocation
 */
int64_t sys_tick_delta(int64_t *reftime)
{
	return _nano_tick_delta(reftime);
}

uint32_t sys_tick_delta_32(int64_t *reftime)
{
	return (uint32_t)_nano_tick_delta(reftime);
}

/* handle the expired timeouts in the nano timeout queue */

#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
#include <wait_q.h>

static inline void handle_expired_timeouts(int32_t ticks)
{
	struct _timeout *head =
		(struct _timeout *)sys_dlist_peek_head(&_timeout_q);

	_nanokernel.task_timeout = TICKS_UNLIMITED;

	K_DEBUG("head: %p, delta: %d\n",
		head, head ? head->delta_ticks_from_prev : -2112);

	if (head) {
		head->delta_ticks_from_prev -= ticks;
		_timeout_handle_timeouts();
	}
}
#else
#define handle_expired_timeouts(ticks) do { } while ((0))
#endif

/**
 *
 * @brief Announce a tick to the nanokernel
 *
 * This function is only to be called by the system clock timer driver when a
 * tick is to be announced to the nanokernel. It takes care of dequeuing the
 * timers that have expired and waking up the fibers pending on them.
 *
 * @return N/A
 */
void _nano_sys_clock_tick_announce(int32_t ticks)
{
	unsigned int key;

	K_DEBUG("ticks: %d\n", ticks);

	key = irq_lock();
	_sys_clock_tick_count += ticks;
	handle_expired_timeouts(ticks);
	irq_unlock(key);
}

/*
 * Get closest nano timeouts/timers deadline expiry, (uint32_t)TICKS_UNLIMITED
 * if none.
 */
uint32_t _nano_get_earliest_deadline(void)
{
	return _nano_get_earliest_timeouts_deadline();
}