The core kernel computes the initial stack pointer for a thread, properly aligning it and subtracting out any random offsets or thread-local storage areas. arch_new_thread() no longer needs to make any calculations; an initial stack frame may be placed at the bounds of the new 'stack_ptr' parameter passed in. This parameter replaces 'stack_size'. thread->stack_info is now set before arch_new_thread() is invoked, and z_new_thread_init() has been removed. The values populated may need to be adjusted on arches which carve out MPU guard space from the actual stack buffer. thread->stack_info now has a new member 'delta' which indicates any offset applied for TLS or random offset. It's used so the calculations don't need to be repeated if the thread later drops to user mode. CONFIG_INIT_STACKS logic is now performed inside z_setup_new_thread(), before arch_new_thread() is called. thread->stack_info is now defined as the canonical user-accessible area within the stack object, including random offsets and TLS. It will never include any carved-out memory for MPU guards and must be updated at runtime if guards are removed. Available stack space is now optimized. Some arches may need to significantly round up the buffer size to account for page-level granularity or MPU power-of-two requirements. This space is now accounted for and used by virtue of the Z_THREAD_STACK_SIZE_ADJUST() call in z_setup_new_thread. Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
134 lines
3.3 KiB
C
134 lines
3.3 KiB
C
/*
|
|
* Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
|
|
*
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
*/
|
|
|
|
/**
|
|
* @file
|
|
* @brief Architecture-independent private kernel APIs
|
|
*
|
|
* This file contains private kernel APIs that are not architecture-specific.
|
|
*/
|
|
|
|
#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
|
|
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
|
|
|
|
#include <kernel.h>
|
|
#include <kernel_arch_interface.h>
|
|
#include <string.h>
|
|
|
|
#ifndef _ASMLANGUAGE
|
|
|
|
#ifdef __cplusplus
|
|
extern "C" {
|
|
#endif
|
|
|
|
/* Early boot functions */
|
|
|
|
void z_bss_zero(void);
|
|
#ifdef CONFIG_XIP
/* XIP image: .data must be copied from ROM into RAM during early boot. */
void z_data_copy(void);
#else
/* Non-XIP image: .data is already resident in RAM, so the early-boot
 * copy step degenerates to a no-op inline.
 */
static inline void z_data_copy(void)
{
	/* Nothing to copy */
}
#endif
|
|
FUNC_NORETURN void z_cstart(void);
|
|
|
|
extern FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
|
|
void *p1, void *p2, void *p3);
|
|
|
|
extern void z_setup_new_thread(struct k_thread *new_thread,
|
|
k_thread_stack_t *stack, size_t stack_size,
|
|
k_thread_entry_t entry,
|
|
void *p1, void *p2, void *p3,
|
|
int prio, uint32_t options, const char *name);
|
|
|
|
/**
|
|
* @brief Allocate some memory from the current thread's resource pool
|
|
*
|
|
* Threads may be assigned a resource pool, which will be used to allocate
|
|
* memory on behalf of certain kernel and driver APIs. Memory reserved
|
|
* in this way should be freed with k_free().
|
|
*
|
|
* If called from an ISR, the k_malloc() system heap will be used if it exists.
|
|
*
|
|
* @param size Memory allocation size
|
|
* @return A pointer to the allocated memory, or NULL if there is insufficient
|
|
* RAM in the pool or there is no pool to draw memory from
|
|
*/
|
|
void *z_thread_malloc(size_t size);
|
|
|
|
/* set and clear essential thread flag */
|
|
|
|
extern void z_thread_essential_set(void);
|
|
extern void z_thread_essential_clear(void);
|
|
|
|
/* clean up when a thread is aborted */
|
|
|
|
#if defined(CONFIG_THREAD_MONITOR)
/* Remove the given thread from the kernel's thread-monitor list when it
 * is aborted or exits.
 */
extern void z_thread_monitor_exit(struct k_thread *thread);
#else
/* Thread monitoring disabled: expand to a harmless empty statement so
 * call sites need no conditional compilation.
 */
#define z_thread_monitor_exit(thread) \
	do {/* nothing */ \
	} while (false)
#endif /* CONFIG_THREAD_MONITOR */
|
|
|
|
#ifdef CONFIG_USE_SWITCH
|
|
/* This is a arch function traditionally, but when the switch-based
|
|
* z_swap() is in use it's a simple inline provided by the kernel.
|
|
*/
|
|
static ALWAYS_INLINE void
|
|
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
|
|
{
|
|
thread->swap_retval = value;
|
|
}
|
|
#endif
|
|
|
|
/**
 * @brief Set a thread's swap return value along with an auxiliary data pointer
 *
 * Records @a value via arch_thread_return_value_set() and additionally
 * stashes @a data in thread->base.swap_data, for kernel objects that need
 * to pass an object pointer back to a thread being woken up.
 *
 * @param thread Thread whose return value and swap data are being set
 * @param value Return value to hand back to the thread
 * @param data Auxiliary pointer made available to the thread on wakeup
 */
static ALWAYS_INLINE void
z_thread_return_value_set_with_data(struct k_thread *thread,
				   unsigned int value,
				   void *data)
{
	arch_thread_return_value_set(thread, value);
	thread->base.swap_data = data;
}
|
|
|
|
extern void z_smp_init(void);
|
|
|
|
extern void smp_timer_init(void);
|
|
|
|
extern void z_early_boot_rand_get(uint8_t *buf, size_t length);
|
|
|
|
#if CONFIG_STACK_POINTER_RANDOM
|
|
extern int z_stack_adjust_initialized;
|
|
#endif
|
|
|
|
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
|
|
extern uint32_t z_timestamp_main; /* timestamp when main task starts */
|
|
extern uint32_t z_timestamp_idle; /* timestamp when CPU goes idle */
|
|
#endif
|
|
|
|
extern struct k_thread z_main_thread;
|
|
|
|
|
|
#ifdef CONFIG_MULTITHREADING
|
|
extern struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
|
|
#endif
|
|
extern K_THREAD_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
|
|
CONFIG_ISR_STACK_SIZE);
|
|
|
|
#ifdef CONFIG_GEN_PRIV_STACKS
|
|
extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
|
|
#endif
|
|
|
|
#ifdef __cplusplus
|
|
}
|
|
#endif
|
|
|
|
#endif /* _ASMLANGUAGE */
|
|
|
|
#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_ */
|