The core kernel now computes the initial stack pointer for a thread, properly aligning it and subtracting out any random offsets or thread-local storage areas. arch_new_thread() no longer needs to make any calculations; an initial stack frame may be placed at the bounds of the new 'stack_ptr' parameter passed in. This parameter replaces 'stack_size'.

thread->stack_info is now set before arch_new_thread() is invoked, and z_new_thread_init() has been removed. The values populated may need to be adjusted on arches which carve out MPU guard space from the actual stack buffer.

thread->stack_info has a new member 'delta' which indicates any offset applied for TLS or a random offset. It is used so that the calculations do not need to be repeated if the thread later drops to user mode.

CONFIG_INIT_STACKS logic is now performed inside z_setup_new_thread(), before arch_new_thread() is called.

thread->stack_info is now defined as the canonical user-accessible area within the stack object, including random offsets and TLS. It will never include any carved-out memory for MPU guards, and must be updated at runtime if guards are removed.

Available stack space is now optimized. Some arches may need to significantly round up the buffer size to account for page-level granularity or MPU power-of-two requirements; this space is now accounted for and used, by virtue of the Z_THREAD_STACK_SIZE_ADJUST() call in z_setup_new_thread().

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
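To illustrate the new contract, here is a minimal sketch of the kind of work the core kernel now performs before calling arch_new_thread(). This is not the actual z_setup_new_thread() implementation: compute_random_offset() and tls_area_size() are hypothetical placeholders, and ROUND_DOWN() / ARCH_STACK_PTR_ALIGN are assumed from Zephyr's utility and arch headers.

/*
 * Minimal sketch of the flow described above; not the actual
 * z_setup_new_thread() implementation. compute_random_offset()
 * and tls_area_size() are hypothetical placeholders. Assumes
 * <kernel.h> and <sys/util.h>.
 */
static char *compute_initial_stack_ptr(struct k_thread *thread,
				       char *stack_buf, size_t stack_size)
{
	char *stack_ptr = stack_buf + stack_size;

	/* Carve out TLS and stack-randomization space from the top */
	stack_ptr -= tls_area_size();
	stack_ptr -= compute_random_offset();

	/* Respect the architecture's stack pointer alignment */
	stack_ptr = (char *)ROUND_DOWN((uintptr_t)stack_ptr,
				       ARCH_STACK_PTR_ALIGN);

	/* stack_info is set before arch_new_thread() runs; 'delta'
	 * records the offset applied so it need not be recomputed if
	 * the thread later drops to user mode.
	 */
	thread->stack_info.start = (uintptr_t)stack_buf;
	thread->stack_info.size = stack_size;
	thread->stack_info.delta = (size_t)((stack_buf + stack_size) -
					    stack_ptr);

	/* arch_new_thread() may place its initial frame at this bound */
	return stack_ptr;
}

The RISC-V port below reflects this contract: arch_new_thread() makes no calculations of its own and simply places its initial exception stack frame at stack_ptr.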
/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <ksched.h>

void z_thread_entry_wrapper(k_thread_entry_t thread,
			    void *arg1,
			    void *arg2,
			    void *arg3);

void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	struct __esf *stack_init;

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	const struct soc_esf soc_esf_init = {SOC_ESF_INIT};
#endif

	/* Initial stack frame for thread */
	stack_init = Z_STACK_PTR_TO_FRAME(struct __esf, stack_ptr);

	/* Setup the initial stack frame */
	stack_init->a0 = (ulong_t)entry;
	stack_init->a1 = (ulong_t)p1;
	stack_init->a2 = (ulong_t)p2;
	stack_init->a3 = (ulong_t)p3;

	/*
	 * Following the RISC-V architecture,
	 * the MSTATUS register (used to globally enable/disable interrupts),
	 * as well as the MEPC register (used by the core to save the
	 * value of the program counter at which an interrupt/exception occurs),
	 * need to be saved on the stack upon an interrupt/exception
	 * and restored prior to returning from the interrupt/exception.
	 * This allows nested interrupts to be handled.
	 *
	 * Given that context switching is performed via a system call exception
	 * within the RISC-V architecture implementation, initially set:
	 * 1) MSTATUS to MSTATUS_DEF_RESTORE in the thread stack to enable
	 *    interrupts when the newly created thread will be scheduled;
	 * 2) MEPC to the address of z_thread_entry_wrapper in the thread
	 *    stack.
	 * Hence, when going out of an interrupt/exception/context-switch,
	 * after scheduling the newly created thread:
	 * 1) interrupts will be enabled, as the MSTATUS register will be
	 *    restored following the MSTATUS value set within the thread stack;
	 * 2) the core will jump to z_thread_entry_wrapper, as the program
	 *    counter will be restored following the MEPC value set within the
	 *    thread stack.
	 */
	stack_init->mstatus = MSTATUS_DEF_RESTORE;

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	if ((thread->base.user_options & K_FP_REGS) != 0) {
		stack_init->mstatus |= MSTATUS_FS_INIT;
	}
	stack_init->fp_state = 0;
#endif

	stack_init->mepc = (ulong_t)z_thread_entry_wrapper;

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	stack_init->soc_context = soc_esf_init;
#endif

	thread->callee_saved.sp = (ulong_t)stack_init;
}

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
	unsigned int key;

	if (thread != _current) {
		return -EINVAL;
	}

	if (arch_is_in_isr()) {
		return -EINVAL;
	}

	/* Ensure a preemptive context switch does not occur */
	key = irq_lock();

	/* Disable all floating point capabilities for the thread */
	thread->base.user_options &= ~K_FP_REGS;

	/* Clear the FS bits to disable the FPU. */
	__asm__ volatile (
		"mv t0, %0\n"
		"csrrc x0, mstatus, t0\n"
		:
		: "r" (MSTATUS_FS_MASK)
		);

	irq_unlock(key);

	return 0;
}

int arch_float_enable(struct k_thread *thread)
{
	unsigned int key;

	if (thread != _current) {
		return -EINVAL;
	}

	if (arch_is_in_isr()) {
		return -EINVAL;
	}

	/* Ensure a preemptive context switch does not occur */
	key = irq_lock();

	/* Enable all floating point capabilities for the thread. */
	thread->base.user_options |= K_FP_REGS;

	/* Set the FS bits to Initial to enable the FPU. */
	__asm__ volatile (
		"mv t0, %0\n"
		"csrrs x0, mstatus, t0\n"
		:
		: "r" (MSTATUS_FS_INIT)
		);

	irq_unlock(key);

	return 0;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
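For context, a minimal usage sketch of the two routines above. This is an illustration only, not part of the port: it assumes thread context (not an ISR) and operates on _current, per the -EINVAL checks in arch_float_disable() and arch_float_enable(). The function name fpu_toggle_demo is hypothetical.

/*
 * Usage sketch (hypothetical): a thread that temporarily gives up
 * its FPU context, does integer-only work, then re-enables FP
 * register save/restore before touching the FPU again.
 */
void fpu_toggle_demo(void)
{
	/* Stop saving/restoring FP registers for this thread */
	if (arch_float_disable(_current) != 0) {
		return; /* e.g. called from an ISR, or not _current */
	}

	/* ... integer-only work; FP context is not preserved ... */

	/* Re-enable FP register save/restore before using the FPU */
	(void)arch_float_enable(_current);
}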