kernel: Add arch_coprocessors_disable()
The intent of arch_coprocessors_disable() is to replace arch_float_disable() in halt_thread(), because the FPU will not always be the only coprocessor that needs to be disabled.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
This commit is contained in:
parent
decedcabf6
commit
d397a91c62
@ -276,6 +276,15 @@ int arch_float_enable(struct k_thread *thread, unsigned int options)
|
||||
}
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
/*
 * Catch-all for disabling coprocessor context preservation when a thread
 * is aborted.  On this architecture the FPU is the only such coprocessor.
 */
int arch_coprocessors_disable(struct k_thread *thread)
{
#if !defined(CONFIG_FPU) || !defined(CONFIG_FPU_SHARING)
	return -ENOTSUP;
#else
	return arch_float_disable(thread);
#endif
}
|
||||
|
||||
#if !defined(CONFIG_MULTITHREADING)
|
||||
|
||||
K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ISR_STACK_SIZE);
|
||||
|
||||
@ -415,3 +415,12 @@ int arch_float_enable(struct k_thread *thread, unsigned int options)
|
||||
return -ENOTSUP;
|
||||
}
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
/*
 * Disable coprocessor (FPU) context preservation for @a thread.
 * Returns -ENOTSUP when no disable-able coprocessor is configured.
 */
int arch_coprocessors_disable(struct k_thread *thread)
{
	int ret = -ENOTSUP;

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	ret = arch_float_disable(thread);
#endif

	return ret;
}
|
||||
|
||||
@ -462,6 +462,15 @@ int arch_float_enable(struct k_thread *thread, unsigned int options)
|
||||
}
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
/*
 * Disable preservation of coprocessor context for an aborting thread.
 * Only the FPU is handled; without FPU sharing there is nothing to do.
 */
int arch_coprocessors_disable(struct k_thread *thread)
{
	int ret;

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	ret = arch_float_disable(thread);
#else
	ret = -ENOTSUP;
#endif

	return ret;
}
|
||||
|
||||
/* Internal function for Cortex-M initialization,
|
||||
* applicable to either case of running Zephyr
|
||||
* with or without multi-threading support.
|
||||
|
||||
@ -199,3 +199,12 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
 * Catch-all for disabling coprocessor context preservation when a thread
 * is aborted; delegates to arch_float_disable() when FPU sharing is on.
 */
int arch_coprocessors_disable(struct k_thread *thread)
{
#ifdef CONFIG_FPU_SHARING
	return arch_float_disable(thread);
#else
	return -ENOTSUP;
#endif
}
|
||||
|
||||
@ -39,3 +39,8 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
|
||||
thread->callee_saved.sp = (unsigned long)stack_init;
|
||||
}
|
||||
|
||||
/*
 * Coprocessor context preservation cannot be disabled on this
 * architecture; report the operation as unsupported.
 */
int arch_coprocessors_disable(struct k_thread *thread)
{
	(void)thread;

	return -ENOTSUP;
}
|
||||
|
||||
@ -115,6 +115,12 @@ int arch_float_enable(struct k_thread *thread, unsigned int options)
|
||||
}
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
int arch_coprocessors_disable(struct k_thread *thread)
{
	/* Posix does not support coprocessors */
	(void)thread;

	return -ENOTSUP;
}
|
||||
|
||||
#if defined(CONFIG_ARCH_HAS_THREAD_ABORT)
|
||||
void z_impl_k_thread_abort(k_tid_t thread)
|
||||
{
|
||||
|
||||
@ -255,3 +255,12 @@ FUNC_NORETURN void z_riscv_switch_to_main_no_multithreading(k_thread_entry_t mai
|
||||
CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
|
||||
}
|
||||
#endif /* !CONFIG_MULTITHREADING */
|
||||
|
||||
/*
 * Disable coprocessor (FPU) context preservation for @a thread when
 * aborting it; unsupported unless FPU sharing is configured.
 */
int arch_coprocessors_disable(struct k_thread *thread)
{
#ifndef CONFIG_FPU_SHARING
	return -ENOTSUP;
#else
	return arch_float_disable(thread);
#endif
}
|
||||
|
||||
@ -51,3 +51,8 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *sta
|
||||
|
||||
thread->switch_handle = (void *)iframe;
|
||||
}
|
||||
|
||||
/*
 * No coprocessor on this architecture supports having its context
 * preservation disabled.
 */
int arch_coprocessors_disable(struct k_thread *thread)
{
	(void)thread;

	return -ENOTSUP;
}
|
||||
|
||||
@ -77,3 +77,8 @@ int arch_float_enable(struct k_thread *thread, unsigned int options)
|
||||
return -ENOTSUP;
|
||||
}
|
||||
#endif /* CONFIG_FPU_SHARING */
|
||||
|
||||
/*
 * Stub: disabling coprocessor context preservation is not supported
 * on this architecture.
 */
int arch_coprocessors_disable(struct k_thread *thread)
{
	(void)thread;

	return -ENOTSUP;
}
|
||||
|
||||
@ -72,6 +72,15 @@ int arch_float_enable(struct k_thread *thread, unsigned int options)
|
||||
}
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
/*
 * Disable coprocessor context preservation for an aborting thread.
 * The FPU is the only coprocessor handled here.
 */
int arch_coprocessors_disable(struct k_thread *thread)
{
	int rc = -ENOTSUP;

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	rc = arch_float_disable(thread);
#endif

	return rc;
}
|
||||
|
||||
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
char *stack_ptr, k_thread_entry_t entry,
|
||||
void *p1, void *p2, void *p3)
|
||||
|
||||
@ -83,3 +83,11 @@ int arch_float_enable(struct k_thread *thread, unsigned int options)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * On x86-64 the FP/SSE context is always enabled for every thread, so
 * its preservation cannot be disabled.
 */
int arch_coprocessors_disable(struct k_thread *thread)
{
	ARG_UNUSED(thread);

	return -ENOTSUP;
}
|
||||
|
||||
@ -145,6 +145,51 @@ int arch_float_enable(struct k_thread *thread, unsigned int options)
|
||||
}
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
|
||||
#if defined(CONFIG_XTENSA_LAZY_HIFI_SHARING)
|
||||
void xtensa_hifi_disown(struct k_thread *thread)
|
||||
{
|
||||
unsigned int cpu_id = 0;
|
||||
struct k_thread *owner;
|
||||
|
||||
#if CONFIG_MP_MAX_NUM_CPUS > 1
|
||||
cpu_id = thread->base.cpu;
|
||||
#endif
|
||||
|
||||
owner = atomic_ptr_get(&_kernel.cpus[cpu_id].arch.hifi_owner);
|
||||
|
||||
if (owner == thread) {
|
||||
atomic_ptr_set(&_kernel.cpus[cpu_id].arch.hifi_owner, NULL);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
 * Catch-all for disabling coprocessor context preservation when a thread
 * is aborted.  Handles the FPU and, when configured, the lazily-shared
 * HiFi coprocessor.  Returns 0 if any coprocessor was handled,
 * -ENOTSUP otherwise.
 */
int arch_coprocessors_disable(struct k_thread *thread)
{
	int ret = -ENOTSUP;

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	arch_float_disable(thread);
	ret = 0;
#endif

#if defined(CONFIG_XTENSA_LAZY_HIFI_SHARING)
	xtensa_hifi_disown(thread);

	/*
	 * This routine is only called when aborting a thread, and the
	 * HiFi coprocessor is deliberately not disabled here.
	 * 1. Such disabling can only be done for the current CPU, and we do
	 *    not have control over which CPU the thread is running on.
	 * 2. If the thread (being deleted) is a currently executing thread,
	 *    there will be a context switch to another thread and that CPU
	 *    will automatically disable the HiFi coprocessor upon the switch.
	 */
	ret = 0;
#endif

	return ret;
}
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
|
||||
void *p1, void *p2, void *p3)
|
||||
|
||||
@ -212,6 +212,14 @@ int arch_float_disable(struct k_thread *thread);
|
||||
int arch_float_enable(struct k_thread *thread, unsigned int options);
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
/**
 * @brief Disable coprocessor context preservation
 *
 * This function serves as a catchall for disabling the preservation of
 * coprocessor context information when aborting a thread.
 *
 * @param thread Thread whose coprocessor context preservation is to be
 *               disabled
 *
 * @retval 0 on success
 * @retval -ENOTSUP if the architecture has no coprocessor whose context
 *         preservation can be disabled
 */
int arch_coprocessors_disable(struct k_thread *thread);
|
||||
|
||||
#if defined(CONFIG_USERSPACE) && defined(CONFIG_ARCH_HAS_THREAD_PRIV_STACK_SPACE_GET)
|
||||
/**
|
||||
* @brief Obtain privileged stack usage information for the specified thread
|
||||
|
||||
@ -1281,9 +1281,7 @@ static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state
|
||||
return;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
|
||||
arch_float_disable(thread);
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
arch_coprocessors_disable(thread);
|
||||
|
||||
SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
|
||||
|
||||
|
||||
Loading…
Reference in New Issue
Block a user