zephyr/arch/x86/include/kernel_arch_func.h
Andrew Boie 80a0d9d16b kernel: interrupt/idle stacks/threads as array
The set of interrupt stacks is now expressed as an array. We
also define the idle threads and their associated stacks this
way. This allows for iteration in cases where we have multiple
CPUs.

There is now a centralized declaration in kernel_internal.h.

On uniprocessor systems, z_interrupt_stacks has one element
and can be used in the same way as _interrupt_stack.

The IRQ stack for CPU 0 is now set in init.c instead of in
arch code.

The extern definition of the main thread stack is now removed,
this doesn't need to be in a header.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
2020-03-16 23:17:36 +02:00

121 lines
3.4 KiB
C

/*
* Copyright (c) 2019 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_
#include <kernel_arch_data.h>
#include <arch/x86/mmustructs.h>
#ifdef CONFIG_X86_64
#include <intel64/kernel_arch_func.h>
#else
#include <ia32/kernel_arch_func.h>
#endif
#ifndef _ASMLANGUAGE
static inline bool arch_is_in_isr(void)
{
#ifdef CONFIG_SMP
	/* On SMP, there is a race vs. the current CPU changing if we
	 * are preempted. Need to mask interrupts while inspecting
	 * (note deliberate lack of gcc size suffix on the
	 * instructions, we need to work with both architectures here)
	 *
	 * The "memory" clobbers are compiler barriers: without them
	 * the compiler is free to hoist or sink the load of the
	 * per-CPU nested count outside the pushf/cli ... popf window,
	 * which would defeat the interrupt masking entirely.
	 */
	bool ret;

	__asm__ volatile ("pushf; cli" : : : "memory");
	ret = arch_curr_cpu()->nested != 0;
	__asm__ volatile ("popf" : : : "memory");

	return ret;
#else
	/* Uniprocessor: no cross-CPU race, read the nested count
	 * from the single kernel struct directly.
	 */
	return _kernel.nested != 0U;
#endif
}
/* stack alignment related macros: STACK_ALIGN is defined in arch.h */
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN)

/* Forward declaration only; the full definition is not needed here */
struct multiboot_info;

/* Early C entry point reached from the arch startup code; never returns.
 * NOTE(review): arg presumably carries boot information (e.g. a
 * multiboot_info pointer) from the entry stub -- confirm against the
 * assembly callers.
 */
extern FUNC_NORETURN void z_x86_prep_c(void *arg);

#ifdef CONFIG_X86_VERY_EARLY_CONSOLE
/* Setup ultra-minimal serial driver for printk() */
void z_x86_early_serial_init(void);
#endif /* CONFIG_X86_VERY_EARLY_CONSOLE */

#ifdef CONFIG_X86_MMU
/* Create all page tables with boot configuration and enable paging */
void z_x86_paging_init(void);
/* Fetch the page tables in effect for the given thread.
 *
 * With user mode disabled every thread runs on the kernel's page
 * tables; otherwise each thread carries its own set in its arch
 * struct.
 */
static inline struct x86_page_tables *
z_x86_thread_page_tables_get(struct k_thread *thread)
{
#ifndef CONFIG_USERSPACE
	/* thread argument intentionally unused in this configuration */
	return &z_x86_kernel_ptables;
#else
	return thread->arch.ptables;
#endif
}
#endif /* CONFIG_X86_MMU */

/* Called upon CPU exception that is unhandled and hence fatal; dump
 * interesting info and call z_x86_fatal_error()
 *
 * @param vector CPU exception vector number that fired
 * @param esf Exception stack frame captured at the time of the fault
 */
FUNC_NORETURN void z_x86_unhandled_cpu_exception(uintptr_t vector,
const z_arch_esf_t *esf);

/* Called upon unrecoverable error; dump registers and transfer control to
 * kernel via z_fatal_error()
 *
 * @param reason Fatal error reason code.
 *        NOTE(review): presumably a k_fatal_error_reason value --
 *        confirm, the enum is not visible from this header.
 * @param esf Exception stack frame associated with the error
 */
FUNC_NORETURN void z_x86_fatal_error(unsigned int reason,
const z_arch_esf_t *esf);

/* Common handling for page fault exceptions
 *
 * @param esf Exception stack frame from the page fault
 */
void z_x86_page_fault_handler(z_arch_esf_t *esf);
#ifdef CONFIG_THREAD_STACK_INFO
/**
 * @brief Check if a memory address range falls within the stack
 *
 * Given a memory address range, ensure that it falls within the bounds
 * of the faulting context's stack.
 *
 * Note the inverted sense of the return value: true means the range is
 * OUT of bounds (i.e. the check failed).
 *
 * @param addr Starting address
 * @param size Size of the region, or 0 if we just want to see if addr is
 * in bounds
 * @param cs Code segment of faulting context
 *        NOTE(review): presumably used to decide whether the user or
 *        supervisor stack bounds apply -- confirm in the implementation.
 * @return true if addr/size region is not within the thread stack
 */
bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, u16_t cs);
#endif /* CONFIG_THREAD_STACK_INFO */
#ifdef CONFIG_USERSPACE
/* Transition the current thread to user mode and begin executing
 * user_entry(p1, p2, p3); never returns to the caller.
 *
 * NOTE(review): stack_start/stack_end presumably bound the user stack
 * region used to set the initial user stack pointer -- confirm against
 * the per-arch (ia32/intel64) implementation.
 */
extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3,
uintptr_t stack_end,
uintptr_t stack_start);

/* Preparation steps needed for all threads if user mode is turned on.
 *
 * Returns the initial entry point to swap into.
 */
void *z_x86_userspace_prepare_thread(struct k_thread *thread);

/* NOTE(review): presumably initializes the thread's per-thread page
 * tables for user mode -- confirm in the implementation.
 */
void z_x86_thread_pt_init(struct k_thread *thread);

/* Apply a memory domain's configuration to the given set of page tables */
void z_x86_apply_mem_domain(struct x86_page_tables *ptables,
struct k_mem_domain *mem_domain);
#endif /* CONFIG_USERSPACE */

/* Handle a kernel oops raised by the current context
 *
 * @param esf Exception stack frame at the point of the oops
 */
void z_x86_do_kernel_oops(const z_arch_esf_t *esf);
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_ */