arch: Initial support for the RX architecture

This commit adds the code required to support the RX architecture
in Zephyr. It includes:
- required Kconfig and CMakeLists files for the RX arch
- initialization/startup code for RX
- interrupt and exception handling
- thread creation and thread context switching
- IRQ offload using a software interrupt

Signed-off-by: Duy Nguyen <duy.nguyen.xa@renesas.com>
Duy Nguyen 2025-03-31 16:30:38 +07:00, committed by Benjamin Cabé
parent f00608ef56
commit fb7bdf0df4
26 changed files with 1802 additions and 0 deletions


@ -166,6 +166,15 @@ config ARCH_POSIX
help
POSIX (native) architecture
config RX
bool
select ARCH_IS_SET
select ATOMIC_OPERATIONS_C
select USE_SWITCH
select USE_SWITCH_SUPPORTED
help
Renesas RX architecture
config ARCH_IS_SET
bool
help


@ -19,3 +19,5 @@ archs:
path: xtensa
- name: x86
path: x86
- name: rx
path: rx

arch/rx/CMakeLists.txt (new file)

@ -0,0 +1,6 @@
# Copyright (c) 2020 KT-Elektronik, Klaucke und Partner GmbH
# Copyright (c) 2024 Renesas Electronics Corporation
# SPDX-License-Identifier: Apache-2.0
add_subdirectory(core)
set_property(GLOBAL PROPERTY PROPERTY_OUTPUT_FORMAT elf32-rx-le) # needed for e.g. objcopy

arch/rx/Kconfig (new file)

@ -0,0 +1,74 @@
# Renesas RX architecture configuration options
# Copyright (c) 2020 KT-Elektronik, Klaucke und Partner GmbH
# Copyright (c) 2024 Renesas Electronics Corporation
# SPDX-License-Identifier: Apache-2.0
menu "Renesas RX Options"
depends on RX
config ARCH
string
default "rx"
config CPU_RXV1
bool
help
Set if the processor supports the Renesas RXv1 instruction set.
config CPU_RXV2
bool
help
Set if the processor supports the Renesas RXv2 instruction set.
config CPU_RXV3
bool
help
Set if the processor supports the Renesas RXv3 instruction set.
config HAS_EXCEPT_VECTOR_TABLE
bool
help
Set if the processor has the exception vector table.
config XIP
default y
config NUM_IRQ_PRIO_LEVELS
int "Number of supported interrupt priority levels"
range 1 16
default 16
help
Interrupt priorities available will be 0 to NUM_IRQ_PRIO_LEVELS-1. The
minimum value is 1. The BSP must provide a valid default for proper
operation.
config NUM_IRQS
int
default 256
config GEN_ISR_TABLES
default y
config GEN_SW_ISR_TABLE
default y
config GEN_IRQ_VECTOR_TABLE
default n
config GEN_IRQ_START_VECTOR
default 16
config DYNAMIC_INTERRUPTS
default y
config MAIN_STACK_SIZE
default 1024
config INITIALIZATION_STACK_SIZE
int "Initialization stack size (in bytes)"
default 512
help
Stack size (in bytes) for the kernel initialization process.
endmenu


@ -0,0 +1,16 @@
# SPDX-License-Identifier: Apache-2.0
zephyr_library()
zephyr_library_sources(
switch.S
cpu_idle.c
prep_c.c
irq_manage.c
reset.S
thread.c
vects.c
isr_exit.S
)
zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)

arch/rx/core/cpu_idle.c (new file)

@ -0,0 +1,48 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/irq.h>
#include <zephyr/tracing/tracing.h>
void arch_cpu_idle(void)
{
sys_trace_idle();
/* The assembler instruction "wait" switches the processor to sleep mode,
* which stops program execution until an interrupt is triggered.
* All clocks that are not in a stop state continue operating, including
* the system timer.
*
* Also, "wait" sets the PSW I bit, activating
* interrupts (otherwise, the processor would never return from sleep
* mode). This is consistent with the Zephyr API description, according
* to which "In some architectures, before returning, the function
* unmasks interrupts unconditionally." - this is such an architecture.
*/
__asm__ volatile("wait");
}
void arch_cpu_atomic_idle(unsigned int key)
{
sys_trace_idle();
/* The assembler instruction "wait" switches the processor to sleep mode,
* which stops program execution until an interrupt is triggered.
* All clocks that are not in a stop state continue operating, including
* the system timer.
*/
__asm__ volatile("wait");
/* "wait" unconditionally unlocks interrupts. To restore the interrupt
* lockout state before calling arch_cpu_atomic_idle, interrupts have
* to be locked after returning from "wait" if irq_lock would NOT have
* unlocked interrupts (i.e. if the key indicates nested interrupt
* locks)
*/
if (key == 0) {
irq_lock();
}
}
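For context, a minimal usage sketch of the kernel API built on these hooks (purely illustrative and not part of the commit; k_cpu_atomic_idle() restores the lock state encoded in the key, as implemented above):

#include <zephyr/irq.h>
#include <zephyr/kernel.h>

void wait_for_event_example(void)
{
	unsigned int key = irq_lock();

	/* check a condition with interrupts locked, then go idle without
	 * opening a race window; an interrupt wakes the CPU again
	 */
	k_cpu_atomic_idle(key);
}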

arch/rx/core/irq_manage.c (new file)

@ -0,0 +1,178 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/fatal.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <zephyr/irq.h>
#define IR_BASE_ADDRESS DT_REG_ADDR_BY_NAME(DT_NODELABEL(icu), IR)
#define IER_BASE_ADDRESS DT_REG_ADDR_BY_NAME(DT_NODELABEL(icu), IER)
#define IPR_BASE_ADDRESS DT_REG_ADDR_BY_NAME(DT_NODELABEL(icu), IPR)
#define NUM_IRQS_PER_REG 8
#define REG_FROM_IRQ(irq) (irq / NUM_IRQS_PER_REG)
#define BIT_FROM_IRQ(irq) (irq % NUM_IRQS_PER_REG)
#define REG(addr) *((uint8_t *)(addr))
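As a quick illustration of the mapping performed by REG_FROM_IRQ()/BIT_FROM_IRQ() (a standalone sketch, not part of the commit; IRQ 60 is an arbitrary example):

#include <stdio.h>

#define NUM_IRQS_PER_REG 8
#define REG_FROM_IRQ(irq) (irq / NUM_IRQS_PER_REG)
#define BIT_FROM_IRQ(irq) (irq % NUM_IRQS_PER_REG)

int main(void)
{
	unsigned int irq = 60;

	/* 60 / 8 = 7 and 60 % 8 = 4, i.e. byte 7 of the IER block, bit 4 */
	printf("IRQ %u -> IER byte %u, bit %u\n", irq, REG_FROM_IRQ(irq), BIT_FROM_IRQ(irq));
	return 0;
}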
/**
* @brief Enable an IRQ by setting the corresponding IEN bit.
*
* Note that this will have no effect for IRQs 0-15, as the
* Renesas RX chip ignores write operations on the corresponding
* registers.
*
* @param irq interrupt to enable (16-255)
*/
void arch_irq_enable(unsigned int irq)
{
__ASSERT(irq < CONFIG_NUM_IRQS, "trying to enable invalid interrupt (%u)", irq);
__ASSERT(irq >= CONFIG_GEN_IRQ_START_VECTOR, "trying to enable reserved interrupt (%u)",
irq);
uint32_t key = irq_lock();
/* reset interrupt before activating */
WRITE_BIT(REG(IR_BASE_ADDRESS + irq), 0, false);
WRITE_BIT(REG(IER_BASE_ADDRESS + REG_FROM_IRQ(irq)), BIT_FROM_IRQ(irq), true);
irq_unlock(key);
}
/**
* @brief Disable an IRQ by clearing the corresponding IEN bit.
*
* Note that this will have no effect for IRQs 0-15, as the
* Renesas RX chip ignores write operations on the corresponding
* registers.
*
* @param irq interrupt to disable (16-255)
*/
void arch_irq_disable(unsigned int irq)
{
__ASSERT(irq < CONFIG_NUM_IRQS, "trying to disable invalid interrupt (%u)", irq);
__ASSERT(irq >= CONFIG_GEN_IRQ_START_VECTOR, "trying to disable reserved interrupt (%u)",
irq);
uint32_t key = irq_lock();
WRITE_BIT(REG(IER_BASE_ADDRESS + REG_FROM_IRQ(irq)), BIT_FROM_IRQ(irq), false);
irq_unlock(key);
}
/**
* @brief Determine if an IRQ is enabled by reading the corresponding IEN bit.
*
* @param irq interrupt number
*
* @return true if the interrupt is enabled
*/
int arch_irq_is_enabled(unsigned int irq)
{
__ASSERT(irq < CONFIG_NUM_IRQS, "is_enabled on invalid interrupt (%u)", irq);
__ASSERT(irq >= CONFIG_GEN_IRQ_START_VECTOR, "is_enabled on reserved interrupt (%u)", irq);
return (REG(IER_BASE_ADDRESS + REG_FROM_IRQ(irq)) & BIT(BIT_FROM_IRQ(irq))) != 0;
}
/*
* @brief Spurious interrupt handler
*
* Installed in all dynamic interrupt slots at boot time. Throws an error if
* called.
*
* @return N/A
*/
void z_irq_spurious(const void *unused)
{
ARG_UNUSED(unused);
z_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
}
/*
* @internal
*
* @brief Set an interrupt's priority
*
* Higher values take priority over lower values.
*
* @return N/A
*/
void z_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
{
__ASSERT(irq < CONFIG_NUM_IRQS, "irq_priority_set on invalid interrupt (%u)", irq);
__ASSERT(irq >= CONFIG_GEN_IRQ_START_VECTOR, "irq_priority_set on reserved interrupt (%u)",
irq);
__ASSERT(prio < CONFIG_NUM_IRQ_PRIO_LEVELS, "invalid priority (%u) for interrupt %u", prio,
irq);
uint32_t key = irq_lock();
if (irq >= 34) {
/* for interrupts >= 34, the IPR assignment is regular (IPR index equals the IRQ number) */
REG(IPR_BASE_ADDRESS + irq) = prio;
} else {
switch (irq) {
/* 0-15: no IPR */
case 16:
/* 17: no IPR */
case 18:
REG(IPR_BASE_ADDRESS) = prio;
break;
/* 19,20: no IPR */
case 21:
REG(IPR_BASE_ADDRESS + 1) = prio;
break;
/* 22: no IPR */
case 23:
REG(IPR_BASE_ADDRESS + 2) = prio;
break;
/* 24,25: no IPR */
case 26:
case 27:
REG(IPR_BASE_ADDRESS + 3) = prio;
break;
case 28:
REG(IPR_BASE_ADDRESS + 4) = prio;
break;
case 29:
REG(IPR_BASE_ADDRESS + 5) = prio;
break;
case 30:
REG(IPR_BASE_ADDRESS + 6) = prio;
break;
case 31:
REG(IPR_BASE_ADDRESS + 7) = prio;
break;
/* 32,33: no IPR */
}
}
irq_unlock(key);
}
#ifdef CONFIG_DYNAMIC_INTERRUPTS
/**
* @brief connect a callback function to an interrupt at runtime
*
* @param irq interrupt number
* @param priority priority of the interrupt
* @param routine routine to call when the interrupt is triggered
* @param parameter parameter to supply to the routine on call
* @param flags flags for the interrupt
*
* @return the interrupt number
*/
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(const void *parameter), const void *parameter,
uint32_t flags)
{
z_isr_install(irq, routine, parameter);
z_irq_priority_set(irq, priority, flags);
return irq;
}
#endif /* CONFIG_DYNAMIC_INTERRUPTS */
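A hedged usage sketch of the dynamic interrupt path from application or driver code (the IRQ line, priority, and handler name are illustrative):

#include <zephyr/irq.h>
#include <zephyr/kernel.h>

static void my_handler(const void *param)
{
	ARG_UNUSED(param);
	/* service the interrupt */
}

void install_dynamic_irq(void)
{
	/* illustrative values: IRQ line 60, priority 3 */
	irq_connect_dynamic(60, 3, my_handler, NULL, 0);
	irq_enable(60);
}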


@ -0,0 +1,60 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Software interrupts utility code - Renesas RX architecture implementation.
*
* The code uses the first software interrupt (SWINT) of the RX processor.
* Should this interrupt ever be needed for something else, this has to be
* changed - maybe to the second software interrupt (SWINT2).
*/
#include <zephyr/kernel.h>
#include <zephyr/irq_offload.h>
#include <zephyr/sys/util.h>
#define SWINT1_IRQ_LINE 27
#define SWINT1_PRIO 14
/* Address of the software interrupt trigger register for SWINT1 */
#define SWINT_REGISTER_ADDRESS 0x872E0
#define SWINTR_SWINT *(uint8_t *)(SWINT_REGISTER_ADDRESS)
static irq_offload_routine_t _offload_routine;
static const void *offload_param;
void z_irq_do_offload(void)
{
irq_offload_routine_t tmp;
if (!_offload_routine) {
return;
}
tmp = _offload_routine;
_offload_routine = NULL;
tmp((const void *)offload_param);
}
static void swi0_handler(void)
{
z_irq_do_offload();
}
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
{
_offload_routine = routine;
offload_param = parameter;
SWINTR_SWINT = 1;
}
void arch_irq_offload_init(void)
{
IRQ_CONNECT(SWINT1_IRQ_LINE, SWINT1_PRIO, swi0_handler, NULL, 0);
irq_enable(SWINT1_IRQ_LINE);
}
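For reference, a minimal sketch of how irq_offload() is typically used (e.g. in tests) to run a routine in interrupt context via the software interrupt configured above (names are illustrative):

#include <zephyr/irq_offload.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

static void offloaded_work(const void *arg)
{
	ARG_UNUSED(arg);
	/* runs in interrupt context */
	printk("in ISR context: %d\n", k_is_in_isr());
}

void run_offload_example(void)
{
	irq_offload(offloaded_work, NULL);
}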

arch/rx/core/isr_exit.S (new file)

@ -0,0 +1,39 @@
/*
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/toolchain.h>
.list
.section .text
GTEXT(_z_rx_irq_exit)
_z_rx_irq_exit:
mov #__kernel, r1 ; Load the base address of _kernel into r1
mov r1, r3 ; Keep a copy of the base address of _kernel in r3
add #___cpu_t_current_OFFSET, r1 ; Add the offset for the 'current' field to r1
mov [r1], r2 ; Load the value of _kernel.cpus[0].current into r2
push r2 ; Save old_thread to the stack
; Get the next thread to schedule
mov #0,r1 ; Use r1 to pass NULL since we haven't saved the context yet
bsr _z_get_next_switch_handle ; Call the function
; The return value of z_get_next_switch_handle will now be in r1
; Restore old_thread from the stack
pop r2 ; Restore old_thread from the stack
; Check if a switch is necessary
cmp #0, r1
bz no_switch ; If new_thread (in r1) is NULL, jump to no_switch
add #___thread_t_switch_handle_OFFSET, r2
; Call arch_switch to perform the context switch
bsr _z_rx_arch_switch ; r1: new_thread->switch_handle, r2: old_thread->switch_handle
no_switch:
rts


@ -0,0 +1,37 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief RX Kernel structure member offset definition file
*
* This module is responsible for the generation of the absolute symbols whose
* value represents the member offsets for various structures.
*
* All of the absolute symbols defined by this module will be present in the
* final kernel ELF image (due to the linker's reference to the _OffsetAbsSyms
* symbol).
*
* INTERNAL
* It is NOT necessary to define the offset for every member of a structure.
* Typically, only those members that are accessed by assembly language routines
* are defined; however, it doesn't hurt to define all fields for the sake of
* completeness.
*/
#ifndef _RX_OFFSETS_INC_
#define _RX_OFFSETS_INC_
#include <gen_offset.h>
#include <zephyr/kernel.h>
#include <kernel_arch_data.h>
#include <kernel_offsets.h>
GEN_ABSOLUTE_SYM(__callee_saved_t_SIZEOF, sizeof(_callee_saved_t));
GEN_ABSOLUTE_SYM(__thread_arch_t_SIZEOF, sizeof(_thread_arch_t));
GEN_ABS_SYM_END
#endif /* _RX_OFFSETS_INC_ */

arch/rx/core/prep_c.c (new file)

@ -0,0 +1,41 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Full C support initialization
*
*
* Initialization of full C support: zero the .bss and call z_cstart().
*
* Stack is available in this module, but not the global data/bss until their
* initialization is performed.
*/
#include <kernel_internal.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_initialization_process_stacks, CONFIG_MP_MAX_NUM_CPUS,
CONFIG_INITIALIZATION_STACK_SIZE);
/**
* @brief Prepare to and run C code
*
* This routine prepares for the execution of and runs C code.
*
* @return N/A
*/
void z_prep_c(void)
{
z_bss_zero();
z_data_copy();
z_cstart();
CODE_UNREACHABLE;
}

arch/rx/core/reset.S (new file)

@ -0,0 +1,171 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/toolchain.h>
.list
.section .text
GTEXT(__start)
__start :
/* during initialization (before the main thread is started), z_initialization_process_stacks
* is used to do the kernel initialization.
*/
mvtc #(_z_initialization_process_stacks + CONFIG_INITIALIZATION_STACK_SIZE),USP
/* initialise interrupt stack pointer */
mvtc #(_z_interrupt_stacks + CONFIG_ISR_STACK_SIZE),ISP
/* set exception vector address (_ExceptVectors is defined in vects.c) */
#if CONFIG_HAS_EXCEPT_VECTOR_TABLE
mvtc #_ExceptVectors, extb
#endif
/* set interrupt vector address (_rvectors_start is defined in vects.c) */
mvtc #_rvectors_start, intb
/* load data section from ROM to RAM */
mov #_mdata,r2 /* src ROM address of data section in R2 */
mov #_data,r1 /* dest start RAM address of data section in R1 */
mov #_edata,r3 /* end RAM address of data section in R3 */
sub r1,r3 /* size of data section in R3 (R3=R3-R1) */
#ifdef __RX_ALLOW_STRING_INSNS__
smovf /* block copy R3 bytes from R2 to R1 */
#else
cmp #0, r3
beq 2f
1: mov.b [r2+], r5
mov.b r5, [r1+]
sub #1, r3
bne 1b
2:
#endif
/* bss initialisation: zero out bss */
mov #0,r2 /* load R2 reg with zero */
mov #_ebss, r3 /* store the end address of bss in R3 */
mov #_bss, r1 /* store the start address of bss in R1 */
sub r1,r3 /* size of bss section in R3 (R3=R3-R1) */
sstr.b
#ifdef CONFIG_INIT_STACKS
/* initialize the irq stack (it is located in the bss section) */
mov #0xaa,r2 /* initialization value 0xaa */
mov #_z_interrupt_stacks, r1 /* start address */
mov #CONFIG_ISR_STACK_SIZE, r3 /* stack size */
sstr.b
#endif
/* setup PSW - use user stack register and lock interrupts during initialization */
mvtc #0x20000, psw
#ifdef CPPAPP
bsr __rx_init
#endif
/* start user program */
bsr _z_cstart
bsr _exit
#ifdef CPPAPP
.global _rx_run_preinit_array
.type _rx_run_preinit_array,@function
_rx_run_preinit_array:
mov #__preinit_array_start,r1
mov #__preinit_array_end,r2
mov #_rx_run_inilist,r7
jsr r7
.global _rx_run_init_array
.type _rx_run_init_array,@function
_rx_run_init_array:
mov #__init_array_start,r1
mov #__init_array_end,r2
mov #4, r3
mov #_rx_run_inilist,r7
jsr r7
.global _rx_run_fini_array
.type _rx_run_fini_array,@function
_rx_run_fini_array:
mov #__fini_array_start,r2
mov #__fini_array_end,r1
mov #-4, r3
/* fall through */
_rx_run_inilist:
next_inilist:
cmp r1,r2
beq.b done_inilist
mov.l [r1],r4
cmp #-1, r4
beq.b skip_inilist
cmp #0, r4
beq.b skip_inilist
pushm r1-r3
jsr r4
popm r1-r3
skip_inilist:
add r3,r1
mov #next_inilist,r7
jsr r7
done_inilist:
rts
.section .init,"ax"
.balign 4
.global __rx_init
__rx_init:
.section .fini,"ax"
.balign 4
.global __rx_fini
__rx_fini:
mov #_rx_run_fini_array,r7
jsr r7
.section .sdata
.balign 4
.global __gp
.weak __gp
__gp:
.section .data
.global ___dso_handle
.weak ___dso_handle
___dso_handle:
.long 0
.section .init,"ax"
mov #_rx_run_preinit_array,r7
jsr r7
mov #_rx_run_init_array,r7
jsr r7
rts
.global __rx_init_end
__rx_init_end:
.section .fini,"ax"
rts
.global __rx_fini_end
__rx_fini_end:
#endif
/* call to exit*/
_exit:
bra _loop_here
_loop_here:
bra _loop_here
.text
.end

arch/rx/core/switch.S (new file)

@ -0,0 +1,137 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
GTEXT(_z_rx_arch_switch)
GTEXT(_switch_isr_wrapper)
/* void z_rx_arch_switch(void *switch_to, void **switched_from)
*
* @brief switch between threads
*
* @param switch_to (r1) pointer to switch handle of the new thread
* @param switched_from (r2) pointer to pointer to switch handle of the old
* thread
*
* Thread-switching is treated differently depending on whether it is a
* cooperative switch triggered by old thread itself or a preemptive switch
* triggered by an interrupt (in this case the function has been called from an
* ISR).
*/
.section .text._z_rx_arch_switch
.align 4
_z_rx_arch_switch:
mvfc psw,r3
tst #0x130000, r3 /* test if the PM, U or I bits are set */
bz _z_rx_context_switch_isr /* if none of them are set, this is an isr */
mov #_coop_switch_to,r3
mov r1,[r3]
mov #_coop_switched_from,r3
mov r2,[r3]
/* trigger unconditional interrupt dedicated to thread switching. The content of r1 and r2
* will not change by invoking the interrupt so the parameters switch_to and switched_from
* will be available in _z_rx_context_switch_isr, which has been entered into the vector
* table as ISR for interrupt 1
*/
int #1
/* at this point, r0 points to the entry point, so RTS will enter it */
rts
/* void switch_isr_wrapper(void)
*
* @brief isr for interrupt 1 as wrapper for _z_rx_context_switch_isr
*
* _z_rx_context_switch_isr ends in rts, so it does not return from the interrupt context
*/
.section .text._switch_isr_wrapper
.align 4
_switch_isr_wrapper:
pushm r1-r15
/* Save the accumulator. */
mvfachi r15 /* Accumulator high 32 bits. */
push r15
mvfacmi r15 /* Accumulator middle 32 bits. */
shll #16, r15 /* Shifted left as it is restored to the low order word.*/
push r15
mov #_coop_switch_to,r3
mov [r3],r1
mov #_coop_switched_from,r3
mov [r3],r2
bsr _z_rx_context_switch_isr
/* Restore the registers from the stack of the thread being switched to. */
pop r15
mvtaclo r15 /* Accumulator low 32 bits. */
pop r15
mvtachi r15 /* Accumulator high 32 bits. */
popm r1-r15
rte
/* void z_rx_context_switch_isr(void *switch_to, void **switched_from)
*
* @brief switch between threads in the interrupt context
*
* @param switch_to (r1) pointer to switch handle of the new thread
* @param switched_from (r2) pointer to pointer to switch handle of the old thread
*
* since this is part of an ISR, PSW, PC and general registers of the old thread are already
* stored in the interrupt stack, so copy the corresponding part of the interrupt stack to the
* stack of the interrupted thread
*/
_z_rx_context_switch_isr:
/* store arguments switch_to and switched_from to registers r4 and r5 as
* registers r2 and r3 are needed for the smovf operation */
mov r1,r4
mov r2,r5
/* set r2 (smovb source address) to the beginning of the interrupt stack */
mov #(_z_interrupt_stacks + CONFIG_ISR_STACK_SIZE)-1,r2
mvfc usp,r1 /* set r1 (smovb dest) to USP */
sub #1,r1 /* correct by one byte to use smovb compared to push/pop */
/* set r3 to number of bytes to move
* Accumulator 64bit (4byte * 2)
* 15*4 byte for 15 general registers
* + PSW (4 byte)
* + PC (4 byte)
*/
mov #76,r3
smovb /* block copy from interrupt stack to old thread stack */
add #1,r1 /* smovb leaves r1 pointing 1 byte before the stack */
add #1,r2 /* same with r2 */
mov r1,[r5] /* store stack pointer of old thread in *switched_from */
mov r2,r1 /* set r1 (smovf dest) to the beginning of the interrupt stack */
mov r4,r2 /* set r2 (smovf source) to the sp of the new thread*/
mov #76,r3 /* set r3 to number of bytes to move */
smovf /* block copy from new thread stack to interrupt stack */
mvtc r2,usp /* set USP to the new thread stack */
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
bsr _z_thread_mark_switched_in
#endif
rts

arch/rx/core/thread.c (new file)

@ -0,0 +1,53 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <ksched.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
/* Variables used to pass the arguments of z_rx_arch_switch() (arch/rx/core/switch.S)
* when performing a cooperative thread switch. In that case, z_rx_arch_switch() triggers
* the unconditional interrupt 1 to actually perform the switch. The ISR for interrupt 1
* (switch_isr_wrapper()) reads the arguments from these variables.
*/
void *coop_switch_to;
void **coop_switched_from;
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *stack_ptr,
k_thread_entry_t entry, void *arg1, void *arg2, void *arg3)
{
struct arch_esf *iframe;
iframe = Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr);
/* initial value for the PSW (bits U and I are set) */
iframe->psw = 0x30000;
/* the initial entry point is the function z_thread_entry */
iframe->entry_point = (uint32_t)z_thread_entry;
/* arguments for the call of z_thread_entry (to be written to r1-r4) */
iframe->r1 = (uint32_t)entry;
iframe->r2 = (uint32_t)arg1;
iframe->r3 = (uint32_t)arg2;
iframe->r4 = (uint32_t)arg3;
/* for debugging: */
iframe->r5 = 5;
iframe->r6 = 6;
iframe->r7 = 7;
iframe->r8 = 8;
iframe->r9 = 9;
iframe->r10 = 10;
iframe->r11 = 11;
iframe->r12 = 12;
iframe->r13 = 13;
iframe->r14 = 14;
iframe->r15 = 15;
iframe->acc_l = 16;
iframe->acc_h = 17;
thread->switch_handle = (void *)iframe;
}
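For context, a hedged sketch of how a thread whose initial frame is built by arch_new_thread() above would be created from application code (stack size, priority, and names are illustrative):

#include <zephyr/kernel.h>

#define MY_STACK_SIZE 1024
#define MY_PRIORITY   5

K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
static struct k_thread my_thread;

static void my_entry(void *p1, void *p2, void *p3)
{
	/* p1-p3 arrive via r2-r4 of the initial frame set up above */
}

void start_my_thread(void)
{
	k_thread_create(&my_thread, my_stack, K_THREAD_STACK_SIZEOF(my_stack),
			my_entry, NULL, NULL, NULL,
			MY_PRIORITY, 0, K_NO_WAIT);
}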

arch/rx/core/vects.c (new file)

@ -0,0 +1,499 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/sw_isr_table.h>
#include <zephyr/irq.h>
#include <kswap.h>
#include <zephyr/tracing/tracing.h>
typedef void (*fp)(void);
extern void _start(void);
extern void z_rx_irq_exit(void);
/* this is mainly to give Visual Studio Code peace of mind */
#ifndef CONFIG_GEN_IRQ_START_VECTOR
#define CONFIG_GEN_IRQ_START_VECTOR 0
#endif
#define EXVECT_SECT __attribute__((section(".exvectors")))
#define RVECT_SECT __attribute__((section(".rvectors")))
#define FVECT_SECT __attribute__((section(".fvectors")))
#define __ISR__ __attribute__((interrupt, naked))
static ALWAYS_INLINE void REGISTER_SAVE(void)
{
__asm volatile(
/* Save the registers to the ISP at the top of the ISR. */
/* This code corresponds to arch_new_thread() in thread.c: */
/* the registers must be stored in the same layout that */
/* arch_new_thread() sets up, except for PC and PSW. */
"PUSHM R1-R15\n"
"MVFACHI R15\n"
"PUSH.L R15\n"
"MVFACMI R15\n"
"SHLL #16, R15\n"
"PUSH.L R15\n");
}
static ALWAYS_INLINE void REGISTER_RESTORE_EXIT(void)
{
__asm volatile(
/* Restore the registers and do the RTE at the End of ISR. */
"POP R15\n"
"MVTACLO R15\n"
"POP R15\n"
"MVTACHI R15\n"
"POPM R1-R15\n"
"RTE\n");
}
/* Privileged instruction exception */
static void __ISR__ INT_Excep_SuperVisorInst(void)
{
REGISTER_SAVE();
ISR_DIRECT_HEADER();
z_fatal_error(K_ERR_CPU_EXCEPTION, NULL);
ISR_DIRECT_FOOTER(1);
REGISTER_RESTORE_EXIT();
}
/* Access exception */
static void __ISR__ INT_Excep_AccessInst(void)
{
REGISTER_SAVE();
ISR_DIRECT_HEADER();
z_fatal_error(K_ERR_CPU_EXCEPTION, NULL);
ISR_DIRECT_FOOTER(1);
REGISTER_RESTORE_EXIT();
}
/* Undefined instruction exception */
static void __ISR__ INT_Excep_UndefinedInst(void)
{
REGISTER_SAVE();
ISR_DIRECT_HEADER();
z_fatal_error(K_ERR_CPU_EXCEPTION, NULL);
ISR_DIRECT_FOOTER(1);
REGISTER_RESTORE_EXIT();
}
/* floating point exception */
static void __ISR__ INT_Excep_FloatingPoint(void)
{
REGISTER_SAVE();
ISR_DIRECT_HEADER();
z_fatal_error(K_ERR_CPU_EXCEPTION, NULL);
ISR_DIRECT_FOOTER(1);
REGISTER_RESTORE_EXIT();
}
/* Non-maskable interrupt */
static void __ISR__ INT_NonMaskableInterrupt(void)
{
REGISTER_SAVE();
ISR_DIRECT_HEADER();
z_fatal_error(K_ERR_CPU_EXCEPTION, NULL);
ISR_DIRECT_FOOTER(1);
REGISTER_RESTORE_EXIT();
}
/* dummy function */
static void __ISR__ Dummy(void)
{
REGISTER_SAVE();
ISR_DIRECT_HEADER();
ISR_DIRECT_FOOTER(1);
REGISTER_RESTORE_EXIT();
}
/**
* @brief select Zephyr ISR and argument from software ISR table and call
* function
*
* @param irq interrupt to handle
*/
static ALWAYS_INLINE void handle_interrupt(uint8_t irq)
{
ISR_DIRECT_HEADER();
_sw_isr_table[irq].isr(_sw_isr_table[irq].arg);
ISR_DIRECT_FOOTER(1);
}
/**
* @brief isr for reserved interrupts (0-15) that are not handled through
* the zephyr sw isr table
*/
static void __ISR__ reserved_isr(void)
{
REGISTER_SAVE();
ISR_DIRECT_HEADER();
z_fatal_error(K_ERR_CPU_EXCEPTION, NULL);
ISR_DIRECT_FOOTER(1);
REGISTER_RESTORE_EXIT();
}
/* wrapper for z_rx_context_switch_isr, defined in switch.S */
extern void __ISR__ switch_isr_wrapper(void);
/* this macro is used to define "demuxing" ISRs for all interrupts that are
* handled through Zephyr's software isr table.
*/
#define INT_DEMUX(irq) \
static __attribute__((interrupt, naked)) void int_demux_##irq(void) \
{ \
REGISTER_SAVE(); \
handle_interrupt(irq - CONFIG_GEN_IRQ_START_VECTOR); \
REGISTER_RESTORE_EXIT(); \
}
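For clarity, INT_DEMUX(16) expands roughly as follows (shown only as an illustration of the macro above, not as additional code):

static __attribute__((interrupt, naked)) void int_demux_16(void)
{
	REGISTER_SAVE();
	handle_interrupt(16 - CONFIG_GEN_IRQ_START_VECTOR); /* entry 0 of _sw_isr_table */
	REGISTER_RESTORE_EXIT();
}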
INT_DEMUX(16);
INT_DEMUX(17);
INT_DEMUX(18);
INT_DEMUX(19);
INT_DEMUX(20);
INT_DEMUX(21);
INT_DEMUX(22);
INT_DEMUX(23);
INT_DEMUX(24);
INT_DEMUX(25);
INT_DEMUX(26);
INT_DEMUX(27);
INT_DEMUX(28);
INT_DEMUX(29);
INT_DEMUX(30);
INT_DEMUX(31);
INT_DEMUX(32);
INT_DEMUX(33);
INT_DEMUX(34);
INT_DEMUX(35);
INT_DEMUX(36);
INT_DEMUX(37);
INT_DEMUX(38);
INT_DEMUX(39);
INT_DEMUX(40);
INT_DEMUX(41);
INT_DEMUX(42);
INT_DEMUX(43);
INT_DEMUX(44);
INT_DEMUX(45);
INT_DEMUX(46);
INT_DEMUX(47);
INT_DEMUX(48);
INT_DEMUX(49);
INT_DEMUX(50);
INT_DEMUX(51);
INT_DEMUX(52);
INT_DEMUX(53);
INT_DEMUX(54);
INT_DEMUX(55);
INT_DEMUX(56);
INT_DEMUX(57);
INT_DEMUX(58);
INT_DEMUX(59);
INT_DEMUX(60);
INT_DEMUX(61);
INT_DEMUX(62);
INT_DEMUX(63);
INT_DEMUX(64);
INT_DEMUX(65);
INT_DEMUX(66);
INT_DEMUX(67);
INT_DEMUX(68);
INT_DEMUX(69);
INT_DEMUX(70);
INT_DEMUX(71);
INT_DEMUX(72);
INT_DEMUX(73);
INT_DEMUX(74);
INT_DEMUX(75);
INT_DEMUX(76);
INT_DEMUX(77);
INT_DEMUX(78);
INT_DEMUX(79);
INT_DEMUX(80);
INT_DEMUX(81);
INT_DEMUX(82);
INT_DEMUX(83);
INT_DEMUX(84);
INT_DEMUX(85);
INT_DEMUX(86);
INT_DEMUX(87);
INT_DEMUX(88);
INT_DEMUX(89);
INT_DEMUX(90);
INT_DEMUX(91);
INT_DEMUX(92);
INT_DEMUX(93);
INT_DEMUX(94);
INT_DEMUX(95);
INT_DEMUX(96);
INT_DEMUX(97);
INT_DEMUX(98);
INT_DEMUX(99);
INT_DEMUX(100);
INT_DEMUX(101);
INT_DEMUX(102);
INT_DEMUX(103);
INT_DEMUX(104);
INT_DEMUX(105);
INT_DEMUX(106);
INT_DEMUX(107);
INT_DEMUX(108);
INT_DEMUX(109);
INT_DEMUX(110);
INT_DEMUX(111);
INT_DEMUX(112);
INT_DEMUX(113);
INT_DEMUX(114);
INT_DEMUX(115);
INT_DEMUX(116);
INT_DEMUX(117);
INT_DEMUX(118);
INT_DEMUX(119);
INT_DEMUX(120);
INT_DEMUX(121);
INT_DEMUX(122);
INT_DEMUX(123);
INT_DEMUX(124);
INT_DEMUX(125);
INT_DEMUX(126);
INT_DEMUX(127);
INT_DEMUX(128);
INT_DEMUX(129);
INT_DEMUX(130);
INT_DEMUX(131);
INT_DEMUX(132);
INT_DEMUX(133);
INT_DEMUX(134);
INT_DEMUX(135);
INT_DEMUX(136);
INT_DEMUX(137);
INT_DEMUX(138);
INT_DEMUX(139);
INT_DEMUX(140);
INT_DEMUX(141);
INT_DEMUX(142);
INT_DEMUX(143);
INT_DEMUX(144);
INT_DEMUX(145);
INT_DEMUX(146);
INT_DEMUX(147);
INT_DEMUX(148);
INT_DEMUX(149);
INT_DEMUX(150);
INT_DEMUX(151);
INT_DEMUX(152);
INT_DEMUX(153);
INT_DEMUX(154);
INT_DEMUX(155);
INT_DEMUX(156);
INT_DEMUX(157);
INT_DEMUX(158);
INT_DEMUX(159);
INT_DEMUX(160);
INT_DEMUX(161);
INT_DEMUX(162);
INT_DEMUX(163);
INT_DEMUX(164);
INT_DEMUX(165);
INT_DEMUX(166);
INT_DEMUX(167);
INT_DEMUX(168);
INT_DEMUX(169);
INT_DEMUX(170);
INT_DEMUX(171);
INT_DEMUX(172);
INT_DEMUX(173);
INT_DEMUX(174);
INT_DEMUX(175);
INT_DEMUX(176);
INT_DEMUX(177);
INT_DEMUX(178);
INT_DEMUX(179);
INT_DEMUX(180);
INT_DEMUX(181);
INT_DEMUX(182);
INT_DEMUX(183);
INT_DEMUX(184);
INT_DEMUX(185);
INT_DEMUX(186);
INT_DEMUX(187);
INT_DEMUX(188);
INT_DEMUX(189);
INT_DEMUX(190);
INT_DEMUX(191);
INT_DEMUX(192);
INT_DEMUX(193);
INT_DEMUX(194);
INT_DEMUX(195);
INT_DEMUX(196);
INT_DEMUX(197);
INT_DEMUX(198);
INT_DEMUX(199);
INT_DEMUX(200);
INT_DEMUX(201);
INT_DEMUX(202);
INT_DEMUX(203);
INT_DEMUX(204);
INT_DEMUX(205);
INT_DEMUX(206);
INT_DEMUX(207);
INT_DEMUX(208);
INT_DEMUX(209);
INT_DEMUX(210);
INT_DEMUX(211);
INT_DEMUX(212);
INT_DEMUX(213);
INT_DEMUX(214);
INT_DEMUX(215);
INT_DEMUX(216);
INT_DEMUX(217);
INT_DEMUX(218);
INT_DEMUX(219);
INT_DEMUX(220);
INT_DEMUX(221);
INT_DEMUX(222);
INT_DEMUX(223);
INT_DEMUX(224);
INT_DEMUX(225);
INT_DEMUX(226);
INT_DEMUX(227);
INT_DEMUX(228);
INT_DEMUX(229);
INT_DEMUX(230);
INT_DEMUX(231);
INT_DEMUX(232);
INT_DEMUX(233);
INT_DEMUX(234);
INT_DEMUX(235);
INT_DEMUX(236);
INT_DEMUX(237);
INT_DEMUX(238);
INT_DEMUX(239);
INT_DEMUX(240);
INT_DEMUX(241);
INT_DEMUX(242);
INT_DEMUX(243);
INT_DEMUX(244);
INT_DEMUX(245);
INT_DEMUX(246);
INT_DEMUX(247);
INT_DEMUX(248);
INT_DEMUX(249);
INT_DEMUX(250);
INT_DEMUX(251);
INT_DEMUX(252);
INT_DEMUX(253);
INT_DEMUX(254);
INT_DEMUX(255);
const void *FixedVectors[] FVECT_SECT = {
/* 0x00-0x4c: Reserved, must be 0xff (according to e2 studio example) */
/* Reserved for OFSM */
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
/* Reserved area */
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
/* Reserved for ID Code */
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
/* Reserved area */
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
/* Reserved area */
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
(fp)0xFFFFFFFF,
/* 0x50: Privileged instruction exception */
INT_Excep_SuperVisorInst,
/* 0x54: Access exception */
INT_Excep_AccessInst,
/* 0x58: Reserved */
Dummy,
/* 0x5c: Undefined Instruction Exception */
INT_Excep_UndefinedInst,
/* 0x60: Reserved */
Dummy,
/* 0x64: Floating Point Exception */
INT_Excep_FloatingPoint,
/* 0x68-0x74: Reserved */
Dummy,
Dummy,
Dummy,
Dummy,
/* 0x78: Non-maskable interrupt */
INT_NonMaskableInterrupt,
_start,
};
const fp RelocatableVectors[] RVECT_SECT = {
reserved_isr, switch_isr_wrapper, reserved_isr, reserved_isr, reserved_isr,
reserved_isr, reserved_isr, reserved_isr, reserved_isr, reserved_isr,
reserved_isr, reserved_isr, reserved_isr, reserved_isr, reserved_isr,
reserved_isr, int_demux_16, int_demux_17, int_demux_18, int_demux_19,
int_demux_20, int_demux_21, int_demux_22, int_demux_23, int_demux_24,
int_demux_25, int_demux_26, int_demux_27, int_demux_28, int_demux_29,
int_demux_30, int_demux_31, int_demux_32, int_demux_33, int_demux_34,
int_demux_35, int_demux_36, int_demux_37, int_demux_38, int_demux_39,
int_demux_40, int_demux_41, int_demux_42, int_demux_43, int_demux_44,
int_demux_45, int_demux_46, int_demux_47, int_demux_48, int_demux_49,
int_demux_50, int_demux_51, int_demux_52, int_demux_53, int_demux_54,
int_demux_55, int_demux_56, int_demux_57, int_demux_58, int_demux_59,
int_demux_60, int_demux_61, int_demux_62, int_demux_63, int_demux_64,
int_demux_65, int_demux_66, int_demux_67, int_demux_68, int_demux_69,
int_demux_70, int_demux_71, int_demux_72, int_demux_73, int_demux_74,
int_demux_75, int_demux_76, int_demux_77, int_demux_78, int_demux_79,
int_demux_80, int_demux_81, int_demux_82, int_demux_83, int_demux_84,
int_demux_85, int_demux_86, int_demux_87, int_demux_88, int_demux_89,
int_demux_90, int_demux_91, int_demux_92, int_demux_93, int_demux_94,
int_demux_95, int_demux_96, int_demux_97, int_demux_98, int_demux_99,
int_demux_100, int_demux_101, int_demux_102, int_demux_103, int_demux_104,
int_demux_105, int_demux_106, int_demux_107, int_demux_108, int_demux_109,
int_demux_110, int_demux_111, int_demux_112, int_demux_113, int_demux_114,
int_demux_115, int_demux_116, int_demux_117, int_demux_118, int_demux_119,
int_demux_120, int_demux_121, int_demux_122, int_demux_123, int_demux_124,
int_demux_125, int_demux_126, int_demux_127, int_demux_128, int_demux_129,
int_demux_130, int_demux_131, int_demux_132, int_demux_133, int_demux_134,
int_demux_135, int_demux_136, int_demux_137, int_demux_138, int_demux_139,
int_demux_140, int_demux_141, int_demux_142, int_demux_143, int_demux_144,
int_demux_145, int_demux_146, int_demux_147, int_demux_148, int_demux_149,
int_demux_150, int_demux_151, int_demux_152, int_demux_153, int_demux_154,
int_demux_155, int_demux_156, int_demux_157, int_demux_158, int_demux_159,
int_demux_160, int_demux_161, int_demux_162, int_demux_163, int_demux_164,
int_demux_165, int_demux_166, int_demux_167, int_demux_168, int_demux_169,
int_demux_170, int_demux_171, int_demux_172, int_demux_173, int_demux_174,
int_demux_175, int_demux_176, int_demux_177, int_demux_178, int_demux_179,
int_demux_180, int_demux_181, int_demux_182, int_demux_183, int_demux_184,
int_demux_185, int_demux_186, int_demux_187, int_demux_188, int_demux_189,
int_demux_190, int_demux_191, int_demux_192, int_demux_193, int_demux_194,
int_demux_195, int_demux_196, int_demux_197, int_demux_198, int_demux_199,
int_demux_200, int_demux_201, int_demux_202, int_demux_203, int_demux_204,
int_demux_205, int_demux_206, int_demux_207, int_demux_208, int_demux_209,
int_demux_210, int_demux_211, int_demux_212, int_demux_213, int_demux_214,
int_demux_215, int_demux_216, int_demux_217, int_demux_218, int_demux_219,
int_demux_220, int_demux_221, int_demux_222, int_demux_223, int_demux_224,
int_demux_225, int_demux_226, int_demux_227, int_demux_228, int_demux_229,
int_demux_230, int_demux_231, int_demux_232, int_demux_233, int_demux_234,
int_demux_235, int_demux_236, int_demux_237, int_demux_238, int_demux_239,
int_demux_240, int_demux_241, int_demux_242, int_demux_243, int_demux_244,
int_demux_245, int_demux_246, int_demux_247, int_demux_248, int_demux_249,
int_demux_250, int_demux_251, int_demux_252, int_demux_253, int_demux_254,
int_demux_255,
};


@ -0,0 +1,45 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Private kernel definitions (RX)
*
* This file contains private kernel structures definitions and various
* other definitions for the Renesas RX architecture.
*
* This file is also included by assembly language files which must #define
* _ASMLANGUAGE before including this header file. Note that kernel
* assembly source files obtain structure offset values via "absolute symbols"
* in the offsets.o module.
*/
#ifndef ZEPHYR_ARCH_RX_INCLUDE_KERNEL_ARCH_DATA_H_
#define ZEPHYR_ARCH_RX_INCLUDE_KERNEL_ARCH_DATA_H_
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#ifndef _ASMLANGUAGE
#include <zephyr/kernel.h>
#include <zephyr/types.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/util.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/* place C-code here */
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_RX_INCLUDE_KERNEL_ARCH_DATA_H_ */


@ -0,0 +1,40 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_ARCH_RX_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_RX_INCLUDE_KERNEL_ARCH_FUNC_H_
#ifndef _ASMLANGUAGE
#include <kernel_arch_data.h>
#ifdef __cplusplus
extern "C" {
#endif
static ALWAYS_INLINE void arch_kernel_init(void)
{
/* check whether further device initialization functions must be called here */
}
static inline bool arch_is_in_isr(void)
{
return arch_curr_cpu()->nested != 0U;
}
extern void z_rx_arch_switch(void *switch_to, void **switched_from);
static inline void arch_switch(void *switch_to, void **switched_from)
{
z_rx_arch_switch(switch_to, switched_from);
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_RX_INCLUDE_KERNEL_ARCH_FUNC_H_ */


@ -0,0 +1,39 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_ARCH_RX_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_RX_INCLUDE_OFFSETS_SHORT_ARCH_H_
#include <offsets.h>
/* kernel */
#define KERNEL_OFFSET(field) _kernel_offset_to_##field
#define _kernel_offset_to_flags (___kernel_t_arch_OFFSET + ___kernel_arch_t_flags_OFFSET)
/* end - kernel */
/* threads */
#define THREAD_OFFSET(field) _thread_offset_to_##field
#define _thread_offset_to_sp (___thread_t_callee_saved_OFFSET + ___callee_saved_t_topOfStack_OFFSET)
#define _thread_offset_to_retval (___thread_t_callee_saved_OFFSET + ___callee_saved_t_retval_OFFSET)
#define _thread_offset_to_coopCoprocReg \
(___thread_t_arch_OFFSET + ___thread_arch_t_coopCoprocReg_OFFSET)
#define _thread_offset_to_preempCoprocReg \
(___thread_t_arch_OFFSET + ___thread_arch_t_preempCoprocReg_OFFSET)
#define _thread_offset_to_cpStack \
(_thread_offset_to_preempCoprocReg + __tPreempCoprocReg_cpStack_OFFSET)
#define _thread_offset_to_cpEnable (_thread_offset_to_cpStack + XT_CPENABLE)
/* end - threads */
#endif /* ZEPHYR_ARCH_RX_INCLUDE_OFFSETS_SHORT_ARCH_H_ */


@ -0,0 +1,8 @@
# Copyright (c) 2021 KT-Elektronik Klaucke und Partner GmbH
# SPDX-License-Identifier: Apache-2.0
description: Renesas RX CPU
compatible: "renesas,rx"
include: cpu.yaml


@ -32,6 +32,8 @@
#include <zephyr/arch/posix/arch_inlines.h>
#elif defined(CONFIG_SPARC)
#include <zephyr/arch/sparc/arch_inlines.h>
#elif defined(CONFIG_RX)
#include <zephyr/arch/rx/arch_inlines.h>
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_INLINES_H_ */


@ -31,6 +31,8 @@
#include <zephyr/arch/posix/arch.h>
#elif defined(CONFIG_SPARC)
#include <zephyr/arch/sparc/arch.h>
#elif defined(CONFIG_RX)
#include <zephyr/arch/rx/arch.h>
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_CPU_H_ */


@ -0,0 +1,146 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RX_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_RX_ARCH_H_
/* Add include for DTS generated information */
#include <zephyr/arch/rx/exception.h>
#include <zephyr/devicetree.h>
#include <zephyr/arch/rx/thread.h>
#include <zephyr/arch/rx/misc.h>
#include <zephyr/arch/rx/arch_inlines.h>
#include <zephyr/arch/common/sys_bitops.h>
#include <zephyr/arch/common/sys_io.h>
#include <zephyr/arch/common/ffs.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <zephyr/irq.h>
#define ARCH_STACK_PTR_ALIGN 4
#ifndef _ASMLANGUAGE
#ifdef __cplusplus
extern "C" {
#endif
#define REG(addr) *((uint8_t *)(addr))
/* isr for undefined interrupts (results in a fatal error) */
void z_irq_spurious(const void *unused);
/* internal routine documented in C file, needed by IRQ_CONNECT() macro */
extern void z_irq_priority_set(uint32_t irq, uint32_t prio, uint32_t flags);
/* Z_ISR_DECLARE will populate the .intList section with the interrupt's
* parameters, which will then be used by gen_irq_tables.py to create
* the vector table and the software ISR table. This is all done at
* build-time.
*
* We additionally set the priority in the interrupt controller at
* runtime.
*/
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
z_irq_priority_set(irq_p, priority_p, flags_p); \
}
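A hedged sketch of how a driver would use the static IRQ_CONNECT() path described above (IRQ number, priority, and handler name are illustrative):

#include <zephyr/irq.h>
#include <zephyr/kernel.h>

#define MY_DEV_IRQ  60 /* illustrative */
#define MY_DEV_PRIO 3  /* illustrative */

static void my_dev_isr(const void *arg)
{
	ARG_UNUSED(arg);
	/* service the device */
}

int my_dev_init(void)
{
	IRQ_CONNECT(MY_DEV_IRQ, MY_DEV_PRIO, my_dev_isr, NULL, 0);
	irq_enable(MY_DEV_IRQ);
	return 0;
}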
#if CONFIG_TRACING_ISR
#define ARCH_ISR_DIRECT_HEADER() \
{ \
_kernel.cpus[0].nested++; \
sys_trace_isr_enter(); \
}
#else
#define ARCH_ISR_DIRECT_HEADER() \
{ \
_kernel.cpus[0].nested++; \
}
#endif
#if CONFIG_TRACING_ISR
#define ARCH_ISR_DIRECT_FOOTER(check_reschedule) \
{ \
if (IS_ENABLED(CONFIG_STACK_SENTINEL)) { \
z_check_stack_sentinel(); \
} \
sys_trace_isr_exit(); \
irq_lock(); \
if (check_reschedule && _kernel.cpus[0].nested == 1) { \
if (_kernel.cpus->current->base.prio >= 0 || \
CONFIG_NUM_METAIRQ_PRIORITIES > 0) { \
if (_kernel.ready_q.cache != _kernel.cpus->current) { \
z_rx_irq_exit(); \
} \
} \
} \
_kernel.cpus[0].nested--; \
}
#else
#define ARCH_ISR_DIRECT_FOOTER(check_reschedule) \
{ \
if (IS_ENABLED(CONFIG_STACK_SENTINEL)) { \
z_check_stack_sentinel(); \
} \
irq_lock(); \
if (check_reschedule && _kernel.cpus[0].nested == 1) { \
if (_kernel.cpus->current->base.prio >= 0 || \
CONFIG_NUM_METAIRQ_PRIORITIES > 0) { \
if (_kernel.ready_q.cache != _kernel.cpus->current) { \
z_rx_irq_exit(); \
} \
} \
} \
_kernel.cpus[0].nested--; \
}
#endif
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
uint32_t key;
/* deactivate interrupts by clearing the PSW-i flag */
__asm__ volatile("MVFC psw, %0\n"
"CLRPSW i"
: "=r"(key)
:
: "cc");
/* return the value of the i-flag before clearing
* if irqs were locked already, it was 0 and calling
* arch_irq_unlock(key) will not actually unlock irqs, as this was a
* nested irq lock
*/
return key & BIT(16);
}
static inline void arch_irq_unlock(unsigned int key)
{
if (key != 0) {
/* re-activate interrupts by setting the PSW i-flag*/
__asm__ volatile("SETPSW i" ::: "cc");
}
}
static inline bool arch_irq_unlocked(unsigned int key)
{
return key != 0;
}
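A minimal sketch of the nested locking pattern that these key semantics support (purely illustrative):

#include <zephyr/irq.h>

void critical_section_example(void)
{
	unsigned int key = irq_lock(); /* key holds the previous PSW I bit */

	/* ... critical section ... */

	irq_unlock(key); /* re-enables interrupts only if they were enabled before */
}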
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
{
return &_kernel.cpus[0];
}
#ifdef __cplusplus
}
#endif
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_RX_ARCH_H_ */


@ -0,0 +1,17 @@
/*
* Copyright (c) 2024 Renesas Electronics Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RX_INLINES_H
#define ZEPHYR_INCLUDE_ARCH_RX_INLINES_H
#include <zephyr/kernel_structs.h>
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
{
return CONFIG_MP_MAX_NUM_CPUS;
}
#endif /* ZEPHYR_INCLUDE_ARCH_RX_INLINES_H */


@ -0,0 +1,45 @@
/*
* Copyright (c) 2024 Renesas Electronics Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RX_EXCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_RX_EXCEPTION_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
struct arch_esf {
uint32_t acc_l;
uint32_t acc_h;
uint32_t r1;
uint32_t r2;
uint32_t r3;
uint32_t r4;
uint32_t r5;
uint32_t r6;
uint32_t r7;
uint32_t r8;
uint32_t r9;
uint32_t r10;
uint32_t r11;
uint32_t r12;
uint32_t r13;
uint32_t r14;
uint32_t r15;
uint32_t entry_point;
uint32_t psw;
};
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_RX_EXCEPTION_H_ */


@ -0,0 +1,52 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Renesas RX public kernel miscellaneous
*
* Renesas RX-specific kernel miscellaneous interface. Included by arch/rx/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RX_MISC_H_
#define ZEPHYR_INCLUDE_ARCH_RX_MISC_H_
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
extern uint32_t sys_clock_cycle_get_32(void);
static inline uint32_t arch_k_cycle_get_32(void)
{
return sys_clock_cycle_get_32();
}
extern uint64_t sys_clock_cycle_get_64(void);
static inline uint64_t arch_k_cycle_get_64(void)
{
return sys_clock_cycle_get_64();
}
static ALWAYS_INLINE void arch_nop(void)
{
__asm__ volatile("nop;");
}
#define arch_brk() __asm__ volatile("brk;")
#define arch_wait() __asm__ volatile("wait;")
#endif
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_RX_MISC_H_ */
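As a usage sketch of the cycle-counter accessors declared above (the loop and the counts are illustrative; the result depends on the board's system clock driver):

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

void cycle_measure_example(void)
{
	uint32_t start = k_cycle_get_32();

	for (volatile int i = 0; i < 1000; i++) {
		arch_nop();
	}

	printk("elapsed: %u cycles\n", k_cycle_get_32() - start);
}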


@ -0,0 +1,36 @@
/*
* Copyright (c) 2021 KT-Elektronik, Klaucke und Partner GmbH
* Copyright (c) 2024 Renesas Electronics Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ARCH_RX_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_RX_THREAD_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
struct _callee_saved {
/* General purpose callee-saved registers */
};
typedef struct _callee_saved _callee_saved_t;
struct _thread_arch {
/* empty */
};
typedef struct _thread_arch _thread_arch_t;
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_RX_THREAD_H_ */