From fb2bf23ec19b486579964bd8a2aeb06f76e9a1cc Mon Sep 17 00:00:00 2001
From: Carlo Caione
Date: Sat, 5 Sep 2020 17:37:46 +0200
Subject: [PATCH] arch: arm64: Remove EL2/EL3 code

Zephyr is only supposed to be running at EL1 (+ EL0). Now that we drop
to EL1 from ELn at start, we can remove all the unused EL2/EL3 code.

Signed-off-by: Carlo Caione
---
 arch/arm/core/aarch64/fatal.c        | 21 +--------------------
 arch/arm/core/aarch64/macro_priv.inc | 23 +----------------------
 arch/arm/core/aarch64/switch.S       | 21 +--------------------
 3 files changed, 3 insertions(+), 62 deletions(-)

diff --git a/arch/arm/core/aarch64/fatal.c b/arch/arm/core/aarch64/fatal.c
index 79c3ef675e3..eae6f858f66 100644
--- a/arch/arm/core/aarch64/fatal.c
+++ b/arch/arm/core/aarch64/fatal.c
@@ -162,36 +162,17 @@ void z_arm64_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
 	if (reason != K_ERR_SPURIOUS_IRQ) {
 		__asm__ volatile("mrs %0, CurrentEL" : "=r" (el));
 
-		switch (GET_EL(el)) {
-		case MODE_EL1:
+		if (GET_EL(el) != MODE_EL0) {
 			__asm__ volatile("mrs %0, esr_el1" : "=r" (esr));
 			__asm__ volatile("mrs %0, far_el1" : "=r" (far));
 			__asm__ volatile("mrs %0, elr_el1" : "=r" (elr));
-			break;
-		case MODE_EL2:
-			__asm__ volatile("mrs %0, esr_el2" : "=r" (esr));
-			__asm__ volatile("mrs %0, far_el2" : "=r" (far));
-			__asm__ volatile("mrs %0, elr_el2" : "=r" (elr));
-			break;
-		case MODE_EL3:
-			__asm__ volatile("mrs %0, esr_el3" : "=r" (esr));
-			__asm__ volatile("mrs %0, far_el3" : "=r" (far));
-			__asm__ volatile("mrs %0, elr_el3" : "=r" (elr));
-			break;
-		default:
-			/* Just to keep the compiler happy */
-			esr = elr = far = 0;
-			break;
-		}
 
-		if (GET_EL(el) != MODE_EL0) {
 			LOG_ERR("ESR_ELn: 0x%016llx", esr);
 			LOG_ERR("FAR_ELn: 0x%016llx", far);
 			LOG_ERR("ELR_ELn: 0x%016llx", elr);
 
 			print_EC_cause(esr);
 		}
-
 	}
 
 	if (esf != NULL) {
diff --git a/arch/arm/core/aarch64/macro_priv.inc b/arch/arm/core/aarch64/macro_priv.inc
index b355fda0c1d..1ee0b0e5b39 100644
--- a/arch/arm/core/aarch64/macro_priv.inc
+++ b/arch/arm/core/aarch64/macro_priv.inc
@@ -43,19 +43,8 @@
 	 * Store SPSR_ELn and ELR_ELn. This is needed to support nested
 	 * exception handlers
 	 */
-	switch_el \xreg0, 3f, 2f, 1f
-3:
-	mrs \xreg1, spsr_el3
-	mrs \xreg2, elr_el3
-	b 0f
-2:
-	mrs \xreg1, spsr_el2
-	mrs \xreg2, elr_el2
-	b 0f
-1:
 	mrs \xreg1, spsr_el1
 	mrs \xreg2, elr_el1
-0:
 	stp \xreg1, \xreg2, [sp, #-16]!
 .endm
 
@@ -77,19 +66,9 @@
 	 * exception handlers
 	 */
 	ldp \xreg0, \xreg1, [sp], #16
-	switch_el \xreg2, 3f, 2f, 1f
-3:
-	msr spsr_el3, \xreg0
-	msr elr_el3, \xreg1
-	b 0f
-2:
-	msr spsr_el2, \xreg0
-	msr elr_el2, \xreg1
-	b 0f
-1:
 	msr spsr_el1, \xreg0
 	msr elr_el1, \xreg1
-0:
+
 	/*
 	 * In x30 we can have:
 	 *
diff --git a/arch/arm/core/aarch64/switch.S b/arch/arm/core/aarch64/switch.S
index a6ad75c5230..1cc725452ef 100644
--- a/arch/arm/core/aarch64/switch.S
+++ b/arch/arm/core/aarch64/switch.S
@@ -97,19 +97,9 @@ SECTION_FUNC(TEXT, z_thread_entry_wrapper)
 	 * arch_new_thread()
 	 */
 	ldp x0, x1, [sp], #16
-	switch_el x3, 3f, 2f, 1f
-3:
-	msr spsr_el3, x0
-	msr elr_el3, x1
-	b 0f
-2:
-	msr spsr_el2, x0
-	msr elr_el2, x1
-	b 0f
-1:
 	msr spsr_el1, x0
 	msr elr_el1, x1
-0:
+
 	/*
 	 * z_thread_entry_wrapper is called for every new thread upon the return
 	 * of arch_swap() or ISR. Its address, as well as its input function
@@ -140,16 +130,7 @@ GTEXT(z_arm64_svc)
 SECTION_FUNC(TEXT, z_arm64_svc)
 	z_arm64_enter_exc x2, x3, x4
 
-	switch_el x1, 3f, 2f, 1f
-3:
-	mrs x0, esr_el3
-	b 0f
-2:
-	mrs x0, esr_el2
-	b 0f
-1:
 	mrs x0, esr_el1
-0:
 	lsr x1, x0, #26
 	cmp x1, #0x15 /* 0x15 = SVC */
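
Note on the fatal.c hunk: GET_EL() decodes the CurrentEL system register,
whose exception-level field occupies bits [3:2] on ARMv8-A. Below is a
minimal C sketch of that decoding, not copied from the Zephyr tree: the
MODE_EL_SHIFT/MODE_EL_MASK values and the read_current_el()/above_el0()
helpers are illustrative assumptions that mirror how the hunk uses
GET_EL() and MODE_EL0.

	#include <stdint.h>

	/* CurrentEL.EL is bits [3:2]; values 0..3 correspond to EL0..EL3 */
	#define MODE_EL_SHIFT  2
	#define MODE_EL_MASK   0x3U
	#define GET_EL(mode)   (((mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
	#define MODE_EL0       0U

	static inline uint64_t read_current_el(void)
	{
		uint64_t el;

		/* Same instruction the patched z_arm64_fatal_error() uses */
		__asm__ volatile("mrs %0, CurrentEL" : "=r" (el));
		return el;
	}

	static inline int above_el0(void)
	{
		/*
		 * Mirrors the check kept by the hunk: any level above EL0
		 * must now be EL1, since Zephyr drops to EL1 at boot.
		 */
		return GET_EL(read_current_el()) != MODE_EL0;
	}

With EL1 guaranteed at entry, the old switch over MODE_EL1/EL2/EL3
collapses into this single "!= MODE_EL0" test and the esr/far/elr reads
can hard-code the *_el1 register names, which is exactly what the hunk
does.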