From 0f2a352cbdf121bb42acee6935707f8fc794cf18 Mon Sep 17 00:00:00 2001
From: Anas Nashif
Date: Wed, 22 Mar 2023 11:31:40 +0000
Subject: [PATCH] Revert "xtensa: remove ELF section address rewriting"

This reverts commit 7a85983ebcf283b521331695fc80a49c623cc395.

This commit was merged prematurely and is causing issues on multiple
platforms.

Signed-off-by: Anas Nashif
---
 soc/xtensa/intel_adsp/common/CMakeLists.txt   |  4 ++
 soc/xtensa/intel_adsp/common/fix_elf_addrs.py | 51 +++++++++++++++++++
 2 files changed, 55 insertions(+)
 create mode 100755 soc/xtensa/intel_adsp/common/fix_elf_addrs.py

diff --git a/soc/xtensa/intel_adsp/common/CMakeLists.txt b/soc/xtensa/intel_adsp/common/CMakeLists.txt
index 1d480f5a0f4..c9ed156b891 100644
--- a/soc/xtensa/intel_adsp/common/CMakeLists.txt
+++ b/soc/xtensa/intel_adsp/common/CMakeLists.txt
@@ -30,6 +30,7 @@ zephyr_library_link_libraries(INTEL_ADSP_COMMON)
 
 target_include_directories(INTEL_ADSP_COMMON INTERFACE include)
 target_link_libraries(INTEL_ADSP_COMMON INTERFACE intel_adsp_common)
 
+set(ELF_FIX ${PYTHON_EXECUTABLE} ${SOC_DIR}/${ARCH}/${SOC_FAMILY}/common/fix_elf_addrs.py)
 set(KERNEL_REMAPPED ${CMAKE_BINARY_DIR}/zephyr/${KERNEL_NAME}-remapped.elf)
 set(EXTMAN ${CMAKE_BINARY_DIR}/zephyr/extman.bin)
@@ -73,6 +74,9 @@ add_custom_command(
   COMMAND ${CMAKE_COMMAND} -E copy
     ${CMAKE_BINARY_DIR}/zephyr/${KERNEL_NAME}.elf ${KERNEL_REMAPPED}
 
+  COMMAND ${ELF_FIX} ${CMAKE_OBJCOPY} ${KERNEL_REMAPPED}
+    ${CONFIG_XTENSA_CACHED_REGION} ${CONFIG_XTENSA_UNCACHED_REGION}
+
   # Extract modules for rimage
   COMMAND ${CMAKE_OBJCOPY}
     --only-section .imr
diff --git a/soc/xtensa/intel_adsp/common/fix_elf_addrs.py b/soc/xtensa/intel_adsp/common/fix_elf_addrs.py
new file mode 100755
index 00000000000..c9a75e1dcd7
--- /dev/null
+++ b/soc/xtensa/intel_adsp/common/fix_elf_addrs.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2020-2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# ADSP devices have their RAM regions mapped twice. The first mapping
+# is set in the CPU to bypass the L1 cache, and so access through
+# pointers in that region is coherent between CPUs (but slow). The
+# second region accesses the same memory through the L1 cache and
+# requires careful flushing when used with shared data.
+#
+# This distinction is exposed in the linker script, where some symbols
+# (e.g. stack regions) are linked into cached memory, but others
+# (general kernel memory) are not. But the rimage signing tool
+# doesn't understand that and fails if regions aren't contiguous.
+#
+# Walk the sections in the ELF file, changing the VMA/LMA of each
+# uncached section to the equivalent address in the cached area of
+# memory.
+
+import os
+import sys
+from elftools.elf.elffile import ELFFile
+
+objcopy_bin = sys.argv[1]
+elffile = sys.argv[2]
+cached_reg = int(sys.argv[3])
+uncached_reg = int(sys.argv[4])
+
+uc_min = uncached_reg << 29
+uc_max = uc_min | 0x1fffffff
+cache_off = "0x%x" % ((cached_reg - uncached_reg) << 29)
+
+fixup = []
+with open(elffile, "rb") as fd:
+    elf = ELFFile(fd)
+    for s in elf.iter_sections():
+        addr = s.header.sh_addr
+        if uc_min <= addr <= uc_max and s.header.sh_size != 0:
+            print(f"fix_elf_addrs.py: Moving section {s.name} to cached SRAM region")
+            fixup.append(s.name)
+
+for s in fixup:
+    # Note redirect: the sof-derived linker scripts currently emit
+    # some zero-length sections at address zero. This is benign, and
+    # the linker is happy, but objcopy will emit an unsilenceable
+    # error (no --quiet option, no -Werror=no-whatever, nothing).
+    # Just swallow the error stream for now pending rework to the
+    # linker framework.
+    cmd = f"{objcopy_bin} --change-section-address {s}+{cache_off} {elffile} 2>{os.devnull}"
+    os.system(cmd)
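For reference, a minimal sketch (outside the patch) of the address arithmetic
fix_elf_addrs.py performs, using hypothetical region numbers; the real values
are supplied by CONFIG_XTENSA_CACHED_REGION and CONFIG_XTENSA_UNCACHED_REGION
for the target SoC.

# Sketch only, not part of the patch: how an uncached section address is
# mapped to its cached alias. Region numbers are hypothetical placeholders.
cached_reg = 5     # hypothetical CONFIG_XTENSA_CACHED_REGION
uncached_reg = 4   # hypothetical CONFIG_XTENSA_UNCACHED_REGION

# Each Xtensa region is a 512 MB window, so the region number supplies
# the top three address bits.
uc_min = uncached_reg << 29      # 0x80000000: start of the uncached window
uc_max = uc_min | 0x1fffffff     # 0x9fffffff: end of the uncached window
cache_off = (cached_reg - uncached_reg) << 29   # 0x20000000

addr = 0x80100000                # hypothetical sh_addr inside the uncached window
assert uc_min <= addr <= uc_max
print(hex(addr + cache_off))     # 0xa0100000: the same SRAM seen through the L1 cache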