soc: riscv: andes_v5: refine Andes PMA

Refine the PMA driver and define MPU_ALIGN() to PMA granularity in
RAM_SECTIONS; otherwise, MPU_ALIGN() is defined to PMP granularity.

Signed-off-by: Jimmy Zheng <jimmyzhe@andestech.com>
This commit is contained in:
Jimmy Zheng 2023-06-21 13:56:55 +08:00 committed by Fabio Baltieri
parent de1cd06294
commit bf0e01bee2
2 changed files with 56 additions and 98 deletions

View File

@ -57,22 +57,6 @@
#define RAM_BASE CONFIG_SRAM_BASE_ADDRESS
#define RAM_SIZE KB(CONFIG_SRAM_SIZE)
/* Make linker section alignment comply with PMA granularity. */
#if defined(CONFIG_SOC_ANDES_V5_PMA_REGION_MIN_ALIGN_AND_SIZE)
_region_min_align = CONFIG_SOC_ANDES_V5_PMA_REGION_MIN_ALIGN_AND_SIZE;
#else
_region_min_align = 4;
#endif
#if defined(CONFIG_SOC_ANDES_V5_PMA)
/*
* Andes-V5 PMA needs power-of-2 alignment.
*/
#define MPU_MIN_SIZE_ALIGN . = ALIGN(_region_min_align);
#define MPU_ALIGN(region_size) \
. = ALIGN(_region_min_align); \
. = ALIGN( 1 << LOG2CEIL(region_size))
#else
#ifdef CONFIG_RISCV_PMP
#define MPU_MIN_SIZE CONFIG_PMP_GRANULARITY
#define MPU_MIN_SIZE_ALIGN . = ALIGN(MPU_MIN_SIZE);
@ -88,7 +72,6 @@ _region_min_align = 4;
#define MPU_MIN_SIZE_ALIGN
#define MPU_ALIGN(region_size) . = ALIGN(4)
#endif
#endif
MEMORY
{
@ -214,11 +197,25 @@ SECTIONS
. = RAM_BASE;
_image_ram_start = .;
#ifdef CONFIG_SOC_ANDES_V5_PMA
#pragma push_macro("MPU_ALIGN")
#undef MPU_ALIGN
/* Make linker section alignment comply with PMA granularity. */
#define MPU_ALIGN(region_size) \
. = ALIGN(CONFIG_SOC_ANDES_V5_PMA_REGION_MIN_ALIGN_AND_SIZE); \
. = ALIGN( 1 << LOG2CEIL(region_size))
#endif
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-ram-sections.ld>
#ifdef CONFIG_SOC_ANDES_V5_PMA
#pragma pop_macro("MPU_ALIGN")
#endif
#if defined(CONFIG_USERSPACE)
#define APP_SHARED_ALIGN MPU_MIN_SIZE_ALIGN
#define SMEM_PARTITION_ALIGN MPU_ALIGN

View File

@ -8,6 +8,7 @@
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/arch/riscv/csr.h>
#ifndef CONFIG_ASSERT
#define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL
@ -16,14 +17,14 @@ LOG_MODULE_REGISTER(pma_init, LOG_LEVEL);
#endif
/* Programmable PMA mechanism is supported */
#define MMSC_CFG_PPMA (1 << 30)
#define MMSC_CFG_PPMA BIT(30)
/*
* PMA Configuration (PMACFG) bitfields
*/
/* ETYPE: Entry address matching mode */
#define PMACFG_ETYPE_MASK 3
#define PMACFG_ETYPE_MASK BIT_MASK(2)
#define PMACFG_ETYPE_OFF 0
#define PMACFG_ETYPE_TOR 1
#define PMACFG_ETYPE_NA4 2
@ -41,9 +42,7 @@ LOG_MODULE_REGISTER(pma_init, LOG_LEVEL);
/* The base address is aligned to size */
#define NAPOT_BASE(start, size) TO_PMA_ADDR((start) & ~((size) - 1))
/* The encoding of size is 0b01...1
* (change the leading bit of bitmask to 0)
*/
/* The encoding of size is 0b01...1, (change the leading bit of bitmask to 0) */
#define NAPOT_SIZE(size) TO_PMA_ADDR(((size) - 1) >> 1)
#define NA4_ENCODING(start) TO_PMA_ADDR(start)
@ -58,12 +57,6 @@ LOG_MODULE_REGISTER(pma_init, LOG_LEVEL);
#endif
#define PMACFG_SHIFT(index) ((index % RV_REGSIZE) * 8)
/* Wrappers of inline assembly */
#define read_csr(var, csr) \
({ __asm__ volatile ("csrr %0, %1" : "=r" (var) : "i" (csr)); })
#define write_csr(csr, val) \
({ __asm__ volatile ("csrw %0, %1" :: "i" (csr), "r" (val)); })
struct pma_region_attr {
/* Attributes belonging to pmacfg{i} */
uint8_t pmacfg;
@ -80,62 +73,32 @@ struct pma_region {
*/
static void write_pmaaddr_csr(const uint32_t index, unsigned long value)
{
#define SWITCH_CASE_PMAADDR_WRITE(x) \
case (x): \
csr_write(NDS_PMAADDR##x, value); break;
switch (index) {
case 0:
write_csr(NDS_PMAADDR0, value); break;
case 1:
write_csr(NDS_PMAADDR1, value); break;
case 2:
write_csr(NDS_PMAADDR2, value); break;
case 3:
write_csr(NDS_PMAADDR3, value); break;
case 4:
write_csr(NDS_PMAADDR4, value); break;
case 5:
write_csr(NDS_PMAADDR5, value); break;
case 6:
write_csr(NDS_PMAADDR6, value); break;
case 7:
write_csr(NDS_PMAADDR7, value); break;
case 8:
write_csr(NDS_PMAADDR8, value); break;
case 9:
write_csr(NDS_PMAADDR9, value); break;
case 10:
write_csr(NDS_PMAADDR10, value); break;
case 11:
write_csr(NDS_PMAADDR11, value); break;
case 12:
write_csr(NDS_PMAADDR12, value); break;
case 13:
write_csr(NDS_PMAADDR13, value); break;
case 14:
write_csr(NDS_PMAADDR14, value); break;
case 15:
write_csr(NDS_PMAADDR15, value); break;
FOR_EACH(SWITCH_CASE_PMAADDR_WRITE, (;), 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15);
}
}
/*
* Write value to pma{i}cfg entry which are packed into CSRs pmacfg{j}
*/
static void write_pmacfg_entry(const uint32_t entry_index,
uint8_t entry_value)
static void write_pmacfg_entry(const uint32_t entry_index, uint8_t entry_value)
{
/* 1-byte pma{i}cfg entries are packed into XLEN-byte CSRs pmacfg{j} */
uint32_t index = PMACFG_NUM(entry_index);
uint8_t shift = PMACFG_SHIFT(entry_index);
unsigned long pmacfg = 0;
#define SWITCH_CASE_PMACFG_READ(x) \
case (x): \
pmacfg = csr_read(NDS_PMACFG##x); break;
switch (index) {
case 0:
read_csr(pmacfg, NDS_PMACFG0); break;
case 1:
read_csr(pmacfg, NDS_PMACFG1); break;
case 2:
read_csr(pmacfg, NDS_PMACFG2); break;
case 3:
read_csr(pmacfg, NDS_PMACFG3); break;
FOR_EACH(SWITCH_CASE_PMACFG_READ, (;), 0, 1, 2, 3);
}
/* clear old value in pmacfg entry */
@ -143,15 +106,12 @@ static void write_pmacfg_entry(const uint32_t entry_index,
/* set new value to pmacfg entry value */
pmacfg |= entry_value << shift;
#define SWITCH_CASE_PMACFG_WRITE(x) \
case (x): \
csr_write(NDS_PMACFG##x, pmacfg); break;
switch (index) {
case 0:
write_csr(NDS_PMACFG0, pmacfg); break;
case 1:
write_csr(NDS_PMACFG1, pmacfg); break;
case 2:
write_csr(NDS_PMACFG2, pmacfg); break;
case 3:
write_csr(NDS_PMACFG3, pmacfg); break;
FOR_EACH(SWITCH_CASE_PMACFG_WRITE, (;), 0, 1, 2, 3);
}
}
@ -185,19 +145,18 @@ static void region_init(const uint32_t index,
*/
static int pma_region_is_valid(const struct pma_region *region)
{
/* Region size must be power-of-two,
* and greater or equal to the minimum
* PMA region size. Start address of the
* region must align with size.
*/
int region_is_valid =
((region->size & (region->size - 1)) == 0U)
&&
(region->size >= CONFIG_SOC_ANDES_V5_PMA_REGION_MIN_ALIGN_AND_SIZE)
&&
((region->start & (region->size - 1)) == 0U);
/* Region size must greater or equal to the minimum PMA region size */
if (region->size < CONFIG_SOC_ANDES_V5_PMA_REGION_MIN_ALIGN_AND_SIZE) {
return -EINVAL;
}
if (!region_is_valid) {
/* Region size must be power-of-two */
if (region->size & (region->size - 1)) {
return -EINVAL;
}
/* Start address of the region must align with size */
if (region->start & (region->size - 1)) {
return -EINVAL;
}
@ -213,11 +172,13 @@ static void configure_nocache_region(void)
.attr = {PMACFG_MTYPE_MEMORY_NOCACHE_BUFFERABLE},
};
if (nocache_region.size != 0) {
if (pma_region_is_valid(&nocache_region) == -EINVAL) {
__ASSERT(0, "Configuring PMA region of nocache region failed\n");
if (pma_region_is_valid(&nocache_region)) {
/* Skip PMA configuration if nocache region size is 0 */
if (nocache_region.size != 0) {
__ASSERT(0, "Configuring PMA region of nocache region "
"failed\n");
}
} else {
/* Initialize nocache region at PMA region 0 */
region_init(0, &nocache_region);
}
@ -242,7 +203,7 @@ static int pma_init(void)
{
unsigned long mmsc_cfg;
__asm__ volatile ("csrr %0, %1" : "=r" (mmsc_cfg) : "i" (NDS_MMSC_CFG));
mmsc_cfg = csr_read(NDS_MMSC_CFG);
if (!(mmsc_cfg & MMSC_CFG_PPMA)) {
/* This CPU doesn't support PMA */
@ -250,7 +211,8 @@ static int pma_init(void)
__ASSERT(0, "CPU doesn't support PMA. "
"Please disable CONFIG_SOC_ANDES_V5_PMA\n");
#ifndef CONFIG_ASSERT
LOG_ERR("CPU doesn't support PMA. Please disable CONFIG_SOC_ANDES_V5_PMA");
LOG_ERR("CPU doesn't support PMA. "
"Please disable CONFIG_SOC_ANDES_V5_PMA");
#endif
return -ENODEV;
}
@ -260,5 +222,4 @@ static int pma_init(void)
return 0;
}
SYS_INIT(pma_init, PRE_KERNEL_2,
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
SYS_INIT(pma_init, PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);