arch: arm64: mmu: avoid use of set/way cache instructions

Architecturally, Set/Way operations are not guaranteed to affect all
caches prior to the PoC, and may require other IMPLEMENTATION DEFINED
maintenance (e.g. MMIO control of system-level caches).

This patch was primarily motivated by the Xen domain Zephyr build: set/way
ops are not easily virtualized by Xen, and S/W emulation is disabled
because the IP-MMU is active for Dom0. The IP-MMU is an IO-MMU made by
Renesas and, like any good IO-MMU, it shares page tables with the CPU.
Trying to emulate S/W ops with the IP-MMU active leads to IO-MMU faults,
so a Zephyr image built as the Xen initial domain cannot work with cache
management support enabled.

Exposing set/way cache maintenance to a virtual machine is unsafe, not
least because the instructions are not permission-checked, but also
because they are not broadcast between CPUs.

With this commit, a by-VA data cache invalidate is performed after every
mapping instead of using set/way instructions during MMU init. This also
makes it straightforward to drop sys_cache_data_invd_all from the MMU
enable function: every time a new memory region is added to the xlat
tables, the corresponding range is invalidated, so we can be sure no
stale data is left in the cache.

Signed-off-by: Mykola Kvach <mykola_kvach@epam.com>
Mykola Kvach, 2023-06-21 13:53:10 +03:00 (committed by Carles Cufí)
parent 2cdacb3fa8
commit c9b534c4eb


@@ -729,6 +729,13 @@ static inline void add_arm_mmu_region(struct arm_mmu_ptables *ptables,
 	}
 }
 
+static inline void inv_dcache_after_map_helper(void *virt, size_t size, uint32_t attrs)
+{
+	if (MT_TYPE(attrs) == MT_NORMAL || MT_TYPE(attrs) == MT_NORMAL_WT) {
+		sys_cache_data_invd_range(virt, size);
+	}
+}
+
 static void setup_page_tables(struct arm_mmu_ptables *ptables)
 {
 	unsigned int index;
@@ -767,6 +774,20 @@ static void setup_page_tables(struct arm_mmu_ptables *ptables)
 	}
 
 	invalidate_tlb_all();
+
+	for (index = 0U; index < ARRAY_SIZE(mmu_zephyr_ranges); index++) {
+		size_t size;
+
+		range = &mmu_zephyr_ranges[index];
+		size = POINTER_TO_UINT(range->end) - POINTER_TO_UINT(range->start);
+		inv_dcache_after_map_helper(range->start, size, range->attrs);
+	}
+
+	for (index = 0U; index < mmu_config.num_regions; index++) {
+		region = &mmu_config.mmu_regions[index];
+		inv_dcache_after_map_helper(UINT_TO_POINTER(region->base_va), region->size,
+					    region->attrs);
+	}
 }
 
 /* Translation table control register settings */
@@ -815,9 +836,6 @@ static void enable_mmu_el1(struct arm_mmu_ptables *ptables, unsigned int flags)
 	/* Ensure these changes are seen before MMU is enabled */
 	barrier_isync_fence_full();
 
-	/* Invalidate all data caches before enable them */
-	sys_cache_data_invd_all();
-
 	/* Enable the MMU and data cache */
 	val = read_sctlr_el1();
 	write_sctlr_el1(val | SCTLR_M_BIT | SCTLR_C_BIT);
@@ -955,8 +973,19 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 		LOG_ERR("__arch_mem_map() returned %d", ret);
 		k_panic();
 	} else {
+		uint32_t mem_flags = flags & K_MEM_CACHE_MASK;
+
 		sync_domains((uintptr_t)virt, size);
 		invalidate_tlb_all();
+
+		switch (mem_flags) {
+		case K_MEM_CACHE_WB:
+		case K_MEM_CACHE_WT:
+			mem_flags = (mem_flags == K_MEM_CACHE_WB) ? MT_NORMAL : MT_NORMAL_WT;
+			inv_dcache_after_map_helper(virt, size, mem_flags);
+		default:
+			break;
+		}
 	}
 }
@@ -1077,6 +1106,7 @@ static int private_map(struct arm_mmu_ptables *ptables, const char *name,
 	__ASSERT(ret == 0, "add_map() returned %d", ret);
 	invalidate_tlb_all();
+	inv_dcache_after_map_helper(UINT_TO_POINTER(virt), size, attrs);
 
 	return ret;
 }