diff --git a/arch/x86/core/acpi.c b/arch/x86/core/acpi.c index 6e553e711a4..d451b7ee506 100644 --- a/arch/x86/core/acpi.c +++ b/arch/x86/core/acpi.c @@ -35,7 +35,7 @@ static void find_rsdp(void) } if (zero_page_base == NULL) { - z_mem_map(&zero_page_base, 0, 4096, K_MEM_PERM_RW); + z_phys_map(&zero_page_base, 0, 4096, K_MEM_PERM_RW); } /* Physical (real mode!) address 0000:040e stores a (real diff --git a/drivers/pcie/host/msi.c b/drivers/pcie/host/msi.c index 19c1d7b88ff..49cf95a9ce9 100644 --- a/drivers/pcie/host/msi.c +++ b/drivers/pcie/host/msi.c @@ -67,9 +67,9 @@ static bool map_msix_table_entries(pcie_bdf_t bdf, return false; } - z_mem_map((uint8_t **)&mapped_table, - bar.phys_addr + table_offset, - bar.size, K_MEM_PERM_RW); + z_phys_map((uint8_t **)&mapped_table, + bar.phys_addr + table_offset, + bar.size, K_MEM_PERM_RW); for (i = 0; i < n_vector; i++) { vectors[i].msix_vector = (struct msix_vector *) diff --git a/include/sys/device_mmio.h b/include/sys/device_mmio.h index 721491ddf39..19bf7080b32 100644 --- a/include/sys/device_mmio.h +++ b/include/sys/device_mmio.h @@ -88,8 +88,8 @@ static inline void device_map(mm_reg_t *virt_addr, uintptr_t phys_addr, /* Pass along flags and add that we want supervisor mode * read-write access. */ - z_mem_map((uint8_t **)virt_addr, phys_addr, size, - flags | K_MEM_PERM_RW); + z_phys_map((uint8_t **)virt_addr, phys_addr, size, + flags | K_MEM_PERM_RW); #else ARG_UNUSED(size); ARG_UNUSED(flags); diff --git a/include/sys/mem_manage.h b/include/sys/mem_manage.h index acc7a1d790c..faa903b0f85 100644 --- a/include/sys/mem_manage.h +++ b/include/sys/mem_manage.h @@ -79,13 +79,13 @@ extern "C" { * This API is part of infrastructure still under development and may * change. 
 * - * @param linear_addr [out] Output linear address storage location - * @param phys_addr Physical address base of the memory region + * @param virt_ptr [out] Output virtual address storage location + * @param phys Physical address base of the memory region * @param size Size of the memory region * @param flags Caching mode and access flags, see K_MAP_* macros */ -void z_mem_map(uint8_t **linear_addr, uintptr_t phys_addr, size_t size, - uint32_t flags); +void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, + uint32_t flags); /** * Given an arbitrary region, provide a aligned region that covers it diff --git a/kernel/include/kernel_arch_interface.h b/kernel/include/kernel_arch_interface.h index 42880ce82bd..c784852e809 100644 --- a/kernel/include/kernel_arch_interface.h +++ b/kernel/include/kernel_arch_interface.h @@ -244,8 +244,6 @@ static inline bool arch_is_in_isr(void); * This API is part of infrastructure still under development and may * change. * - * @see z_mem_map() - * * @param dest Page-aligned Destination virtual address to map * @param addr Page-aligned Source physical address to map * @param size Page-aligned size of the mapped memory region in bytes diff --git a/kernel/mmu.c b/kernel/mmu.c index 31fc786e45f..b6180614f20 100644 --- a/kernel/mmu.c +++ b/kernel/mmu.c @@ -48,7 +48,7 @@ static struct k_spinlock mm_lock; /* Current position for memory mappings in kernel memory. * At the moment, all kernel memory mappings are permanent. - * z_mem_map() mappings start at the end of the address space, and grow + * Memory mappings start at the end of the address space, and grow * downward. * * All of this is under heavy development and is subject to change. 
@@ -79,8 +79,7 @@ size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size, return addr_offset; } -void z_mem_map(uint8_t **virt_addr, uintptr_t phys_addr, size_t size, - uint32_t flags) +void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags) { uintptr_t aligned_addr, addr_offset; size_t aligned_size; @@ -89,7 +88,7 @@ void z_mem_map(uint8_t **virt_addr, uintptr_t phys_addr, size_t size, uint8_t *dest_virt; addr_offset = k_mem_region_align(&aligned_addr, &aligned_size, - phys_addr, size, + phys, size, CONFIG_MMU_PAGE_SIZE); key = k_spin_lock(&mm_lock); @@ -120,7 +119,7 @@ void z_mem_map(uint8_t **virt_addr, uintptr_t phys_addr, size_t size, k_spin_unlock(&mm_lock, key); if (ret == 0) { - *virt_addr = dest_virt + addr_offset; + *virt_ptr = dest_virt + addr_offset; } else { /* This happens if there is an insurmountable problem * with the selected cache modes or access flags @@ -133,6 +132,6 @@ void z_mem_map(uint8_t **virt_addr, uintptr_t phys_addr, size_t size, return; fail: LOG_ERR("memory mapping 0x%lx (size %zu, flags 0x%x) failed", - phys_addr, size, flags); + phys, size, flags); k_panic(); } diff --git a/tests/kernel/mem_protect/mem_map/src/main.c b/tests/kernel/mem_protect/mem_map/src/main.c index 147e92d8805..27cb4cc7c27 100644 --- a/tests/kernel/mem_protect/mem_map/src/main.c +++ b/tests/kernel/mem_protect/mem_map/src/main.c @@ -32,7 +32,7 @@ void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) } -/* z_mem_map() doesn't have alignment requirements, any oddly-sized buffer +/* z_phys_map() doesn't have alignment requirements, any oddly-sized buffer * can get mapped. This will span two pages. 
*/ #define BUF_SIZE 5003 @@ -43,7 +43,7 @@ void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) * * @ingroup kernel_memprotect_tests */ -void test_z_mem_map_rw(void) +void test_z_phys_map_rw(void) { uint8_t *mapped_rw, *mapped_ro; uint8_t *buf = test_page + BUF_OFFSET; @@ -51,8 +51,8 @@ void test_z_mem_map_rw(void) expect_fault = false; /* Map in a page that allows writes */ - z_mem_map(&mapped_rw, (uintptr_t)buf, - BUF_SIZE, BASE_FLAGS | K_MEM_PERM_RW); + z_phys_map(&mapped_rw, (uintptr_t)buf, + BUF_SIZE, BASE_FLAGS | K_MEM_PERM_RW); /* Initialize buf with some bytes */ for (int i = 0; i < BUF_SIZE; i++) { @@ -60,8 +60,8 @@ void test_z_mem_map_rw(void) } /* Map again this time only allowing reads */ - z_mem_map(&mapped_ro, (uintptr_t)buf, - BUF_SIZE, BASE_FLAGS); + z_phys_map(&mapped_ro, (uintptr_t)buf, + BUF_SIZE, BASE_FLAGS); /* Check that the mapped area contains the expected data. */ for (int i = 0; i < BUF_SIZE; i++) { @@ -88,7 +88,7 @@ static void transplanted_function(bool *executed) * * @ingroup kernel_memprotect_tests */ -void test_z_mem_map_exec(void) +void test_z_phys_map_exec(void) { uint8_t *mapped_rw, *mapped_exec, *mapped_ro; bool executed = false; @@ -97,22 +97,22 @@ void test_z_mem_map_exec(void) expect_fault = false; /* Map with write permissions and copy the function into the page */ - z_mem_map(&mapped_rw, (uintptr_t)test_page, - sizeof(test_page), BASE_FLAGS | K_MEM_PERM_RW); + z_phys_map(&mapped_rw, (uintptr_t)test_page, + sizeof(test_page), BASE_FLAGS | K_MEM_PERM_RW); memcpy(mapped_rw, (void *)&transplanted_function, CONFIG_MMU_PAGE_SIZE); /* Now map with execution enabled and try to run the copied fn */ - z_mem_map(&mapped_exec, (uintptr_t)test_page, - sizeof(test_page), BASE_FLAGS | K_MEM_PERM_EXEC); + z_phys_map(&mapped_exec, (uintptr_t)test_page, + sizeof(test_page), BASE_FLAGS | K_MEM_PERM_EXEC); func = (void (*)(bool *executed))mapped_exec; func(&executed); zassert_true(executed, "function did not execute"); 
/* Now map without execution and execution should now fail */ - z_mem_map(&mapped_ro, (uintptr_t)test_page, - sizeof(test_page), BASE_FLAGS); + z_phys_map(&mapped_ro, (uintptr_t)test_page, + sizeof(test_page), BASE_FLAGS); func = (void (*)(bool *executed))mapped_ro; expect_fault = true; @@ -122,7 +122,7 @@ void test_z_mem_map_exec(void) ztest_test_fail(); } #else -void test_z_mem_map_exec(void) +void test_z_phys_map_exec(void) { ztest_test_skip(); } @@ -133,18 +133,18 @@ void test_z_mem_map_exec(void) * * @ingroup kernel_memprotect_tests */ -void test_z_mem_map_side_effect(void) +void test_z_phys_map_side_effect(void) { uint8_t *mapped; expect_fault = false; - /* z_mem_map() is supposed to always create fresh mappings. + /* z_phys_map() is supposed to always create fresh mappings. * Show that by mapping test_page to an RO region, we can still * modify test_page. */ - z_mem_map(&mapped, (uintptr_t)test_page, - sizeof(test_page), BASE_FLAGS); + z_phys_map(&mapped, (uintptr_t)test_page, + sizeof(test_page), BASE_FLAGS); /* Should NOT fault */ test_page[0] = 42; @@ -160,9 +160,9 @@ void test_z_mem_map_side_effect(void) void test_main(void) { ztest_test_suite(test_mem_map, - ztest_unit_test(test_z_mem_map_rw), - ztest_unit_test(test_z_mem_map_exec), - ztest_unit_test(test_z_mem_map_side_effect) + ztest_unit_test(test_z_phys_map_rw), + ztest_unit_test(test_z_phys_map_exec), + ztest_unit_test(test_z_phys_map_side_effect) ); ztest_run_test_suite(test_mem_map); }