drivers: xen: gnttab: limit number of grant frames via config

Xen allocates a region that should be used as a place for grant table
mapping and passes it via the device tree. By design, this region may
be quite large (up to 4096+ frames/pages), but the number of frames is
usually limited by the max_grant_frames domain parameter (usually 32 or
64).

Linux maps these frames on demand and, when it reaches the mentioned
limit, simply stops expanding. At the same time, the previous
implementation of the Zephyr gnttab driver calculated the number of
grant frames by dividing the whole region by the page size and tried to
map all of it during init. If the
region specified in the device tree was larger than the
max_grant_frames set by Xen, it would fail on ASSERT, since Xen would
return an error.

To address these issues, CONFIG_NR_GRANT_FRAMES was introduced. It
allows limiting the size of the grant table and mapping only the
required number of pages. Additionally, a check against the Xen
max_grant_frames limit was added to initialization - if that value is
less than CONFIG_NR_GRANT_FRAMES, k_panic() will be called.

Signed-off-by: Dmytro Firsov <dmytro_firsov@epam.com>
This commit is contained in:
Dmytro Firsov 2024-05-14 20:53:41 +03:00 committed by Benjamin Cabé
parent 0e21b25245
commit cd31a41328
2 changed files with 53 additions and 15 deletions

View File

@ -1,5 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2022-2023 EPAM Systems
# Copyright (c) 2022-2024 EPAM Systems
if XEN
@ -20,6 +20,15 @@ config XEN_GRANT_TABLE_INIT_PRIORITY
depends on XEN_GRANT_TABLE
default 50
config NR_GRANT_FRAMES
int "Number of grant frames mapped to Zephyr"
depends on XEN_GRANT_TABLE
default 1
help
This value configures how many grant frames will be supported by the
Zephyr grant table driver at runtime. This value should be <= the
max_grant_frames configured for the domain in the Xen hypervisor.
endmenu
endif # XEN

View File

@ -36,16 +36,12 @@ LOG_MODULE_REGISTER(xen_gnttab);
#define GOP_RETRY_DELAY 200
#define GNTTAB_GREF_USED (UINT32_MAX - 1)
#define GNTTAB_SIZE DT_REG_SIZE_BY_IDX(DT_INST(0, xen_xen), 0)
BUILD_ASSERT(!(GNTTAB_SIZE % XEN_PAGE_SIZE), "Size of gnttab have to be aligned on XEN_PAGE_SIZE");
/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
#define NR_GRANT_FRAMES (GNTTAB_SIZE / XEN_PAGE_SIZE)
#define NR_GRANT_ENTRIES \
(NR_GRANT_FRAMES * XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
#define GNTTAB_SIZE (CONFIG_NR_GRANT_FRAMES * XEN_PAGE_SIZE)
#define NR_GRANT_ENTRIES (GNTTAB_SIZE / sizeof(grant_entry_v1_t))
BUILD_ASSERT(GNTTAB_SIZE <= DT_REG_SIZE_BY_IDX(DT_INST(0, xen_xen), 0),
"Number of grant frames is bigger than grant table DT region!");
BUILD_ASSERT(GNTTAB_SIZE <= CONFIG_KERNEL_VM_SIZE);
DEVICE_MMIO_TOPLEVEL_STATIC(grant_tables, DT_INST(0, xen_xen));
static struct gnttab {
struct k_sem sem;
@ -303,13 +299,39 @@ const char *gnttabop_error(int16_t status)
}
}
/* Picked from Linux implementation */
#define LEGACY_MAX_GNT_FRAMES_SUPPORTED 4
static unsigned long gnttab_get_max_frames(void)
{
int ret;
struct gnttab_query_size q = {
.dom = DOMID_SELF,
};
ret = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &q, 1);
if ((ret < 0) || (q.status != GNTST_okay)) {
return LEGACY_MAX_GNT_FRAMES_SUPPORTED;
}
return q.max_nr_frames;
}
static int gnttab_init(void)
{
grant_ref_t gref;
struct xen_add_to_physmap xatp;
struct gnttab_setup_table setup;
xen_pfn_t frames[NR_GRANT_FRAMES];
xen_pfn_t frames[CONFIG_NR_GRANT_FRAMES];
int rc = 0, i;
unsigned long xen_max_grant_frames;
uintptr_t gnttab_base = DT_REG_ADDR_BY_IDX(DT_INST(0, xen_xen), 0);
mm_reg_t gnttab_reg;
xen_max_grant_frames = gnttab_get_max_frames();
if (xen_max_grant_frames < CONFIG_NR_GRANT_FRAMES) {
LOG_ERR("Xen max_grant_frames is less than CONFIG_NR_GRANT_FRAMES!");
k_panic();
}
/* Will be taken/given during gnt_refs allocation/release */
k_sem_init(&gnttab.sem, NR_GRANT_ENTRIES - GNTTAB_NR_RESERVED_ENTRIES,
@ -322,25 +344,32 @@ static int gnttab_init(void)
gnttab.gref_list[gref] = gref + 1;
}
for (i = 0; i < NR_GRANT_FRAMES; i++) {
for (i = 0; i < CONFIG_NR_GRANT_FRAMES; i++) {
xatp.domid = DOMID_SELF;
xatp.size = 0;
xatp.space = XENMAPSPACE_grant_table;
xatp.idx = i;
xatp.gpfn = xen_virt_to_gfn(Z_TOPLEVEL_ROM_NAME(grant_tables).phys_addr) + i;
xatp.gpfn = xen_virt_to_gfn(gnttab_base) + i;
rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
__ASSERT(!rc, "add_to_physmap failed; status = %d\n", rc);
}
setup.dom = DOMID_SELF;
setup.nr_frames = NR_GRANT_FRAMES;
setup.nr_frames = CONFIG_NR_GRANT_FRAMES;
set_xen_guest_handle(setup.frame_list, frames);
rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
__ASSERT((!rc) && (!setup.status), "Table setup failed; status = %s\n",
gnttabop_error(setup.status));
DEVICE_MMIO_TOPLEVEL_MAP(grant_tables, K_MEM_CACHE_WB | K_MEM_PERM_RW);
gnttab.table = (grant_entry_v1_t *)DEVICE_MMIO_TOPLEVEL_GET(grant_tables);
/*
* Xen DT region reserved for grant table (first reg in hypervisor node)
* may be much bigger than CONFIG_NR_GRANT_FRAMES multiplied by page size.
* Thus, we need to map only part of region, that is limited by config.
* The size of this part is calculated in GNTTAB_SIZE macro and used as
* parameter for device_map()
*/
device_map(&gnttab_reg, gnttab_base, GNTTAB_SIZE, K_MEM_CACHE_WB | K_MEM_PERM_RW);
gnttab.table = (grant_entry_v1_t *)gnttab_reg;
LOG_DBG("%s: grant table mapped\n", __func__);