ANDROID: KVM: arm64: Pre-populate host stage2

We will soon attempt to avoid any memory allocations from the host mem
abort path. In order to pave the way towards supporting this, let's
pre-populate the host stage-2 for the entire address space using as many
block mappings as possible. Some of these mappings may need to be
broken up again shortly afterwards (by fix_host_ownership(), for example),
so this doesn't guarantee the absence of memory aborts altogether, but it
helps get the structure of the page-table into the right shape early on.

Bug: 264070847
Change-Id: Ib3ce25c893f779437ce473d64e08e8876870556c
Signed-off-by: Quentin Perret <qperret@google.com>
Quentin Perret 2023-04-03 13:41:02 +00:00 committed by Carlos Llamas
parent 0b6736459a
commit 04ddc7eec0


@@ -146,6 +146,27 @@ static void prepare_host_vtcr(void)
 					  id_aa64mmfr1_el1_sys_val, phys_shift);
 }
 
+static int prepopulate_host_stage2(void)
+{
+	struct memblock_region *reg;
+	u64 addr = 0;
+	int i, ret;
+
+	for (i = 0; i < hyp_memblock_nr; i++) {
+		reg = &hyp_memory[i];
+		ret = host_stage2_idmap_locked(addr, reg->base - addr, PKVM_HOST_MMIO_PROT, false);
+		if (ret)
+			return ret;
+		ret = host_stage2_idmap_locked(reg->base, reg->size, PKVM_HOST_MEM_PROT, false);
+		if (ret)
+			return ret;
+		addr = reg->base + reg->size;
+	}
+
+	return host_stage2_idmap_locked(addr, BIT(host_mmu.pgt.ia_bits) - addr, PKVM_HOST_MMIO_PROT,
+					false);
+}
+
 int kvm_host_prepare_stage2(void *pgt_pool_base)
 {
 	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
@@ -172,7 +193,7 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
 	mmu->pgt = &host_mmu.pgt;
 	atomic64_set(&mmu->vmid.id, 0);
 
-	return 0;
+	return prepopulate_host_stage2();
 }
 
 static bool guest_stage2_force_pte_cb(u64 addr, u64 end,
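
For illustration, here is a minimal user-space sketch of the walk performed by
prepopulate_host_stage2() above: it partitions a toy IPA space into MEM mappings
for each memory region and MMIO mappings for the gaps in between. The struct
region list, the prot enum and the idmap() stub are hypothetical stand-ins for
hyp_memory[], PKVM_HOST_MEM_PROT/PKVM_HOST_MMIO_PROT and
host_stage2_idmap_locked(); none of them come from the patch itself.

/*
 * Minimal sketch, for illustration only: model the pre-population walk in
 * user space.  'struct region', 'enum prot' and idmap() are hypothetical
 * stand-ins, not part of the kernel patch.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct region {
	uint64_t base;
	uint64_t size;
};

enum prot { PROT_MMIO, PROT_MEM };

/* Stand-in for host_stage2_idmap_locked(): just print the mapping. */
static int idmap(uint64_t addr, uint64_t size, enum prot prot)
{
	if (!size)	/* adjacent regions leave no MMIO gap to map */
		return 0;
	printf("map [%#" PRIx64 ", %#" PRIx64 ") as %s\n",
	       addr, addr + size, prot == PROT_MEM ? "MEM" : "MMIO");
	return 0;
}

/*
 * Walk the (sorted, non-overlapping) memory regions: the gap before each
 * region becomes an MMIO mapping, the region itself a MEM mapping, and the
 * tail up to the end of the IPA space is MMIO again, so the whole address
 * space ends up covered.
 */
static int prepopulate(const struct region *regs, int nr, uint64_t ia_end)
{
	uint64_t addr = 0;
	int i, ret;

	for (i = 0; i < nr; i++) {
		ret = idmap(addr, regs[i].base - addr, PROT_MMIO);
		if (ret)
			return ret;
		ret = idmap(regs[i].base, regs[i].size, PROT_MEM);
		if (ret)
			return ret;
		addr = regs[i].base + regs[i].size;
	}

	return idmap(addr, ia_end - addr, PROT_MMIO);
}

int main(void)
{
	/* Hypothetical layout: two RAM banks in a 1GiB IPA space. */
	const struct region regs[] = {
		{ .base = 0x00200000, .size = 0x10000000 },
		{ .base = 0x20000000, .size = 0x08000000 },
	};

	return prepopulate(regs, 2, 1ULL << 30);
}

Running the sketch prints alternating MMIO/MEM ranges that cover the whole
space, which is the shape the patch tries to install up-front with block
mappings so that the host mem abort path has less page-table work left to do.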