ANDROID: KVM: arm64: Allocate host fp/simd state later in initialization

Allocate and map the hyp memory that holds the host's fp/simd state,
which is also used for SVE and SME, later in the initialization
process. The amount of memory needed to track the host's state varies
with the number of CPUs in the system, whether SVE is supported, and
the SVE vector size. Much of the state needed to extract this
information hasn't been initialized yet by the time kvm_hyp_reserve()
runs.
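
For illustration only, a rough standalone sketch of why the footprint
isn't known up front: the SVE save area scales with the vector length,
so the per-CPU size (and page count) can't be computed until SVE
probing has run. The layout below follows the architectural register
file (32 Z regs, 16 P regs, FFR) with example vector lengths; it is not
the structure the hypervisor actually uses.

/*
 * Illustrative sketch only: rough per-CPU FP/SIMD vs SVE save-area sizes.
 * The SVE figure follows the architectural register file (32 Z regs,
 * 16 predicate regs, FFR); the real hypervisor layout may differ.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* FPSIMD: 32 x 128-bit V registers plus FPSR and FPCR */
static unsigned long fpsimd_bytes(void)
{
        return 32 * 16 + 2 * sizeof(unsigned int);
}

/* SVE: 32 Z regs of vl bytes, 16 P regs and FFR of vl/8 bytes each */
static unsigned long sve_bytes(unsigned long vl)
{
        return 32 * vl + 17 * (vl / 8);
}

int main(void)
{
        unsigned long vls[] = { 16, 32, 64, 256 };      /* 128- to 2048-bit vectors */
        unsigned long i;

        printf("fpsimd only: %lu bytes\n", fpsimd_bytes());
        for (i = 0; i < sizeof(vls) / sizeof(vls[0]); i++)
                printf("sve vl=%-3lu: %lu bytes, %lu page(s) per cpu\n",
                       vls[i], sve_bytes(vls[i]),
                       (sve_bytes(vls[i]) + PAGE_SIZE - 1) / PAGE_SIZE);
        return 0;
}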

Fixes: 6dc9af85f7 ("ANDROID: KVM: arm64: Allocate host fp state at pkvm init rather than per cpu")
Bug: 303684934
Signed-off-by: Fuad Tabba <tabba@google.com>
Change-Id: I744be685a107ddd92c6975bafb0149aebad7bb55
Fuad Tabba 2023-10-10 15:59:18 +01:00
parent 83ebd50235
commit 6334225e9b
7 changed files with 60 additions and 30 deletions

View file

@@ -259,6 +259,8 @@ extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);
extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[];
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

View file

@@ -414,10 +414,4 @@ static inline size_t pkvm_host_fp_state_size(void)
        return sizeof(struct user_fpsimd_state);
}

static inline unsigned long hyp_host_fp_pages(unsigned long nr_cpus)
{
        return PAGE_ALIGN(size_mul(nr_cpus, pkvm_host_fp_state_size())) >>
               PAGE_SHIFT;
}

#endif /* __ARM64_KVM_PKVM_H__ */
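
For context, the helper removed here sized a single physically
contiguous carve-out for all CPUs up front, whereas the replacement
path (see pkvm_host_fp_state_order() in the arm.c hunk below) rounds
each CPU's state up to a power-of-two page order. A standalone sketch
contrasting the two page-count calculations, with a made-up state size
and CPU count, and with get_order() reimplemented locally:

/*
 * Illustrative comparison of the two sizing schemes; the state size
 * and CPU count are example values, and get_order() is reimplemented
 * locally for the sketch.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Smallest order such that (1 << order) pages cover size bytes */
static unsigned int get_order_local(unsigned long size)
{
        unsigned int order = 0;
        unsigned long pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

        while ((1UL << order) < pages)
                order++;
        return order;
}

int main(void)
{
        unsigned long state_size = 8736;        /* e.g. SVE state at a 2048-bit VL */
        unsigned long nr_cpus = 8;

        /* Old scheme: one contiguous region reserved for all CPUs */
        unsigned long contig_pages = PAGE_ALIGN(nr_cpus * state_size) >> PAGE_SHIFT;

        /* New scheme: a separate order-sized allocation per CPU */
        unsigned long per_cpu_pages = 1UL << get_order_local(state_size);

        printf("contiguous reservation: %lu pages total\n", contig_pages);
        printf("per-cpu allocations:    %lu pages x %lu cpus = %lu pages\n",
               per_cpu_pages, nr_cpus, per_cpu_pages * nr_cpus);
        return 0;
}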

View file

@@ -1642,6 +1642,11 @@ static unsigned long nvhe_percpu_order(void)
        return size ? get_order(size) : 0;
}

static inline size_t pkvm_host_fp_state_order(void)
{
        return get_order(pkvm_host_fp_state_size());
}

/* A lookup table holding the hypervisor VA for each vector slot */
static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
@@ -2006,6 +2011,8 @@ static void teardown_hyp_mode(void)
        for_each_possible_cpu(cpu) {
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
                free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
                free_pages(kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu],
                           pkvm_host_fp_state_order());
        }
}
@@ -2092,6 +2099,34 @@ static int kvm_hyp_init_protection(u32 hyp_va_bits)
        return 0;
}

static int init_pkvm_host_fp_state(void)
{
        int cpu;

        if (!is_protected_kvm_enabled())
                return 0;

        /* Allocate pages for protected-mode host-fp state. */
        for_each_possible_cpu(cpu) {
                struct page *page;
                unsigned long addr;

                page = alloc_pages(GFP_KERNEL, pkvm_host_fp_state_order());
                if (!page)
                        return -ENOMEM;

                addr = (unsigned long)page_address(page);
                kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu] = addr;
        }

        /*
         * Don't map the pages in hyp since these are only used in protected
         * mode, which will (re)create its own mapping when initialized.
         */

        return 0;
}

/**
* Inits Hyp-mode on all online CPUs
*/
@@ -2259,6 +2294,10 @@ static int init_hyp_mode(void)
                cpu_prepare_hyp_mode(cpu);
        }

        err = init_pkvm_host_fp_state();
        if (err)
                goto out_err;

        kvm_hyp_init_symbols();

        /* TODO: Real .h interface */
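
A note on the error path: if an allocation fails, init_pkvm_host_fp_state()
returns -ENOMEM, init_hyp_mode() takes its error label, and
teardown_hyp_mode() frees only the entries that were populated
(free_pages() ignores a zero address). A minimal standalone sketch of
that populate-then-teardown pattern, with malloc()/free() standing in
for alloc_pages()/free_pages() and hypothetical helper names:

/*
 * Sketch of the populate-then-teardown pattern used above, with
 * malloc()/free() standing in for alloc_pages()/free_pages().
 * free(NULL) is a no-op, much like free_pages() on a zero address,
 * so tearing down a partially populated table is safe.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

static void *host_fp_state[NR_CPUS];

static int init_host_fp_state(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                host_fp_state[cpu] = malloc(4096);
                if (!host_fp_state[cpu])
                        return -1;      /* caller unwinds via teardown */
        }
        return 0;
}

static void teardown_host_fp_state(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                free(host_fp_state[cpu]);       /* NULL entries are ignored */
}

int main(void)
{
        if (init_host_fp_state()) {
                teardown_host_fp_state();
                return 1;
        }
        puts("per-cpu state allocated");
        teardown_host_fp_state();
        return 0;
}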

View file

@@ -82,8 +82,6 @@ struct pkvm_hyp_vm {
        struct pkvm_hyp_vcpu *vcpus[];
};

extern void *host_fp_state;

static inline struct pkvm_hyp_vm *
pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
{
@@ -107,7 +105,6 @@ extern phys_addr_t pvmfw_base;
extern phys_addr_t pvmfw_size;

void pkvm_hyp_vm_table_init(void *tbl);
void pkvm_hyp_host_fp_init(void *host_fp);

int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
                   unsigned long pgd_hva, unsigned long last_ran_hva);

View file

@@ -41,17 +41,11 @@ static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);
 *
 * Only valid when (fp_state == FP_STATE_GUEST_OWNED) in the hyp vCPU structure.
 */
void *host_fp_state;
unsigned long __ro_after_init kvm_arm_hyp_host_fp_state[NR_CPUS];

static void *__get_host_fpsimd_bytes(void)
{
        void *state = host_fp_state +
                      size_mul(pkvm_host_fp_state_size(), hyp_smp_processor_id());

        if (state < host_fp_state)
                return NULL;

        return state;
        return kern_hyp_va((void *) kvm_arm_hyp_host_fp_state[hyp_smp_processor_id()]);
}

struct user_fpsimd_state *get_host_fpsimd_state(struct kvm_vcpu *vcpu)
@@ -295,12 +289,6 @@ void pkvm_hyp_vm_table_init(void *tbl)
        vm_table = tbl;
}

void pkvm_hyp_host_fp_init(void *host_fp)
{
        WARN_ON(host_fp_state);
        host_fp_state = host_fp;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
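
The EL2-side lookup changes shape here: instead of offsetting into one
shared buffer by cpu * size, the hypervisor now reads the per-CPU
kernel VA recorded by the host and converts it with kern_hyp_va(). A
standalone sketch of the two addressing schemes, with kern_hyp_va()
modelled as a fixed offset and fake table contents purely for
illustration:

/*
 * Sketch of the old (base + cpu * size) and new (per-CPU table entry
 * converted to a hyp VA) lookups; kern_hyp_va() is modelled as a
 * fixed offset and all addresses are invented.
 */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS         4
#define STATE_SIZE      528UL
#define HYP_OFFSET      0x1000000UL

/* Per-CPU kernel VAs, as the host would record at init time (fake values) */
static uintptr_t host_fp_table[NR_CPUS] = {
        0x40000000, 0x40200000, 0x40400000, 0x40600000,
};

static uintptr_t kern_hyp_va_sketch(uintptr_t kva)
{
        return kva + HYP_OFFSET;
}

/* Old scheme: one shared buffer, indexed by cpu * size */
static uintptr_t old_lookup(uintptr_t base, unsigned int cpu)
{
        return base + cpu * STATE_SIZE;
}

/* New scheme: per-CPU table entry, converted to a hyp VA */
static uintptr_t new_lookup(unsigned int cpu)
{
        return kern_hyp_va_sketch(host_fp_table[cpu]);
}

int main(void)
{
        unsigned int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu%u: old=%#lx new=%#lx\n", cpu,
                       (unsigned long)old_lookup(0x50000000, cpu),
                       (unsigned long)new_lookup(cpu));
        return 0;
}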

View file

@@ -34,7 +34,6 @@ static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *ffa_proxy_pages;
static void *hyp_host_fp_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;
@@ -69,10 +68,21 @@ static int divide_memory_pool(void *virt, unsigned long size)
        if (!ffa_proxy_pages)
                return -ENOMEM;

        nr_pages = hyp_host_fp_pages(hyp_nr_cpus);
        hyp_host_fp_base = hyp_early_alloc_contig(nr_pages);
        if (!hyp_host_fp_base)
                return -ENOMEM;

        return 0;
}

static int create_hyp_host_fp_mappings(void)
{
        void *start, *end;
        int ret, i;

        for (i = 0; i < hyp_nr_cpus; i++) {
                start = (void *)kern_hyp_va(kvm_arm_hyp_host_fp_state[i]);
                end = start + PAGE_ALIGN(pkvm_host_fp_state_size());
                ret = pkvm_create_mappings(start, end, PAGE_HYP);
                if (ret)
                        return ret;
        }

        return 0;
}
@@ -164,6 +174,8 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
                params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
        }

        create_hyp_host_fp_mappings();

        /*
         * Map the host sections RO in the hypervisor, but transfer the
         * ownership from the host to the hypervisor itself to make sure they
@@ -405,7 +417,6 @@ void __noreturn __pkvm_init_finalise(void)
                goto out;

        pkvm_hyp_vm_table_init(vm_table_base);
        pkvm_hyp_host_fp_init(hyp_host_fp_base);
out:
        /*
         * We tail-called to here from handle___pkvm_init() and will not return,
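
This mapping step is the counterpart of the "don't map the pages in
hyp" comment in init_pkvm_host_fp_state(): once protected mode
initializes, each CPU's pages are mapped hyp-private, covering
PAGE_ALIGN(pkvm_host_fp_state_size()) bytes per CPU. A standalone
sketch of the per-CPU ranges such a loop covers, using example hyp
base addresses and an example state size:

/*
 * Sketch of the address ranges create_hyp_host_fp_mappings() covers;
 * the base addresses and state size below are example values only.
 */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS         4
#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Hypothetical per-CPU hyp VAs of the host fp/simd state pages */
static uintptr_t host_fp_hyp_va[NR_CPUS] = {
        0x20000000, 0x20001000, 0x20002000, 0x20003000,
};

int main(void)
{
        unsigned long len = PAGE_ALIGN(528);    /* example per-CPU state size */
        unsigned int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu%u: map [%#lx, %#lx) hyp-private\n", cpu,
                       (unsigned long)host_fp_hyp_va[cpu],
                       (unsigned long)(host_fp_hyp_va[cpu] + len));
        return 0;
}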

View file

@@ -173,7 +173,6 @@ void __init kvm_hyp_reserve(void)
        hyp_mem_pages += hyp_vm_table_pages();
        hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
        hyp_mem_pages += hyp_ffa_proxy_pages();
        hyp_mem_pages += hyp_host_fp_pages(num_possible_cpus());

        /*
         * Try to allocate a PMD-aligned region to reduce TLB pressure once