diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 6755a0d9f3f0..60e834c85ea7 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -259,6 +259,8 @@ extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
 DECLARE_KVM_NVHE_SYM(__per_cpu_start);
 DECLARE_KVM_NVHE_SYM(__per_cpu_end);
 
+extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[];
+
 DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
 
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index a1459fbe2d46..3077c2514a6d 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -414,10 +414,4 @@ static inline size_t pkvm_host_fp_state_size(void)
 	return sizeof(struct user_fpsimd_state);
 }
 
-static inline unsigned long hyp_host_fp_pages(unsigned long nr_cpus)
-{
-	return PAGE_ALIGN(size_mul(nr_cpus, pkvm_host_fp_state_size())) >>
-		PAGE_SHIFT;
-}
-
 #endif /* __ARM64_KVM_PKVM_H__ */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c1d58bf4f76e..de68a5c2cfa2 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1642,6 +1642,11 @@ static unsigned long nvhe_percpu_order(void)
 	return size ? get_order(size) : 0;
 }
 
+static inline size_t pkvm_host_fp_state_order(void)
+{
+	return get_order(pkvm_host_fp_state_size());
+}
+
 /* A lookup table holding the hypervisor VA for each vector slot */
 static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
 
@@ -2006,6 +2011,8 @@ static void teardown_hyp_mode(void)
 	for_each_possible_cpu(cpu) {
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
 		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
+		free_pages(kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu],
+			   pkvm_host_fp_state_order());
 	}
 }
 
@@ -2092,6 +2099,34 @@ static int kvm_hyp_init_protection(u32 hyp_va_bits)
 	return 0;
 }
 
+static int init_pkvm_host_fp_state(void)
+{
+	int cpu;
+
+	if (!is_protected_kvm_enabled())
+		return 0;
+
+	/* Allocate pages for protected-mode host-fp state. */
+	for_each_possible_cpu(cpu) {
+		struct page *page;
+		unsigned long addr;
+
+		page = alloc_pages(GFP_KERNEL, pkvm_host_fp_state_order());
+		if (!page)
+			return -ENOMEM;
+
+		addr = (unsigned long)page_address(page);
+		kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu] = addr;
+	}
+
+	/*
+	 * Don't map the pages in hyp since these are only used in protected
+	 * mode, which will (re)create its own mapping when initialized.
+	 */
+
+	return 0;
+}
+
 /**
  * Inits Hyp-mode on all online CPUs
  */
@@ -2259,6 +2294,10 @@ static int init_hyp_mode(void)
 		cpu_prepare_hyp_mode(cpu);
 	}
 
+	err = init_pkvm_host_fp_state();
+	if (err)
+		goto out_err;
+
 	kvm_hyp_init_symbols();
 
 	/* TODO: Real .h interface */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
index b11fd818fff9..943cf7fc7124 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
@@ -82,8 +82,6 @@ struct pkvm_hyp_vm {
 	struct pkvm_hyp_vcpu *vcpus[];
 };
 
-extern void *host_fp_state;
-
 static inline struct pkvm_hyp_vm *
 pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
 {
@@ -107,7 +105,6 @@ extern phys_addr_t pvmfw_base;
 extern phys_addr_t pvmfw_size;
 
 void pkvm_hyp_vm_table_init(void *tbl);
-void pkvm_hyp_host_fp_init(void *host_fp);
 
 int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
 		   unsigned long pgd_hva, unsigned long last_ran_hva);
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index bb25de654934..8a74afc5da5f 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -41,17 +41,11 @@ static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);
  *
  * Only valid when (fp_state == FP_STATE_GUEST_OWNED) in the hyp vCPU structure.
  */
-void *host_fp_state;
+unsigned long __ro_after_init kvm_arm_hyp_host_fp_state[NR_CPUS];
 
 static void *__get_host_fpsimd_bytes(void)
 {
-	void *state = host_fp_state +
-		size_mul(pkvm_host_fp_state_size(), hyp_smp_processor_id());
-
-	if (state < host_fp_state)
-		return NULL;
-
-	return state;
+	return kern_hyp_va((void *)kvm_arm_hyp_host_fp_state[hyp_smp_processor_id()]);
 }
 
 struct user_fpsimd_state *get_host_fpsimd_state(struct kvm_vcpu *vcpu)
@@ -295,12 +289,6 @@ void pkvm_hyp_vm_table_init(void *tbl)
 	vm_table = tbl;
 }
 
-void pkvm_hyp_host_fp_init(void *host_fp)
-{
-	WARN_ON(host_fp_state);
-	host_fp_state = host_fp;
-}
-
 /*
  * Return the hyp vm structure corresponding to the handle.
  */
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index c0f502c5edb2..ac3a0dc79083 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -34,7 +34,6 @@ static void *vm_table_base;
 static void *hyp_pgt_base;
 static void *host_s2_pgt_base;
 static void *ffa_proxy_pages;
-static void *hyp_host_fp_base;
 static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
 static struct hyp_pool hpool;
 
@@ -69,10 +68,21 @@ static int divide_memory_pool(void *virt, unsigned long size)
 	if (!ffa_proxy_pages)
 		return -ENOMEM;
 
-	nr_pages = hyp_host_fp_pages(hyp_nr_cpus);
-	hyp_host_fp_base = hyp_early_alloc_contig(nr_pages);
-	if (!hyp_host_fp_base)
-		return -ENOMEM;
+	return 0;
+}
+
+static int create_hyp_host_fp_mappings(void)
+{
+	void *start, *end;
+	int ret, i;
+
+	for (i = 0; i < hyp_nr_cpus; i++) {
+		start = (void *)kern_hyp_va(kvm_arm_hyp_host_fp_state[i]);
+		end = start + PAGE_ALIGN(pkvm_host_fp_state_size());
+		ret = pkvm_create_mappings(start, end, PAGE_HYP);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
@@ -164,6 +174,8 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
 		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
 	}
 
+	create_hyp_host_fp_mappings();
+
 	/*
 	 * Map the host sections RO in the hypervisor, but transfer the
 	 * ownership from the host to the hypervisor itself to make sure they
@@ -405,7 +417,6 @@ void __noreturn __pkvm_init_finalise(void)
 		goto out;
 
 	pkvm_hyp_vm_table_init(vm_table_base);
-	pkvm_hyp_host_fp_init(hyp_host_fp_base);
 out:
 	/*
 	 * We tail-called to here from handle___pkvm_init() and will not return,
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 789753790a28..1cff760b8f23 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -173,7 +173,6 @@ void __init kvm_hyp_reserve(void)
 	hyp_mem_pages += hyp_vm_table_pages();
 	hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
 	hyp_mem_pages += hyp_ffa_proxy_pages();
-	hyp_mem_pages += hyp_host_fp_pages(num_possible_cpus());
 
 	/*
	 * Try to allocate a PMD-aligned region to reduce TLB pressure once