ANDROID: KVM: arm64: Allocate host fp state at pkvm init rather than per cpu

Subsequent patches will augment this state with space for tracking
the host SVE state. The SVE state size is not static, and there is
no support for dynamic per_cpu allocation in hyp.

This is a first step towards allocating the SVE state under the
same umbrella.
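
For illustration only, a minimal userspace sketch of the layout this
moves to: one contiguous buffer sized for all CPUs at init time, with
each CPU's slot found by indexing into it rather than via a per_cpu
variable. The names (fp_state, host_fp_buf, host_fp_for_cpu) are
placeholders, and calloc() stands in for hyp's early allocator:

  #include <errno.h>
  #include <stdlib.h>

  /* Placeholder for struct user_fpsimd_state. */
  struct fp_state {
          unsigned long vregs[64];
  };

  /* One flat buffer holding every cpu's slot, sized once at init. */
  static void *host_fp_buf;

  static int host_fp_init(unsigned int nr_cpus)
  {
          host_fp_buf = calloc(nr_cpus, sizeof(struct fp_state));
          return host_fp_buf ? 0 : -ENOMEM;
  }

  /* Index the flat buffer by cpu id instead of using a per_cpu variable. */
  static struct fp_state *host_fp_for_cpu(unsigned int cpu)
  {
          return (struct fp_state *)((char *)host_fp_buf +
                                     cpu * sizeof(struct fp_state));
  }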

Signed-off-by: Fuad Tabba <tabba@google.com>

Bug: 267291591
Change-Id: I0902623a5ab81a80105f5b00a26765d257bc1ceb
Fuad Tabba authored 2023-02-13 09:40:18 +00:00, committed by Lee Jones
parent dbe1e94b31
commit 6dc9af85f7
6 changed files with 31 additions and 5 deletions


@@ -385,4 +385,10 @@ static inline unsigned long hyp_ffa_proxy_pages(void)
         return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
 }

+static inline unsigned long hyp_host_fp_pages(unsigned long nr_cpus)
+{
+        return PAGE_ALIGN(nr_cpus * sizeof(struct user_fpsimd_state)) >>
+                PAGE_SHIFT;
+}
+
 #endif /* __ARM64_KVM_PKVM_H__ */


@@ -82,7 +82,7 @@ struct pkvm_hyp_vm {
         struct pkvm_hyp_vcpu *vcpus[];
 };

-DECLARE_PER_CPU(struct user_fpsimd_state, loaded_host_fpsimd_state);
+extern void *host_fp_state;

 static inline struct pkvm_hyp_vm *
 pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
@@ -107,6 +107,7 @@ extern phys_addr_t pvmfw_base;
 extern phys_addr_t pvmfw_size;

 void pkvm_hyp_vm_table_init(void *tbl);
+void pkvm_hyp_host_fp_init(void *host_fp);

 int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
                    unsigned long pgd_hva, unsigned long last_ran_hva);


@@ -413,7 +413,10 @@ static const hyp_entry_exit_handler_fn exit_hyp_vm_handlers[] = {

 static struct user_fpsimd_state *get_host_fpsimd_state(void)
 {
-        return this_cpu_ptr(&loaded_host_fpsimd_state);
+        char *state = (char *) host_fp_state +
+                      sizeof(struct user_fpsimd_state) * hyp_smp_processor_id();
+
+        return (struct user_fpsimd_state *) state;
 }

 static void flush_hyp_vgic_state(struct pkvm_hyp_vcpu *hyp_vcpu)


@@ -32,12 +32,14 @@ unsigned int kvm_arm_vmid_bits;
 static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);

 /*
- * Host FPSIMD state. Written to when the guest accesses its own FPSIMD state,
- * and read when the guest state is live and we need to switch back to the host.
+ * Host fp state for all cpus. This could include the host simd state, as well
+ * as the sve and sme states if supported. Written to when the guest accesses
+ * its own FPSIMD state, and read when the guest state is live and we need to
+ * switch back to the host.
  *
  * Only valid when (fp_state == FP_STATE_GUEST_OWNED) in the hyp vCPU structure.
  */
-DEFINE_PER_CPU(struct user_fpsimd_state, loaded_host_fpsimd_state);
+void *host_fp_state;

 /*
  * Set trap register values based on features in ID_AA64PFR0.
@@ -264,6 +266,12 @@ void pkvm_hyp_vm_table_init(void *tbl)
         vm_table = tbl;
 }

+void pkvm_hyp_host_fp_init(void *host_fp)
+{
+        WARN_ON(host_fp_state);
+        host_fp_state = host_fp;
+}
+
 /*
  * Return the hyp vm structure corresponding to the handle.
  */


@@ -34,6 +34,7 @@ static void *vm_table_base;
 static void *hyp_pgt_base;
 static void *host_s2_pgt_base;
 static void *ffa_proxy_pages;
+static void *hyp_host_fp_base;
 static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
 static struct hyp_pool hpool;
@@ -68,6 +69,11 @@ static int divide_memory_pool(void *virt, unsigned long size)
         if (!ffa_proxy_pages)
                 return -ENOMEM;

+        nr_pages = hyp_host_fp_pages(hyp_nr_cpus);
+        hyp_host_fp_base = hyp_early_alloc_contig(nr_pages);
+        if (!hyp_host_fp_base)
+                return -ENOMEM;
+
         return 0;
 }
@@ -370,6 +376,7 @@ void __noreturn __pkvm_init_finalise(void)
                 goto out;

         pkvm_hyp_vm_table_init(vm_table_base);
+        pkvm_hyp_host_fp_init(hyp_host_fp_base);
 out:
         /*
          * We tail-called to here from handle___pkvm_init() and will not return,


@@ -173,6 +173,7 @@ void __init kvm_hyp_reserve(void)
         hyp_mem_pages += hyp_vm_table_pages();
         hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
         hyp_mem_pages += hyp_ffa_proxy_pages();
+        hyp_mem_pages += hyp_host_fp_pages(num_possible_cpus());

         /*
          * Try to allocate a PMD-aligned region to reduce TLB pressure once