ANDROID: KVM: arm64: Support TLB invalidation in guest context
Typically, TLB invalidation of guest stage-2 mappings using nVHE is
performed by a hypercall originating from the host. For the invalidation
instruction to be effective, therefore, __tlb_switch_to_{guest,host}()
swizzle the active stage-2 context around the TLBI instruction.
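For orientation, the pre-patch sequence distilled from __kvm_tlb_flush_vmid() in the diff below looks roughly like this (a simplified sketch, not code added by this patch):

	struct tlb_inv_context cxt;

	__tlb_switch_to_guest(mmu, &cxt);	/* point VTTBR_EL2 at the target VMID */
	__tlbi(vmalls12e1is);			/* invalidate by the now-active VMID */
	dsb(ish);
	isb();
	__tlb_switch_to_host(&cxt);		/* restore the host stage-2 context */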
With guest-to-host memory sharing and unsharing hypercalls originating
from the guest under pKVM, there is now a need to support both guest
and host VMID invalidations issued from guest context.
Replace the __tlb_switch_to_{guest,host}() functions with a more general
{enter,exit}_vmid_context() implementation which supports being invoked
from guest context and acts as a no-op if the target context matches the
running context.
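As a hedged illustration of the resulting call pattern (the wrapper function below is hypothetical; __kvm_tlb_flush_vmid() and host_mmu are taken from the diff), a handler running while a guest vCPU is loaded can now invalidate the host's VMID directly: enter_vmid_context() temporarily loads the host stage-2 around the TLBI and exit_vmid_context() restores the guest's, and both act as no-ops when the target already matches the running context:

	/* Hypothetical guest-context caller, e.g. servicing an unshare hypercall. */
	static void example_unshare_tlb_flush(void)
	{
		/*
		 * Target the host's stage-2 MMU while a guest is running;
		 * the VMID switch (or no-op) happens inside the helpers.
		 */
		__kvm_tlb_flush_vmid(&host_mmu.arch.mmu);
	}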
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Will Deacon <willdeacon@google.com>
Bug: 233587962
Change-Id: I92c6f48eb4c4b6286b930c2f0cda245bccc1927b
Signed-off-by: Quentin Perret <qperret@google.com>
commit 47318559bc
parent 01803a8ee1

1 changed file with 78 additions and 18 deletions
@@ -11,26 +11,62 @@
 #include <nvhe/mem_protect.h>
 
 struct tlb_inv_context {
-	u64		tcr;
+	struct kvm_s2_mmu	*mmu;
+	u64			tcr;
+	u64			sctlr;
 };
 
-static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
-				  struct tlb_inv_context *cxt)
+static void enter_vmid_context(struct kvm_s2_mmu *mmu,
+			       struct tlb_inv_context *cxt)
 {
+	struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_vcpu *vcpu;
+
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	vcpu = host_ctxt->__hyp_running_vcpu;
+	cxt->mmu = NULL;
+
+	/*
+	 * If we're already in the desired context, then there's nothing
+	 * to do.
+	 */
+	if (vcpu) {
+		if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
+			return;
+	} else if (mmu == host_s2_mmu) {
+		return;
+	}
+
+	cxt->mmu = mmu;
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		u64 val;
 
 		/*
 		 * For CPUs that are affected by ARM 1319367, we need to
-		 * avoid a host Stage-1 walk while we have the guest's
-		 * VMID set in the VTTBR in order to invalidate TLBs.
-		 * We're guaranteed that the S1 MMU is enabled, so we can
-		 * simply set the EPD bits to avoid any further TLB fill.
+		 * avoid a Stage-1 walk with the old VMID while we have
+		 * the new VMID set in the VTTBR in order to invalidate TLBs.
+		 * We're guaranteed that the host S1 MMU is enabled, so
+		 * we can simply set the EPD bits to avoid any further
+		 * TLB fill. For guests, we ensure that the S1 MMU is
+		 * temporarily enabled in the next context.
 		 */
 		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
 		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
 		write_sysreg_el1(val, SYS_TCR);
 		isb();
+
+		if (vcpu) {
+			val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
+			if (!(val & SCTLR_ELx_M)) {
+				val |= SCTLR_ELx_M;
+				write_sysreg_el1(val, SYS_SCTLR);
+				isb();
+			}
+		} else {
+			/* The host S1 MMU is always enabled. */
+			cxt->sctlr = SCTLR_ELx_M;
+		}
 	}
 
 	/*
@@ -39,20 +75,44 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 	 * ensuring that we always have an ISB, but not two ISBs back
 	 * to back.
 	 */
-	__load_stage2(mmu, kern_hyp_va(mmu->arch));
+	if (vcpu)
+		__load_host_stage2();
+	else
+		__load_stage2(mmu, kern_hyp_va(mmu->arch));
+
 	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
+static void exit_vmid_context(struct tlb_inv_context *cxt)
 {
-	__load_host_stage2();
+	struct kvm_s2_mmu *mmu = cxt->mmu;
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_vcpu *vcpu;
+
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	vcpu = host_ctxt->__hyp_running_vcpu;
+
+	if (!mmu)
+		return;
+
+	if (vcpu)
+		__load_stage2(mmu, kern_hyp_va(mmu->arch));
+	else
+		__load_host_stage2();
 
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
-		/* Ensure write of the host VMID */
+		/* Ensure write of the old VMID */
 		isb();
-		/* Restore the host's TCR_EL1 */
+
+		if (!(cxt->sctlr & SCTLR_ELx_M)) {
+			write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
+			isb();
+		}
+
 		write_sysreg_el1(cxt->tcr, SYS_TCR);
 	}
+
+	cxt->mmu = NULL;
 }
 
 void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
@@ -63,7 +123,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	dsb(ishst);
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt);
+	enter_vmid_context(mmu, &cxt);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -106,7 +166,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	if (icache_is_vpipt())
 		icache_inval_all_pou();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
@@ -116,13 +176,13 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 	dsb(ishst);
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt);
+	enter_vmid_context(mmu, &cxt);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
@@ -130,14 +190,14 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt);
+	enter_vmid_context(mmu, &cxt);
 
 	__tlbi(vmalle1);
 	asm volatile("ic iallu");
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_vm_context(void)