diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 86cd64130328..4ba504f5f4bd 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -2008,19 +2008,6 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
 	return ret;
 }
 
-static int restrict_host_page_perms(u64 addr, kvm_pte_t pte, u32 level, enum kvm_pgtable_prot prot)
-{
-	int ret = 0;
-
-	/* XXX: optimize ... */
-	if (kvm_pte_valid(pte) && (level == KVM_PGTABLE_MAX_LEVELS - 1))
-		ret = kvm_pgtable_stage2_unmap(&host_mmu.pgt, addr, PAGE_SIZE);
-	if (!ret)
-		ret = host_stage2_idmap_locked(addr, PAGE_SIZE, prot, false);
-
-	return ret;
-}
-
 #define MODULE_PROT_ALLOWLIST (KVM_PGTABLE_PROT_RWX |	\
 			       KVM_PGTABLE_PROT_DEVICE |\
 			       KVM_PGTABLE_PROT_NC |	\
@@ -2065,12 +2052,12 @@ int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
 	}
 
 update:
-	if (prot == default_host_prot(!!page))
-		ret = host_stage2_set_owner_locked(addr, PAGE_SIZE, PKVM_ID_HOST);
-	else if (!prot)
-		ret = host_stage2_set_owner_locked(addr, PAGE_SIZE, PKVM_ID_PROTECTED);
-	else
-		ret = restrict_host_page_perms(addr, pte, level, prot);
+	if (!prot) {
+		ret = host_stage2_set_owner_locked(addr, PAGE_SIZE,
+						   PKVM_ID_PROTECTED);
+	} else {
+		ret = host_stage2_idmap_locked(addr, PAGE_SIZE, prot, false);
+	}
 
 	if (ret || !page)
 		goto unlock;
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 2d11455aabe8..b9140293da7d 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -645,8 +645,13 @@ enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
 	return prot;
 }
 
-static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
+static bool stage2_pte_needs_update(struct kvm_pgtable *pgt,
+				    kvm_pte_t old, kvm_pte_t new)
 {
+	/* The filter logic below applies only to guest stage-2 entries. */
+	if (pgt->flags & KVM_PGTABLE_S2_IDMAP)
+		return true;
+
 	if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
 		return true;
 
@@ -715,12 +720,15 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
 		new = data->annotation;
 
 	/*
-	 * Skip updating the PTE if we are trying to recreate the exact
-	 * same mapping or only change the access permissions. Instead,
-	 * the vCPU will exit one more time from guest if still needed
-	 * and then go through the path of relaxing permissions.
+	 * Skip updating a guest PTE if we are trying to recreate the exact
+	 * same mapping or change only the access permissions. Instead,
+	 * the vCPU will exit one more time from the guest if still needed
+	 * and then go through the path of relaxing permissions. This applies
+	 * only to guest PTEs; host PTEs are updated unconditionally. The
+	 * host cannot livelock because the abort handler has already done
+	 * its checks before calling here.
 	 */
-	if (!stage2_pte_needs_update(old, new))
+	if (!stage2_pte_needs_update(pgt, old, new))
 		return -EAGAIN;
 
 	if (pte_ops->pte_is_counted_cb(old, level))
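
For illustration only, and not part of the patch above: a minimal, self-contained sketch of the behaviour the pgtable.c hunks introduce. The flag value, valid bit, struct layout and helper names below are simplified stand-ins, not the kernel's real definitions, and the real stage2_pte_needs_update() performs further comparisons on the PTE contents that are elided here. The point shown is that with KVM_PGTABLE_S2_IDMAP set (the host's identity-mapped stage-2) the needs-update filter is bypassed and the walker always rewrites the entry, whereas a guest valid-to-valid change still short-circuits and is handled via permission relaxation.

/* Illustrative stand-ins only -- not the kernel definitions. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t kvm_pte_t;

#define KVM_PGTABLE_S2_IDMAP	(1U << 0)	/* stand-in flag value */
#define PTE_VALID		(1ULL << 0)	/* stand-in valid bit */

struct kvm_pgtable {
	unsigned int flags;
};

static bool pte_valid(kvm_pte_t pte)
{
	return pte & PTE_VALID;
}

/*
 * Mirrors the patched check: host (idmap) stage-2 tables always report
 * that the PTE needs updating; guest tables keep the old filter, so a
 * valid->valid change is skipped here and relaxed on a later exit.
 * (The real function also compares the PTEs beyond the permission bits;
 * that part is elided in this sketch.)
 */
static bool needs_update(const struct kvm_pgtable *pgt, kvm_pte_t old, kvm_pte_t new)
{
	if (pgt->flags & KVM_PGTABLE_S2_IDMAP)
		return true;

	if (!pte_valid(old) || !pte_valid(new))
		return true;

	return false;
}

int main(void)
{
	struct kvm_pgtable host  = { .flags = KVM_PGTABLE_S2_IDMAP };
	struct kvm_pgtable guest = { .flags = 0 };
	kvm_pte_t old = PTE_VALID, new = PTE_VALID;

	printf("host needs update:  %d\n", needs_update(&host, old, new));  /* 1 */
	printf("guest needs update: %d\n", needs_update(&guest, old, new)); /* 0 */
	return 0;
}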