ANDROID: KVM: arm64: Donate IOMMU regions to pKVM

The MMIO register ranges of the IOMMUs controlled by the hypervisor are
currently unmapped from the host stage-2, and we rely on the host abort
path to not map them back by accident. However, this approach becomes
increasingly difficult to maintain as we introduce support for donating
MMIO regions and not just memory -- nothing prevents the host from
donating a protected MMIO register range to another entity, for example.

Now that MMIO donations are possible, let's use the proper
host-donate-hyp machinery to implement this. As a nice side effect,
this guarantees the host stage-2 page-table is annotated with hyp
ownership for those IOMMU regions, which ensures the core range
alignment feature in the host mem abort path will do the right thing
without requiring a second pass in the IOMMU code. This also turns the
host stage-2 PTEs into "non-default" entries, hence avoiding issues
with the coalescing code going forward.

Bug: 264070847
Change-Id: I1fad1b1be36f3b654190a912617e780141945a8f
Signed-off-by: Quentin Perret <qperret@google.com>
Authored by Quentin Perret on 2023-04-24 10:45:32 +00:00, committed by Carlos Llamas
parent 23b62ec342
commit 5136a28ab6
4 changed files with 16 additions and 61 deletions
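
In short: instead of carving the device's MMIO range out of the host stage-2 by hand and then building a private EL2 mapping for it, the registration path now donates the (page-aligned) range to the hypervisor. A condensed before/after sketch of that path, using only helper names that appear in the hunks below (all surrounding code elided):

        /* Before: unmap from host stage-2, then create a private EL2 mapping. */
        ret = host_stage2_unmap_reg_locked(dev_pa, dev_size);
        ret = __pkvm_create_private_mapping(dev_pa, dev_size, PAGE_HYP_DEVICE,
                                            (unsigned long *)(&dev->va));

        /* After: donate the range to hyp. The host stage-2 entries get
         * annotated with hyp ownership, and the hyp VA is simply the
         * hyp linear-map alias of the device PA. */
        dev->va = hyp_phys_to_virt(dev_pa);
        ret = __pkvm_host_donate_hyp_locked(hyp_phys_to_pfn(dev_pa),
                                            PAGE_ALIGN(dev_size) >> PAGE_SHIFT);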

File 1 of 4

@@ -93,8 +93,6 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
 int __pkvm_iommu_pm_notify(unsigned long dev_id,
                            enum pkvm_iommu_pm_event event);
 int __pkvm_iommu_finalize(int err);
-int pkvm_iommu_host_stage2_adjust_range(phys_addr_t addr, phys_addr_t *start,
-                                        phys_addr_t *end);
 bool pkvm_iommu_host_dabt_handler(struct kvm_cpu_context *host_ctxt, u32 esr,
                                   phys_addr_t fault_pa);
 void pkvm_iommu_host_stage2_idmap(phys_addr_t start, phys_addr_t end,

File 2 of 4

@@ -72,6 +72,7 @@ int __pkvm_host_share_hyp(u64 pfn);
 int __pkvm_host_unshare_hyp(u64 pfn);
 int __pkvm_host_reclaim_page(struct pkvm_hyp_vm *vm, u64 pfn, u64 ipa);
 int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
+int __pkvm_host_donate_hyp_locked(u64 pfn, u64 nr_pages);
 int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
 int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu);
 int __pkvm_host_donate_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu);

File 3 of 4

@@ -392,6 +392,7 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
                 .id = dev_id,
                 .ops = drv->ops,
                 .pa = dev_pa,
+                .va = hyp_phys_to_virt(dev_pa),
                 .size = dev_size,
                 .flags = flags,
         };
@@ -421,22 +422,11 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
                 goto out_free;
         }
-        /*
-         * Unmap the device's MMIO range from host stage-2. If registration
-         * is successful, future attempts to re-map will be blocked by
-         * pkvm_iommu_host_stage2_adjust_range.
-         */
-        ret = host_stage2_unmap_reg_locked(dev_pa, dev_size);
+        ret = __pkvm_host_donate_hyp_locked(hyp_phys_to_pfn(dev_pa),
+                                            PAGE_ALIGN(dev_size) >> PAGE_SHIFT);
         if (ret)
                 goto out_free;
-        /* Create EL2 mapping for the device. */
-        ret = __pkvm_create_private_mapping(dev_pa, dev_size,
-                                            PAGE_HYP_DEVICE, (unsigned long *)(&dev->va));
-        if (ret){
-                goto out_free;
-        }
         /* Register device and prevent host from mapping the MMIO range. */
         list_add_tail(&dev->list, &iommu_list);
         if (dev->parent)
@@ -495,39 +485,6 @@ int __pkvm_iommu_pm_notify(unsigned long dev_id, enum pkvm_iommu_pm_event event)
         return ret;
 }
-/*
- * Check host memory access against IOMMUs' MMIO regions.
- * Returns -EPERM if the address is within the bounds of a registered device.
- * Otherwise returns zero and adjusts boundaries of the new mapping to avoid
- * MMIO regions of registered IOMMUs.
- */
-int pkvm_iommu_host_stage2_adjust_range(phys_addr_t addr, phys_addr_t *start,
-                                        phys_addr_t *end)
-{
-        struct pkvm_iommu *dev;
-        phys_addr_t new_start = *start;
-        phys_addr_t new_end = *end;
-        phys_addr_t dev_start, dev_end;
-        assert_host_component_locked();
-        list_for_each_entry(dev, &iommu_list, list) {
-                dev_start = dev->pa;
-                dev_end = dev_start + dev->size;
-                if (addr < dev_start)
-                        new_end = min(new_end, dev_start);
-                else if (addr >= dev_end)
-                        new_start = max(new_start, dev_end);
-                else
-                        return -EPERM;
-        }
-        *start = new_start;
-        *end = new_end;
-        return 0;
-}
 bool pkvm_iommu_host_dabt_handler(struct kvm_cpu_context *host_ctxt, u32 esr,
                                   phys_addr_t pa)
 {

File 4 of 4

@@ -789,17 +789,6 @@ static int host_stage2_idmap(struct kvm_vcpu_fault_info *fault, u64 addr)
                 }
         }
-        /*
-         * Adjust against IOMMU devices first. host_stage2_adjust_range() should
-         * be called last for proper alignment.
-         */
-        if (!is_memory) {
-                ret = pkvm_iommu_host_stage2_adjust_range(addr, &range.start,
-                                                          &range.end);
-                if (ret)
-                        return ret;
-        }
         ret = host_stage2_adjust_range(addr, &range, level);
         if (ret)
                 return ret;
@@ -1922,6 +1911,17 @@ int __pkvm_host_unshare_hyp(u64 pfn)
 }
 int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
 {
+        int ret;
+        host_lock_component();
+        ret = __pkvm_host_donate_hyp_locked(pfn, nr_pages);
+        host_unlock_component();
+        return ret;
+}
+int __pkvm_host_donate_hyp_locked(u64 pfn, u64 nr_pages)
+{
         int ret;
         u64 host_addr = hyp_pfn_to_phys(pfn);
@@ -1942,13 +1942,12 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
                 },
         };
-        host_lock_component();
+        hyp_assert_lock_held(&host_mmu.lock);
         hyp_lock_component();
         ret = do_donate(&donation);
         hyp_unlock_component();
-        host_unlock_component();
         return ret;
 }
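
The last two hunks follow the usual locked/unlocked helper split: __pkvm_host_donate_hyp() now only takes the host component lock around the new __pkvm_host_donate_hyp_locked(), which merely asserts that host_mmu.lock is already held. A rough usage sketch, assuming a caller shaped like the IOMMU registration path above (only the donate and lock helpers come from this diff, the surrounding code is illustrative):

        /* Caller that does not hold the host MMU lock: use the wrapper. */
        ret = __pkvm_host_donate_hyp(pfn, nr_pages);

        /* Caller already inside a host_lock_component() critical section,
         * e.g. __pkvm_iommu_register(): call the _locked variant directly
         * so the lock is not taken twice. */
        host_lock_component();
        ret = __pkvm_host_donate_hyp_locked(pfn, nr_pages);
        /* ... more work under the same critical section ... */
        host_unlock_component();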