ANDROID: KVM: arm64: Introduce module_change_host_prot_range

This allows protection attributes to be changed for a range of
pages via a single module API call.

The original API call modifying a single page is now implemented
as a shim on top of the new range-based call.

The ABI STG (symbol-type graph) representation is also fixed up:

type 'struct pkvm_module_ops' changed
  member 'union { int(* host_stage2_mod_prot_range)(u64, enum kvm_pgtable_prot, u64); struct { u64 android_kabi_reserved1; }; union { }; }' was added
  member 'u64 android_kabi_reserved1' was removed

Bug: 308373293
Change-Id: I6fbb2e0b325aa972148f48746565dcc10d74edaf
Signed-off-by: Keir Fraser <keirf@google.com>
This commit is contained in:
Keir Fraser 2023-11-02 16:26:11 +00:00
parent fd720ebc6a
commit fbc707442c
5 changed files with 82 additions and 23 deletions

View file

@ -12283,6 +12283,11 @@ pointer_reference {
kind: POINTER
pointee_type_id: 0xb94739b9
}
pointer_reference {
id: 0x24c218d7
kind: POINTER
pointee_type_id: 0xb94885c2
}
pointer_reference {
id: 0x24c6c7eb
kind: POINTER
@ -40287,6 +40292,11 @@ member {
type_id: 0x797868f8
offset: 32
}
member {
id: 0x3dbb0f88
type_id: 0x79c25039
offset: 2048
}
member {
id: 0x3dbd80ff
type_id: 0x79d85976
@ -100848,6 +100858,11 @@ member {
type_id: 0x24cb3ae4
offset: 896
}
member {
id: 0xbcc50199
name: "host_stage2_mod_prot_range"
type_id: 0x24c218d7
}
member {
id: 0xedc7b540
name: "host_status"
@ -215152,6 +215167,16 @@ struct_union {
member_id: 0x3bfa35f3
}
}
struct_union {
id: 0x79c25039
kind: UNION
definition {
bytesize: 8
member_id: 0xbcc50199
member_id: 0x27000c61
member_id: 0x36752b74
}
}
struct_union {
id: 0x79d85976
kind: UNION
@ -247063,7 +247088,7 @@ struct_union {
member_id: 0x636da10f
member_id: 0x6f066e7f
member_id: 0x3afd0925
member_id: 0x2d0812b0
member_id: 0x3dbb0f88
member_id: 0x637607e0
member_id: 0xac894cc9
member_id: 0xe0f63db8
@ -327601,6 +327626,13 @@ function {
parameter_id: 0x18bd6530
parameter_id: 0x310ec01d
}
function {
id: 0xb94885c2
return_type_id: 0x6720d32f
parameter_id: 0x92233392
parameter_id: 0x1908b154
parameter_id: 0x92233392
}
function {
id: 0xb94d0c8b
return_type_id: 0x06835e9c

View file

@ -153,7 +153,8 @@ struct pkvm_module_ops {
void* (*hyp_va)(phys_addr_t phys);
unsigned long (*kern_hyp_va)(unsigned long x);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_USE(1, int (*host_stage2_mod_prot_range)(u64 pfn, enum kvm_pgtable_prot prot, u64 nr_pages));
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);

View file

@ -104,6 +104,7 @@ int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
struct kvm_hyp_memcache *host_mc);
int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot);
int module_change_host_page_prot_range(u64 pfn, enum kvm_pgtable_prot prot, u64 nr_pages);
void destroy_hyp_vm_pgt(struct pkvm_hyp_vm *vm);
void drain_hyp_pool(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);

View file

@ -2013,56 +2013,75 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
KVM_PGTABLE_PROT_NC | \
KVM_PGTABLE_PROT_PXN | \
KVM_PGTABLE_PROT_UXN)
int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
int module_change_host_page_prot_range(u64 pfn, enum kvm_pgtable_prot prot, u64 nr_pages)
{
u64 addr = hyp_pfn_to_phys(pfn);
u64 i, addr = hyp_pfn_to_phys(pfn);
u64 end = addr + nr_pages * PAGE_SIZE;
struct hyp_page *page = NULL;
kvm_pte_t pte;
u32 level;
struct kvm_mem_range range;
bool is_mmio;
int ret;
if ((prot & MODULE_PROT_ALLOWLIST) != prot)
return -EINVAL;
is_mmio = !find_mem_range(addr, &range);
if (end > range.end) {
/* Specified range not in a single mmio or memory block. */
return -EPERM;
}
host_lock_component();
ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
if (ret)
goto unlock;
/*
* There is no hyp_vmemmap covering MMIO regions, which makes tracking
* of module-owned MMIO regions hard, so we trust the modules not to
* mess things up.
*/
if (!addr_is_memory(addr))
if (is_mmio)
goto update;
ret = -EPERM;
/* Range is memory: we can track module ownership. */
page = hyp_phys_to_page(addr);
/*
* Modules can only modify pages they already own, and pristine host
* pages.
* pages. The entire range must be consistently one or the other.
*/
if (!(page->flags & MODULE_OWNED_PAGE) &&
(host_get_page_state(pte, addr) != PKVM_PAGE_OWNED))
goto unlock;
if (page->flags & MODULE_OWNED_PAGE) {
/* The entire range must be module-owned. */
ret = -EPERM;
for (i = 1; i < nr_pages; i++) {
if (!(page[i].flags & MODULE_OWNED_PAGE))
goto unlock;
}
} else {
/* The entire range must be pristine. */
ret = __host_check_page_state_range(
addr, nr_pages << PAGE_SHIFT, PKVM_PAGE_OWNED);
if (ret)
goto unlock;
}
update:
if (!prot) {
ret = host_stage2_set_owner_locked(addr, PAGE_SIZE,
PKVM_ID_PROTECTED);
ret = host_stage2_set_owner_locked(
addr, nr_pages << PAGE_SHIFT, PKVM_ID_PROTECTED);
} else {
ret = host_stage2_idmap_locked(addr, PAGE_SIZE, prot, false);
ret = host_stage2_idmap_locked(
addr, nr_pages << PAGE_SHIFT, prot, false);
}
if (ret || !page)
if (WARN_ON(ret) || !page)
goto unlock;
if (prot != KVM_PGTABLE_PROT_RWX)
hyp_phys_to_page(addr)->flags |= MODULE_OWNED_PAGE;
else
hyp_phys_to_page(addr)->flags &= ~MODULE_OWNED_PAGE;
for (i = 0; i < nr_pages; i++) {
if (prot != KVM_PGTABLE_PROT_RWX)
page[i].flags |= MODULE_OWNED_PAGE;
else
page[i].flags &= ~MODULE_OWNED_PAGE;
}
unlock:
host_unlock_component();
@ -2070,6 +2089,11 @@ unlock:
return ret;
}
int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
{
return module_change_host_page_prot_range(pfn, prot, 1);
}
int hyp_pin_shared_mem(void *from, void *to)
{
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);

View file

@ -115,6 +115,7 @@ const struct pkvm_module_ops module_ops = {
.hyp_pa = hyp_virt_to_phys,
.hyp_va = hyp_phys_to_virt,
.kern_hyp_va = __kern_hyp_va,
.host_stage2_mod_prot_range = module_change_host_page_prot_range,
};
int __pkvm_init_module(void *module_init)