FROMLIST: virt: geniezone: Add ioeventfd support
Ioeventfd leverages eventfd to provide an asynchronous notification mechanism for the VMM. The VMM can register an mmio address and bind it to an eventfd; once an mmio trap occurs on the registered region, the corresponding eventfd is notified.

Change-Id: Iff6bb7dd8ba42d08813e531ab40629492a1218bc
Signed-off-by: Yingshiuan Pan <yingshiuan.pan@mediatek.com>
Signed-off-by: Liju Chen <liju-clr.chen@mediatek.com>
Signed-off-by: Yi-De Wu <yi-de.wu@mediatek.com>
Bug: 280363874
Link: https://lore.kernel.org/lkml/20230727080005.14474-8-yi-de.wu@mediatek.com/
parent e73a5222e6
commit c26057e351
6 changed files with 355 additions and 2 deletions
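For illustration, a minimal userspace sketch of how a VMM might drive this interface; it is not part of the patch. GZVM_IOEVENTFD, struct gzvm_ioeventfd and the flag names come from the uapi change below, while the VM fd setup, the helper name and the guest physical address are assumptions made for the example (the uapi header is assumed to be installed as <linux/gzvm.h>).

/*
 * Illustrative only: bind an eventfd to a 4-byte guest mmio write of
 * value 1 at an arbitrarily chosen GPA. Assumes vm_fd was obtained
 * beforehand from the gzvm VM-creation ioctl (not shown in this patch).
 */
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/gzvm.h>

static int register_notify_eventfd(int vm_fd, uint64_t gpa)
{
	int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	struct gzvm_ioeventfd args;

	if (efd < 0)
		return -1;

	args = (struct gzvm_ioeventfd) {
		.addr = gpa,
		.len = 4,
		.fd = efd,
		.datamatch = 1,
		.flags = GZVM_IOEVENTFD_FLAG_DATAMATCH,
	};

	/* Kernel side lands in gzvm_ioeventfd() -> gzvm_assign_ioeventfd() */
	if (ioctl(vm_fd, GZVM_IOEVENTFD, &args) < 0)
		return -1;

	return efd;
}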
@@ -7,4 +7,5 @@
 GZVM_DIR ?= ../../../drivers/virt/geniezone
 
 gzvm-y := $(GZVM_DIR)/gzvm_main.o $(GZVM_DIR)/gzvm_vm.o \
-	 $(GZVM_DIR)/gzvm_vcpu.o $(GZVM_DIR)/gzvm_irqfd.o
+	 $(GZVM_DIR)/gzvm_vcpu.o $(GZVM_DIR)/gzvm_irqfd.o \
+	 $(GZVM_DIR)/gzvm_ioeventfd.o
drivers/virt/geniezone/gzvm_ioeventfd.c (new file, 273 lines)
@@ -0,0 +1,273 @@

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/syscalls.h>
#include <linux/gzvm.h>
#include <linux/gzvm_drv.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/slab.h>

struct gzvm_ioevent {
	struct list_head list;
	__u64 addr;
	__u32 len;
	struct eventfd_ctx *evt_ctx;
	__u64 datamatch;
	bool wildcard;
};

/**
 * ioeventfd_check_collision() - Check for collision; assumes gzvm->slots_lock is held.
 * @gzvm: Pointer to gzvm.
 * @p: Pointer to gzvm_ioevent.
 *
 * Return:
 * * true - collision found
 * * false - no collision
 */
static bool ioeventfd_check_collision(struct gzvm *gzvm, struct gzvm_ioevent *p)
{
	struct gzvm_ioevent *_p;

	list_for_each_entry(_p, &gzvm->ioevents, list)
		if (_p->addr == p->addr &&
		    (!_p->len || !p->len ||
		     (_p->len == p->len &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static void gzvm_ioevent_release(struct gzvm_ioevent *p)
{
	eventfd_ctx_put(p->evt_ctx);
	list_del(&p->list);
	kfree(p);
}

static bool gzvm_ioevent_in_range(struct gzvm_ioevent *p, __u64 addr, int len,
				  const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->len)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->len)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	WARN_ON_ONCE(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}

static int gzvm_deassign_ioeventfd(struct gzvm *gzvm,
				   struct gzvm_ioeventfd *args)
{
	struct gzvm_ioevent *p, *tmp;
	struct eventfd_ctx *evt_ctx;
	int ret = -ENOENT;
	bool wildcard;

	evt_ctx = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(evt_ctx))
		return PTR_ERR(evt_ctx);

	wildcard = !(args->flags & GZVM_IOEVENTFD_FLAG_DATAMATCH);

	mutex_lock(&gzvm->lock);

	list_for_each_entry_safe(p, tmp, &gzvm->ioevents, list) {
		if (p->evt_ctx != evt_ctx ||
		    p->addr != args->addr ||
		    p->len != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		gzvm_ioevent_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&gzvm->lock);

	/* acquired at the start of this function */
	eventfd_ctx_put(evt_ctx);

	return ret;
}

static int gzvm_assign_ioeventfd(struct gzvm *gzvm, struct gzvm_ioeventfd *args)
{
	struct eventfd_ctx *evt_ctx;
	struct gzvm_ioevent *evt;
	int ret;

	evt_ctx = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(evt_ctx))
		return PTR_ERR(evt_ctx);

	evt = kmalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt)
		return -ENOMEM;
	*evt = (struct gzvm_ioevent) {
		.addr = args->addr,
		.len = args->len,
		.evt_ctx = evt_ctx,
	};
	if (args->flags & GZVM_IOEVENTFD_FLAG_DATAMATCH) {
		evt->datamatch = args->datamatch;
		evt->wildcard = false;
	} else {
		evt->wildcard = true;
	}

	if (ioeventfd_check_collision(gzvm, evt)) {
		ret = -EEXIST;
		goto err_free;
	}

	mutex_lock(&gzvm->lock);
	list_add_tail(&evt->list, &gzvm->ioevents);
	mutex_unlock(&gzvm->lock);

	return 0;

err_free:
	kfree(evt);
	eventfd_ctx_put(evt_ctx);
	return ret;
}

/**
 * gzvm_ioeventfd_check_valid() - Check whether the user arguments are valid.
 * @args: Pointer to gzvm_ioeventfd.
 *
 * Return:
 * * true if user arguments are valid.
 * * false if user arguments are invalid.
 */
static bool gzvm_ioeventfd_check_valid(struct gzvm_ioeventfd *args)
{
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return false;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return false;

	/* check for extra flags that we don't understand */
	if (args->flags & ~GZVM_IOEVENTFD_VALID_FLAG_MASK)
		return false;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & GZVM_IOEVENTFD_FLAG_DATAMATCH))
		return false;

	/* gzvm does not support pio bus ioeventfd */
	if (args->flags & GZVM_IOEVENTFD_FLAG_PIO)
		return false;

	return true;
}

/**
 * gzvm_ioeventfd() - Register ioevent to ioevent list.
 * @gzvm: Pointer to gzvm.
 * @args: Pointer to gzvm_ioeventfd.
 *
 * Return:
 * * 0 - Success.
 * * Negative - Failure.
 */
int gzvm_ioeventfd(struct gzvm *gzvm, struct gzvm_ioeventfd *args)
{
	if (gzvm_ioeventfd_check_valid(args) == false)
		return -EINVAL;

	if (args->flags & GZVM_IOEVENTFD_FLAG_DEASSIGN)
		return gzvm_deassign_ioeventfd(gzvm, args);
	return gzvm_assign_ioeventfd(gzvm, args);
}

/**
 * gzvm_ioevent_write() - Traverse this VM's registered ioeventfds to see if
 *			  any needs notifying.
 * @vcpu: Pointer to vcpu.
 * @addr: mmio address.
 * @len: mmio size.
 * @val: Pointer to the written data.
 *
 * Return:
 * * true if this io is already sent to ioeventfd's listener.
 * * false if we cannot find any ioeventfd registering this mmio write.
 */
bool gzvm_ioevent_write(struct gzvm_vcpu *vcpu, __u64 addr, int len,
			const void *val)
{
	struct gzvm_ioevent *e;

	list_for_each_entry(e, &vcpu->gzvm->ioevents, list) {
		if (gzvm_ioevent_in_range(e, addr, len, val)) {
			eventfd_signal(e->evt_ctx, 1);
			return true;
		}
	}
	return false;
}

int gzvm_init_ioeventfd(struct gzvm *gzvm)
{
	INIT_LIST_HEAD(&gzvm->ioevents);

	return 0;
}
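A note on teardown, with a hedged sketch: gzvm_deassign_ioeventfd() above only releases an entry whose eventfd context, addr, len, wildcard/datamatch mode and (for datamatch entries) datamatch value all equal the arguments; otherwise it returns -ENOENT. A deassign call therefore repeats the original parameters and adds GZVM_IOEVENTFD_FLAG_DEASSIGN. The helper below is hypothetical, uses the same includes as the registration sketch after the commit message, and pairs with it.

/*
 * Illustrative only: undo the earlier registration. addr, len, fd,
 * datamatch and the DATAMATCH flag must match the original assignment
 * for gzvm_deassign_ioeventfd() to find the entry.
 */
static int unregister_notify_eventfd(int vm_fd, int efd, uint64_t gpa)
{
	struct gzvm_ioeventfd args = {
		.addr = gpa,
		.len = 4,
		.fd = efd,
		.datamatch = 1,
		.flags = GZVM_IOEVENTFD_FLAG_DATAMATCH |
			 GZVM_IOEVENTFD_FLAG_DEASSIGN,
	};

	return ioctl(vm_fd, GZVM_IOEVENTFD, &args);
}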
@@ -50,6 +50,30 @@ static long gzvm_vcpu_update_one_reg(struct gzvm_vcpu *vcpu,
 	return 0;
 }
 
+/**
+ * gzvm_vcpu_handle_mmio() - Handle mmio in kernel space.
+ * @vcpu: Pointer to vcpu.
+ *
+ * Return:
+ * * true - This mmio exit has been processed.
+ * * false - This mmio exit has not been processed and requires userspace.
+ */
+static bool gzvm_vcpu_handle_mmio(struct gzvm_vcpu *vcpu)
+{
+	__u64 addr;
+	__u32 len;
+	const void *val_ptr;
+
+	/* So far, we don't have in-kernel mmio read handler */
+	if (!vcpu->run->mmio.is_write)
+		return false;
+	addr = vcpu->run->mmio.phys_addr;
+	len = vcpu->run->mmio.size;
+	val_ptr = &vcpu->run->mmio.data;
+
+	return gzvm_ioevent_write(vcpu, addr, len, val_ptr);
+}
+
 /**
  * gzvm_vcpu_run() - Handle vcpu run ioctl, entry point to guest and exit
  *		     point from guest
@@ -81,7 +105,8 @@ static long gzvm_vcpu_run(struct gzvm_vcpu *vcpu, void * __user argp)
 
 	switch (exit_reason) {
 	case GZVM_EXIT_MMIO:
-		need_userspace = true;
+		if (!gzvm_vcpu_handle_mmio(vcpu))
+			need_userspace = true;
 		break;
 	/**
 	 * it's geniezone's responsibility to fill corresponding data
@@ -386,6 +386,16 @@ static long gzvm_vm_ioctl(struct file *filp, unsigned int ioctl,
 		ret = gzvm_irqfd(gzvm, &data);
 		break;
 	}
+	case GZVM_IOEVENTFD: {
+		struct gzvm_ioeventfd data;
+
+		if (copy_from_user(&data, argp, sizeof(data))) {
+			ret = -EFAULT;
+			goto out;
+		}
+		ret = gzvm_ioeventfd(gzvm, &data);
+		break;
+	}
 	case GZVM_ENABLE_CAP: {
 		struct gzvm_enable_cap cap;
 
@@ -462,6 +472,13 @@ static struct gzvm *gzvm_create_vm(unsigned long vm_type)
 		return ERR_PTR(ret);
 	}
 
+	ret = gzvm_init_ioeventfd(gzvm);
+	if (ret) {
+		pr_err("Failed to initialize ioeventfd\n");
+		kfree(gzvm);
+		return ERR_PTR(ret);
+	}
+
 	mutex_lock(&gzvm_list_lock);
 	list_add(&gzvm->vm_list, &gzvm_list);
 	mutex_unlock(&gzvm_list_lock);
@@ -6,6 +6,7 @@
 #ifndef __GZVM_DRV_H__
 #define __GZVM_DRV_H__
 
+#include <linux/eventfd.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/miscdevice.h>
@@ -90,6 +91,8 @@ struct gzvm {
 		struct mutex resampler_lock;
 	} irqfds;
 
+	struct list_head ioevents;
+
 	struct list_head vm_list;
 	u16 vm_id;
 
@@ -139,4 +142,13 @@ void gzvm_drv_irqfd_exit(void);
 int gzvm_vm_irqfd_init(struct gzvm *gzvm);
 void gzvm_vm_irqfd_release(struct gzvm *gzvm);
 
+int gzvm_init_ioeventfd(struct gzvm *gzvm);
+int gzvm_ioeventfd(struct gzvm *gzvm, struct gzvm_ioeventfd *args);
+bool gzvm_ioevent_write(struct gzvm_vcpu *vcpu, __u64 addr, int len,
+			const void *val);
+void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
+struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr);
+void add_wait_queue_priority(struct wait_queue_head *wq_head,
+			     struct wait_queue_entry *wq_entry);
+
 #endif /* __GZVM_DRV_H__ */
@@ -301,4 +301,29 @@ struct gzvm_irqfd {
 
 #define GZVM_IRQFD	_IOW(GZVM_IOC_MAGIC, 0x76, struct gzvm_irqfd)
 
+enum {
+	gzvm_ioeventfd_flag_nr_datamatch = 0,
+	gzvm_ioeventfd_flag_nr_pio = 1,
+	gzvm_ioeventfd_flag_nr_deassign = 2,
+	gzvm_ioeventfd_flag_nr_max,
+};
+
+#define GZVM_IOEVENTFD_FLAG_DATAMATCH	(1 << gzvm_ioeventfd_flag_nr_datamatch)
+#define GZVM_IOEVENTFD_FLAG_PIO		(1 << gzvm_ioeventfd_flag_nr_pio)
+#define GZVM_IOEVENTFD_FLAG_DEASSIGN	(1 << gzvm_ioeventfd_flag_nr_deassign)
+#define GZVM_IOEVENTFD_VALID_FLAG_MASK	((1 << gzvm_ioeventfd_flag_nr_max) - 1)
+
+struct gzvm_ioeventfd {
+	__u64 datamatch;
+	/* private: legal pio/mmio address */
+	__u64 addr;
+	/* private: 1, 2, 4, or 8 bytes; or 0 to ignore length */
+	__u32 len;
+	__s32 fd;
+	__u32 flags;
+	__u8  pad[36];
+};
+
+#define GZVM_IOEVENTFD	_IOW(GZVM_IOC_MAGIC, 0x79, struct gzvm_ioeventfd)
+
 #endif /* __GZVM_H__ */
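Finally, a minimal, assumed sketch of the consuming side in the VMM: once gzvm_ioevent_write() signals the eventfd on a matching guest mmio write, userspace can poll the fd and drain its 8-byte counter. The loop below is illustrative only and not part of this patch.

/*
 * Illustrative only: block until the guest kicks the registered region,
 * then drain the eventfd counter (the 8-byte read resets it to zero).
 */
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static void wait_for_guest_kick(int efd)
{
	struct pollfd pfd = { .fd = efd, .events = POLLIN };
	uint64_t cnt;

	while (poll(&pfd, 1, -1) > 0) {
		if (read(efd, &cnt, sizeof(cnt)) == sizeof(cnt))
			printf("guest kicked %llu time(s)\n",
			       (unsigned long long)cnt);
	}
}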