vdpa_sim: add support for user VA
The new "use_va" module parameter (default: true) is used in vdpa_alloc_device() to inform the vDPA framework that the device supports VA. vringh is initialized to use VA only when "use_va" is true and the user's mm has been bound. So, only when the bus supports user VA (e.g. vhost-vdpa). vdpasim_mm_work_fn work is used to serialize the binding to a new address space when the .bind_mm callback is invoked, and unbinding when the .unbind_mm callback is invoked. Call mmget_not_zero()/kthread_use_mm() inside the worker function to pin the address space only as long as needed, following the documentation of mmget() in include/linux/sched/mm.h: * Never use this function to pin this address space for an * unbounded/indefinite amount of time. Acked-by: Jason Wang <jasowang@redhat.com> Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> Message-Id: <20230404131734.45943-1-sgarzare@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
commit 4bb94d2de2
parent d7621c28fc
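For context, the pin-only-while-needed pattern the message cites looks roughly like the sketch below (not part of this patch; run_with_user_mm is a hypothetical helper). The worker takes a short-lived reference with mmget_not_zero(), which fails if the owning task has already exited, adopts the mm so the kthread can access user VA, and drops both again as soon as the work is done:

	/* Hypothetical helper illustrating the mmget()/kthread_use_mm() pattern. */
	#include <linux/sched/mm.h>
	#include <linux/kthread.h>

	static void run_with_user_mm(struct mm_struct *mm)
	{
		if (!mmget_not_zero(mm))	/* owner's mm is already gone */
			return;

		kthread_use_mm(mm);		/* kthread may now touch user VA */
		/* ... do the actual work against user addresses ... */
		kthread_unuse_mm(mm);

		mmput(mm);			/* release the short-lived pin */
	}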
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -35,10 +35,44 @@ module_param(max_iotlb_entries, int, 0444);
 MODULE_PARM_DESC(max_iotlb_entries,
 		 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");
 
+static bool use_va = true;
+module_param(use_va, bool, 0444);
+MODULE_PARM_DESC(use_va, "Enable/disable the device's ability to use VA");
+
 #define VDPASIM_QUEUE_ALIGN PAGE_SIZE
 #define VDPASIM_QUEUE_MAX 256
 #define VDPASIM_VENDOR_ID 0
 
+struct vdpasim_mm_work {
+	struct kthread_work work;
+	struct vdpasim *vdpasim;
+	struct mm_struct *mm_to_bind;
+	int ret;
+};
+
+static void vdpasim_mm_work_fn(struct kthread_work *work)
+{
+	struct vdpasim_mm_work *mm_work =
+		container_of(work, struct vdpasim_mm_work, work);
+	struct vdpasim *vdpasim = mm_work->vdpasim;
+
+	mm_work->ret = 0;
+
+	//TODO: should we attach the cgroup of the mm owner?
+	vdpasim->mm_bound = mm_work->mm_to_bind;
+}
+
+static void vdpasim_worker_change_mm_sync(struct vdpasim *vdpasim,
+					  struct vdpasim_mm_work *mm_work)
+{
+	struct kthread_work *work = &mm_work->work;
+
+	kthread_init_work(work, vdpasim_mm_work_fn);
+	kthread_queue_work(vdpasim->worker, work);
+
+	kthread_flush_work(work);
+}
+
 static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
 {
 	return container_of(vdpa, struct vdpasim, vdpa);
@@ -59,13 +93,20 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
 {
 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
 	uint16_t last_avail_idx = vq->vring.last_avail_idx;
+	struct vring_desc *desc = (struct vring_desc *)
+				  (uintptr_t)vq->desc_addr;
+	struct vring_avail *avail = (struct vring_avail *)
+				    (uintptr_t)vq->driver_addr;
+	struct vring_used *used = (struct vring_used *)
+				  (uintptr_t)vq->device_addr;
 
-	vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, true,
-			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
-			  (struct vring_avail *)
-			  (uintptr_t)vq->driver_addr,
-			  (struct vring_used *)
-			  (uintptr_t)vq->device_addr);
+	if (use_va && vdpasim->mm_bound) {
+		vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num,
+				     true, desc, avail, used);
+	} else {
+		vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num,
+				  true, desc, avail, used);
+	}
 
 	vq->vring.last_avail_idx = last_avail_idx;
 
@@ -130,8 +171,20 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops;
 static void vdpasim_work_fn(struct kthread_work *work)
 {
 	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
+	struct mm_struct *mm = vdpasim->mm_bound;
+
+	if (use_va && mm) {
+		if (!mmget_not_zero(mm))
+			return;
+		kthread_use_mm(mm);
+	}
 
 	vdpasim->dev_attr.work_fn(vdpasim);
 
+	if (use_va && mm) {
+		kthread_unuse_mm(mm);
+		mmput(mm);
+	}
 }
 
 struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
@@ -162,7 +215,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
 	vdpa = __vdpa_alloc_device(NULL, ops,
 				   dev_attr->ngroups, dev_attr->nas,
 				   dev_attr->alloc_size,
-				   dev_attr->name, false);
+				   dev_attr->name, use_va);
 	if (IS_ERR(vdpa)) {
 		ret = PTR_ERR(vdpa);
 		goto err_alloc;
@@ -582,6 +635,30 @@ err:
 	return ret;
 }
 
+static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
+{
+	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+	struct vdpasim_mm_work mm_work;
+
+	mm_work.vdpasim = vdpasim;
+	mm_work.mm_to_bind = mm;
+
+	vdpasim_worker_change_mm_sync(vdpasim, &mm_work);
+
+	return mm_work.ret;
+}
+
+static void vdpasim_unbind_mm(struct vdpa_device *vdpa)
+{
+	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+	struct vdpasim_mm_work mm_work;
+
+	mm_work.vdpasim = vdpasim;
+	mm_work.mm_to_bind = NULL;
+
+	vdpasim_worker_change_mm_sync(vdpasim, &mm_work);
+}
+
 static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
 			   u64 iova, u64 size,
 			   u64 pa, u32 perm, void *opaque)
@@ -678,6 +755,8 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
 	.set_group_asid         = vdpasim_set_group_asid,
 	.dma_map                = vdpasim_dma_map,
 	.dma_unmap              = vdpasim_dma_unmap,
+	.bind_mm                = vdpasim_bind_mm,
+	.unbind_mm              = vdpasim_unbind_mm,
 	.free                   = vdpasim_free,
 };
 
@@ -712,6 +791,8 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
 	.get_iova_range         = vdpasim_get_iova_range,
 	.set_group_asid         = vdpasim_set_group_asid,
 	.set_map                = vdpasim_set_map,
+	.bind_mm                = vdpasim_bind_mm,
+	.unbind_mm              = vdpasim_unbind_mm,
 	.free                   = vdpasim_free,
 };
 
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -59,6 +59,7 @@ struct vdpasim {
 	struct vdpasim_virtqueue *vqs;
 	struct kthread_worker *worker;
 	struct kthread_work work;
+	struct mm_struct *mm_bound;
 	struct vdpasim_dev_attr dev_attr;
 	/* mutex to synchronize virtqueue state */
 	struct mutex mutex;
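Usage note: because "use_va" is registered with mode 0444, it can only be chosen at module load time (e.g. vdpa_sim.use_va=0 on the kernel command line, or the equivalent modprobe option, assuming the module is named vdpa_sim) and is read-only afterwards through /sys/module/vdpa_sim/parameters/use_va. With the default of true the simulator merely advertises VA support; the VA path in vdpasim_queue_ready()/vdpasim_work_fn() is taken only once a bus that supports user VA (e.g. vhost-vdpa) has actually bound a mm via .bind_mm.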