// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks Michael S. Tsirkin for the valuable comments and
 * suggestions.  And thanks to Cunming Liang and Zhihong Wang for all
 * their supports.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
		(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
		(1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
		(1ULL << VHOST_BACKEND_F_IOTLB_ASID),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

#define VHOST_VDPA_IOTLB_BUCKETS 16

struct vhost_vdpa_as {
	struct hlist_node hash_link;
	struct vhost_iotlb iotlb;
	u32 id;
};

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	u32 nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
	u32 batch_asid;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;
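
/*
 * IOTLB address space bookkeeping: each vhost_vdpa_as owns one
 * vhost_iotlb and is hashed by its ASID into the per-device as[]
 * table.  The helpers below translate between ASIDs, address space
 * entries and their IOTLBs.
 */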
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb, u64 start,
				   u64 last, u32 asid);

static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
{
	struct vhost_vdpa_as *as = container_of(iotlb, struct
						vhost_vdpa_as, iotlb);
	return as->id;
}

static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
{
	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
	struct vhost_vdpa_as *as;

	hlist_for_each_entry(as, head, hash_link)
		if (as->id == asid)
			return as;

	return NULL;
}

static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (!as)
		return NULL;

	return &as->iotlb;
}

static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
{
	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
	struct vhost_vdpa_as *as;

	if (asid_to_as(v, asid))
		return NULL;

	if (asid >= v->vdpa->nas)
		return NULL;

	as = kmalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	vhost_iotlb_init(&as->iotlb, 0, 0);
	as->id = asid;
	hlist_add_head(&as->hash_link, head);

	return as;
}

static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
						      u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (as)
		return as;

	return vhost_vdpa_alloc_as(v, asid);
}

static void vhost_vdpa_reset_map(struct vhost_vdpa *v, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->reset_map)
		ops->reset_map(vdpa, asid);
}

static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (!as)
		return -EINVAL;

	hlist_del(&as->hash_link);
	vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
	/*
	 * Devices with vendor specific IOMMU may need to restore
	 * iotlb to the initial or default state, which cannot be
	 * cleaned up in the all range unmap call above. Give them
	 * a chance to clean up or reset the map to the desired
	 * state.
	 */
	vhost_vdpa_reset_map(v, asid);
	kfree(as);

	return 0;
}

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}
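
/*
 * Register the virtqueue's call eventfd as an irq bypass producer so
 * the parent device's vq interrupt can be delivered to the guest
 * directly.  This is a best-effort optimization: it quietly does
 * nothing when the parent exposes no vq irq or no call eventfd is set.
 */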
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	if (irq < 0)
		return;

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret =  %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}
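
/*
 * Reset the device.  Unless userspace negotiated
 * VHOST_BACKEND_F_IOTLB_PERSIST, also ask the parent driver to clean
 * up its IOTLB mappings (VDPA_RESET_F_CLEAN_MAP) so the historical
 * "mappings are dropped across reset" behaviour is preserved.
 */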
static int _compat_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	u32 flags = 0;

	if (v->vdev.vqs) {
		flags |= !vhost_backend_has_feature(v->vdev.vqs[0],
						    VHOST_BACKEND_F_IOTLB_PERSIST) ?
			 VDPA_RESET_F_CLEAN_MAP : 0;
	}

	return vdpa_reset(vdpa, flags);
}

static int vhost_vdpa_reset(struct vhost_vdpa *v)
{
	v->in_batch = 0;
	return _compat_vdpa_reset(v);
}

static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!vdpa->use_va || !ops->bind_mm)
		return 0;

	return ops->bind_mm(vdpa, v->vdev.mm);
}

static void vhost_vdpa_unbind_mm(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!vdpa->use_va || !ops->unbind_mm)
		return;

	ops->unbind_mm(vdpa);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	u32 nvqs = v->nvqs;
	int ret;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless reset the
	 * status to 0.
	 */
	if (status != 0 && (status_old & ~status) != 0)
		return -EINVAL;

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	if (status == 0) {
		ret = _compat_vdpa_reset(v);
		if (ret)
			return ret;
	} else
		vdpa_set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	size_t size = vdpa->config->get_config_size(vdpa);

	if (c->len == 0 || c->off > size)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;

	buf = vmemdup_user(c->buf, config.len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	vdpa_set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->suspend;
}

static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->resume;
}

static bool vhost_vdpa_has_desc_group(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->get_vq_desc_group;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_device_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!ops->get_backend_features)
		return 0;
	else
		return ops->get_backend_features(vdpa);
}

/*
 * VHOST_BACKEND_F_IOTLB_PERSIST tells userspace that the vhost-vdpa
 * iotlb can be trusted to persist across vDPA reset.  Without it,
 * userspace cannot tell whether it runs on an older kernel where a
 * broken parent .reset implementation may silently drop all mappings,
 * so it has to proactively remove and restore every mapping around
 * each reset.  Offering the flag lets userspace (e.g. QEMU) safely
 * skip that unmap and remap cycle across a vhost stop/start.
 *
 * A backend may claim this bit when:
 * - the parent device works with the platform IOMMU,
 * - the parent has an on-chip IOMMU and implements .reset_map, or
 * - the parent has a vendor specific IOMMU whose mappings already
 *   persist and explicitly declares the backend feature.
 *
 * .reset_map is a precondition for the on-chip IOMMU case because it
 * is the only way to switch the iotlb back to its initial state (e.g.
 * the 1:1 passthrough mapping that virtio-vdpa expects) once
 * vhost-vdpa is gone.
 */
static bool vhost_vdpa_has_persistent_map(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return (!ops->set_map && !ops->dma_map) || ops->reset_map ||
	       vhost_vdpa_get_backend_features(v) & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_dev *d = &v->vdev;
	u64 actual_features;
	u64 features;
	int i;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	/* let the vqs know what has been configured */
	actual_features = ops->get_driver_features(vdpa);
	for (i = 0; i < d->nvqs; ++i) {
		struct vhost_virtqueue *vq = d->vqs[i];

		mutex_lock(&vq->mutex);
		vq->acked_features = actual_features;
		mutex_unlock(&vq->mutex);
	}

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}

static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 size;

	size = ops->get_config_size(vdpa);

	if (copy_to_user(argp, &size, sizeof(size)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
		return -EFAULT;

	return 0;
}

/* After a successful return of ioctl the device must not process more
 * virtqueue descriptors. The device can answer to read or writes of config
 * fields as if it were not suspended. In particular, writing to "queue_enable"
 * with a value of 1 will not make the device start processing buffers.
 */
static long vhost_vdpa_suspend(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!ops->suspend)
		return -EOPNOTSUPP;

	return ops->suspend(vdpa);
}

/* After a successful return of this ioctl the device resumes processing
 * virtqueue descriptors. The device becomes fully operational the same way it
 * was before it was suspended.
 */
static long vhost_vdpa_resume(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!ops->resume)
		return -EOPNOTSUPP;

	return ops->resume(vdpa);
}

static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_VDPA_GET_VRING_GROUP:
		if (!ops->get_vq_group)
			return -EOPNOTSUPP;
		s.index = idx;
		s.num = ops->get_vq_group(vdpa, idx);
		if (s.num >= vdpa->ngroups)
			return -EIO;
		else if (copy_to_user(argp, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	case VHOST_VDPA_GET_VRING_DESC_GROUP:
		if (!vhost_vdpa_has_desc_group(v))
			return -EOPNOTSUPP;
		s.index = idx;
		s.num = ops->get_vq_desc_group(vdpa, idx);
		if (s.num >= vdpa->ngroups)
			return -EIO;
		else if (copy_to_user(argp, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	case VHOST_VDPA_SET_GROUP_ASID:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		if (s.num >= vdpa->nas)
			return -EINVAL;
		if (!ops->set_group_asid)
			return -EOPNOTSUPP;
		return ops->set_group_asid(vdpa, idx, s.num);
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
			vq->last_avail_idx = vq_state.packed.last_avail_idx |
					     (vq_state.packed.last_avail_counter << 15);
			vq->last_used_idx = vq_state.packed.last_used_idx |
					    (vq_state.packed.last_used_counter << 15);
		} else {
			vq->last_avail_idx = vq_state.split.avail_index;
		}
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
			vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
			vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
			vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
			vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
		} else {
			vq_state.split.avail_index = vq->last_avail_idx;
		}
		r = ops->set_vq_state(vdpa, idx, &vq_state);
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
			cb.trigger = vq->call_ctx.ctx;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
			cb.trigger = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}
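
/*
 * Device-level ioctl dispatcher.  VHOST_SET_BACKEND_FEATURES is
 * validated and applied before taking the vhost_dev mutex; all other
 * requests are handled under it, falling back to the generic vhost
 * device and vring handlers for anything that is not vDPA specific.
 */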
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
				 BIT_ULL(VHOST_BACKEND_F_DESC_ASID) |
				 BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST) |
				 BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
				 BIT_ULL(VHOST_BACKEND_F_RESUME) |
				 BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
		     !vhost_vdpa_can_suspend(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
		     !vhost_vdpa_can_resume(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
		    !(features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)))
			return -EINVAL;
		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
		    !vhost_vdpa_has_desc_group(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST)) &&
		    !vhost_vdpa_has_persistent_map(v))
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_VDPA_GET_GROUP_NUM:
		if (copy_to_user(argp, &v->vdpa->ngroups,
				 sizeof(v->vdpa->ngroups)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_AS_NUM:
		if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
			r = -EFAULT;
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (vhost_vdpa_can_suspend(v))
			features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
		if (vhost_vdpa_can_resume(v))
			features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
		if (vhost_vdpa_has_desc_group(v))
			features |= BIT_ULL(VHOST_BACKEND_F_DESC_ASID);
		if (vhost_vdpa_has_persistent_map(v))
			features |= BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
		features |= vhost_vdpa_get_backend_features(v);
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG_SIZE:
		r = vhost_vdpa_get_config_size(v, argp);
		break;
	case VHOST_VDPA_GET_VQS_COUNT:
		r = vhost_vdpa_get_vqs_count(v, argp);
		break;
	case VHOST_VDPA_SUSPEND:
		r = vhost_vdpa_suspend(v);
		break;
	case VHOST_VDPA_RESUME:
		r = vhost_vdpa_resume(v);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	if (r)
		goto out;

	switch (cmd) {
	case VHOST_SET_OWNER:
		r = vhost_vdpa_bind_mm(v);
		if (r)
			vhost_dev_reset_owner(d, NULL);
		break;
	}
out:
	mutex_unlock(&d->mutex);
	return r;
}
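
/*
 * Tear down a single IOTLB mapping through the path that created it:
 * the parent's .dma_unmap, or the platform IOMMU domain when the
 * parent implements neither .dma_map nor .set_map.  Parents using
 * .set_map are resynchronized by the callers instead.
 */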
static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
				     struct vhost_iotlb_map *map, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, asid, map->start, map->size);
	} else if (ops->set_map == NULL) {
		iommu_unmap(v->domain, map->start, map->size);
	}
}

static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = PFN_DOWN(map->size);
		for (pfn = PFN_DOWN(map->addr);
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
		vhost_vdpa_general_unmap(v, map, asid);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{
	struct vhost_iotlb_map *map;
	struct vdpa_map_file *map_file;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		map_file = (struct vdpa_map_file *)map->opaque;
		fput(map_file->file);
		kfree(map_file);
		vhost_vdpa_general_unmap(v, map, asid);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb, u64 start,
				   u64 last, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (vdpa->use_va)
		return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);

	return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}
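
/*
 * Add one mapping to the IOTLB and propagate it to the device: via the
 * parent's .dma_map, via .set_map (deferred while an IOTLB batch is in
 * flight), or by mapping into the platform IOMMU domain.
 */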
static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid(iotlb);
	int r = 0;

	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
				      pa, perm, opaque);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, asid, iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm), GFP_KERNEL);
	}
	if (r) {
		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
		return r;
	}

	if (!vdpa->use_va)
		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);

	return 0;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid(iotlb);

	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);

	if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, asid, iotlb);
	}
}
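
/*
 * VA mode (vdpa->use_va): split the range along VMA boundaries and,
 * for each eligible shared file-backed VMA, record the backing file
 * and offset so the parent driver can translate the userspace virtual
 * addresses itself.  Ranges without an eligible VMA are skipped.
 */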
static int vhost_vdpa_va_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	u64 offset, map_size, map_iova = iova;
	struct vdpa_map_file *map_file;
	struct vm_area_struct *vma;
	int ret = 0;

	mmap_read_lock(dev->mm);

	while (size) {
		vma = find_vma(dev->mm, uaddr);
		if (!vma) {
			ret = -EINVAL;
			break;
		}
		map_size = min(size, vma->vm_end - uaddr);
		if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
			!(vma->vm_flags & (VM_IO | VM_PFNMAP))))
			goto next;

		map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
		if (!map_file) {
			ret = -ENOMEM;
			break;
		}
		offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
		map_file->offset = offset;
		map_file->file = get_file(vma->vm_file);
		ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
				     perm, map_file);
		if (ret) {
			fput(map_file->file);
			kfree(map_file);
			break;
		}
next:
		size -= map_size;
		uaddr += map_size;
		map_iova += map_size;
	}
	if (ret)
		vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);

	mmap_read_unlock(dev->mm);

	return ret;
}
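
/*
 * PA mode: pin the userspace pages with pin_user_pages(), coalesce
 * physically contiguous runs and map each contiguous chunk, while
 * charging the pinned pages against the mm's RLIMIT_MEMLOCK.
 */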
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 start = iova;
	long pinned;
	int ret = 0;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PFN_UP(size + (iova & ~PAGE_MASK));
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Pin a contiguous chunk of memory */
				csize = PFN_PHYS(last_pfn - map_pfn + 1);
				ret = vhost_vdpa_map(v, iotlb, iova, csize,
						     PFN_PHYS(map_pfn),
						     perm, NULL);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += PFN_PHYS(pinned);
		npages -= pinned;
	}

	/* Pin the rest chunk */
	ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
			     PFN_PHYS(map_pfn), perm, NULL);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which are yet to be
			 * mapped but haven't due to vdpa_map() or
			 * pin_user_pages() failure.
			 *
			 * Mapped pages are accounted in vdpa_map(), hence
			 * the corresponding unpinning will be handled by
			 * vdpa_unmap().
			 */
			WARN_ON(!last_pfn);
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, iotlb, start, size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb *iotlb,
					   struct vhost_iotlb_msg *msg)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (msg->iova < v->range.first || !msg->size ||
	    msg->iova > U64_MAX - msg->size + 1 ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	if (vdpa->use_va)
		return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
					 msg->uaddr, msg->perm);

	return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
				 msg->perm);
}
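
/*
 * Entry point for VHOST_IOTLB_MSG_V2 messages: look up (or, for
 * updates and batch begins, create) the address space for @asid and
 * dispatch update, invalidate and batch begin/end requests.
 */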
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_iotlb *iotlb = NULL;
	struct vhost_vdpa_as *as = NULL;
	int r = 0;

	mutex_lock(&dev->mutex);

	r = vhost_dev_check_owner(dev);
	if (r)
		goto unlock;

	if (msg->type == VHOST_IOTLB_UPDATE ||
	    msg->type == VHOST_IOTLB_BATCH_BEGIN) {
		as = vhost_vdpa_find_alloc_as(v, asid);
		if (!as) {
			dev_err(&v->dev, "can't find and alloc asid %d\n",
				asid);
			r = -EINVAL;
			goto unlock;
		}
		iotlb = &as->iotlb;
	} else
		iotlb = asid_to_iotlb(v, asid);

	if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
		if (v->in_batch && v->batch_asid != asid) {
			dev_info(&v->dev, "batch id %d asid %d\n",
				 v->batch_asid, asid);
		}
		if (!iotlb)
			dev_err(&v->dev, "no iotlb for asid %d\n", asid);
		r = -EINVAL;
		goto unlock;
	}

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->batch_asid = asid;
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, asid, iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	mutex_unlock(&dev->mutex);

	return r;
}
static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
                                         struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct vhost_vdpa *v = file->private_data;
        struct vhost_dev *dev = &v->vdev;

        return vhost_chr_write_iter(dev, from);
}

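/*
 * Allocate and attach an IOMMU domain for devices that rely on the platform
 * IOMMU for DMA isolation. Devices that translate DMA themselves
 * (set_map/dma_map) do not need a domain.
 */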
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct device *dma_dev = vdpa_get_dma_dev(vdpa);
        const struct bus_type *bus;
        int ret;

        /* The device wants to do DMA by itself */
        if (ops->set_map || ops->dma_map)
                return 0;

        bus = dma_dev->bus;
        if (!bus)
                return -EFAULT;

        if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY)) {
                dev_warn_once(&v->dev,
                              "Failed to allocate domain, device is not IOMMU cache coherent capable\n");
                return -ENOTSUPP;
        }

        v->domain = iommu_domain_alloc(bus);
        if (!v->domain)
                return -EIO;

        ret = iommu_attach_device(v->domain, dma_dev);
        if (ret)
                goto err_attach;

        return 0;

err_attach:
        iommu_domain_free(v->domain);
        v->domain = NULL;
        return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
        struct vdpa_device *vdpa = v->vdpa;
        struct device *dma_dev = vdpa_get_dma_dev(vdpa);

        if (v->domain) {
                iommu_detach_device(v->domain, dma_dev);
                iommu_domain_free(v->domain);
        }

        v->domain = NULL;
}

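/*
 * Derive the usable IOVA range: prefer the range reported by the device,
 * fall back to the IOMMU aperture, and otherwise allow the whole 64-bit
 * space.
 */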
static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
        struct vdpa_iova_range *range = &v->range;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;

        if (ops->get_iova_range) {
                *range = ops->get_iova_range(vdpa);
        } else if (v->domain && v->domain->geometry.force_aperture) {
                range->first = v->domain->geometry.aperture_start;
                range->last = v->domain->geometry.aperture_end;
        } else {
                range->first = 0;
                range->last = ULLONG_MAX;
        }
}

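/* Tear down all address spaces, the IOMMU domain and the vhost device. */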
static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
{
        struct vhost_vdpa_as *as;
        u32 asid;

        for (asid = 0; asid < v->vdpa->nas; asid++) {
                as = asid_to_as(v, asid);
                if (as)
                        vhost_vdpa_remove_as(v, asid);
        }

        vhost_vdpa_free_domain(v);
        vhost_dev_cleanup(&v->vdev);
        kfree(v->vdev.vqs);
        v->vdev.vqs = NULL;
}

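/*
 * Open the character device: only one opener is allowed at a time. The
 * device is reset, the virtqueues and the vhost device are set up, an IOMMU
 * domain is allocated if needed and the IOVA range is initialized.
 */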
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
        struct vhost_vdpa *v;
        struct vhost_dev *dev;
        struct vhost_virtqueue **vqs;
        int r, opened;
        u32 i, nvqs;

        v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

        opened = atomic_cmpxchg(&v->opened, 0, 1);
        if (opened)
                return -EBUSY;

        nvqs = v->nvqs;
        r = vhost_vdpa_reset(v);
        if (r)
                goto err;

        vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                r = -ENOMEM;
                goto err;
        }

        dev = &v->vdev;
        for (i = 0; i < nvqs; i++) {
                vqs[i] = &v->vqs[i];
                vqs[i]->handle_kick = handle_vq_kick;
        }
        vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
                       vhost_vdpa_process_iotlb_msg);

        r = vhost_vdpa_alloc_domain(v);
        if (r)
                goto err_alloc_domain;

        vhost_vdpa_set_iova_range(v);

        filep->private_data = v;

        return 0;

err_alloc_domain:
        vhost_vdpa_cleanup(v);
err:
        atomic_dec(&v->opened);
        return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
        u32 i;

        for (i = 0; i < v->nvqs; i++)
                vhost_vdpa_unsetup_vq_irq(v, i);
}

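/* Undo vhost_vdpa_open() and wake up a waiting vhost_vdpa_remove(). */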
static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
        struct vhost_vdpa *v = filep->private_data;
        struct vhost_dev *d = &v->vdev;

        mutex_lock(&d->mutex);
        filep->private_data = NULL;
        vhost_vdpa_clean_irq(v);
        vhost_vdpa_reset(v);
        vhost_dev_stop(&v->vdev);
        vhost_vdpa_unbind_mm(v);
        vhost_vdpa_config_put(v);
        vhost_vdpa_cleanup(v);
        mutex_unlock(&d->mutex);

        atomic_dec(&v->opened);
        complete(&v->completion);

        return 0;
}

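/*
 * Doorbell mapping: when the parent device exposes per-virtqueue
 * notification areas, userspace can mmap() one page per virtqueue (selected
 * by the page offset) and write to the device doorbell directly.
 */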
#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
        struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_notification_area notify;
        struct vm_area_struct *vma = vmf->vma;
        u16 index = vma->vm_pgoff;

        notify = ops->get_vq_notification(vdpa, index);

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
                            PFN_DOWN(notify.addr), PAGE_SIZE,
                            vma->vm_page_prot))
                return VM_FAULT_SIGBUS;

        return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
        .fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct vhost_vdpa *v = vma->vm_file->private_data;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_notification_area notify;
        unsigned long index = vma->vm_pgoff;

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;
        if ((vma->vm_flags & VM_SHARED) == 0)
                return -EINVAL;
        if (vma->vm_flags & VM_READ)
                return -EINVAL;
        if (index > 65535)
                return -EINVAL;
        if (!ops->get_vq_notification)
                return -ENOTSUPP;

        /* To be safe and easily modelled by userspace, we only
         * support a doorbell that sits on a page boundary and
         * does not share its page with other registers.
         */
        notify = ops->get_vq_notification(vdpa, index);
        if (notify.addr & (PAGE_SIZE - 1))
                return -EINVAL;
        if (vma->vm_end - vma->vm_start != notify.size)
                return -ENOTSUPP;

        vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &vhost_vdpa_vm_ops;
        return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
        .owner = THIS_MODULE,
        .open = vhost_vdpa_open,
        .release = vhost_vdpa_release,
        .write_iter = vhost_vdpa_chr_write_iter,
        .unlocked_ioctl = vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
        .mmap = vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
        .compat_ioctl = compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
        struct vhost_vdpa *v =
               container_of(device, struct vhost_vdpa, dev);

        ida_simple_remove(&vhost_vdpa_ida, v->minor);
        kfree(v->vqs);
        kfree(v);
}

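/*
 * Bus probe: create the vhost-vdpa character device for a newly registered
 * vDPA device and initialize the per-ASID IOTLB buckets.
 */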
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vhost_vdpa *v;
        int minor;
        int i, r;

        /* We can't support a platform IOMMU device with more than one
         * group or address space.
         */
        if (!ops->set_map && !ops->dma_map &&
            (vdpa->ngroups > 1 || vdpa->nas > 1))
                return -EOPNOTSUPP;

        v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!v)
                return -ENOMEM;

        minor = ida_simple_get(&vhost_vdpa_ida, 0,
                               VHOST_VDPA_DEV_MAX, GFP_KERNEL);
        if (minor < 0) {
                kfree(v);
                return minor;
        }

        atomic_set(&v->opened, 0);
        v->minor = minor;
        v->vdpa = vdpa;
        v->nvqs = vdpa->nvqs;
        v->virtio_id = ops->get_device_id(vdpa);

        device_initialize(&v->dev);
        v->dev.release = vhost_vdpa_release_dev;
        v->dev.parent = &vdpa->dev;
        v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
        v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
                               GFP_KERNEL);
        if (!v->vqs) {
                r = -ENOMEM;
                goto err;
        }

        r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
        if (r)
                goto err;

        cdev_init(&v->cdev, &vhost_vdpa_fops);
        v->cdev.owner = THIS_MODULE;

        r = cdev_device_add(&v->cdev, &v->dev);
        if (r)
                goto err;

        init_completion(&v->completion);
        vdpa_set_drvdata(vdpa, v);

        for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
                INIT_HLIST_HEAD(&v->as[i]);

        return 0;

err:
        put_device(&v->dev);
        ida_simple_remove(&vhost_vdpa_ida, v->minor);
        return r;
}

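/*
 * Bus remove: delete the character device, wait for any open file
 * descriptor to be released, then drop the last reference.
 */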
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
        struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
        int opened;

        cdev_device_del(&v->cdev, &v->dev);

        do {
                opened = atomic_cmpxchg(&v->opened, 0, 1);
                if (!opened)
                        break;
                wait_for_completion(&v->completion);
        } while (1);

        put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
        .driver = {
                .name = "vhost_vdpa",
        },
        .probe = vhost_vdpa_probe,
        .remove = vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
        int r;

        r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
                                "vhost-vdpa");
        if (r)
                goto err_alloc_chrdev;

        r = vdpa_register_driver(&vhost_vdpa_driver);
        if (r)
                goto err_vdpa_register_driver;

        return 0;

err_vdpa_register_driver:
        unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
        return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
        vdpa_unregister_driver(&vhost_vdpa_driver);
        unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
|
|
|
|
MODULE_LICENSE("GPL v2");
|
|
|
|
MODULE_AUTHOR("Intel Corporation");
|
|
|
|
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");
|