// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 *
 * Copyright (C) 2019 Arm Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-map-ops.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>

#include <uapi/linux/virtio_iommu.h>

#include "dma-iommu.h"

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

#define VIOMMU_REQUEST_VQ		0
#define VIOMMU_EVENT_VQ			1
#define VIOMMU_NR_VQS			2

struct viommu_dev {
	struct iommu_device		iommu;
	struct device			*dev;
	struct virtio_device		*vdev;

	struct ida			domain_ids;

	struct virtqueue		*vqs[VIOMMU_NR_VQS];
	spinlock_t			request_lock;
	struct list_head		requests;
	void				*evts;

	/* Device configuration */
	struct iommu_domain_geometry	geometry;
	u64				pgsize_bitmap;
	u32				first_domain;
	u32				last_domain;
	/* Supported MAP flags */
	u32				map_flags;
	u32				probe_size;
};

struct viommu_mapping {
	phys_addr_t			paddr;
	struct interval_tree_node	iova;
	u32				flags;
};

struct viommu_domain {
	struct iommu_domain		domain;
	struct viommu_dev		*viommu;
	struct mutex			mutex; /* protects viommu pointer */
	unsigned int			id;
	u32				map_flags;

	spinlock_t			mappings_lock;
	struct rb_root_cached		mappings;

	unsigned long			nr_endpoints;
	bool				bypass;
};

struct viommu_endpoint {
	struct device			*dev;
	struct viommu_dev		*viommu;
	struct viommu_domain		*vdomain;
	struct list_head		resv_regions;
};

struct viommu_request {
	struct list_head		list;
	void				*writeback;
	unsigned int			write_offset;
	unsigned int			len;
	char				buf[] __counted_by(len);
};

#define VIOMMU_FAULT_RESV_MASK		0xffffff00

struct viommu_event {
	union {
		u32			head;
		struct virtio_iommu_fault fault;
	};
};

#define to_viommu_domain(domain)	\
	container_of(domain, struct viommu_domain, domain)
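
/* Convert the status written by the device in the request tail into an errno */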
static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_NOMEM:
		return -ENOMEM;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}

static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}
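
/*
 * Return the offset of the device-writable part of the request. Everything
 * before this offset is read-only for the device; a probe request has an
 * additional device-writable area of probe_size bytes before the tail.
 */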
static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	if (req->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}

/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return 0;
}
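
/* Take the request lock and complete all in-flight requests */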
static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * __viommu_add_req - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue nor wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on stack and drops the lock between add_req() and
 * sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(struct_size(req, buf, len), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);

	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}
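
/* Add a request to the queue without waiting for it to complete */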
static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno)
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}

/*
 * viommu_add_mapping - add a mapping to the internal tree
 *
 * On success, return 0. Otherwise return -ENOMEM.
 */
static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
			      phys_addr_t paddr, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr		= paddr;
	mapping->iova.start	= iova;
	mapping->iova.last	= end;
	mapping->flags		= flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}

/*
 * viommu_del_mappings - remove mappings from the internal tree
 *
 * @vdomain: the domain
 * @iova: start of the range
 * @end: end of the range
 *
 * On success, returns the number of unmapped bytes
 */
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  u64 iova, u64 end)
{
	size_t unmapped = 0;
	unsigned long flags;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, end);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, end);

		/* Trying to split a mapping? */
		if (mapping->iova.start < iova)
			break;

		/*
		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
		 * with a single MAP request, so remove the full mapping.
		 */
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}

/*
 * Fill the domain with identity mappings, skipping the device's reserved
 * regions.
 */
static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
				      struct viommu_domain *vdomain)
{
	int ret;
	struct iommu_resv_region *resv;
	u64 iova = vdomain->domain.geometry.aperture_start;
	u64 limit = vdomain->domain.geometry.aperture_end;
	u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);

	iova = ALIGN(iova, granule);
	limit = ALIGN_DOWN(limit + 1, granule) - 1;

	list_for_each_entry(resv, &vdev->resv_regions, list) {
		u64 resv_start = ALIGN_DOWN(resv->start, granule);
		u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;

		if (resv_end < iova || resv_start > limit)
			/* No overlap */
			continue;

		if (resv_start > iova) {
			ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
						 (phys_addr_t)iova, flags);
			if (ret)
				goto err_unmap;
		}

		if (resv_end >= limit)
			return 0;

		iova = resv_end + 1;
	}

	ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
				 flags);
	if (ret)
		goto err_unmap;
	return 0;

err_unmap:
	viommu_del_mappings(vdomain, 0, iova);
	return ret;
}

/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type	= VIRTIO_IOMMU_T_MAP,
			.domain		= cpu_to_le32(vdomain->id),
			.virt_start	= cpu_to_le64(mapping->iova.start),
			.virt_end	= cpu_to_le64(mapping->iova.last),
			.phys_start	= cpu_to_le64(mapping->paddr),
			.flags		= cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}
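
/* Add a reserved memory region reported by a PROBE request to the endpoint */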
static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
			       struct virtio_iommu_probe_resv_mem *mem,
			       size_t len)
{
	size_t size;
	u64 start64, end64;
	phys_addr_t start, end;
	struct iommu_resv_region *region = NULL, *next;
	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	start = start64 = le64_to_cpu(mem->start);
	end = end64 = le64_to_cpu(mem->end);
	size = end64 - start64 + 1;

	/* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */
	if (start != start64 || end != end64 || size < end64 - start64)
		return -EOVERFLOW;

	if (len < sizeof(*mem))
		return -EINVAL;

	switch (mem->subtype) {
	default:
		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
			 mem->subtype);
		fallthrough;
	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
		region = iommu_alloc_resv_region(start, size, 0,
						 IOMMU_RESV_RESERVED,
						 GFP_KERNEL);
		break;
	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
		region = iommu_alloc_resv_region(start, size, prot,
						 IOMMU_RESV_MSI,
						 GFP_KERNEL);
		break;
	}
	if (!region)
		return -ENOMEM;

	/* Keep the list sorted */
	list_for_each_entry(next, &vdev->resv_regions, list) {
		if (next->start > region->start)
			break;
	}
	list_add_tail(&region->list, &next->list);
	return 0;
}
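
/*
 * Send a PROBE request for the endpoint and parse the properties returned by
 * the device, such as reserved memory regions.
 */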
static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	size_t probe_len;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	if (!fwspec->num_ids)
		return -EINVAL;

	probe_len = sizeof(*probe) + viommu->probe_size +
		    sizeof(struct virtio_iommu_req_tail);
	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
	/*
	 * For now, assume that properties of an endpoint that outputs multiple
	 * IDs are consistent. Only probe the first one.
	 */
	probe->endpoint = cpu_to_le32(fwspec->ids[0]);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out_free;

	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
	       cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		switch (type) {
		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
			break;
		default:
			dev_err(dev, "unknown viommu prop 0x%x\n", type);
		}

		if (ret)
			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}

out_free:
	kfree(probe);
	return ret;
}
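
/* Log a translation fault reported by the device on the event queue */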
static int viommu_fault_handler(struct viommu_dev *viommu,
				struct virtio_iommu_fault *fault)
{
	char *reason_str;

	u8 reason = fault->reason;
	u32 flags = le32_to_cpu(fault->flags);
	u32 endpoint = le32_to_cpu(fault->endpoint);
	u64 address = le64_to_cpu(fault->address);

	switch (reason) {
	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
		reason_str = "domain";
		break;
	case VIRTIO_IOMMU_FAULT_R_MAPPING:
		reason_str = "page";
		break;
	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
	default:
		reason_str = "unknown";
		break;
	}

	/* TODO: find EP by ID and report_iommu_fault */
	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
				    reason_str, endpoint, address,
				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
	else
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
				    reason_str, endpoint);
	return 0;
}
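
/* Dequeue fault events from the event virtqueue and re-add their buffers */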
static void viommu_event_handler(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct viommu_event *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt)) {
			dev_err(viommu->dev,
				"invalid event buffer (len %u != %zu)\n",
				len, sizeof(*evt));
		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
			viommu_fault_handler(viommu, &evt->fault);
		}

		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}

/* IOMMU API */

static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	return &vdomain->domain;
}
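
/*
 * Finish initializing the domain on first attach, once the parent viommu and
 * its page granule are known: allocate a domain ID and, for an identity
 * domain, either set the bypass flag or create identity mappings.
 */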
static int viommu_domain_finalise(struct viommu_endpoint *vdev,
				  struct iommu_domain *domain)
{
	int ret;
	unsigned long viommu_page_size;
	struct viommu_dev *viommu = vdev->viommu;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
	if (viommu_page_size > PAGE_SIZE) {
		dev_err(vdev->dev,
			"granule 0x%lx larger than system page size 0x%lx\n",
			viommu_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret < 0)
		return ret;

	vdomain->id		= (unsigned int)ret;

	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
	domain->geometry	= viommu->geometry;

	vdomain->map_flags	= viommu->map_flags;
	vdomain->viommu		= viommu;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		if (virtio_has_feature(viommu->vdev,
				       VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
			vdomain->bypass = true;
			return 0;
		}

		ret = viommu_domain_map_identity(vdev, vdomain);
		if (ret) {
			ida_free(&viommu->domain_ids, vdomain->id);
			vdomain->viommu = NULL;
			return ret;
		}
	}

	return 0;
}

static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/* Free all remaining mappings */
	viommu_del_mappings(vdomain, 0, ULLONG_MAX);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}

static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int i;
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu) {
		/*
		 * Properly initialize the domain now that we know which viommu
		 * owns it.
		 */
		ret = viommu_domain_finalise(vdev, domain);
	} else if (vdomain->viommu != vdev->viommu) {
		ret = -EINVAL;
	}
	mutex_unlock(&vdomain->mutex);

	if (ret)
		return ret;

	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is detached from the old one and, if as a result the
	 * old domain isn't attached to any endpoint, all mappings are removed
	 * from the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will be
	 * recreated if it gets reattached to an endpoint. Otherwise it will be
	 * freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type	= VIRTIO_IOMMU_T_ATTACH,
		.domain		= cpu_to_le32(vdomain->id),
	};

	if (vdomain->bypass)
		req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}
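
/* Send DETACH requests for all of the endpoint's IDs and clear its domain */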
static void viommu_detach_dev(struct viommu_endpoint *vdev)
{
	int i;
	struct virtio_iommu_req_detach req;
	struct viommu_domain *vdomain = vdev->vdomain;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(vdev->dev);

	if (!vdomain)
		return;

	req = (struct virtio_iommu_req_detach) {
		.head.type	= VIRTIO_IOMMU_T_DETACH,
		.domain		= cpu_to_le32(vdomain->id),
	};

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);
		WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req)));
	}
	vdomain->nr_endpoints--;
	vdev->vdomain = NULL;
}
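
/*
 * Record the mapping in the internal tree and, if the domain is already
 * attached to endpoints, queue a MAP request for the device.
 */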
static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
			    int prot, gfp_t gfp, size_t *mapped)
{
	int ret;
	u32 flags;
	size_t size = pgsize * pgcount;
	u64 end = iova + size - 1;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	if (flags & ~vdomain->map_flags)
		return -EINVAL;

	ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
	if (ret)
		return ret;

	if (vdomain->nr_endpoints) {
		map = (struct virtio_iommu_req_map) {
			.head.type	= VIRTIO_IOMMU_T_MAP,
			.domain		= cpu_to_le32(vdomain->id),
			.virt_start	= cpu_to_le64(iova),
			.phys_start	= cpu_to_le64(paddr),
			.virt_end	= cpu_to_le64(end),
			.flags		= cpu_to_le32(flags),
		};

		ret = viommu_add_req(vdomain->viommu, &map, sizeof(map));
		if (ret) {
			viommu_del_mappings(vdomain, iova, end);
			return ret;
		}
	}
	if (mapped)
		*mapped = size;

	return 0;
}
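
/*
 * Remove mappings from the internal tree and, if the domain is attached to
 * endpoints, queue an UNMAP request (sent to the device at iotlb_sync time).
 */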
static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
				 size_t pgsize, size_t pgcount,
				 struct iommu_iotlb_gather *gather)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);
	size_t size = pgsize * pgcount;

	unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type	= VIRTIO_IOMMU_T_UNMAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}
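
/* Walk the internal mapping tree to translate an IOVA into a physical address */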
static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}

static void viommu_iotlb_sync(struct iommu_domain *domain,
			      struct iommu_iotlb_gather *gather)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}

static int viommu_iotlb_sync_map(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/*
	 * May be called before the viommu is initialized including
	 * while creating direct mapping
	 */
	if (!vdomain->nr_endpoints)
		return 0;
	return viommu_sync_req(vdomain->viommu);
}

static void viommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/*
	 * May be called before the viommu is initialized including
	 * while creating direct mapping
	 */
	if (!vdomain->nr_endpoints)
		return;
	viommu_sync_req(vdomain->viommu);
}
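
/*
 * Report the endpoint's reserved regions, adding a software-mapped MSI window
 * if the device didn't provide a bypass MSI region.
 */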
static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	list_for_each_entry(entry, &vdev->resv_regions, list) {
		if (entry->type == IOMMU_RESV_MSI)
			msi = entry;

		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
		if (!new_entry)
			return;
		list_add_tail(&new_entry->list, head);
	}

	/*
	 * If the device didn't register any bypass MSI window, add a
	 * software-mapped region.
	 */
	if (!msi) {
		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					      prot, IOMMU_RESV_SW_MSI,
					      GFP_KERNEL);
		if (!msi)
			return;

		list_add_tail(&msi->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}

static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;

static int viommu_match_node(struct device *dev, const void *data)
{
	return device_match_fwnode(dev->parent, data);
}

static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
						fwnode, viommu_match_node);
	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}
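
/*
 * Look up the viommu that translates this endpoint and allocate its per-device
 * data, probing the device for properties when the PROBE feature is available.
 */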
static struct iommu_device *viommu_probe_device(struct device *dev)
{
	int ret;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return ERR_PTR(-ENODEV);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return ERR_PTR(-ENOMEM);

	vdev->dev = dev;
	vdev->viommu = viommu;
	INIT_LIST_HEAD(&vdev->resv_regions);
	dev_iommu_priv_set(dev, vdev);

	if (viommu->probe_size) {
		/* Get additional information for this endpoint */
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret)
			goto err_free_dev;
	}

	return &viommu->iommu;

err_free_dev:
	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);

	return ERR_PTR(ret);
}

static void viommu_probe_finalize(struct device *dev)
{
#ifndef CONFIG_ARCH_HAS_SETUP_DMA_OPS
	/* First clear the DMA ops in case we're switching from a DMA domain */
	set_dma_ops(dev, NULL);
	iommu_setup_dma_ops(dev, 0, U64_MAX);
#endif
}

static void viommu_release_device(struct device *dev)
{
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	viommu_detach_dev(vdev);
	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}

static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}

static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static bool viommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DEFERRED_FLUSH:
		return true;
	default:
		return false;
	}
}

static struct iommu_ops viommu_ops = {
	.capable		= viommu_capable,
	.domain_alloc		= viommu_domain_alloc,
	.probe_device		= viommu_probe_device,
	.probe_finalize		= viommu_probe_finalize,
	.release_device		= viommu_release_device,
	.device_group		= viommu_device_group,
	.get_resv_regions	= viommu_get_resv_regions,
	.of_xlate		= viommu_of_xlate,
	.owner			= THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev		= viommu_attach_dev,
		.map_pages		= viommu_map_pages,
		.unmap_pages		= viommu_unmap_pages,
		.iova_to_phys		= viommu_iova_to_phys,
		.flush_iotlb_all	= viommu_flush_iotlb_all,
		.iotlb_sync		= viommu_iotlb_sync,
		.iotlb_sync_map		= viommu_iotlb_sync_map,
		.free			= viommu_domain_free,
	}
};
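
/* Set up the request and event virtqueues */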
static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	const char *names[] = { "request", "event" };
	vq_callback_t *callbacks[] = {
		NULL, /* No async requests */
		viommu_event_handler,
	};

	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
			       names, NULL);
}
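
/* Allocate event buffers and add them all to the event virtqueue */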
static int viommu_fill_evtq(struct viommu_dev *viommu)
{
	int i, ret;
	struct scatterlist sg[1];
	struct viommu_event *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
						 sizeof(*evts), GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
			&viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	viommu->last_domain = ~0U;

	/* Optional features */
	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.start,
				&input_start);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.end,
				&input_end);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.start,
				&viommu->first_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.end,
				&viommu->last_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
				struct virtio_iommu_config, probe_size,
				&viommu->probe_size);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start	= input_start,
		.aperture_end	= input_end,
		.force_aperture	= true,
	};

	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

	virtio_device_ready(vdev);

	/* Populate the event queue with buffers */
	ret = viommu_fill_evtq(viommu);
	if (ret)
		goto err_free_vqs;

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);

	vdev->priv = viommu;

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}

static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

	/* Stop all virtqueues */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}

static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}

static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_INPUT_RANGE,
	VIRTIO_IOMMU_F_DOMAIN_RANGE,
	VIRTIO_IOMMU_F_PROBE,
	VIRTIO_IOMMU_F_MMIO,
	VIRTIO_IOMMU_F_BYPASS_CONFIG,
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static struct virtio_driver virtio_iommu_drv = {
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.probe			= viommu_probe,
	.remove			= viommu_remove,
	.config_changed		= viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);

MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_LICENSE("GPL v2");