iommu: Bind process address spaces to devices
Add bind() and unbind() operations to the IOMMU API. iommu_sva_bind_device() binds a device to an mm, and returns a handle to the bond, which is released by calling iommu_sva_unbind_device().

Each mm bound to devices gets a PASID (by convention, a 20-bit system-wide ID representing the address space), which can be retrieved with iommu_sva_get_pasid(). When programming DMA addresses, device drivers include this PASID in a device-specific manner, to let the device access the given address space. Since the process memory may be paged out, device and IOMMU must support I/O page faults (e.g. PCI PRI).

Using iommu_sva_set_ops(), device drivers provide an mm_exit() callback that is called by the IOMMU driver if the process exits before the device driver called unbind(). In mm_exit(), the device driver should disable DMA from the given context, so that the core IOMMU can reallocate the PASID. Whether the process exited or not, the device driver should always release the handle with unbind().

To use these functions, the device driver must first enable the IOMMU_DEV_FEAT_SVA device feature with iommu_dev_enable_feature().

Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent a3a195929d
commit 26b25a2b98
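To make the intended flow concrete, here is a minimal sketch of how a device driver might consume this API. It is a hedged illustration, not part of the patch: the driver context (struct my_dev) and the helpers my_dev_stop_dma() and my_dev_program_pasid() are hypothetical stand-ins for device-specific mechanics.

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

/* Hypothetical per-device context and helpers, not in this patch. */
struct my_dev {
	struct device *dev;
	struct iommu_sva *handle;
};
void my_dev_stop_dma(void *drvdata);
void my_dev_program_pasid(struct my_dev *mydev, int pasid);

static int my_mm_exit(struct device *dev, struct iommu_sva *handle,
		      void *drvdata)
{
	/*
	 * The process is exiting before the driver called unbind():
	 * stop all DMA for this context so the PASID can be reused.
	 */
	my_dev_stop_dma(drvdata);
	return 0;
}

static const struct iommu_sva_ops my_sva_ops = {
	.mm_exit = my_mm_exit,
};

static int my_dev_bind_current(struct my_dev *mydev)
{
	int ret, pasid;

	/* SVA must be enabled on the device before the first bind(). */
	ret = iommu_dev_enable_feature(mydev->dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	mydev->handle = iommu_sva_bind_device(mydev->dev, current->mm, mydev);
	if (IS_ERR(mydev->handle))
		return PTR_ERR(mydev->handle);

	ret = iommu_sva_set_ops(mydev->handle, &my_sva_ops);
	if (ret) {
		iommu_sva_unbind_device(mydev->handle);
		return ret;
	}

	/* Tag subsequent DMA with the PASID, in a device-specific way. */
	pasid = iommu_sva_get_pasid(mydev->handle);
	my_dev_program_pasid(mydev, pasid);
	return 0;
}

Whether the process exits first or not, the driver eventually calls iommu_sva_unbind_device(mydev->handle) to drop its reference to the bond.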
drivers/iommu/iommu.c

@@ -2135,3 +2135,107 @@ int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 *
 * Create a bond between device and address space, allowing the device to access
 * the mm using the returned PASID. If a bond already exists between @dev and
 * @mm, it is returned and an additional reference is taken. Caller must call
 * iommu_sva_unbind_device() to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_group *group;
	struct iommu_sva *handle = ERR_PTR(-EINVAL);
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_bind)
		return ERR_PTR(-ENODEV);

	group = iommu_group_get(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	/* Ensure device count and domain don't change while we're binding */
	mutex_lock(&group->mutex);

	/*
	 * To keep things simple, SVA currently doesn't support IOMMU groups
	 * with more than one device. Existing SVA-capable systems are not
	 * affected by the problems that required IOMMU groups (lack of ACS
	 * isolation, device ID aliasing and other hardware issues).
	 */
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	handle = ops->sva_bind(dev, mm, drvdata);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return handle;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transactions for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_group *group;
	struct device *dev = handle->dev;
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_unbind)
		return;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	ops->sva_unbind(handle);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *sva_ops)
{
	if (handle->ops && handle->ops != sva_ops)
		return -EEXIST;

	handle->ops = sva_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_sva_set_ops);

int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;

	if (!ops || !ops->sva_get_pasid)
		return IOMMU_PASID_INVALID;

	return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
include/linux/iommu.h

@@ -48,6 +48,7 @@ struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;
struct iommu_sva;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
@@ -55,6 +56,8 @@ struct notifier_block;

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
				       void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped */
@@ -159,6 +162,28 @@ struct iommu_resv_region {
/* Per device IOMMU features */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
};

#define IOMMU_PASID_INVALID	(-1U)

/**
 * struct iommu_sva_ops - device driver callbacks for an SVA context
 *
 * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
 *           @mm_exit returns, the device must not issue any more transactions
 *           with the PASID given as argument.
 *
 *           The @mm_exit handler is allowed to sleep. Be careful about the
 *           locks taken in @mm_exit, because they might lead to deadlocks if
 *           they are also held when dropping references to the mm. Consider the
 *           following call chain:
 *           mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
 *           Using mmput_async() prevents this scenario.
 */
struct iommu_sva_ops {
	iommu_mm_exit_handler_t mm_exit;
};

#ifdef CONFIG_IOMMU_API
@@ -196,6 +221,9 @@ enum iommu_dev_features {
 * @dev_feat_enabled: check enabled feature
 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
 * @aux_get_pasid: get the pasid given an aux-domain
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated with an SVA handle
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 */
struct iommu_ops {
@@ -251,6 +279,11 @@ struct iommu_ops {
	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	int (*sva_get_pasid)(struct iommu_sva *handle);

	unsigned long pgsize_bitmap;
};

@@ -417,6 +450,14 @@ struct iommu_fwspec {
	u32 ids[1];
};

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct device			*dev;
	const struct iommu_sva_ops	*ops;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
@@ -445,6 +486,14 @@ int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);

struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm,
					void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *ops);
int iommu_sva_get_pasid(struct iommu_sva *handle);

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
@@ -770,6 +819,27 @@ iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
	return -ENODEV;
}

static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline int iommu_sva_set_ops(struct iommu_sva *handle,
				    const struct iommu_sva_ops *ops)
{
	return -EINVAL;
}

static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

#endif /* CONFIG_IOMMU_API */

#ifdef CONFIG_IOMMU_DEBUGFS