iommu: Make iommu_queue_iopf() more generic
Make iommu_queue_iopf() more generic by making the iopf_group a minimal
set of iopf's that a domain's iopf handler should handle and respond to.
Add a domain parameter to struct iopf_group so that the handler can
retrieve and use it directly. Change iommu_queue_iopf() to forward
groups of iopf's to the domain's iopf handler. This is also a necessary
step toward decoupling the SVA iopf handling code from this interface.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Yi Liu <yi.l.liu@intel.com>
Tested-by: Yan Zhao <yan.y.zhao@intel.com>
Tested-by: Longfang Liu <liulongfang@huawei.com>
Link: https://lore.kernel.org/r/20240212012227.119381-10-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 351ffcb11c
parent 24b5d268b5
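To make the reworked contract concrete, below is a minimal sketch of a
synchronous consumer of the new int (*iopf_handler)(struct iopf_group *group)
signature. It is not part of the commit: my_iopf_handler() and
my_resolve_fault() are hypothetical names, and the complete-then-free pattern
mirrors the iopf_handler() work function in the diff that follows.

/*
 * Sketch only: a hypothetical synchronous handler for the new interface.
 * A handler that returns 0 owns the group and must respond to it and
 * free it; on a non-zero return, iommu_queue_iopf() frees the group.
 */
static int my_iopf_handler(struct iopf_group *group)
{
	struct iopf_fault *iopf;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	list_for_each_entry(iopf, &group->faults, list) {
		/* Errors are sticky: skip the rest of the group on failure. */
		if (status != IOMMU_PAGE_RESP_SUCCESS)
			break;
		/* my_resolve_fault() stands in for real fault resolution. */
		status = my_resolve_fault(group->domain, &iopf->fault);
	}

	iopf_complete_group(group->dev, &group->last_fault, status);
	iopf_free_group(group);
	return 0;
}

Note that iopf_free_group() is static to io-pgfault.c at this point, so a
handler outside that file would in practice defer to a queuing helper such as
iommu_sva_handle_iopf() below rather than completing the group inline.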
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -13,6 +13,9 @@
 #include "iommu-sva.h"
 
+enum iommu_page_response_code
+iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm);
+
 static void iopf_free_group(struct iopf_group *group)
 {
 	struct iopf_fault *iopf, *next;
@@ -45,29 +48,48 @@ static void iopf_handler(struct work_struct *work)
 {
 	struct iopf_fault *iopf;
 	struct iopf_group *group;
-	struct iommu_domain *domain;
 	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
 
 	group = container_of(work, struct iopf_group, work);
-	domain = iommu_get_domain_for_dev_pasid(group->dev,
-			group->last_fault.fault.prm.pasid, 0);
-	if (!domain || !domain->iopf_handler)
-		status = IOMMU_PAGE_RESP_INVALID;
-
 	list_for_each_entry(iopf, &group->faults, list) {
 		/*
 		 * For the moment, errors are sticky: don't handle subsequent
 		 * faults in the group if there is an error.
 		 */
-		if (status == IOMMU_PAGE_RESP_SUCCESS)
-			status = domain->iopf_handler(&iopf->fault,
-						      domain->fault_data);
+		if (status != IOMMU_PAGE_RESP_SUCCESS)
+			break;
+
+		status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
 	}
 
 	iopf_complete_group(group->dev, &group->last_fault, status);
 	iopf_free_group(group);
 }
 
+static struct iommu_domain *get_domain_for_iopf(struct device *dev,
+						struct iommu_fault *fault)
+{
+	struct iommu_domain *domain;
+
+	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
+		domain = iommu_get_domain_for_dev_pasid(dev, fault->prm.pasid, 0);
+		if (IS_ERR(domain))
+			domain = NULL;
+	} else {
+		domain = iommu_get_domain_for_dev(dev);
+	}
+
+	if (!domain || !domain->iopf_handler) {
+		dev_warn_ratelimited(dev,
+			"iopf (pasid %d) without domain attached or handler installed\n",
+			 fault->prm.pasid);
+
+		return NULL;
+	}
+
+	return domain;
+}
+
 /**
  * iommu_queue_iopf - IO Page Fault handler
  * @fault: fault event
@@ -112,6 +134,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
 {
 	int ret;
 	struct iopf_group *group;
+	struct iommu_domain *domain;
 	struct iopf_fault *iopf, *next;
 	struct iommu_fault_param *iopf_param;
 	struct dev_iommu *param = dev->iommu;
@@ -143,6 +166,12 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
 		return 0;
 	}
 
+	domain = get_domain_for_iopf(dev, fault);
+	if (!domain) {
+		ret = -EINVAL;
+		goto cleanup_partial;
+	}
+
 	group = kzalloc(sizeof(*group), GFP_KERNEL);
 	if (!group) {
 		/*
@@ -157,8 +186,8 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
 	group->dev = dev;
 	group->last_fault.fault = *fault;
 	INIT_LIST_HEAD(&group->faults);
+	group->domain = domain;
 	list_add(&group->last_fault.list, &group->faults);
-	INIT_WORK(&group->work, iopf_handler);
 
 	/* See if we have partial faults for this group */
 	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
@@ -167,9 +196,13 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
 			list_move(&iopf->list, &group->faults);
 	}
 
-	queue_work(iopf_param->queue->wq, &group->work);
-	return 0;
+	mutex_unlock(&iopf_param->lock);
+	ret = domain->iopf_handler(group);
+	mutex_lock(&iopf_param->lock);
+	if (ret)
+		iopf_free_group(group);
 
+	return ret;
 cleanup_partial:
 	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
 		if (iopf->fault.prm.grpid == fault->prm.grpid) {
@@ -181,6 +214,17 @@ cleanup_partial:
 }
 EXPORT_SYMBOL_GPL(iommu_queue_iopf);
 
+int iommu_sva_handle_iopf(struct iopf_group *group)
+{
+	struct iommu_fault_param *fault_param = group->dev->iommu->fault_param;
+
+	INIT_WORK(&group->work, iopf_handler);
+	if (!queue_work(fault_param->queue->wq, &group->work))
+		return -EBUSY;
+
+	return 0;
+}
+
 /**
  * iopf_queue_flush_dev - Ensure that all queued faults have been processed
  * @dev: the endpoint whose faults need to be flushed.
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -163,11 +163,10 @@ EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
  * I/O page fault handler for SVA
  */
 enum iommu_page_response_code
-iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
+iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
 {
 	vm_fault_t ret;
 	struct vm_area_struct *vma;
-	struct mm_struct *mm = data;
 	unsigned int access_flags = 0;
 	unsigned int fault_flags = FAULT_FLAG_REMOTE;
 	struct iommu_fault_page_request *prm = &fault->prm;
--- a/drivers/iommu/iommu-sva.h
+++ b/drivers/iommu/iommu-sva.h
@@ -22,8 +22,7 @@ int iopf_queue_flush_dev(struct device *dev);
 struct iopf_queue *iopf_queue_alloc(const char *name);
 void iopf_queue_free(struct iopf_queue *queue);
 int iopf_queue_discard_partial(struct iopf_queue *queue);
-enum iommu_page_response_code
-iommu_sva_handle_iopf(struct iommu_fault *fault, void *data);
+int iommu_sva_handle_iopf(struct iopf_group *group);
 
 #else /* CONFIG_IOMMU_SVA */
 static inline int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
@@ -62,8 +61,7 @@ static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
 	return -ENODEV;
 }
 
-static inline enum iommu_page_response_code
-iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
+static inline int iommu_sva_handle_iopf(struct iopf_group *group)
 {
 	return IOMMU_PAGE_RESP_INVALID;
 }
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -130,6 +130,7 @@ struct iopf_group {
 	struct list_head faults;
 	struct work_struct work;
 	struct device *dev;
+	struct iommu_domain *domain;
 };
 
 /**
@@ -209,8 +210,7 @@ struct iommu_domain {
 	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
 	struct iommu_domain_geometry geometry;
 	struct iommu_dma_cookie *iova_cookie;
-	enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault,
-						      void *data);
+	int (*iopf_handler)(struct iopf_group *group);
 	void *fault_data;
 	union {
 		struct {
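For orientation, the end-to-end SVA fault path after this patch can be
sketched as follows. This is a sketch only; it assumes the SVA domain
installs iommu_sva_handle_iopf() as its iopf_handler (that assignment is
not part of this diff).

/*
 * Sketch of the SVA iopf flow after this patch (assumption: the SVA
 * domain's iopf_handler is iommu_sva_handle_iopf()):
 *
 *   iommu_queue_iopf(fault, dev)
 *     -> get_domain_for_iopf(dev, fault)      (PASID or device domain)
 *     -> allocate the iopf_group, set group->domain
 *     -> domain->iopf_handler(group)          (iommu_sva_handle_iopf)
 *          -> INIT_WORK() and queue_work() on the iopf queue
 *
 *   ...later, on the workqueue...
 *
 *   iopf_handler(work)
 *     -> iommu_sva_handle_mm(&iopf->fault, group->domain->mm), per fault
 *     -> iopf_complete_group(group->dev, &group->last_fault, status)
 *     -> iopf_free_group(group)
 */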