mirror of https://github.com/torvalds/linux.git
iommu/vt-d: Add page request draining support
When a PASID is stopped or terminated, there can be pending PRQs
(requests that haven't received responses) in the remapping hardware.
This adds the interface to drain page requests and calls it when a
PASID is terminated.

Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20200516062101.29541-16-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 66ac4db36f
parent 37e91bd4b3
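For orientation before the diff: both unbind paths below end up following the same ordering, sketched here with the names used in the patch (an illustrative fragment, not a complete function). The PASID table entry is torn down first so the hardware stops taking new page requests for that PASID; only then are in-flight requests drained and the remaining per-device state flushed and freed.

	/* teardown order on the unbind paths (see hunks below) */
	intel_pasid_tear_down_entry(iommu, dev, svm->pasid, false);

	/* PASID entry is clear: nothing new can be queued for it.
	 * Drain requests already in flight so a quick reuse of the
	 * PASID cannot be confused with its previous life.
	 */
	intel_svm_drain_prq(dev, svm->pasid);

	intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
	kfree_rcu(sdev, rcu);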
drivers/iommu/intel-svm.c
@@ -23,6 +23,7 @@
 #include "intel-pasid.h"
 
 static irqreturn_t prq_event_thread(int irq, void *d);
+static void intel_svm_drain_prq(struct device *dev, int pasid);
 
 #define PRQ_ORDER 0
 
@@ -66,6 +67,8 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
 	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
 	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);
 
+	init_completion(&iommu->prq_complete);
+
 	return 0;
 }
 
@@ -399,12 +402,8 @@ int intel_svm_unbind_gpasid(struct device *dev, int pasid)
 		list_del_rcu(&sdev->list);
 		intel_pasid_tear_down_entry(iommu, dev,
 					    svm->pasid, false);
+		intel_svm_drain_prq(dev, svm->pasid);
 		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
-		/* TODO: Drain in flight PRQ for the PASID since it
-		 * may get reused soon, we don't want to
-		 * confuse with its previous life.
-		 * intel_svm_drain_prq(dev, pasid);
-		 */
 		kfree_rcu(sdev, rcu);
 
 		if (list_empty(&svm->devs)) {
@@ -643,6 +642,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 			 * hard to be as defensive as we might like. */
 			intel_pasid_tear_down_entry(iommu, dev,
 						    svm->pasid, false);
+			intel_svm_drain_prq(dev, svm->pasid);
 			intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
 			kfree_rcu(sdev, rcu);
 
@@ -721,6 +721,93 @@ static bool is_canonical_address(u64 addr)
 	return (((saddr << shift) >> shift) == saddr);
 }
 
+/**
+ * intel_svm_drain_prq - Drain page requests and responses for a pasid
+ * @dev: target device
+ * @pasid: pasid for draining
+ *
+ * Drain all pending page requests and responses related to @pasid in both
+ * software and hardware. This is supposed to be called after the device
+ * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
+ * and DevTLB have been invalidated.
+ *
+ * It waits until all pending page requests for @pasid in the page fault
+ * queue are completed by the prq handling thread. Then follow the steps
+ * described in VT-d spec CH7.10 to drain all page requests and page
+ * responses pending in the hardware.
+ */
+static void intel_svm_drain_prq(struct device *dev, int pasid)
+{
+	struct device_domain_info *info;
+	struct dmar_domain *domain;
+	struct intel_iommu *iommu;
+	struct qi_desc desc[3];
+	struct pci_dev *pdev;
+	int head, tail;
+	u16 sid, did;
+	int qdep;
+
+	info = get_domain_info(dev);
+	if (WARN_ON(!info || !dev_is_pci(dev)))
+		return;
+
+	if (!info->pri_enabled)
+		return;
+
+	iommu = info->iommu;
+	domain = info->domain;
+	pdev = to_pci_dev(dev);
+	sid = PCI_DEVID(info->bus, info->devfn);
+	did = domain->iommu_did[iommu->seq_id];
+	qdep = pci_ats_queue_depth(pdev);
+
+	/*
+	 * Check and wait until all pending page requests in the queue are
+	 * handled by the prq handling thread.
+	 */
+prq_retry:
+	reinit_completion(&iommu->prq_complete);
+	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
+	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
+	while (head != tail) {
+		struct page_req_dsc *req;
+
+		req = &iommu->prq[head / sizeof(*req)];
+		if (!req->pasid_present || req->pasid != pasid) {
+			head = (head + sizeof(*req)) & PRQ_RING_MASK;
+			continue;
+		}
+
+		wait_for_completion(&iommu->prq_complete);
+		goto prq_retry;
+	}
+
+	/*
+	 * Perform steps described in VT-d spec CH7.10 to drain page
+	 * requests and responses in hardware.
+	 */
+	memset(desc, 0, sizeof(desc));
+	desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
+			QI_IWD_FENCE |
+			QI_IWD_TYPE;
+	desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
+			QI_EIOTLB_DID(did) |
+			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+			QI_EIOTLB_TYPE;
+	desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
+			QI_DEV_EIOTLB_SID(sid) |
+			QI_DEV_EIOTLB_QDEP(qdep) |
+			QI_DEIOTLB_TYPE |
+			QI_DEV_IOTLB_PFSID(info->pfsid);
+qi_retry:
+	reinit_completion(&iommu->prq_complete);
+	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
+	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
+		wait_for_completion(&iommu->prq_complete);
+		goto qi_retry;
+	}
+}
+
 static irqreturn_t prq_event_thread(int irq, void *d)
 {
 	struct intel_iommu *iommu = d;
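The software half of the drain is the head/tail walk added above: DMAR_PQH_REG and DMAR_PQT_REG hold byte offsets into the page request ring, so turning an offset into a descriptor index is plain division, and advancing wraps via the ring mask. Below is a standalone sketch of that arithmetic, assuming for illustration a 4 KiB ring (PRQ_ORDER 0) of 32-byte descriptors; the constants are local stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define PRQ_SIZE  0x1000u                /* assumed: 4 KiB ring (PRQ_ORDER 0) */
#define DESC_SIZE 32u                    /* assumed: 256-bit page request descriptor */
#define RING_MASK (PRQ_SIZE - DESC_SIZE) /* wraps while keeping 32-byte alignment */

int main(void)
{
	uint32_t head = 0xfc0, tail = 0x040;	/* example register snapshots; head wraps */

	while (head != tail) {
		uint32_t idx = head / DESC_SIZE;	/* iommu->prq[idx] in the driver */
		printf("pending descriptor %u at offset %#x\n", idx, head);
		head = (head + DESC_SIZE) & RING_MASK;	/* advance one descriptor, wrap */
	}
	return 0;
}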
@@ -856,6 +943,16 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 
 	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);
 
+	/*
+	 * Clear the page request overflow bit and wake up all threads that
+	 * are waiting for the completion of this handling.
+	 */
+	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO)
+		writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
+
+	if (!completion_done(&iommu->prq_complete))
+		complete(&iommu->prq_complete);
+
 	return IRQ_RETVAL(handled);
 }
 
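The change above is the signalling half of a handshake on iommu->prq_complete: the drain side re-arms the completion before each re-check of the queue and sleeps, and the interrupt thread signals once per handled batch after clearing any overflow. Here is a minimal kernel-style sketch of the same pattern with generic names; queue_has_work() is a hypothetical stand-in for the head/tail comparison.

#include <linux/completion.h>
#include <linux/types.h>

static bool queue_has_work(void);	/* hypothetical: e.g. ring head != tail */

static DECLARE_COMPLETION(pass_done);	/* plays the role of iommu->prq_complete */

/* Drain side: sleep until a re-check of the queue finds nothing left. */
static void wait_for_quiesce(void)
{
	for (;;) {
		reinit_completion(&pass_done);	/* re-arm before re-checking */
		if (!queue_has_work())
			return;
		wait_for_completion(&pass_done);	/* handler finished a pass */
	}
}

/* Interrupt-thread side: called after each batch of requests is handled. */
static void signal_pass_done(void)
{
	if (!completion_done(&pass_done))
		complete(&pass_done);
}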
include/linux/intel-iommu.h
@@ -292,6 +292,8 @@
 
 /* PRS_REG */
 #define DMA_PRS_PPR	((u32)1)
+#define DMA_PRS_PRO	((u32)2)
+
 #define DMA_VCS_PAS	((u64)1)
 
 #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)	\
@@ -333,6 +335,7 @@ enum {
 
 #define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
 #define QI_IWD_STATUS_WRITE	(((u64)1) << 5)
+#define QI_IWD_FENCE		(((u64)1) << 6)
 #define QI_IWD_PRQ_DRAIN	(((u64)1) << 7)
 
 #define QI_IOTLB_DID(did)	(((u64)did) << 16)
@@ -582,6 +585,7 @@ struct intel_iommu {
 #ifdef CONFIG_INTEL_IOMMU_SVM
 	struct page_req_dsc *prq;
 	unsigned char prq_name[16];	/* Name for PRQ interrupt */
+	struct completion prq_complete;
 	struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
 #endif
 	struct q_inval *qi;		/* Queued invalidation info */