iommu/amd: Remove unnecessary locking from AMD iommu driver
With or without locking it doesn't make sense for two writers to be
writing to the same IOVA range at the same time. Even with locking we
still have a race condition: whichever writer gets the lock first wins,
so we still can't be sure what the result will be. With locking the
result will be more sane (it will be correct for the last writer), but
still useless, because we can't be sure which writer will get the lock
last. It's a fundamentally broken design to have two writers writing to
the same IOVA range at the same time.

So we can remove the locking and work on the assumption that no two
writers will be writing to the same IOVA range at the same time.

The only exception is when we have to allocate a middle page in the
page tables; the middle page can cover more than just the IOVA range a
writer has been allocated. However, this isn't an issue in the AMD
driver, because it can atomically allocate middle pages using
cmpxchg64().

Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 37ec8eb851 (parent 4f5cafb5cb)
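For context, the lock-free middle-page allocation the message refers to
follows the pattern below. This is a minimal sketch modeled on the
driver's alloc_pte() path: the wrapper function and its name are
illustrative, while IOMMU_PTE_PRESENT(), PM_LEVEL_PDE(),
iommu_virt_to_phys() and cmpxchg64() are helpers the driver actually
uses.

/*
 * Sketch of lock-free installation of a middle page-table page,
 * modeled on the driver's alloc_pte() path.  The function itself is
 * illustrative, not the exact kernel code.
 */
static bool install_pte_page(u64 *pte, int level, gfp_t gfp)
{
	u64 __pte, __npte, *page;

	__pte = *pte;
	if (IOMMU_PTE_PRESENT(__pte))
		return true;	/* another writer already installed it */

	page = (u64 *)get_zeroed_page(gfp);
	if (!page)
		return false;

	__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));

	/*
	 * Publish the new level atomically.  If another writer raced us
	 * and installed its own page first, cmpxchg64() fails and we
	 * simply free our copy; no lock is needed.
	 */
	if (cmpxchg64(pte, __pte, __npte) != __pte)
		free_page((unsigned long)page);

	return true;
}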
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2934,7 +2934,6 @@ static void protection_domain_free(struct protection_domain *domain)
 static int protection_domain_init(struct protection_domain *domain)
 {
 	spin_lock_init(&domain->lock);
-	mutex_init(&domain->api_lock);
 	domain->id = domain_id_alloc();
 	if (!domain->id)
 		return -ENOMEM;
@@ -3121,9 +3120,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	if (iommu_prot & IOMMU_WRITE)
 		prot |= IOMMU_PROT_IW;
 
-	mutex_lock(&domain->api_lock);
 	ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
-	mutex_unlock(&domain->api_lock);
 
 	domain_flush_np_cache(domain, iova, page_size);
 
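The assumption this change codifies is that callers (for example the
DMA API path) allocate disjoint IOVA ranges before mapping, so
concurrent map calls never target the same range. A hypothetical
caller-side illustration: iova_a/iova_b, paddr_a/paddr_b and the
surrounding setup are assumed, not from the patch.

/*
 * Hypothetical illustration: two CPUs mapping *disjoint* IOVA ranges
 * of the same domain concurrently.  The IOVA allocator guarantees the
 * ranges never overlap, which is what makes dropping api_lock safe.
 */
/* CPU 0 */
iommu_map(dom, iova_a, paddr_a, SZ_4K, IOMMU_READ | IOMMU_WRITE);
/* CPU 1, concurrently */
iommu_map(dom, iova_b, paddr_b, SZ_4K, IOMMU_READ | IOMMU_WRITE);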
@@ -3135,16 +3132,11 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 			      struct iommu_iotlb_gather *gather)
 {
 	struct protection_domain *domain = to_pdomain(dom);
-	size_t unmap_size;
 
 	if (domain->mode == PAGE_MODE_NONE)
 		return 0;
 
-	mutex_lock(&domain->api_lock);
-	unmap_size = iommu_unmap_page(domain, iova, page_size);
-	mutex_unlock(&domain->api_lock);
-
-	return unmap_size;
+	return iommu_unmap_page(domain, iova, page_size);
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
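For readability, here is the unmap path as it reads after this patch,
reassembled from the hunk above (the size_t page_size parameter, elided
in the hunk context, is completed from the function's body):

static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
			      size_t page_size,
			      struct iommu_iotlb_gather *gather)
{
	struct protection_domain *domain = to_pdomain(dom);

	/* Domains without a page table have nothing to unmap. */
	if (domain->mode == PAGE_MODE_NONE)
		return 0;

	/* No api_lock: concurrent writers never share an IOVA range. */
	return iommu_unmap_page(domain, iova, page_size);
}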
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -468,7 +468,6 @@ struct protection_domain {
 	struct iommu_domain domain; /* generic domain handle used by
 				       iommu core code */
 	spinlock_t lock;	/* mostly used to lock the page table*/
-	struct mutex api_lock; /* protect page tables in the iommu-api path */
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */