swiotlb: search the software IO TLB only if the device makes use of it

Skip searching the software IO TLB if a device has never used it, making
sure these devices are not affected by the introduction of multiple IO TLB
memory pools.

An additional memory barrier is required to ensure that the new value of the
flag is visible to other CPUs after mapping a new bounce buffer. For
efficiency, the flag check should be inlined, which in turn means the memory
barrier must move to is_swiotlb_buffer(). Conveniently, it can simply replace
the existing barrier in swiotlb_find_pool(), because all callers use
is_swiotlb_buffer() first to verify that the buffer address belongs to the
software IO TLB.

Signed-off-by: Petr Tesarik <petr.tesarik.ext@huawei.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit 1395706a14 (parent 1aaa736815)
Petr Tesarik, 2023-08-01 08:24:04 +02:00; committed by Christoph Hellwig
3 changed files with 14 additions and 9 deletions
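
In outline, the ordering the message describes is the usual flag-publication
pattern. The sketch below is illustrative only, condensed from the hunks that
follow; the reader-side check of dev->dma_uses_io_tlb reflects the stated
intent of the change, see the hunks below for the exact code:

	/* Writer, swiotlb_find_slots(): publish the pool, then the flag. */
	list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
	dev->dma_uses_io_tlb = true;
	smp_wmb();	/* order both stores before the bounce address escapes */

	/* Reader, is_swiotlb_buffer(): runs after loading a bounce address. */
	smp_rmb();	/* order that load before the flag and RCU-list reads */
	if (dev->dma_uses_io_tlb)
		pool = swiotlb_find_pool(dev, paddr);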

--- a/include/linux/device.h
+++ b/include/linux/device.h

@@ -628,6 +628,7 @@ struct device_physical_location {
  * @dma_io_tlb_mem: Software IO TLB allocator. Not for driver use.
  * @dma_io_tlb_pools: List of transient swiotlb memory pools.
  * @dma_io_tlb_lock: Protects changes to the list of active pools.
+ * @dma_uses_io_tlb: %true if device has used the software IO TLB.
  * @archdata: For arch-specific additions.
  * @of_node: Associated device tree node.
  * @fwnode: Associated device node supplied by platform firmware.
@@ -737,6 +738,7 @@ struct device {
 #ifdef CONFIG_SWIOTLB_DYNAMIC
 	struct list_head dma_io_tlb_pools;
 	spinlock_t dma_io_tlb_lock;
+	bool dma_uses_io_tlb;
 #endif
 	/* arch specific additions */
 	struct dev_archdata archdata;

--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h

@@ -172,8 +172,13 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
 	if (!mem)
 		return false;
 
-	if (IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC))
+	if (IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC)) {
+		/* Pairs with smp_wmb() in swiotlb_find_slots() and
+		 * swiotlb_dyn_alloc(), which modify the RCU lists.
+		 */
+		smp_rmb();
 		return swiotlb_find_pool(dev, paddr);
+	}
 
 	return paddr >= mem->defpool.start && paddr < mem->defpool.end;
 }
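
The reason the barrier can live in is_swiotlb_buffer() alone is the calling
convention: no caller invokes swiotlb_find_pool() on its own. A sketch
modeled on the dma-direct sync path in kernel/dma/direct.h (simplified, not
the verbatim kernel source):

	static inline void dma_direct_sync_single_for_cpu(struct device *dev,
			dma_addr_t addr, size_t size, enum dma_data_direction dir)
	{
		phys_addr_t paddr = dma_to_phys(dev, addr);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, size, dir);

		/* The swiotlb lookup is always gated by is_swiotlb_buffer(),
		 * so the smp_rmb() there also covers the RCU-list walk
		 * inside swiotlb_find_pool().
		 */
		if (is_swiotlb_buffer(dev, paddr))
			swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
	}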

--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c

@@ -730,7 +730,7 @@ static void swiotlb_dyn_alloc(struct work_struct *work)
 	add_mem_pool(mem, pool);
 
-	/* Pairs with smp_rmb() in swiotlb_find_pool(). */
+	/* Pairs with smp_rmb() in is_swiotlb_buffer(). */
 	smp_wmb();
 }
@@ -764,11 +764,6 @@ struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 	struct io_tlb_pool *pool;
 
-	/* Pairs with smp_wmb() in swiotlb_find_slots() and
-	 * swiotlb_dyn_alloc(), which modify the RCU lists.
-	 */
-	smp_rmb();
-
 	rcu_read_lock();
 	list_for_each_entry_rcu(pool, &mem->pools, node) {
 		if (paddr >= pool->start && paddr < pool->end)
@@ -813,6 +808,7 @@ void swiotlb_dev_init(struct device *dev)
 #ifdef CONFIG_SWIOTLB_DYNAMIC
 	INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
 	spin_lock_init(&dev->dma_io_tlb_lock);
+	dev->dma_uses_io_tlb = false;
 #endif
 }
@@ -1157,9 +1153,11 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 	list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
 	spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
 
-	/* Pairs with smp_rmb() in swiotlb_find_pool(). */
-	smp_wmb();
 found:
+	dev->dma_uses_io_tlb = true;
+	/* Pairs with smp_rmb() in is_swiotlb_buffer() */
+	smp_wmb();
+
 	*retpool = pool;
 	return index;
 }
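
For readers who want to experiment with the pairing outside the kernel, here
is a self-contained userspace analogue; the C11 fences stand in for
smp_wmb()/smp_rmb(), and all names are illustrative, not kernel API:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static int pool;			/* stands in for the new memory pool */
	static bool uses_io_tlb;		/* stands in for dev->dma_uses_io_tlb */
	static _Atomic uintptr_t bounce;	/* address handed to the other CPU */

	static void *writer(void *arg)
	{
		pool = 42;			/* set up the pool */
		uses_io_tlb = true;		/* set the flag */
		atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
		atomic_store_explicit(&bounce, 0x1000, memory_order_relaxed);
		return NULL;
	}

	static void *reader(void *arg)
	{
		while (!atomic_load_explicit(&bounce, memory_order_relaxed))
			;			/* spin until an address is published */
		atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
		if (uses_io_tlb)		/* flag is now guaranteed visible... */
			printf("pool = %d\n", pool);	/* ...and so is the pool */
		return NULL;
	}

	int main(void)
	{
		pthread_t r, w;

		pthread_create(&r, NULL, reader, NULL);
		pthread_create(&w, NULL, writer, NULL);
		pthread_join(w, NULL);
		pthread_join(r, NULL);
		return 0;
	}

Build with "gcc -pthread". Without the two fences, the reader may observe the
published address yet still see the flag or the pool contents as stale; with
them, the release/acquire pair gives the same guarantee the patch relies on.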