dma-mapping fixes for Linux 6.9

This has a set of swiotlb alignment fixes for some very long-standing
bugs from Will. We've been discussing them for a while and they should
be solid now.

-----BEGIN PGP SIGNATURE-----

iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAmX/bmILHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYOuKQ//cUR3EywszAc04x8dIYsfegFGdQxUeJD0+1elAPss
ELiqrlg5A/Yn4uHKpXjWbvJ+v1Ywh3o8+vlgUiG4aFeg4xEd+FsJqm2SDa3jhdMP
2hV8pwB92kpkKCxyCAqx8O/4o4fY++KCFsOtnammEudFjurJaCrRTlauOn6D1t/i
JsBYCFtjFIhIPHQe7jmZ6dNiLEfiIJ+q8ImW+UxuB+gOGgU8C4VVW3tHuo3KeU7n
yVOcz4yJrQ4xYzG3RKtaU0FE0ybA860xwiA5oPvqpI9A2ISGovv7ik0QCUlHXhff
z+iL8Lj/KsOucq5pBDhbRYeN2n4VVogEwb/hut6mgyqj1ESjqeZaLioVHqOTDbmB
+vNTVBt6OGTOq1YkNKttK9vBBXs5RdZSBalzBG/QO1ewmrNVVZ7z8fWXVRDipoIl
sAIXmI8xAy5TNL6UbJ+RDfYeLlTzHjXGKQGB49gumOA8s4w5P5v9diYegX6GcVZV
PKkYLOvprwcyi8Xxx2mNxFDxh+LWqzMYqzwsN7AoRTW4TRc7Tel0G6Axs+V/cL/Y
23IHfFfT2HqDUM5PuBfUcgCrtw1hinuD80xqXVcvaU+AYoQhrGHJFLHkj6lTwV2b
hmuul170froI2A/vm8yGGqcn2Me55AexlpMab+UWL+iisGtqFTWi9b9vK/2Vi+Zj
wBg=
=Xaob
-----END PGP SIGNATURE-----

Merge tag 'dma-mapping-6.9-2024-03-24' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:
 "This has a set of swiotlb alignment fixes for some very long-standing
  bugs from Will. We've been discussing them for a while and they
  should be solid now"

* tag 'dma-mapping-6.9-2024-03-24' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: Reinstate page-alignment for mappings >= PAGE_SIZE
  iommu/dma: Force swiotlb_max_mapping_size on an untrusted device
  swiotlb: Fix alignment checks when both allocation and DMA masks are present
  swiotlb: Honour dma_alloc_coherent() alignment in swiotlb_alloc()
  swiotlb: Enforce page alignment in swiotlb_alloc()
  swiotlb: Fix double-allocation of slots due to broken alignment handling
This commit is contained in: commit 864ad046c1
drivers/iommu/dma-iommu.c:

@@ -1711,6 +1711,14 @@ static size_t iommu_dma_opt_mapping_size(void)
 	return iova_rcache_range();
 }
 
+static size_t iommu_dma_max_mapping_size(struct device *dev)
+{
+	if (dev_is_untrusted(dev))
+		return swiotlb_max_mapping_size(dev);
+
+	return SIZE_MAX;
+}
+
 static const struct dma_map_ops iommu_dma_ops = {
 	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED,
 	.alloc			= iommu_dma_alloc,
@@ -1733,6 +1741,7 @@ static const struct dma_map_ops iommu_dma_ops = {
 	.unmap_resource		= iommu_dma_unmap_resource,
 	.get_merge_boundary	= iommu_dma_get_merge_boundary,
 	.opt_mapping_size	= iommu_dma_opt_mapping_size,
+	.max_mapping_size	= iommu_dma_max_mapping_size,
 };
 
 /*
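For context on what the new callback changes for consumers: dma_max_mapping_size() dispatches to the .max_mapping_size op, so an untrusted device behind an IOMMU now reports the swiotlb bounce-buffer limit instead of SIZE_MAX. Below is a minimal sketch, not part of this commit, of a driver consuming that value; the function name my_driver_max_segment and the 1 MiB cap are hypothetical.

/*
 * Sketch only: clamp a driver's per-segment transfer size using
 * dma_max_mapping_size(), which now reflects swiotlb_max_mapping_size()
 * for dev_is_untrusted() devices behind an IOMMU.
 */
#include <linux/dma-mapping.h>
#include <linux/minmax.h>
#include <linux/sizes.h>

static size_t my_driver_max_segment(struct device *dev)
{
	/* Bounded by the swiotlb limit when the device is untrusted. */
	size_t max_seg = dma_max_mapping_size(dev);

	/* Never issue a single DMA segment larger than this. */
	return min_t(size_t, max_seg, SZ_1M);
}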
kernel/dma/swiotlb.c:

@@ -1003,8 +1003,7 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	dma_addr_t tbl_dma_addr =
 		phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
 	unsigned long max_slots = get_max_slots(boundary_mask);
-	unsigned int iotlb_align_mask =
-		dma_get_min_align_mask(dev) | alloc_align_mask;
+	unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
 	unsigned int nslots = nr_slots(alloc_size), stride;
 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
 	unsigned int index, slots_checked, count = 0, i;
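The device's min-align mask, now kept separate from alloc_align_mask, is declared by drivers via dma_set_min_align_mask(). As an illustration only, not taken from this commit and with a hypothetical function name, a device whose hardware derives the buffer offset from the low 12 bits of the DMA address would set:

/*
 * Sketch only: declare a min-align requirement so swiotlb bounce
 * buffers preserve orig_addr's offset within a 4 KiB boundary.
 */
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int my_driver_init_dma(struct device *dev)
{
	/* swiotlb matches the low bits of bounced addresses to this mask. */
	return dma_set_min_align_mask(dev, SZ_4K - 1);
}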
kernel/dma/swiotlb.c (continued):

@@ -1016,18 +1015,29 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	BUG_ON(area_index >= pool->nareas);
 
 	/*
-	 * For allocations of PAGE_SIZE or larger only look for page aligned
-	 * allocations.
+	 * Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be
+	 * page-aligned in the absence of any other alignment requirements.
+	 * 'alloc_align_mask' was later introduced to specify the alignment
+	 * explicitly, however this is passed as zero for streaming mappings
+	 * and so we preserve the old behaviour there in case any drivers are
+	 * relying on it.
 	 */
-	if (alloc_size >= PAGE_SIZE)
-		iotlb_align_mask |= ~PAGE_MASK;
-	iotlb_align_mask &= ~(IO_TLB_SIZE - 1);
+	if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
+		alloc_align_mask = PAGE_SIZE - 1;
+
+	/*
+	 * Ensure that the allocation is at least slot-aligned and update
+	 * 'iotlb_align_mask' to ignore bits that will be preserved when
+	 * offsetting into the allocation.
+	 */
+	alloc_align_mask |= (IO_TLB_SIZE - 1);
+	iotlb_align_mask &= ~alloc_align_mask;
 
 	/*
 	 * For mappings with an alignment requirement don't bother looping to
 	 * unaligned slots once we found an aligned one.
 	 */
-	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
+	stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
 
 	spin_lock_irqsave(&area->lock, flags);
 	if (unlikely(nslots > pool->area_nslabs - area->used))
@@ -1037,11 +1047,14 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	index = area->index;
 
 	for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
-		slot_index = slot_base + index;
+		phys_addr_t tlb_addr;
 
-		if (orig_addr &&
-		    (slot_addr(tbl_dma_addr, slot_index) &
-		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
+		slot_index = slot_base + index;
+		tlb_addr = slot_addr(tbl_dma_addr, slot_index);
+
+		if ((tlb_addr & alloc_align_mask) ||
+		    (orig_addr && (tlb_addr & iotlb_align_mask) !=
+				  (orig_addr & iotlb_align_mask))) {
 			index = wrap_area_index(pool, index + 1);
 			slots_checked++;
 			continue;
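To make the new stride computation concrete: with 2 KiB swiotlb slots (IO_TLB_SHIFT = 11) and 4 KiB pages, a page-aligned request yields a stride of 2 slots, so the search advances in 4 KiB steps. The self-contained userspace sketch below, which is not kernel code but mirrors the kernel's nr_slots()/get_max_slots() helpers under those assumptions, demonstrates this.

/*
 * Userspace sketch (not kernel code) of the stride logic above.
 * Assumes IO_TLB_SHIFT = 11 (2 KiB slots) and 4 KiB pages.
 */
#include <stdio.h>

#define IO_TLB_SHIFT	11
#define IO_TLB_SIZE	(1UL << IO_TLB_SHIFT)

/* kernel: nr_slots(val) == DIV_ROUND_UP(val, IO_TLB_SIZE) */
static unsigned long nr_slots(unsigned long val)
{
	return (val + IO_TLB_SIZE - 1) >> IO_TLB_SHIFT;
}

/* kernel: get_max_slots(mask) == nr_slots(mask + 1) */
static unsigned long get_max_slots(unsigned long mask)
{
	return nr_slots(mask + 1);
}

int main(void)
{
	/* Page-aligned allocation: alloc_align_mask = PAGE_SIZE - 1. */
	unsigned long alloc_align_mask = 4096 - 1;
	/* No extra device min-align requirement. */
	unsigned long iotlb_align_mask = 0;
	unsigned long m = alloc_align_mask > iotlb_align_mask ?
			  alloc_align_mask : iotlb_align_mask;

	/* stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask)) */
	printf("stride = %lu slots\n", get_max_slots(m)); /* 2, i.e. 4 KiB */
	return 0;
}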
kernel/dma/swiotlb.c (continued):

@@ -1677,16 +1690,24 @@ struct page *swiotlb_alloc(struct device *dev, size_t size)
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 	struct io_tlb_pool *pool;
 	phys_addr_t tlb_addr;
+	unsigned int align;
 	int index;
 
 	if (!mem)
 		return NULL;
 
-	index = swiotlb_find_slots(dev, 0, size, 0, &pool);
+	align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
+	index = swiotlb_find_slots(dev, 0, size, align, &pool);
 	if (index == -1)
 		return NULL;
 
 	tlb_addr = slot_addr(pool->start, index);
+	if (unlikely(!PAGE_ALIGNED(tlb_addr))) {
+		dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr 0x%pa.\n",
+			      &tlb_addr);
+		swiotlb_release_slots(dev, tlb_addr);
+		return NULL;
+	}
+
 	return pfn_to_page(PFN_DOWN(tlb_addr));
 }
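The new 'align' mask gives the allocation the natural alignment of its size rounded up to a power-of-two number of pages, matching what dma_alloc_coherent() callers expect. A small userspace sketch, not kernel code and assuming PAGE_SHIFT = 12 with get_order() reimplemented for illustration, shows the resulting masks:

/*
 * Userspace sketch (not kernel code) of the alignment mask passed to
 * swiotlb_find_slots() by swiotlb_alloc(). Assumes PAGE_SHIFT = 12.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* kernel semantics: smallest order with (PAGE_SIZE << order) >= size */
static int get_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 4096, 8192, 65536 };

	for (int i = 0; i < 3; i++) {
		unsigned long align =
			(1UL << (get_order(sizes[i]) + PAGE_SHIFT)) - 1;

		/* 4 KiB -> 0xfff, 8 KiB -> 0x1fff, 64 KiB -> 0xffff */
		printf("size %lu: align mask 0x%lx\n", sizes[i], align);
	}
	return 0;
}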