OF: Simplify DMA range calculations

Juggling start, end, and size values for a range is somewhat redundant
and a little hard to follow. Consolidate down to just using inclusive
start and end, which saves us worrying about size overflows for full
64-bit ranges (note that passing a potentially-overflowed value through
to arch_setup_dma_ops() is benign for all current implementations, and
this is working towards removing that anyway).
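
As a side note (not part of the patch), a minimal userspace sketch of the overflow the message refers to; all names below are made up for illustration. A range covering the full 64-bit address space cannot be described by a size at all, whereas its inclusive end is perfectly representable:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0;
	uint64_t end = ~0ULL;	/* inclusive upper bound of a full 64-bit range */

	/* A size-based description wraps to 0 here, losing the information. */
	uint64_t size = end - start + 1;

	printf("size = %#llx\n", (unsigned long long)size);	/* prints 0 */
	printf("end  = %#llx\n", (unsigned long long)end);	/* still meaningful */
	return 0;
}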

Acked-by: Rob Herring <robh@kernel.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/3e0a72fe3d79eae660e4284bb32f2cb39868ccd7.1713523152.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author: Robin Murphy <robin.murphy@arm.com>, 2024-04-19 17:54:41 +01:00
Committed by: Joerg Roedel <jroedel@suse.de>
parent 0c3457926e
commit ba503cf41c

@@ -96,7 +96,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 	const struct bus_dma_region *map = NULL;
 	struct device_node *bus_np;
 	u64 dma_start = 0;
-	u64 mask, end, size = 0;
+	u64 mask, end = 0;
 	bool coherent;
 	int iommu_ret;
 	int ret;
@@ -118,17 +118,15 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 			return ret == -ENODEV ? 0 : ret;
 	} else {
 		const struct bus_dma_region *r = map;
-		u64 dma_end = 0;
 
 		/* Determine the overall bounds of all DMA regions */
 		for (dma_start = ~0; r->size; r++) {
 			/* Take lower and upper limits */
 			if (r->dma_start < dma_start)
 				dma_start = r->dma_start;
-			if (r->dma_start + r->size > dma_end)
-				dma_end = r->dma_start + r->size;
+			if (r->dma_start + r->size > end)
+				end = r->dma_start + r->size;
 		}
-		size = dma_end - dma_start;
 	}
 
 	/*
@@ -142,16 +140,15 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 		dev->dma_mask = &dev->coherent_dma_mask;
 	}
 
-	if (!size && dev->coherent_dma_mask)
-		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
-	else if (!size)
-		size = 1ULL << 32;
+	if (!end && dev->coherent_dma_mask)
+		end = dev->coherent_dma_mask;
+	else if (!end)
+		end = (1ULL << 32) - 1;
 
 	/*
 	 * Limit coherent and dma mask based on size and default mask
 	 * set by the driver.
 	 */
-	end = dma_start + size - 1;
 	mask = DMA_BIT_MASK(ilog2(end) + 1);
 	dev->coherent_dma_mask &= mask;
 	*dev->dma_mask &= mask;
@@ -185,7 +182,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 	} else
 		dev_dbg(dev, "device is behind an iommu\n");
 
-	arch_setup_dma_ops(dev, dma_start, size, coherent);
+	arch_setup_dma_ops(dev, dma_start, end - dma_start + 1, coherent);
 
 	if (iommu_ret)
 		of_dma_set_restricted_buffer(dev, np);
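
For anyone following how the inclusive end feeds the mask logic above, here is a rough userspace approximation; the helpers below merely mimic the kernel's ilog2() and DMA_BIT_MASK() and are assumptions for the example, not the kernel implementation:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's ilog2() and DMA_BIT_MASK() helpers. */
static unsigned int ilog2_u64(uint64_t v)
{
	return 63 - (unsigned int)__builtin_clzll(v);
}

static uint64_t dma_bit_mask(unsigned int bits)
{
	return bits >= 64 ? ~0ULL : (1ULL << bits) - 1;
}

int main(void)
{
	uint64_t end = (1ULL << 32) - 1;	/* default inclusive 32-bit limit */
	uint64_t coherent_dma_mask = ~0ULL;	/* driver default: full 64 bits */

	/* Clamp the driver's mask to what the bus range can actually address. */
	uint64_t mask = dma_bit_mask(ilog2_u64(end) + 1);
	coherent_dma_mask &= mask;

	printf("mask              = %#llx\n", (unsigned long long)mask);
	printf("coherent_dma_mask = %#llx\n", (unsigned long long)coherent_dma_mask);
	return 0;
}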