powerpc/dma: remove set_dma_offset
There is no good reason for this helper; just open code it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 7610fdf5e0
commit 0617fc0ca4
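The conversion is mechanical: the set_dma_offset() wrapper removed from dma-mapping.h in the first hunk below only checks for a NULL device before writing archdata.dma_offset, and every caller now assigns the field directly. As a rough before/after illustration (the struct definitions here are stubs so the snippet builds standalone, not the real powerpc definitions, and example_dma_dev_setup() is a made-up caller):

/* Sketch only: stub types stand in for the real kernel definitions. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

struct dev_archdata {
	dma_addr_t dma_offset;	/* offset added to DMA bus addresses */
};

struct device {
	struct dev_archdata archdata;
};

/* Before this commit: a trivial wrapper around a single assignment. */
static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
	if (dev)
		dev->archdata.dma_offset = off;
}

/* After this commit: callers open code the assignment instead. */
static void example_dma_dev_setup(struct device *dev, dma_addr_t base)
{
	/* was: set_dma_offset(dev, base); */
	dev->archdata.dma_offset = base;
}

int main(void)
{
	struct device dev = { 0 };

	set_dma_offset(&dev, 0x80000000ull);		/* old style */
	example_dma_dev_setup(&dev, 1ull << 32);	/* new style */
	printf("dma_offset = %#llx\n",
	       (unsigned long long)dev.archdata.dma_offset);
	return 0;
}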
@@ -43,11 +43,5 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return NULL;
 }
 
-static inline void set_dma_offset(struct device *dev, dma_addr_t off)
-{
-	if (dev)
-		dev->archdata.dma_offset = off;
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_DMA_MAPPING_H */
@@ -966,7 +966,7 @@ static void pcibios_setup_device(struct pci_dev *dev)
 
 	/* Hook up default DMA ops */
 	set_dma_ops(&dev->dev, pci_dma_ops);
-	set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
+	dev->dev.archdata.dma_offset = PCI_DRAM_OFFSET;
 
 	/* Additional platform DMA/iommu setup */
 	phb = pci_bus_to_host(dev->bus);
@@ -577,10 +577,10 @@ static void cell_dma_dev_setup(struct device *dev)
 		u64 addr = cell_iommu_get_fixed_address(dev);
 
 		if (addr != OF_BAD_ADDR)
-			set_dma_offset(dev, addr + dma_iommu_fixed_base);
+			dev->archdata.dma_offset = addr + dma_iommu_fixed_base;
 		set_iommu_table_base(dev, cell_get_iommu_table(dev));
 	} else {
-		set_dma_offset(dev, cell_dma_nommu_offset);
+		dev->archdata.dma_offset = cell_dma_nommu_offset;
 	}
 }
 
@@ -1746,7 +1746,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
 
 	pe = &phb->ioda.pe_array[pdn->pe_number];
 	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
-	set_dma_offset(&pdev->dev, pe->tce_bypass_base);
+	pdev->dev.archdata.dma_offset = pe->tce_bypass_base;
 	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
 	/*
 	 * Note: iommu_add_device() will fail here as
@@ -1859,7 +1859,7 @@ static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
 		if (rc)
 			return rc;
 		/* 4GB offset bypasses 32-bit space */
-		set_dma_offset(&pdev->dev, (1ULL << 32));
+		pdev->dev.archdata.dma_offset = (1ULL << 32);
 		return true;
 	}
 
@@ -1872,7 +1872,7 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
-		set_dma_offset(&dev->dev, pe->tce_bypass_base);
+		dev->dev.archdata.dma_offset = pe->tce_bypass_base;
 
 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
 			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
@@ -1202,7 +1202,6 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
 {
 	struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;
 	const __be32 *dma_window = NULL;
-	u64 dma_offset;
 
 	/* only attempt to use a new window if 64-bit DMA is requested */
 	if (dma_mask < DMA_BIT_MASK(64))
@@ -1224,11 +1223,9 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
 	}
 
 	if (pdn && PCI_DN(pdn)) {
-		dma_offset = enable_ddw(pdev, pdn);
-		if (dma_offset != 0) {
-			set_dma_offset(&pdev->dev, dma_offset);
+		pdev->dev.archdata.dma_offset = enable_ddw(pdev, pdn);
+		if (pdev->dev.archdata.dma_offset)
 			return true;
-		}
 	}
 
 	return false;
@@ -386,7 +386,7 @@ static bool dart_device_on_pcie(struct device *dev)
 static void pci_dma_dev_setup_dart(struct pci_dev *dev)
 {
 	if (dart_is_u4 && dart_device_on_pcie(&dev->dev))
-		set_dma_offset(&dev->dev, DART_U4_BYPASS_BASE);
+		dev->dev.archdata.dma_offset = DART_U4_BYPASS_BASE;
 	set_iommu_table_base(&dev->dev, &iommu_table_dart);
 }
 
@@ -139,7 +139,7 @@ static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
 	 */
 	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
 		dev->bus_dma_mask = 0;
-		set_dma_offset(dev, pci64_dma_offset);
+		dev->archdata.dma_offset = pci64_dma_offset;
 	}
 }
 
@@ -43,7 +43,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
 		return false;
 	}
 
-	set_dma_offset(&dev->dev, PAGE_OFFSET);
+	dev->dev.archdata.dma_offset = PAGE_OFFSET;
 
 	/*
 	 * Allocate a context to do cxl things too. If we eventually do real