forked from Minki/linux
Merge branch 'pci/iova-dma-ranges'
- Add a list of legal DMA address ranges to the PCI host bridge (Srinath Mannam)
- Reserve inaccessible DMA ranges so the IOMMU doesn't allocate them (Srinath Mannam)
- Parse the iProc DT "dma-ranges" property to learn which addresses PCI devices can reach via DMA (Srinath Mannam)

* pci/iova-dma-ranges:
  PCI: iproc: Add sorted dma ranges resource entries to host bridge
  iommu/dma: Reserve IOVA for PCIe inaccessible DMA address
  PCI: Add dma_ranges window list

# Conflicts:
#   drivers/pci/probe.c
This commit is contained in:
commit
f2e9468316
@ -206,12 +206,13 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void iova_reserve_pci_windows(struct pci_dev *dev,
|
||||
static int iova_reserve_pci_windows(struct pci_dev *dev,
|
||||
struct iova_domain *iovad)
|
||||
{
|
||||
struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
|
||||
struct resource_entry *window;
|
||||
unsigned long lo, hi;
|
||||
phys_addr_t start = 0, end;
|
||||
|
||||
resource_list_for_each_entry(window, &bridge->windows) {
|
||||
if (resource_type(window->res) != IORESOURCE_MEM)
|
||||
@ -221,6 +222,31 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
|
||||
hi = iova_pfn(iovad, window->res->end - window->offset);
|
||||
reserve_iova(iovad, lo, hi);
|
||||
}
|
||||
|
||||
/* Get reserved DMA windows from host bridge */
|
||||
resource_list_for_each_entry(window, &bridge->dma_ranges) {
|
||||
end = window->res->start - window->offset;
|
||||
resv_iova:
|
||||
if (end > start) {
|
||||
lo = iova_pfn(iovad, start);
|
||||
hi = iova_pfn(iovad, end);
|
||||
reserve_iova(iovad, lo, hi);
|
||||
} else {
|
||||
/* dma_ranges list should be sorted */
|
||||
dev_err(&dev->dev, "Failed to reserve IOVA\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
start = window->res->end - window->offset + 1;
|
||||
/* If window is last entry */
|
||||
if (window->node.next == &bridge->dma_ranges &&
|
||||
end != ~(dma_addr_t)0) {
|
||||
end = ~(dma_addr_t)0;
|
||||
goto resv_iova;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iova_reserve_iommu_regions(struct device *dev,
|
||||
@ -232,8 +258,11 @@ static int iova_reserve_iommu_regions(struct device *dev,
|
||||
LIST_HEAD(resv_regions);
|
||||
int ret = 0;
|
||||
|
||||
if (dev_is_pci(dev))
|
||||
iova_reserve_pci_windows(to_pci_dev(dev), iovad);
|
||||
if (dev_is_pci(dev)) {
|
||||
ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
iommu_get_resv_regions(dev, &resv_regions);
|
||||
list_for_each_entry(region, &resv_regions, list) {
|
||||
|
@ -1182,11 +1182,43 @@ err_ib:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int iproc_pcie_add_dma_range(struct device *dev,
|
||||
struct list_head *resources,
|
||||
struct of_pci_range *range)
|
||||
{
|
||||
struct resource *res;
|
||||
struct resource_entry *entry, *tmp;
|
||||
struct list_head *head = resources;
|
||||
|
||||
res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
|
||||
if (!res)
|
||||
return -ENOMEM;
|
||||
|
||||
resource_list_for_each_entry(tmp, resources) {
|
||||
if (tmp->res->start < range->cpu_addr)
|
||||
head = &tmp->node;
|
||||
}
|
||||
|
||||
res->start = range->cpu_addr;
|
||||
res->end = res->start + range->size - 1;
|
||||
|
||||
entry = resource_list_create_entry(res, 0);
|
||||
if (!entry)
|
||||
return -ENOMEM;
|
||||
|
||||
entry->offset = res->start - range->cpu_addr;
|
||||
resource_list_add(entry, head);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
|
||||
{
|
||||
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
|
||||
struct of_pci_range range;
|
||||
struct of_pci_range_parser parser;
|
||||
int ret;
|
||||
LIST_HEAD(resources);
|
||||
|
||||
/* Get the dma-ranges from DT */
|
||||
ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node);
|
||||
@ -1194,13 +1226,23 @@ static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
|
||||
return ret;
|
||||
|
||||
for_each_of_pci_range(&parser, &range) {
|
||||
ret = iproc_pcie_add_dma_range(pcie->dev,
|
||||
&resources,
|
||||
&range);
|
||||
if (ret)
|
||||
goto out;
|
||||
/* Each range entry corresponds to an inbound mapping region */
|
||||
ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_splice_init(&resources, &host->dma_ranges);
|
||||
|
||||
return 0;
|
||||
out:
|
||||
pci_free_resource_list(&resources);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
|
||||
|
@ -589,6 +589,7 @@ static void pci_release_host_bridge_dev(struct device *dev)
|
||||
static void pci_init_host_bridge(struct pci_host_bridge *bridge)
|
||||
{
|
||||
INIT_LIST_HEAD(&bridge->windows);
|
||||
INIT_LIST_HEAD(&bridge->dma_ranges);
|
||||
|
||||
/*
|
||||
* We assume we can manage these PCIe features. Some systems may
|
||||
@ -637,6 +638,7 @@ EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
|
||||
void pci_free_host_bridge(struct pci_host_bridge *bridge)
|
||||
{
|
||||
pci_free_resource_list(&bridge->windows);
|
||||
pci_free_resource_list(&bridge->dma_ranges);
|
||||
|
||||
kfree(bridge);
|
||||
}
|
||||
|
@ -492,6 +492,7 @@ struct pci_host_bridge {
|
||||
void *sysdata;
|
||||
int busnr;
|
||||
struct list_head windows; /* resource_entry */
|
||||
struct list_head dma_ranges; /* dma ranges resource list */
|
||||
u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
|
||||
int (*map_irq)(const struct pci_dev *, u8, u8);
|
||||
void (*release_fn)(struct pci_host_bridge *);
|
||||
|
Loading…
Reference in New Issue
Block a user