forked from Minki/linux
PCI/P2PDMA: dma_map() requests that traverse the host bridge
Any requests that traverse the host bridge will need to be mapped into the IOMMU, so call dma_map_sg() inside pci_p2pdma_map_sg() when appropriate. Similarly, call dma_unmap_sg() inside pci_p2pdma_unmap_sg(). Link: https://lore.kernel.org/r/20190730163545.4915-13-logang@deltatee.com Link: https://lore.kernel.org/r/20190812173048.9186-13-logang@deltatee.com Signed-off-by: Logan Gunthorpe <logang@deltatee.com> Signed-off-by: Bjorn Helgaas <bhelgaas@google.com> Reviewed-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
parent
110203bee0
commit
5d52e1abcd
@@ -807,6 +807,16 @@ void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
|
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
|
||||||
|
|
||||||
|
static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct pci_dev *provider,
|
||||||
|
struct pci_dev *client)
|
||||||
|
{
|
||||||
|
if (!provider->p2pdma)
|
||||||
|
return PCI_P2PDMA_MAP_NOT_SUPPORTED;
|
||||||
|
|
||||||
|
return xa_to_value(xa_load(&provider->p2pdma->map_types,
|
||||||
|
map_types_idx(client)));
|
||||||
|
}
|
||||||
|
|
||||||
static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
|
static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
|
||||||
struct device *dev, struct scatterlist *sg, int nents)
|
struct device *dev, struct scatterlist *sg, int nents)
|
||||||
{
|
{
|
||||||
@ -852,8 +862,22 @@ int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
|
|||||||
{
|
{
|
||||||
struct pci_p2pdma_pagemap *p2p_pgmap =
|
struct pci_p2pdma_pagemap *p2p_pgmap =
|
||||||
to_p2p_pgmap(sg_page(sg)->pgmap);
|
to_p2p_pgmap(sg_page(sg)->pgmap);
|
||||||
|
struct pci_dev *client;
|
||||||
|
|
||||||
return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents);
|
if (WARN_ON_ONCE(!dev_is_pci(dev)))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
client = to_pci_dev(dev);
|
||||||
|
|
||||||
|
switch (pci_p2pdma_map_type(p2p_pgmap->provider, client)) {
|
||||||
|
case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
|
||||||
|
return dma_map_sg_attrs(dev, sg, nents, dir, attrs);
|
||||||
|
case PCI_P2PDMA_MAP_BUS_ADDR:
|
||||||
|
return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents);
|
||||||
|
default:
|
||||||
|
WARN_ON_ONCE(1);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);
|
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);
|
||||||
|
|
||||||
@@ -869,6 +893,20 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);
|
|||||||
void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
|
void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
|
||||||
int nents, enum dma_data_direction dir, unsigned long attrs)
|
int nents, enum dma_data_direction dir, unsigned long attrs)
|
||||||
{
|
{
|
||||||
|
struct pci_p2pdma_pagemap *p2p_pgmap =
|
||||||
|
to_p2p_pgmap(sg_page(sg)->pgmap);
|
||||||
|
enum pci_p2pdma_map_type map_type;
|
||||||
|
struct pci_dev *client;
|
||||||
|
|
||||||
|
if (WARN_ON_ONCE(!dev_is_pci(dev)))
|
||||||
|
return;
|
||||||
|
|
||||||
|
client = to_pci_dev(dev);
|
||||||
|
|
||||||
|
map_type = pci_p2pdma_map_type(p2p_pgmap->provider, client);
|
||||||
|
|
||||||
|
if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
|
||||||
|
dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);
|
EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user