mirror of
https://github.com/torvalds/linux.git
synced 2024-12-11 05:33:09 +00:00
6e4bf58677
The generic swiotlb DMA ops were based on the x86 ones and provide equivalent functionality, so use them. Also fix the sta2x11 case. For that SOC the DMA map ops need an additional physical to DMA address translation. For swiotlb buffers that is done through the phys_to_dma helper, but the sta2x11_dma_ops also added an additional translation on the return value from x86_swiotlb_alloc_coherent, which is only correct if that function returns a direct allocation and not a swiotlb buffer. With the generic swiotlb and DMA-direct code phys_to_dma is not always used and the separate sta2x11_dma_ops can be replaced with a simple bit that marks if the additional physical to DMA address translation is needed. Tested-by: Tom Lendacky <thomas.lendacky@amd.com> Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Thomas Gleixner <tglx@linutronix.de> Cc: David Woodhouse <dwmw2@infradead.org> Cc: Joerg Roedel <joro@8bytes.org> Cc: Jon Mason <jdmason@kudzu.us> Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Muli Ben-Yehuda <mulix@mulix.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20180319103826.12853-5-hch@lst.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
84 lines
1.8 KiB
C
84 lines
1.8 KiB
C
// SPDX-License-Identifier: GPL-2.0

/* Glue code to lib/swiotlb.c */

#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>
#include <linux/dma-direct.h>
#include <linux/mem_encrypt.h>

#include <asm/iommu.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/xen/swiotlb-xen.h>
#include <asm/iommu_table.h>
/* Non-zero when x86 should use software bounce buffering (SWIOTLB). */
int swiotlb __read_mostly;
/*
|
|
* pci_swiotlb_detect_override - set swiotlb to 1 if necessary
|
|
*
|
|
* This returns non-zero if we are forced to use swiotlb (by the boot
|
|
* option).
|
|
*/
|
|
int __init pci_swiotlb_detect_override(void)
|
|
{
|
|
if (swiotlb_force == SWIOTLB_FORCE)
|
|
swiotlb = 1;
|
|
|
|
return swiotlb;
|
|
}
|
|
/*
 * Hook the swiotlb=force override detector into the x86 IOMMU init
 * table, with pci_swiotlb_init/pci_swiotlb_late_init as its setup
 * routines.  NOTE(review): argument semantics (dependency ordering,
 * the FINISH variant's early-exit behavior) are defined in
 * <asm/iommu_table.h> — confirm there.
 */
IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
		  pci_xen_swiotlb_detect,
		  pci_swiotlb_init,
		  pci_swiotlb_late_init);
/*
|
|
* If 4GB or more detected (and iommu=off not set) or if SME is active
|
|
* then set swiotlb to 1 and return 1.
|
|
*/
|
|
int __init pci_swiotlb_detect_4gb(void)
|
|
{
|
|
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
|
|
#ifdef CONFIG_X86_64
|
|
if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
|
|
swiotlb = 1;
|
|
#endif
|
|
|
|
/*
|
|
* If SME is active then swiotlb will be set to 1 so that bounce
|
|
* buffers are allocated and used for devices that do not support
|
|
* the addressing range required for the encryption mask.
|
|
*/
|
|
if (sme_active())
|
|
swiotlb = 1;
|
|
|
|
return swiotlb;
|
|
}
|
|
/*
 * Hook the >4GB / SME detector into the x86 IOMMU init table, with
 * pci_swiotlb_init/pci_swiotlb_late_init as its setup routines.
 * NOTE(review): argument semantics (dependency ordering relative to
 * pci_swiotlb_detect_override) are defined in <asm/iommu_table.h> —
 * confirm there.
 */
IOMMU_INIT(pci_swiotlb_detect_4gb,
	   pci_swiotlb_detect_override,
	   pci_swiotlb_init,
	   pci_swiotlb_late_init);
/*
 * Initialize the SWIOTLB pool and install the swiotlb DMA ops when one
 * of the detectors above enabled bounce buffering; otherwise a no-op.
 */
void __init pci_swiotlb_init(void)
{
	if (!swiotlb)
		return;

	swiotlb_init(0);
	dma_ops = &swiotlb_dma_ops;
}
/*
 * Late init: tear the SWIOTLB pool back down if a hardware IOMMU took
 * over in the meantime, otherwise announce that bounce buffering is in
 * use.
 */
void __init pci_swiotlb_late_init(void)
{
	if (!swiotlb) {
		/* An IOMMU turned us off. */
		swiotlb_exit();
		return;
	}

	printk(KERN_INFO
	       "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
	swiotlb_print_info();
}