Commit 1f85b42a69 ("arm64: Revert L1_CACHE_SHIFT back to 6 (64-byte cache line size)")

Commit 9730348075 ("arm64: Increase the max granular size") increased
the cache line size to 128 to match Cavium ThunderX, apparently for some
performance benefit which could not be confirmed. This change, however,
has an impact on network packet allocation in certain
circumstances, requiring slightly more than a 4K page and causing a
significant performance degradation.
This patch reverts L1_CACHE_SHIFT back to 6 (64-byte cache line) while
keeping ARCH_DMA_MINALIGN at 128. The cache_line_size() function was
changed to default to ARCH_DMA_MINALIGN in the absence of a meaningful
CTR_EL0.CWG bit field.
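
As a rough sketch of that default (not the verbatim kernel code; the
cache_type_cwg() helper and the CTR_CWG_SHIFT/CTR_CWG_MASK constants follow
the usual arm64 convention that CWG encodes the writeback granule as a log2
word count):

/*
 * Sketch only: derive the cache line size from CTR_EL0.CWG and fall
 * back to ARCH_DMA_MINALIGN when the field reads as zero.
 */
static inline u32 cache_type_cwg(void)
{
        /* CWG lives in bits [27:24] of CTR_EL0 */
        return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
}

static inline int cache_line_size(void)
{
        u32 cwg = cache_type_cwg();

        /* CWG encodes 4 << CWG bytes; 0 means "not reported" */
        return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
}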
In addition, if a system with ARCH_DMA_MINALIGN < CTR_EL0.CWG is
detected, the kernel will force swiotlb bounce buffering for all
non-coherent devices since DMA cache maintenance on sub-CWG ranges is
not safe, leading to data corruption.
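
A minimal sketch of how that detection could be wired up at boot (the
initcall name is made up for illustration; static_branch_enable(),
cache_line_size() and the swiotlb_noncoherent_bounce key are the pieces the
paragraph above refers to):

/* Illustrative boot-time check, not the exact kernel hunk. */
static int __init arm64_dma_minalign_check(void)
{
        if (ARCH_DMA_MINALIGN < cache_line_size()) {
                pr_warn("ARCH_DMA_MINALIGN (%d) < CTR_EL0.CWG cache line (%d), forcing swiotlb bounce buffering\n",
                        ARCH_DMA_MINALIGN, cache_line_size());
                static_branch_enable(&swiotlb_noncoherent_bounce);
        }
        return 0;
}
arch_initcall(arm64_dma_minalign_check);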
Cc: Tirumalesh Chalamarla <tchalamarla@cavium.com>
Cc: Timur Tabi <timur@codeaurora.org>
Cc: Florian Fainelli <f.fainelli@gmail.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm64/include/asm/dma-direct.h (44 lines, 1.1 KiB, C)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_DMA_DIRECT_H
#define __ASM_DMA_DIRECT_H

#include <linux/jump_label.h>
#include <linux/swiotlb.h>

#include <asm/cache.h>

DECLARE_STATIC_KEY_FALSE(swiotlb_noncoherent_bounce);

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        dma_addr_t dev_addr = (dma_addr_t)paddr;

        return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
        phys_addr_t paddr = (phys_addr_t)dev_addr;

        return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
        if (!dev->dma_mask)
                return false;

        /*
         * Force swiotlb buffer bouncing when ARCH_DMA_MINALIGN < CWG. The
         * swiotlb bounce buffers are aligned to (1 << IO_TLB_SHIFT).
         */
        if (static_branch_unlikely(&swiotlb_noncoherent_bounce) &&
            !is_device_dma_coherent(dev) &&
            !is_swiotlb_buffer(dma_to_phys(dev, addr)))
                return false;

        return addr + size - 1 <= *dev->dma_mask;
}

#endif /* __ASM_DMA_DIRECT_H */
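
For context, a hedged sketch of how a streaming-DMA map path might consume
the helpers above (example_map_page() is hypothetical; swiotlb_map_page()
is the bounce entry point of that kernel era): translate the CPU physical
address with phys_to_dma(), and bounce whenever dma_capable() refuses the
device address, which now also happens for non-coherent devices once the
static key is enabled.

/* Hypothetical map_page-style helper showing the intended use. */
static dma_addr_t example_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        dma_addr_t dev_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

        /* Out of range for the device, or bouncing forced: go through swiotlb. */
        if (!dma_capable(dev, dev_addr, size))
                return swiotlb_map_page(dev, page, offset, size, dir, attrs);

        return dev_addr;
}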