Mirror of https://github.com/torvalds/linux.git
Commit d0164adc89
__GFP_WAIT has been used to identify atomic context in callers that hold spinlocks or are in interrupts. They are expected to be high priority and have access to one of two watermarks lower than "min", which can be referred to as the "atomic reserve". __GFP_HIGH users get access to the first lower watermark and can be called the "high priority reserve".

Over time, callers had a requirement to not block when fallback options were available. Some have abused __GFP_WAIT, leading to a situation where an optimistic allocation with a fallback option can access atomic reserves.

This patch uses __GFP_ATOMIC to identify callers that are truly atomic, cannot sleep and have no alternative. High priority users continue to use __GFP_HIGH. __GFP_DIRECT_RECLAIM identifies callers that can sleep and are willing to enter direct reclaim. __GFP_KSWAPD_RECLAIM identifies callers that want to wake kswapd for background reclaim. __GFP_WAIT is redefined as a caller that is willing to enter direct reclaim and wake kswapd for background reclaim.

This patch then converts a number of sites:

o __GFP_ATOMIC is used by callers that are high priority and have memory
  pools for those requests. GFP_ATOMIC uses this flag.

o Callers that have a limited mempool to guarantee forward progress clear
  __GFP_DIRECT_RECLAIM but keep __GFP_KSWAPD_RECLAIM. bio allocations fall
  into this category where kswapd will still be woken but atomic reserves
  are not used as there is a one-entry mempool to guarantee progress.

o Callers that are checking if they are non-blocking should use the helper
  gfpflags_allow_blocking() where possible. This is because checking for
  __GFP_WAIT as was done historically can now trigger false positives. Some
  exceptions like dm-crypt.c exist where the code intent is clearer if
  __GFP_DIRECT_RECLAIM is used instead of the helper due to flag
  manipulations. (A hedged sketch follows the sign-off block below.)

o Callers that built their own GFP flags instead of starting with GFP_KERNEL
  and friends now also need to specify __GFP_KSWAPD_RECLAIM.

The first key hazard to watch out for is callers that removed __GFP_WAIT and were depending on access to atomic reserves for inconspicuous reasons. In some cases it may be appropriate for them to use __GFP_HIGH.

The second key hazard is callers that assembled their own combination of GFP flags instead of starting with something like GFP_KERNEL. They may now wish to specify __GFP_KSWAPD_RECLAIM. It's almost certainly harmless if it's missed in most cases as other activity will wake kswapd.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Vitaly Wool <vitalywool@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
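As a rough illustration of the conversion rules above, here is a minimal, hypothetical sketch (not part of this file): example_alloc() is invented for this note, while gfpflags_allow_blocking(), __GFP_DIRECT_RECLAIM and __GFP_KSWAPD_RECLAIM are the helper and flags introduced by this series, and kmalloc() is the ordinary slab API.

#include <linux/gfp.h>
#include <linux/slab.h>

/*
 * Hypothetical caller showing the post-patch conventions.  Do not test
 * gfp & __GFP_WAIT to ask "may I sleep?"; __GFP_WAIT now also implies
 * waking kswapd, so use gfpflags_allow_blocking() instead.
 */
static void *example_alloc(size_t len, gfp_t gfp)
{
        if (gfpflags_allow_blocking(gfp))
                /* __GFP_DIRECT_RECLAIM is set: sleeping is allowed. */
                return kmalloc(len, gfp);

        /*
         * Non-blocking path: a hand-built flag mask should still set
         * __GFP_KSWAPD_RECLAIM so background reclaim is kicked, but it
         * must not add __GFP_ATOMIC or __GFP_HIGH unless the caller really
         * needs the reserves (the bio/mempool pattern described above).
         */
        return kmalloc(len, gfp | __GFP_KSWAPD_RECLAIM);
}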
221 lines · 5.9 KiB · C
#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

/*
 * Allocate pages for the swiotlb bounce buffer: kswapd may be woken
 * (__GFP_KSWAPD_RECLAIM) but the allocation neither enters direct reclaim
 * nor dips into the atomic reserves.  GFP_DMA is requested only when there
 * is RAM below 4GB, so the pages stay addressable by 32-bit devices.
 */
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
        struct memblock_region *reg;
        gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;

        for_each_memblock(memory, reg) {
                if (reg->base < (phys_addr_t)0xffffffff) {
                        flags |= __GFP_DMA;
                        break;
                }
        }
        return __get_free_pages(flags, order);
}

enum dma_cache_op {
        DMA_UNMAP,
        DMA_MAP,
};
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */

/*
 * Walk the buffer in Xen-page-sized chunks and ask the hypervisor to clean
 * or invalidate the cache for each chunk via GNTTABOP_cache_flush.
 */
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
        size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
        struct gnttab_cache_flush cflush;
        unsigned long xen_pfn;
        size_t left = size;

        xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
        offset %= XEN_PAGE_SIZE;

        do {
                size_t len = left;

                /* buffers in highmem or foreign pages cannot cross page
                 * boundaries */
                if (len + offset > XEN_PAGE_SIZE)
                        len = XEN_PAGE_SIZE - offset;

                cflush.op = 0;
                cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
                cflush.offset = offset;
                cflush.length = len;

                if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
                        cflush.op = GNTTAB_CACHE_INVAL;
                if (op == DMA_MAP) {
                        if (dir == DMA_FROM_DEVICE)
                                cflush.op = GNTTAB_CACHE_INVAL;
                        else
                                cflush.op = GNTTAB_CACHE_CLEAN;
                }
                if (cflush.op)
                        HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

                offset = 0;
                xen_pfn++;
                left -= len;
        } while (left);
}

static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

void __xen_dma_map_page(struct device *hwdev, struct page *page,
                dma_addr_t dev_addr, unsigned long offset, size_t size,
                enum dma_data_direction dir, struct dma_attrs *attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                return;

        __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                return;

        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

bool xen_arch_need_swiotlb(struct device *dev,
                           phys_addr_t phys,
                           dma_addr_t dev_addr)
{
        unsigned int xen_pfn = XEN_PFN_DOWN(phys);
        unsigned int bfn = XEN_PFN_DOWN(dev_addr);

        /*
         * The swiotlb buffer should be used if
         *      - Xen doesn't have the cache flush hypercall
         *      - The Linux page refers to foreign memory
         *      - The device doesn't support coherent DMA requests
         *
         * The Linux page may span multiple Xen pages, although it's not
         * possible to have a mix of local and foreign Xen pages.
         * Furthermore, range_straddles_page_boundary is already checking
         * if the buffer is physically contiguous in the host RAM.
         *
         * Therefore we only need to check the first Xen page to know if we
         * require a bounce buffer because the device doesn't support coherent
         * memory and we are not able to flush the cache.
         */
        return (!hypercall_cflush && (xen_pfn != bfn) &&
                !is_device_dma_coherent(dev));
}

int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                 unsigned int address_bits,
                                 dma_addr_t *dma_handle)
{
        if (!xen_initial_domain())
                return -EINVAL;

        /* we assume that dom0 is mapped 1:1 for now */
        *dma_handle = pstart;
        return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
        return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);

static struct dma_map_ops xen_swiotlb_dma_ops = {
        .mapping_error = xen_swiotlb_dma_mapping_error,
        .alloc = xen_swiotlb_alloc_coherent,
        .free = xen_swiotlb_free_coherent,
        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
        .sync_single_for_device = xen_swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
        .map_sg = xen_swiotlb_map_sg_attrs,
        .unmap_sg = xen_swiotlb_unmap_sg_attrs,
        .map_page = xen_swiotlb_map_page,
        .unmap_page = xen_swiotlb_unmap_page,
        .dma_supported = xen_swiotlb_dma_supported,
        .set_dma_mask = xen_swiotlb_set_dma_mask,
};

int __init xen_mm_init(void)
{
        struct gnttab_cache_flush cflush;
        if (!xen_initial_domain())
                return 0;
        xen_swiotlb_init(1, false);
        xen_dma_ops = &xen_swiotlb_dma_ops;

        cflush.op = 0;
        cflush.a.dev_bus_addr = 0;
        cflush.offset = 0;
        cflush.length = 0;
        if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
                hypercall_cflush = true;
        return 0;
}
arch_initcall(xen_mm_init);