arch/x86/kernel/pci-dma.c: fix dma_generic_alloc_coherent() when CONFIG_DMA_CMA is enabled
dma_generic_alloc_coherent() first attempts to allocate with dma_alloc_from_contiguous() if CONFIG_DMA_CMA is enabled, but the memory region it returns may not fit within the device's DMA mask. This change makes it fall back to the usual alloc_pages_node() allocation in that case.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Don Dutile <ddutile@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5ea3b1b2f8
commit 38f7ea5a08
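The problem the commit message describes is easiest to see from the caller's side: a driver sets a coherent DMA mask and expects dma_alloc_coherent() to honour it, and on x86 without an IOMMU that request ends up in dma_generic_alloc_coherent(). The fragment below is a hypothetical driver sketch for illustration only; the function and buffer names are made up, while the DMA-mapping calls themselves are standard kernel APIs.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical driver setup: the device can only address 32-bit DMA. */
static int example_alloc_ring(struct pci_dev *pdev, size_t ring_bytes,
			      void **ring, dma_addr_t *ring_dma)
{
	int err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	/*
	 * With CONFIG_DMA_CMA this allocation may be served from the CMA
	 * area.  Before the fix below, a CMA region lying above the device's
	 * DMA mask could be chosen without falling back to the normal page
	 * allocator.
	 */
	*ring = dma_alloc_coherent(&pdev->dev, ring_bytes, ring_dma, GFP_KERNEL);
	if (!*ring)
		return -ENOMEM;

	return 0;
}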
@@ -101,8 +101,13 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 again:
 	page = NULL;
 	/* CMA can be used only in the context which permits sleeping */
-	if (flag & __GFP_WAIT)
+	if (flag & __GFP_WAIT) {
 		page = dma_alloc_from_contiguous(dev, count, get_order(size));
+		if (page && page_to_phys(page) + size > dma_mask) {
+			dma_release_from_contiguous(dev, page, count);
+			page = NULL;
+		}
+	}
 	/* fallback */
 	if (!page)
 		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
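For context, here is a minimal sketch of the allocation path in dma_generic_alloc_coherent() once the hunk above is applied. Only the code between "again:" and the fallback is taken from the hunk; the setup of dma_mask and count, and the post-allocation address check and retry, are paraphrased assumptions about the surrounding pci-dma.c and are not part of this commit.

/*
 * Sketch of the post-patch allocation flow.  Lines outside the patched hunk
 * are assumptions about the surrounding function, not quoted from the diff.
 */
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, flag); /* assumed setup */
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

again:
	page = NULL;
	/* CMA can be used only in the context which permits sleeping */
	if (flag & __GFP_WAIT) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
		/*
		 * New with this patch: a CMA region ending above the device's
		 * DMA mask is unusable, so hand it back and let the normal
		 * page allocator try instead.
		 */
		if (page && page_to_phys(page) + size > dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	/* Assumed unchanged: reject or retry if the pages still miss the mask. */
	if (page_to_phys(page) + size > dma_mask) {
		__free_pages(page, get_order(size));
		if (!(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
		return NULL;
	}

	*dma_addr = page_to_phys(page);
	return page_address(page);
}

The design choice in the hunk is to release the unusable region back to CMA and fall through to alloc_pages_node(), rather than failing outright: the CMA area stays available for other users, and the ordinary page allocator (constrained by GFP_DMA/GFP_DMA32 on the retry path) can still satisfy the device's mask.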