x86: move dma_supported and dma_set_mask to pci-dma_32.c
This is the way x86_64 does it, so this makes them equal. They now have to be extern in the header, and the extern declarations are moved to the common dma-mapping.h header.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 802c1f6648
parent 3cb6a91711
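For orientation, dma_supported() and dma_set_mask() are the helpers a driver calls when negotiating its DMA addressing width, so making them out-of-line extern functions leaves that driver-facing interface unchanged. A minimal, hypothetical usage sketch (not part of this commit; the example_setup_dma() helper is invented for illustration):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper, for illustration only: ask for 32-bit DMA
 * addressing on a PCI device.  dma_set_mask() calls dma_supported()
 * internally and returns -EIO if the mask cannot be honoured. */
static int example_setup_dma(struct pci_dev *pdev)
{
        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
                return -EIO;

        return 0;
}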
@@ -156,6 +156,39 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+int forbid_dac;
+EXPORT_SYMBOL(forbid_dac);
+
+int
+dma_supported(struct device *dev, u64 mask)
+{
+        /*
+         * we fall back to GFP_DMA when the mask isn't all 1s,
+         * so we can't guarantee allocations that must be
+         * within a tighter range than GFP_DMA..
+         */
+        if (mask < 0x00ffffff)
+                return 0;
+
+        /* Work around chipset bugs */
+        if (forbid_dac > 0 && mask > 0xffffffffULL)
+                return 0;
+
+        if (dma_ops->dma_supported)
+                return dma_ops->dma_supported(dev, mask);
+
+        return 1;
+}
+
+int
+dma_set_mask(struct device *dev, u64 mask)
+{
+        if (!dev->dma_mask || !dma_supported(dev, mask))
+                return -EIO;
+
+        *dev->dma_mask = mask;
+
+        return 0;
 }
 
 
 static __devinit void via_no_dac(struct pci_dev *dev)
 {
         if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
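Unlike the inline 32-bit version removed further down, this out-of-line copy also defers to the registered operations table via dma_ops->dma_supported(). A simplified sketch of that delegation pattern, using invented example_* names rather than the kernel's real dma_ops definitions:

#include <linux/types.h>

struct device;

/* Stand-in for the per-platform DMA operations table; the real
 * structure and the global dma_ops pointer are defined elsewhere. */
struct example_dma_ops {
        int (*dma_supported)(struct device *dev, u64 mask);
};

/* A backend limited to 32-bit addressing rejects wider masks. */
static int example_nommu_dma_supported(struct device *dev, u64 mask)
{
        return mask <= 0xffffffffULL;
}

static const struct example_dma_ops example_nommu_ops = {
        .dma_supported = example_nommu_dma_supported,
};

static const struct example_dma_ops *example_ops = &example_nommu_ops;

/* Mirrors the delegation above: consult the backend hook when one is
 * provided, otherwise report the mask as acceptable. */
static int example_dma_supported(struct device *dev, u64 mask)
{
        if (example_ops->dma_supported)
                return example_ops->dma_supported(dev, mask);

        return 1;
}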
@@ -62,6 +62,9 @@ void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle);
 
 
+extern int dma_supported(struct device *hwdev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 mask);
+
 #ifdef CONFIG_X86_32
 # include "dma-mapping_32.h"
 #else
@@ -16,35 +16,6 @@ dma_mapping_error(dma_addr_t dma_addr)
 
-extern int forbid_dac;
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-        /*
-         * we fall back to GFP_DMA when the mask isn't all 1s,
-         * so we can't guarantee allocations that must be
-         * within a tighter range than GFP_DMA..
-         */
-        if(mask < 0x00ffffff)
-                return 0;
-
-        /* Work around chipset bugs */
-        if (forbid_dac > 0 && mask > 0xffffffffULL)
-                return 0;
-
-        return 1;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-        if(!dev->dma_mask || !dma_supported(dev, mask))
-                return -EIO;
-
-        *dev->dma_mask = mask;
-
-        return 0;
-}
 
 static inline int
 dma_get_cache_alignment(void)
 {
@@ -12,8 +12,6 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
         return (dma_addr == bad_dma_address);
 }
 
-extern int dma_supported(struct device *hwdev, u64 mask);
-
 /* same for gart, swiotlb, and nommu */
 static inline int dma_get_cache_alignment(void)
 {
@@ -22,8 +20,6 @@ static inline int dma_get_cache_alignment(void)
 
 #define dma_is_consistent(d, h) 1
 
-extern int dma_set_mask(struct device *dev, u64 mask);
-
 extern struct device fallback_dev;
 extern int panic_on_overflow;
 