Mirror of https://github.com/torvalds/linux.git (synced 2024-12-27 13:22:23 +00:00)
26f09e9b3a
Introduce memblock memory allocation APIs which allow supporting the PAE or LPAE extension on 32-bit archs where the physical memory start address can be beyond 4GB. In such cases, the existing bootmem APIs, which operate on 32-bit addresses, won't work; the memblock layer, which operates on 64-bit addresses, is needed. So we add equivalent APIs so that we can replace usage of bootmem with memblock interfaces. Architectures already converted to NO_BOOTMEM use these new memblock interfaces. The architectures which are still not converted to NO_BOOTMEM continue to function as is, because we still maintain the fallback option of a bootmem back-end supporting these new interfaces. So there is no functional change as such. In the long run, once all the architectures move to NO_BOOTMEM, we can get rid of the bootmem layer completely. This is one step towards removing the core-code dependency on bootmem, and it also gives architectures a path to move away from bootmem.

The proposed interface will become active only if both CONFIG_HAVE_MEMBLOCK and CONFIG_NO_BOOTMEM are specified by the arch. In the !CONFIG_NO_BOOTMEM case, the memblock() wrappers fall back to the existing bootmem APIs, so that archs not converted to NO_BOOTMEM continue to work as is. The meaning of MEMBLOCK_ALLOC_ACCESSIBLE and MEMBLOCK_ALLOC_ANYWHERE is kept the same.

[akpm@linux-foundation.org: s/depricated/deprecated/]
Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Paul Walmsley <paul@pwsan.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Tony Lindgren <tony@atomide.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
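To make the fallback scheme concrete, here is a minimal sketch of a boot-time allocation wrapper in the spirit of what the changelog describes. The wrapper name boot_alloc() is a hypothetical placeholder, and the specific memblock/bootmem calls used are illustrative assumptions, not the interfaces actually added by this commit:

#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/io.h>

/* Hypothetical wrapper: allocate boot memory via memblock when the arch
 * selects NO_BOOTMEM, otherwise fall back to the legacy bootmem API. */
static void * __init boot_alloc(phys_addr_t size, phys_addr_t align)
{
#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM)
	/* memblock tracks 64-bit physical addresses, so this path still
	 * works when RAM starts above 4GB (PAE/LPAE). */
	phys_addr_t phys = memblock_alloc(size, align);

	return phys ? phys_to_virt(phys) : NULL;
#else
	/* Arch not yet converted: keep using bootmem, behaviour unchanged. */
	return alloc_bootmem_align(size, align);
#endif
}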
152 lines
4.1 KiB
C
#ifndef __ASM_ARM_DMA_H
#define __ASM_ARM_DMA_H

/*
 * This is the maximum virtual address which can be DMA'd from.
 */
#ifndef CONFIG_ZONE_DMA
#define MAX_DMA_ADDRESS	0xffffffffUL
#else
#define MAX_DMA_ADDRESS	({ \
	extern phys_addr_t arm_dma_zone_size; \
	arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \
		(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
#endif

#ifdef CONFIG_ISA_DMA_API
/*
 * This is used to support drivers written for the x86 ISA DMA API.
 * It should not be re-used except for that purpose.
 */
#include <linux/spinlock.h>
#include <asm/scatterlist.h>

#include <mach/isa-dma.h>

/*
 * The DMA modes reflect the settings for the ISA DMA controller
 */
#define DMA_MODE_MASK	 0xcc

#define DMA_MODE_READ	 0x44
#define DMA_MODE_WRITE	 0x48
#define DMA_MODE_CASCADE 0xc0
#define DMA_AUTOINIT	 0x10

extern raw_spinlock_t  dma_spin_lock;

static inline unsigned long claim_dma_lock(void)
{
	unsigned long flags;
	raw_spin_lock_irqsave(&dma_spin_lock, flags);
	return flags;
}

static inline void release_dma_lock(unsigned long flags)
{
	raw_spin_unlock_irqrestore(&dma_spin_lock, flags);
}

/* Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 */
#define clear_dma_ff(chan)

/* Set only the page register bits of the transfer address.
 *
 * NOTE: This is an architecture specific function, and should
 * be hidden from the drivers.
 */
extern void set_dma_page(unsigned int chan, char pagenr);

/* Request a DMA channel
 *
 * Some architectures may need to allocate an interrupt
 */
extern int  request_dma(unsigned int chan, const char *device_id);

/* Free a DMA channel
 *
 * Some architectures may need to free an interrupt
 */
extern void free_dma(unsigned int chan);

/* Enable DMA for this channel
 *
 * On some architectures, this may have other side effects like
 * enabling an interrupt and setting the DMA registers.
 */
extern void enable_dma(unsigned int chan);

/* Disable DMA for this channel
 *
 * On some architectures, this may have other side effects like
 * disabling an interrupt or whatever.
 */
extern void disable_dma(unsigned int chan);

/* Test whether the specified channel has an active DMA transfer
 */
extern int dma_channel_active(unsigned int chan);

/* Set the DMA scatter gather list for this channel
 *
 * This should not be called if a DMA channel is enabled,
 * especially since some DMA architectures don't update the
 * DMA address immediately, but defer it to the enable_dma().
 */
extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);

/* Set the DMA address for this channel
 *
 * This should not be called if a DMA channel is enabled,
 * especially since some DMA architectures don't update the
 * DMA address immediately, but defer it to the enable_dma().
 */
extern void __set_dma_addr(unsigned int chan, void *addr);
#define set_dma_addr(chan, addr)				\
	__set_dma_addr(chan, (void *)__bus_to_virt(addr))

/* Set the DMA byte count for this channel
 *
 * This should not be called if a DMA channel is enabled,
 * especially since some DMA architectures don't update the
 * DMA count immediately, but defer it to the enable_dma().
 */
extern void set_dma_count(unsigned int chan, unsigned long count);

/* Set the transfer direction for this channel
 *
 * This should not be called if a DMA channel is enabled,
 * especially since some DMA architectures don't update the
 * DMA transfer direction immediately, but defer it to the
 * enable_dma().
 */
extern void set_dma_mode(unsigned int chan, unsigned int mode);

/* Set the transfer speed for this channel
 */
extern void set_dma_speed(unsigned int chan, int cycle_ns);

/* Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * If called before the channel has been used, it may return 1.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 */
extern int  get_dma_residue(unsigned int chan);

#ifndef NO_DMA
#define NO_DMA	255
#endif
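
/*
 * Illustrative sketch (not part of the original header): the typical driver
 * sequence for programming a single transfer with the ISA DMA API declared
 * above, following the usual claim-lock / program / enable pattern.  The
 * function name, channel, bus address and length below are made-up example
 * values, and the surrounding request_dma()/free_dma() calls are omitted.
 */
#if 0	/* example only, never compiled */
static void example_start_dma_read(unsigned int chan, unsigned long bus_addr,
				   unsigned long len)
{
	unsigned long flags;

	/* The controller registers are shared; hold the DMA spinlock
	 * while reprogramming the channel. */
	flags = claim_dma_lock();

	disable_dma(chan);		/* channel must be idle while set up */
	clear_dma_ff(chan);		/* reset the address/count flip-flop */
	set_dma_mode(chan, DMA_MODE_READ);
	set_dma_addr(chan, bus_addr);	/* bus address of the buffer */
	set_dma_count(chan, len);	/* transfer length in bytes */
	enable_dma(chan);		/* kick off the transfer */

	release_dma_lock(flags);
}
#endif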

#endif /* CONFIG_ISA_DMA_API */

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	(0)
#endif

#endif /* __ASM_ARM_DMA_H */