Merge branches 'machtypes', 'core', 'ep93xx', 'ks8695', 'netdev' and 'sa1100' into devel
commit 90bb28b064
@@ -148,7 +148,6 @@ config ARCH_MAY_HAVE_PC_FDC

config ZONE_DMA
	bool
	default y

config GENERIC_ISA_DMA
	bool

@@ -178,6 +177,11 @@ config OPROFILE_MPCORE

config OPROFILE_ARM11_CORE
	bool

config OPROFILE_ARMV7
	def_bool y
	depends on CPU_V7 && !SMP
	bool

endif

config VECTORS_BASE

@@ -245,6 +249,7 @@ config ARCH_CLPS7500
	select TIMER_ACORN
	select ISA
	select NO_IOPORT
	select ARCH_SPARSEMEM_ENABLE
	help
	  Support for the Cirrus Logic PS7500FE system-on-a-chip.

@@ -306,6 +311,7 @@ config ARCH_IOP13XX
	select PLAT_IOP
	select PCI
	select ARCH_SUPPORTS_MSI
	select VMSPLIT_1G
	help
	  Support for Intel's IOP13XX (XScale) family of processors.

@@ -350,6 +356,7 @@ config ARCH_IXP4XX
	select GENERIC_GPIO
	select GENERIC_TIME
	select GENERIC_CLOCKEVENTS
	select ZONE_DMA if PCI
	help
	  Support for Intel's IXP4XX (XScale) family of processors.

@@ -464,6 +471,7 @@ config ARCH_RPC
	select HAVE_PATA_PLATFORM
	select ISA_DMA_API
	select NO_IOPORT
	select ARCH_SPARSEMEM_ENABLE
	help
	  On the Acorn Risc-PC, Linux can support the internal IDE disk and
	  CD-ROM interface, serial and parallel port, and the floppy drive.

@@ -471,9 +479,7 @@ config ARCH_RPC
config ARCH_SA1100
	bool "SA1100-based"
	select ISA
	select ARCH_DISCONTIGMEM_ENABLE
	select ARCH_SPARSEMEM_ENABLE
	select ARCH_SELECT_MEMORY_MODEL
	select ARCH_MTD_XIP
	select GENERIC_GPIO
	select GENERIC_TIME

@@ -497,6 +503,7 @@ config ARCH_SHARK
	bool "Shark"
	select ISA
	select ISA_DMA
	select ZONE_DMA
	select PCI
	help
	  Support for the StrongARM based Digital DNARD machine, also known

@@ -504,6 +511,8 @@ config ARCH_SHARK

config ARCH_LH7A40X
	bool "Sharp LH7A40X"
	select ARCH_DISCONTIGMEM_ENABLE if !LH7A40X_CONTIGMEM
	select ARCH_SPARSEMEM_ENABLE if !LH7A40X_CONTIGMEM
	help
	  Say Y here for systems based on one of the Sharp LH7A40X
	  System on a Chip processors.  These CPUs include an ARM922T

@@ -516,6 +525,7 @@ config ARCH_DAVINCI
	select GENERIC_CLOCKEVENTS
	select GENERIC_GPIO
	select HAVE_CLK
	select ZONE_DMA
	help
	  Support for TI's DaVinci platform.

@@ -734,6 +744,29 @@ config SMP

	  If you don't know what to do here, say N.

choice
	prompt "Memory split"
	default VMSPLIT_3G
	help
	  Select the desired split between kernel and user memory.

	  If you are not absolutely sure what you are doing, leave this
	  option alone!

config VMSPLIT_3G
	bool "3G/1G user/kernel split"
config VMSPLIT_2G
	bool "2G/2G user/kernel split"
config VMSPLIT_1G
	bool "1G/3G user/kernel split"
endchoice

config PAGE_OFFSET
	hex
	default 0x40000000 if VMSPLIT_1G
	default 0x80000000 if VMSPLIT_2G
	default 0xC0000000
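
The arithmetic behind these defaults, checked in a standalone C sketch (illustrative only, not part of this commit): PAGE_OFFSET is where the kernel's direct mapping starts, so user space gets everything below it.

/* Illustrative check of the VMSPLIT_* -> PAGE_OFFSET defaults above. */
#include <stdio.h>

int main(void)
{
	unsigned long page_offset[] = {
		0x40000000UL,	/* VMSPLIT_1G: 1GB user / 3GB kernel */
		0x80000000UL,	/* VMSPLIT_2G: 2GB user / 2GB kernel */
		0xC0000000UL,	/* VMSPLIT_3G: 3GB user / 1GB kernel */
	};
	int i;

	for (i = 0; i < 3; i++)
		printf("PAGE_OFFSET %#010lx => %lu MB of user space\n",
		       page_offset[i], page_offset[i] >> 20);
	return 0;
}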

config NR_CPUS
	int "Maximum number of CPUs (2-32)"
	range 2 32

@@ -810,20 +843,18 @@ config OABI_COMPAT
	  UNPREDICTABLE (in fact it can be predicted that it won't work
	  at all). If in doubt say Y.

# Discontigmem is deprecated
config ARCH_DISCONTIGMEM_ENABLE
	bool
	default (ARCH_LH7A40X && !LH7A40X_CONTIGMEM)
	help
	  Say Y to support efficient handling of discontiguous physical memory,
	  for architectures which are either NUMA (Non-Uniform Memory Access)
	  or have huge holes in the physical address space for other reasons.
	  See <file:Documentation/vm/numa> for more.

config ARCH_SPARSEMEM_ENABLE
	bool

config ARCH_SPARSEMEM_DEFAULT
	def_bool ARCH_SPARSEMEM_ENABLE

config ARCH_SELECT_MEMORY_MODEL
	bool
	def_bool ARCH_DISCONTIGMEM_ENABLE && ARCH_SPARSEMEM_ENABLE

config NODES_SHIFT
	int

@@ -1000,9 +1031,9 @@ config ATAGS_PROC

endmenu

if (ARCH_SA1100 || ARCH_INTEGRATOR || ARCH_OMAP || ARCH_IMX || ARCH_PXA)
menu "CPU Power Management"

menu "CPU Frequency scaling"
if (ARCH_SA1100 || ARCH_INTEGRATOR || ARCH_OMAP || ARCH_IMX || ARCH_PXA)

source "drivers/cpufreq/Kconfig"

@@ -1042,10 +1073,12 @@ config CPU_FREQ_PXA
	default y
	select CPU_FREQ_DEFAULT_GOV_USERSPACE

endmenu

endif

source "drivers/cpuidle/Kconfig"

endmenu

menu "Floating point emulation"

comment "At least one emulation must be selected"

@@ -1197,6 +1230,8 @@ source "drivers/power/Kconfig"

source "drivers/hwmon/Kconfig"

source "drivers/thermal/Kconfig"

source "drivers/watchdog/Kconfig"

source "drivers/ssb/Kconfig"

@@ -1217,6 +1252,10 @@ source "drivers/usb/Kconfig"

source "drivers/mmc/Kconfig"

source "drivers/memstick/Kconfig"

source "drivers/accessibility/Kconfig"

source "drivers/leds/Kconfig"

source "drivers/rtc/Kconfig"

@@ -1225,6 +1264,8 @@ source "drivers/dma/Kconfig"

source "drivers/dca/Kconfig"

source "drivers/auxdisplay/Kconfig"

source "drivers/regulator/Kconfig"

source "drivers/uio/Kconfig"
@@ -47,7 +47,7 @@ comma = ,

# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
# testing for a specific architecture or later rather impossible.
arch-$(CONFIG_CPU_32v7)	:=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7a,-march=armv5t -Wa$(comma)-march=armv7a)
arch-$(CONFIG_CPU_32v7)	:=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a)
arch-$(CONFIG_CPU_32v6)	:=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6)
# Only override the compiler option if ARMv6. The ARMv6K extensions are
# always available in ARMv7

@@ -76,7 +76,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif

EXTRA_CFLAGS := -fpic -fno-builtin
EXTRA_AFLAGS :=
EXTRA_AFLAGS := -Wa,-march=all

# Supply ZRELADDR, INITRD_PHYS and PARAMS_PHYS to the decompressor via
# linker symbols.  We only define initrd_phys and params_phys if the

@@ -421,6 +421,7 @@ __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)

__armv4_mmu_cache_on:
		mov	r12, lr

@@ -801,7 +802,7 @@ loop1:
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum number on the way size
		.word	0xe16f5f14		@ clz r5, r4 - find bit position of way size increment
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
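
The masks here follow the ARMv7 cache size ID register layout (line size in bits [2:0], associativity minus one in bits [12:3], number of sets minus one in bits [27:13]); clz then positions the way number at the top of the set/way operand. A small host-side C sketch of the same decode, using an invented register value:

/* Sketch: decode a hypothetical v7 cache size ID value with the same
 * shifts and masks the loop above uses (value invented for illustration). */
#include <stdio.h>

int main(void)
{
	unsigned int ccsidr = 0x000fe01a;	/* 128 sets, 4 ways, 64-byte lines */
	unsigned int line_shift = (ccsidr & 7) + 4;	   /* log2(line bytes) */
	unsigned int ways = ((ccsidr >> 3) & 0x3ff) + 1;
	unsigned int sets = ((ccsidr >> 13) & 0x7fff) + 1;

	printf("%u sets x %u ways x %u B = %u KB\n", sets, ways,
	       1u << line_shift, (sets * ways << line_shift) >> 10);
	return 0;
}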

@@ -12,7 +12,8 @@ config ICST307

config SA1111
	bool
	select DMABOUNCE
	select DMABOUNCE if !ARCH_PXA
	select ZONE_DMA if !ARCH_PXA

config DMABOUNCE
	bool

@@ -154,9 +154,7 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
#endif

	write_lock_irqsave(&device_info->lock, flags);

	list_add(&buf->node, &device_info->safe_buffers);

	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;

@@ -205,8 +203,22 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *

/* ************************************************** */

static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		if (dev)
			dev_err(dev, "Trying to %s invalid mapping\n", where);
		else
			pr_err("unknown device: Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

@@ -270,33 +282,21 @@ map_single(struct device *dev, void *ptr, size_t size,
	return dma_addr;
}

static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	/*
	 * Trying to unmap an invalid mapping
	 */
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to unmap invalid mapping\n");
		return;
	}

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );
		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;
@@ -317,74 +317,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(device_info, buf);
	}
}

static int sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from original code need to be
		 * commented out b/c some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it. Good example is
		 *    allocating a large buffer, mapping it, and then
		 *    breaking it up into small descriptors. No point
		 *    in syncing the whole buffer if you only have to
		 *    touch one descriptor.
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one dir at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * -ds
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		switch (dir) {
		case DMA_FROM_DEVICE:
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
			break;
		case DMA_TO_DEVICE:
			dev_dbg(dev,
				"%s: copy out unsafe %p to safe %p, size %d\n",
				__func__,buf->ptr, buf->safe, size);
			memcpy(buf->safe, buf->ptr, size);
			break;
		case DMA_BIDIRECTIONAL:
			BUG();	/* is this allowed?  what does it mean? */
		default:
			BUG();
		}
		/*
		 * No need to sync the safe buffer - it was allocated
		 * via the coherent allocators.
		 */
		return 0;
	} else {
		return 1;
		free_safe_buffer(dev->archdata.dmabounce, buf);
	}
}

@@ -396,21 +329,29 @@ static int sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(dir == DMA_NONE);
	BUG_ON(!valid_dma_direction(dir));

	dma_addr = map_single(dev, ptr, size, dir);

	return dma_addr;
	return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);

/*
 * see if a mapped address was really a "safe" buffer and if so, copy

@@ -419,126 +360,76 @@ dma_map_single(struct device *dev, void *ptr, size_t size,
 * should be)
 */

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	BUG_ON(dir == DMA_NONE);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_single);

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	int i;
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);
	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	BUG_ON(dir == DMA_NONE);
	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	for (i = 0; i < nents; i++, sg++) {
		struct page *page = sg_page(sg);
		unsigned int offset = sg->offset;
		unsigned int length = sg->length;
		void *ptr = page_address(page) + offset;
	BUG_ON(buf->direction != dir);

		sg->dma_address =
			map_single(dev, ptr, length, dir);
	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}

	return nents;
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	int i;
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);
	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	BUG_ON(dir == DMA_NONE);
	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;
	BUG_ON(buf->direction != dir);

		unmap_single(dev, dma_addr, length, dir);
	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__,buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_addr,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
		__func__, dma_addr, offset, size, dir);

	if (sync_single(dev, dma_addr, offset + size, dir))
		dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_addr,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
		__func__, dma_addr, offset, size, dir);

	if (sync_single(dev, dma_addr, offset + size, dir))
		dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);

void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}
}

void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}
}

static int
dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
		unsigned long size)
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);

@@ -549,9 +440,8 @@ dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char
	return pool->pool ? 0 : -ENOMEM;
}

int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

@@ -607,9 +497,9 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

void
dmabounce_unregister_dev(struct device *dev)
void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

@@ -642,15 +532,6 @@ dmabounce_unregister_dev(struct device *dev)

	dev_info(dev, "dmabounce: device unregistered\n");
}

EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
EXPORT_SYMBOL(dma_sync_sg_for_device);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
@@ -27,9 +27,9 @@
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>

@@ -24,9 +24,9 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>

@@ -25,10 +25,10 @@
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/sizes.h>

@@ -15,7 +15,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <linux/io.h>
#include <asm/gpio.h>
#include <asm/hardware/scoop.h>

@@ -17,9 +17,9 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <asm/io.h>
#include <asm/hardware/ioc.h>

#include <asm/mach/time.h>

@@ -16,9 +16,9 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/hardware/uengine.h>
#include <asm/io.h>

#if defined(CONFIG_ARCH_IXP2000)
#define IXP_UENGINE_CSR_VIRT_BASE	IXP2000_UENGINE_CSR_VIRT_BASE

@@ -4,8 +4,8 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/io.h>

#include <asm/io.h>
#include <asm/system.h>

#include <asm/mach/pci.h>

@@ -20,8 +20,8 @@
 */
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>

#include <asm/io.h>
#include <asm/mach/irq.h>
#include <asm/hardware/vic.h>
File diff suppressed because it is too large
@@ -12,7 +12,7 @@ extern void __bug(const char *file, int line) __attribute__((noreturn));
#else

/* this just causes an oops */
#define BUG()		(*(int *)0 = 0)
#define BUG()		do { *(int *)0 = 0; } while (1)

#endif
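
A note on why the looping form matters (sketch, not from this commit): the old definition was a bare expression, so the compiler still treated the code after BUG() as reachable; wrapping the trap in do { } while (1) lets it see that control never continues.

/* Sketch: the difference the "while (1)" makes to the compiler. */
#define BUG_OLD()	(*(int *)0 = 0)
#define BUG_NEW()	do { *(int *)0 = 0; } while (1)

int must_be_positive(int x)
{
	if (x > 0)
		return x;
	BUG_NEW();	/* the loop never exits, so the compiler knows we
			 * cannot fall through; with BUG_OLD() this function
			 * would warn about reaching the end of a non-void
			 * function without a return. */
}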

@@ -444,94 +444,4 @@ static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
	dmac_inv_range(start, start + size);
}

#define __cacheid_present(val)			(val != read_cpuid(CPUID_ID))
#define __cacheid_type_v7(val)			((val & (7 << 29)) == (4 << 29))

#define __cacheid_vivt_prev7(val)		((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt_prev7(val)		((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing_prev7(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing_prev7(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))

#define __cacheid_vivt(val)			(__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val))
#define __cacheid_vipt(val)			(__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val))
#define __cacheid_vipt_nonaliasing(val)		(__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val))
#define __cacheid_vipt_aliasing(val)		(__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val))
#define __cacheid_vivt_asid_tagged_instr(val)	(__cacheid_type_v7(val) ? ((val & (3 << 14)) == (1 << 14)) : 0)

#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
/*
 * VIVT caches only
 */
#define cache_is_vivt()			1
#define cache_is_vipt()			0
#define cache_is_vipt_nonaliasing()	0
#define cache_is_vipt_aliasing()	0
#define icache_is_vivt_asid_tagged()	0

#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
/*
 * VIPT caches only
 */
#define cache_is_vivt()			0
#define cache_is_vipt()			1
#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_aliasing(__val);			\
	})

#define icache_is_vivt_asid_tagged()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vivt_asid_tagged_instr(__val);	\
	})

#else
/*
 * VIVT or VIPT caches.  Note that this is unreliable since ARM926
 * and V6 CPUs satisfy the "(val & (15 << 25)) == (14 << 25)" test.
 * There's no way to tell from the CacheType register what type (!)
 * the cache is.
 */
#define cache_is_vivt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
	})

#define cache_is_vipt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) && __cacheid_vipt(__val); \
	})

#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
			__cacheid_vipt_nonaliasing(__val);	\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
			__cacheid_vipt_aliasing(__val);		\
	})

#define icache_is_vivt_asid_tagged()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
			__cacheid_vivt_asid_tagged_instr(__val); \
	})

#endif

#endif
arch/arm/include/asm/cachetype.h (new file, 52 lines)
@@ -0,0 +1,52 @@
#ifndef __ASM_ARM_CACHETYPE_H
#define __ASM_ARM_CACHETYPE_H

#define CACHEID_VIVT			(1 << 0)
#define CACHEID_VIPT_NONALIASING	(1 << 1)
#define CACHEID_VIPT_ALIASING		(1 << 2)
#define CACHEID_VIPT			(CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
#define CACHEID_ASID_TAGGED		(1 << 3)

extern unsigned int cacheid;

#define cache_is_vivt()			cacheid_is(CACHEID_VIVT)
#define cache_is_vipt()			cacheid_is(CACHEID_VIPT)
#define cache_is_vipt_nonaliasing()	cacheid_is(CACHEID_VIPT_NONALIASING)
#define cache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_ALIASING)
#define icache_is_vivt_asid_tagged()	cacheid_is(CACHEID_ASID_TAGGED)

/*
 * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
 * Mask out support which will never be present on newer CPUs.
 * - v6+ is never VIVT
 * - v7+ VIPT never aliases
 */
#if __LINUX_ARM_ARCH__ >= 7
#define __CACHEID_ARCH_MIN	(CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED)
#elif __LINUX_ARM_ARCH__ >= 6
#define	__CACHEID_ARCH_MIN	(~CACHEID_VIVT)
#else
#define __CACHEID_ARCH_MIN	(~0)
#endif

/*
 * Mask out support which isn't configured
 */
#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
#define __CACHEID_ALWAYS	(CACHEID_VIVT)
#define __CACHEID_NEVER		(~CACHEID_VIVT)
#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
#define __CACHEID_ALWAYS	(0)
#define __CACHEID_NEVER		(CACHEID_VIVT)
#else
#define __CACHEID_ALWAYS	(0)
#define __CACHEID_NEVER		(0)
#endif

static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask)
{
	return (__CACHEID_ALWAYS & mask) |
		(~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
}

#endif
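
A standalone C sketch of how cacheid_is() constant-folds (assumed configuration, for illustration only: ARMv7 with VIPT-only caches, so __CACHEID_NEVER is CACHEID_VIVT and __CACHEID_ARCH_MIN is CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED):

#include <stdio.h>

#define CACHEID_VIVT			(1 << 0)
#define CACHEID_VIPT_NONALIASING	(1 << 1)
#define CACHEID_VIPT_ALIASING		(1 << 2)
#define CACHEID_ASID_TAGGED		(1 << 3)

#define __CACHEID_ARCH_MIN	(CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED)
#define __CACHEID_ALWAYS	(0)
#define __CACHEID_NEVER		(CACHEID_VIVT)

static unsigned int cacheid = CACHEID_VIPT_NONALIASING;	/* set at boot */

static unsigned int cacheid_is(unsigned int mask)
{
	return (__CACHEID_ALWAYS & mask) |
		(~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
}

int main(void)
{
	printf("vivt=%u vipt_aliasing=%u vipt_nonaliasing=%u\n",
	       cacheid_is(CACHEID_VIVT),		/* masked to constant 0 */
	       cacheid_is(CACHEID_VIPT_ALIASING),	/* 0: v7 VIPT never aliases */
	       cacheid_is(CACHEID_VIPT_NONALIASING));	/* 1: from runtime cacheid */
	return 0;
}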

arch/arm/include/asm/cputype.h (new file, 64 lines)
@@ -0,0 +1,64 @@
#ifndef __ASM_ARM_CPUTYPE_H
#define __ASM_ARM_CPUTYPE_H

#include <linux/stringify.h>

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg)						\
	({							\
		unsigned int __val;				\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)				\
		    :						\
		    : "cc");					\
		__val;						\
	})
#else
extern unsigned int processor_id;
#define read_cpuid(reg) (processor_id)
#endif

/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant.  Use this function to read the CPU ID
 * rather than directly reading processor_id or read_cpuid() directly.
 */
static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
	return read_cpuid(CPUID_ID);
}

static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
	return read_cpuid(CPUID_CACHETYPE);
}

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	if ((read_cpuid_id() & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define	cpu_is_xscale()	0
#else
#define	cpu_is_xscale()	1
#endif

#endif
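
A small C sketch of the cpu_is_xsc3() mask test against an invented ID register value (the implementer code sits in bits [31:24]; 0x69 is ASCII 'i' for Intel):

#include <stdio.h>

int main(void)
{
	unsigned int id = 0x69056417;	/* hypothetical XScale3 CPU ID value */

	printf("implementer=0x%02x xsc3=%d\n", id >> 24,
	       (id & 0xffffe000) == 0x69056000);	/* prints xsc3=1 */
	return 0;
}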

@@ -104,15 +104,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void
dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle)
static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

@@ -127,8 +126,7 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent

@@ -143,9 +141,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle);
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space

@@ -159,8 +155,8 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t handle, size_t size);
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);


/**

@@ -174,282 +170,16 @@ int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t handle, size_t size);
int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);


/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
#ifndef CONFIG_DMABOUNCE
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
		enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}
#else
extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
#endif

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above dma_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single are
 * the same here.
 */
#ifndef CONFIG_DMABOUNCE
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt;

		sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
		virt = sg_virt(sg);

		if (!arch_is_coherent())
			dma_cache_maint(virt, sg->length, dir);
	}

	return nents;
}
#else
extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Unmap a set of streaming mode DMA translations.
 * Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{

	/* nothing to do */
}
#else
extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first the perform a dma_sync_for_device, and then the
 * device again owns the buffer.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}
#else
extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
#endif

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}


/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as dma_sync_single_for_* but for a scatter-gather list,
 * same rules and usage.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = sg_virt(sg);
		if (!arch_is_coherent())
			dma_cache_maint(virt, sg->length, dir);
	}
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = sg_virt(sg);
		if (!arch_is_coherent())
			dma_cache_maint(virt, sg->length, dir);
	}
}
#else
extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
#endif

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"

@@ -475,7 +205,8 @@ extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enu
 * appropriate DMA pools for the device.
 *
 */
extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);

/**
 * dmabounce_unregister_dev

@@ -506,7 +237,184 @@ extern void dmabounce_unregister_dev(struct device *);
 *
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
#define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)


/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}
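
A usage sketch of the streaming pattern the comment above describes (hypothetical driver code, not from this commit; assumes the usual <linux/dma-mapping.h> context):

static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	/* hand the buffer to the device */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... program the device with "handle" and wait for completion ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;	/* the CPU owns buf again */
}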

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		dma_cache_maint(page_address(page) + offset, size, dir);

	return page_to_dma(dev, page) + offset;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	/* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first the perform a dma_sync_for_device, and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
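
A sketch of the partial-sync case these offset/size variants serve (hypothetical descriptor-ring driver, not from this commit): map a large region once, then sync only the piece that changed.

static void example_check_desc(struct device *dev, dma_addr_t ring,
			       unsigned long off, size_t desc_len)
{
	/* reclaim just one descriptor for the CPU ... */
	dma_sync_single_range_for_cpu(dev, ring, off, desc_len,
				      DMA_FROM_DEVICE);
	/* ... inspect it, then hand it back before restarting DMA */
	dma_sync_single_range_for_device(dev, ring, off, desc_len,
					 DMA_FROM_DEVICE);
}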

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);


#endif /* __KERNEL__ */
#endif
@@ -1,6 +1,124 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#ifndef _ASM_ARM_FUTEX_H
#define _ASM_ARM_FUTEX_H

#ifdef __KERNEL__

#ifdef CONFIG_SMP

#include <asm-generic/futex.h>

#endif
#else /* !SMP, we can work around lack of atomic ops by disabling preemption */

#include <linux/futex.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <asm/errno.h>

#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
	__asm__ __volatile__(					\
	"1:	ldrt	%1, [%2]\n"				\
	"	" insn "\n"					\
	"2:	strt	%0, [%2]\n"				\
	"	mov	%0, #0\n"				\
	"3:\n"							\
	"	.section __ex_table,\"a\"\n"			\
	"	.align	3\n"					\
	"	.long	1b, 4f, 2b, 4f\n"			\
	"	.previous\n"					\
	"	.section .fixup,\"ax\"\n"			\
	"4:	mov	%0, %4\n"				\
	"	b	3b\n"					\
	"	.previous"					\
	: "=&r" (ret), "=&r" (oldval)				\
	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
	: "cc", "memory")

static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	pagefault_disable();	/* implies preempt_disable() */

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("mov	%0, %3", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("add	%0, %1, %3", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("orr	%0, %1, %3", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("and	%0, %1, %3", ret, oldval, uaddr, ~oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("eor	%0, %1, %3", ret, oldval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}

	pagefault_enable();	/* subsumes preempt_enable() */

	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
		default: ret = -ENOSYS;
		}
	}
	return ret;
}
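
A worked example of the field unpacking at the top of this function (userspace sketch): op lives in bits [31:28], cmp in [27:24], oparg in [23:12] and cmparg in [11:0], and the left/right shift pairs sign-extend the two 12-bit argument fields, exactly as the kernel code does.

#include <stdio.h>

int main(void)
{
	int encoded_op = (1 << 28) | (2 << 24) | (0xfff << 12) | 5;

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n",
	       (encoded_op >> 28) & 7,		/* 1 = FUTEX_OP_ADD */
	       (encoded_op >> 24) & 15,		/* 2 = FUTEX_OP_CMP_LT */
	       (encoded_op << 8) >> 20,		/* 0xfff -> -1, sign-extended */
	       (encoded_op << 20) >> 20);	/* 5 */
	return 0;
}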
|
||||
|
||||
static inline int
|
||||
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
|
||||
{
|
||||
int val;
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
|
||||
return -EFAULT;
|
||||
|
||||
pagefault_disable(); /* implies preempt_disable() */
|
||||
|
||||
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
|
||||
"1: ldrt %0, [%3]\n"
|
||||
" teq %0, %1\n"
|
||||
"2: streqt %2, [%3]\n"
|
||||
"3:\n"
|
||||
" .section __ex_table,\"a\"\n"
|
||||
" .align 3\n"
|
||||
" .long 1b, 4f, 2b, 4f\n"
|
||||
" .previous\n"
|
||||
" .section .fixup,\"ax\"\n"
|
||||
"4: mov %0, %4\n"
|
||||
" b 3b\n"
|
||||
" .previous"
|
||||
: "=&r" (val)
|
||||
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
|
||||
: "cc", "memory");
|
||||
|
||||
pagefault_enable(); /* subsumes preempt_enable() */
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
#endif /* !SMP */
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _ASM_ARM_FUTEX_H */
|
||||
|
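For reference, a minimal user-space sketch (not part of this diff; values are hypothetical) of the encoded_op unpacking that futex_atomic_op_inuser() performs above; the shift-left-then-right pairs are sign-extending extractions of the two 12-bit argument fields:

#include <stdio.h>

/* Hypothetical word: op=FUTEX_OP_ADD (1), cmp=FUTEX_OP_CMP_NE (1),
 * oparg=0xfff (sign-extends to -1), cmparg=1. Relies on arithmetic
 * right shift of signed ints, as the kernel code does. */
int main(void)
{
	int encoded_op = (1 << 28) | (1 << 24) | (0xfff << 12) | 1;

	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;	/* bits 23..12, signed */
	int cmparg = (encoded_op << 20) >> 20;	/* bits 11..0, signed */

	/* prints: op=1 cmp=1 oparg=-1 cmparg=1 */
	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}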
@ -22,6 +22,10 @@
#ifndef __ASSEMBLY__
struct irqaction;
extern void migrate_irqs(void);

extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);

#endif

#endif

@ -61,7 +61,6 @@ struct kprobe_ctlblk {
void arch_remove_kprobe(struct kprobe *);
void kretprobe_trampoline(void);

int kprobe_trap_handler(struct pt_regs *regs, unsigned int instr);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data);

@ -4,8 +4,8 @@
#ifndef _ASM_MC146818RTC_H
#define _ASM_MC146818RTC_H

#include <linux/io.h>
#include <mach/irqs.h>
#include <asm/io.h>

#ifndef RTC_PORT
#define RTC_PORT(x) (0x70 + (x))
@ -13,43 +13,33 @@
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <mach/memory.h>
#include <asm/sizes.h>

/*
 * Allow for constants defined here to be used from assembly code
 * by prepending the UL suffix only with actual C code compilation.
 */
#ifndef __ASSEMBLY__
#define UL(x) (x##UL)
#else
#define UL(x) (x)
#endif

#include <linux/compiler.h>
#include <mach/memory.h>
#include <asm/sizes.h>
#define UL(x) _AC(x, UL)

#ifdef CONFIG_MMU

#ifndef TASK_SIZE
/*
 * PAGE_OFFSET - the virtual address of the start of the kernel image
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
 */
#define TASK_SIZE UL(0xbf000000)
#define TASK_UNMAPPED_BASE UL(0x40000000)
#endif
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3)

/*
 * The maximum size of a 26-bit user space task.
 */
#define TASK_SIZE_26 UL(0x04000000)

/*
 * Page offset: 3GB
 */
#ifndef PAGE_OFFSET
#define PAGE_OFFSET UL(0xc0000000)
#endif

/*
 * The module space lives between the addresses given by TASK_SIZE
 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
@ -147,16 +137,10 @@

#ifndef arch_adjust_zones
#define arch_adjust_zones(node,size,holes) do { } while (0)
#elif !defined(CONFIG_ZONE_DMA)
#error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA"
#endif

/*
 * Amount of memory reserved for the vmalloc() area, and minimum
 * address for vmalloc mappings.
 */
extern unsigned long vmalloc_reserve;

#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
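A quick worked check (standalone sketch, assuming CONFIG_PAGE_OFFSET=0xC0000000, the VMSPLIT_3G default) that the new derived definitions reproduce the old hard-coded 3G/1G values:

#include <stdio.h>

#define CONFIG_PAGE_OFFSET 0xC0000000UL

int main(void)
{
	/* TASK_SIZE keeps the old 16MB module gap below the kernel,
	 * TASK_UNMAPPED_BASE the old 1GB mmap floor. */
	printf("PAGE_OFFSET=%#lx TASK_SIZE=%#lx TASK_UNMAPPED_BASE=%#lx\n",
	       CONFIG_PAGE_OFFSET,			/* 0xc0000000 */
	       CONFIG_PAGE_OFFSET - 0x01000000UL,	/* 0xbf000000 */
	       CONFIG_PAGE_OFFSET / 3);			/* 0x40000000 */
	return 0;
}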
@ -15,6 +15,7 @@

#include <linux/compiler.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>

@ -319,11 +319,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)

#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))

/*
 * Permanent address of a page. We never have highmem, so this is trivial.
 */
#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.

@ -54,7 +54,6 @@
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
#define PSR_N_BIT 0x80000000
#define PCMASK 0

/*
 * Groups of PSR bits
@ -139,11 +138,7 @@ static inline int valid_user_regs(struct pt_regs *regs)
	return 0;
}

#define pc_pointer(v) \
	((v) & ~PCMASK)

#define instruction_pointer(regs) \
	(pc_pointer((regs)->ARM_pc))
#define instruction_pointer(regs) (regs)->ARM_pc

#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
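Why pc_pointer() can simply be deleted here and at its call sites later in this series: with 26-bit CPU support gone, PCMASK is always 0, so the macro was an identity. A one-line check under that assumption:

#define PCMASK		0
#define pc_pointer(v)	((v) & ~PCMASK)
/* ~0 is all ones, so pc_pointer(v) == (v) for every v; callers can
 * read the saved value directly, which is what this series does. */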
@ -209,6 +209,17 @@ struct meminfo {
	struct membank bank[NR_BANKS];
};

#define for_each_nodebank(iter,mi,no) \
	for (iter = 0; iter < mi->nr_banks; iter++) \
		if (mi->bank[iter].node == no)

#define bank_pfn_start(bank) __phys_to_pfn((bank)->start)
#define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size)
#define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT)
#define bank_phys_start(bank) (bank)->start
#define bank_phys_end(bank) ((bank)->start + (bank)->size)
#define bank_phys_size(bank) (bank)->size

/*
 * Early command line parameters.
 */
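A small usage sketch (hypothetical init-time code, not in this diff) showing how the bank accessors above combine with for_each_nodebank():

/* Count the pages one node contributes to a struct meminfo. */
static unsigned long node_pages(struct meminfo *mi, int node)
{
	unsigned long pages = 0;
	int i;

	for_each_nodebank(i, mi, node)
		pages += bank_pfn_size(&mi->bank[i]);

	return pages;
}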
@ -3,8 +3,22 @@

#include <asm/memory.h>

#define MAX_PHYSADDR_BITS 32
#define MAX_PHYSMEM_BITS 32
#define SECTION_SIZE_BITS NODE_MEM_SIZE_BITS
/*
 * Two definitions are required for sparsemem:
 *
 * MAX_PHYSMEM_BITS: The number of physical address bits required
 * to address the last byte of memory.
 *
 * SECTION_SIZE_BITS: The number of physical address bits to cover
 * the maximum amount of memory in a section.
 *
 * Eg, if you have 2 banks of up to 64MB at 0x80000000, 0x84000000,
 * then MAX_PHYSMEM_BITS is 32, SECTION_SIZE_BITS is 26.
 *
 * Define these in your mach/memory.h.
 */
#if !defined(SECTION_SIZE_BITS) || !defined(MAX_PHYSMEM_BITS)
#error Sparsemem is not supported on this platform
#endif

#endif
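The example in the comment, as arithmetic (standalone sketch): a 64MB section needs 26 bits, and the last byte of a 64MB bank at 0x84000000 still fits in 32 physical address bits:

#include <stdio.h>

int main(void)
{
	unsigned long section_size = 1UL << 26;			/* 64MB */
	unsigned long last_byte = 0x84000000UL + (64UL << 20) - 1;

	/* prints: section=64MB last=0x87ffffff */
	printf("section=%luMB last=%#lx\n", section_size >> 20, last_byte);
	return 0;
}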
@ -43,11 +43,6 @@
#define CR_XP (1 << 23) /* Extended page tables */
#define CR_VE (1 << 24) /* Vectored interrupts */

#define CPUID_ID 0
#define CPUID_CACHETYPE 1
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences. Apparently we can't trust
@ -61,36 +56,8 @@
#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/irqflags.h>

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
	({ \
		unsigned int __val; \
		asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \
		    : "=r" (__val) \
		    : \
		    : "cc"); \
		__val; \
	})
#else
extern unsigned int processor_id;
#define read_cpuid(reg) (processor_id)
#endif

/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant. Use this function to read the CPU ID
 * rather than reading processor_id or read_cpuid() directly.
 */
static inline unsigned int read_cpuid_id(void) __attribute_const__;

static inline unsigned int read_cpuid_id(void)
{
	return read_cpuid(CPUID_ID);
}

#define __exception __attribute__((section(".exception.text")))

struct thread_info;
@ -131,31 +98,6 @@ extern void cpu_init(void);
void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA. For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3() 0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define cpu_is_xscale() 0
#else
#define cpu_is_xscale() 1
#endif

#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)
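For orientation, a sketch of how the word returned by read_cpuid(CPUID_ID) decomposes; the field positions match the decoding used by cpu_architecture() and the /proc/cpuinfo code later in this series (the ID value itself is made up):

#include <stdio.h>

int main(void)
{
	unsigned int id = 0x69054117;	/* hypothetical XScale-style ID */

	printf("implementer=%#x variant=%#x arch=%#x part=%#x rev=%u\n",
	       id >> 24,		/* 0x69 ('i', Intel) */
	       (id >> 20) & 15,
	       (id >> 16) & 15,
	       (id >> 4) & 0xfff,
	       id & 15);
	return 0;
}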
@ -98,7 +98,7 @@ static inline struct thread_info *current_thread_info(void)
}

#define thread_saved_pc(tsk) \
	((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
	((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
#define thread_saved_fp(tsk) \
	((unsigned long)(task_thread_info(tsk)->cpu_context.fp))

@ -225,7 +225,7 @@ do { \

#define __get_user_asm_byte(x,addr,err) \
	__asm__ __volatile__( \
	"1: ldrbt %1,[%2],#0\n" \
	"1: ldrbt %1,[%2]\n" \
	"2:\n" \
	" .section .fixup,\"ax\"\n" \
	" .align 2\n" \
@ -261,7 +261,7 @@ do { \

#define __get_user_asm_word(x,addr,err) \
	__asm__ __volatile__( \
	"1: ldrt %1,[%2],#0\n" \
	"1: ldrt %1,[%2]\n" \
	"2:\n" \
	" .section .fixup,\"ax\"\n" \
	" .align 2\n" \
@ -306,7 +306,7 @@ do { \

#define __put_user_asm_byte(x,__pu_addr,err) \
	__asm__ __volatile__( \
	"1: strbt %1,[%2],#0\n" \
	"1: strbt %1,[%2]\n" \
	"2:\n" \
	" .section .fixup,\"ax\"\n" \
	" .align 2\n" \
@ -339,7 +339,7 @@ do { \

#define __put_user_asm_word(x,__pu_addr,err) \
	__asm__ __volatile__( \
	"1: strt %1,[%2],#0\n" \
	"1: strt %1,[%2]\n" \
	"2:\n" \
	" .section .fixup,\"ax\"\n" \
	" .align 2\n" \
@ -365,7 +365,7 @@ do { \
#define __put_user_asm_dword(x,__pu_addr,err) \
	__asm__ __volatile__( \
	"1: strt " __reg_oper1 ", [%1], #4\n" \
	"2: strt " __reg_oper0 ", [%1], #0\n" \
	"2: strt " __reg_oper0 ", [%1]\n" \
	"3:\n" \
	" .section .fixup,\"ax\"\n" \
	" .align 2\n" \
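For context, a hypothetical caller of the macros patched above; a faulting ldrt/strt is redirected through its __ex_table entry to the .fixup stub, which is what turns a bad pointer into -EFAULT:

#include <linux/uaccess.h>

/* Hypothetical: fetch one word from user space. get_user() expands
 * to the __get_user_asm_word sequence; on a fault the fixup path
 * runs instead of an oops and -EFAULT is returned. */
static int fetch_word(int __user *uptr, int *val)
{
	return get_user(*val, uptr);
}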
@ -1,8 +1,8 @@
#ifndef ASMARM_VGA_H
#define ASMARM_VGA_H

#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/io.h>

#define VGA_MAP_MEM(x,s) (PCIMEM_BASE + (x))

@ -13,11 +13,11 @@
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/io.h>

#include <asm/checksum.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ftrace.h>

/*

@ -10,8 +10,8 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/io.h>
#include <asm/mach-types.h>
#include <asm/mach/pci.h>

@ -15,9 +15,9 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/io.h>
#include <mach/ep93xx-regs.h>
#include <asm/thread_notify.h>
#include <asm/io.h>

struct crunch_state *crunch_owner;

@ -89,10 +89,12 @@
ENTRY(printhex8)
	mov r1, #8
	b printhex
ENDPROC(printhex8)

ENTRY(printhex4)
	mov r1, #4
	b printhex
ENDPROC(printhex4)

ENTRY(printhex2)
	mov r1, #2
@ -110,6 +112,7 @@ printhex: adr r2, hexbuf
	bne 1b
	mov r0, r2
	b printascii
ENDPROC(printhex2)

	.ltorg

@ -127,11 +130,13 @@ ENTRY(printascii)
	teqne r1, #0
	bne 1b
	mov pc, lr
ENDPROC(printascii)

ENTRY(printch)
	addruart r3
	mov r1, r0
	mov r0, #0
	b 1b
ENDPROC(printch)

hexbuf: .space 16
@ -19,10 +19,9 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include <asm/dma.h>
#include <asm/io.h>

#include <asm/mach/dma.h>

#define ISA_DMA_MODE_READ 0x44

@ -76,14 +76,17 @@
__pabt_invalid:
	inv_entry BAD_PREFETCH
	b common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR
@ -107,6 +110,7 @@ common_invalid:

	mov r0, sp
	b bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
@ -192,6 +196,7 @@ __dabt_svc:
	ldr r0, [sp, #S_PSR]
	msr spsr_cxsf, r0
	ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
ENDPROC(__dabt_svc)

	.align 5
__irq_svc:
@ -223,6 +228,7 @@ __irq_svc:
	bleq trace_hardirqs_on
#endif
	ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
ENDPROC(__irq_svc)

	.ltorg

@ -272,6 +278,7 @@ __und_svc:
	ldr lr, [sp, #S_PSR] @ Get SVC cpsr
	msr spsr_cxsf, lr
	ldmia sp, {r0 - pc}^ @ Restore SVC registers
ENDPROC(__und_svc)

	.align 5
__pabt_svc:
@ -313,6 +320,7 @@ __pabt_svc:
	ldr r0, [sp, #S_PSR]
	msr spsr_cxsf, r0
	ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
ENDPROC(__pabt_svc)

	.align 5
.LCcralign:
@ -412,6 +420,7 @@ __dabt_usr:
	mov r2, sp
	adr lr, ret_from_exception
	b do_DataAbort
ENDPROC(__dabt_usr)

	.align 5
__irq_usr:
@ -441,6 +450,7 @@ __irq_usr:

	mov why, #0
	b ret_to_user
ENDPROC(__irq_usr)

	.ltorg

@ -474,6 +484,7 @@ __und_usr:
#else
	b __und_usr_unknown
#endif
ENDPROC(__und_usr)

	@
	@ fallthrough to call_fpe
@ -642,6 +653,7 @@ __und_usr_unknown:
	mov r0, sp
	adr lr, ret_from_exception
	b do_undefinstr
ENDPROC(__und_usr_unknown)

	.align 5
__pabt_usr:
@ -666,6 +678,8 @@ ENTRY(ret_from_exception)
	get_thread_info tsk
	mov why, #0
	b ret_to_user
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
@ -702,6 +716,7 @@ ENTRY(__switch_to)
	bl atomic_notifier_call_chain
	mov r0, r5
	ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
ENDPROC(__switch_to)

	__INIT

@ -1029,6 +1044,7 @@ vector_\name:
	mov r0, sp
	ldr lr, [pc, lr, lsl #2]
	movs pc, lr @ branch to handler in SVC mode
ENDPROC(vector_\name)
	.endm

	.globl __stubs_start
@ -77,6 +77,7 @@ no_work_pending:
	mov r0, r0
	add sp, sp, #S_FRAME_SIZE - S_PC
	movs pc, lr @ return & move spsr_svc into cpsr
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
@ -92,7 +93,7 @@ ENTRY(ret_from_fork)
	mov r0, #1 @ trace exit [IP = 1]
	bl syscall_trace
	b ret_slow_syscall

ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
@ -269,6 +270,7 @@ ENTRY(vector_swi)
	eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
	bcs arm_syscall
	b sys_ni_syscall @ not private func
ENDPROC(vector_swi)

/*
 * This is the really slow path. We're going to be doing
@ -326,7 +328,6 @@ ENTRY(sys_call_table)
 */
@ r0 = syscall number
@ r8 = syscall table
	.type sys_syscall, #function
sys_syscall:
	bic scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
@ -338,53 +339,65 @@ sys_syscall:
	movlo r3, r4
	ldrlo pc, [tbl, scno, lsl #2]
	b sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
	add r0, sp, #S_OFF
	b sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
	add r0, sp, #S_OFF
	b sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
	add r3, sp, #S_OFF
	b sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
	add ip, sp, #S_OFF
	str ip, [sp, #4]
	b sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigsuspend_wrapper:
	add r3, sp, #S_OFF
	b sys_sigsuspend
ENDPROC(sys_sigsuspend_wrapper)

sys_rt_sigsuspend_wrapper:
	add r2, sp, #S_OFF
	b sys_rt_sigsuspend
ENDPROC(sys_rt_sigsuspend_wrapper)

sys_sigreturn_wrapper:
	add r0, sp, #S_OFF
	b sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add r0, sp, #S_OFF
	b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
	ldr r2, [sp, #S_OFF + S_SP]
	b do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

sys_statfs64_wrapper:
	teq r1, #88
	moveq r1, #84
	b sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq r1, #88
	moveq r1, #84
	b sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K. If we can't do the requested
@ -402,11 +415,14 @@ sys_mmap2:
	str r5, [sp, #4]
	b do_mmap2
#endif
ENDPROC(sys_mmap2)

ENTRY(pabort_ifar)
	mrc p15, 0, r0, cr6, cr0, 2
ENTRY(pabort_noifar)
	mov pc, lr
ENDPROC(pabort_ifar)
ENDPROC(pabort_noifar)

#ifdef CONFIG_OABI_COMPAT

@ -417,26 +433,31 @@ ENTRY(pabort_noifar)
sys_oabi_pread64:
	stmia sp, {r3, r4}
	b sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia sp, {r3, r4}
	b sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov r3, r2
	mov r2, r1
	b sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov r3, r2
	mov r2, r1
	b sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str r3, [sp]
	mov r3, r2
	mov r2, r1
	b sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
@ -45,7 +45,6 @@
#include <asm/fiq.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/uaccess.h>

static unsigned long no_fiq_insn;

@ -36,7 +36,6 @@ __switch_data:
 * r2 = atags pointer
 * r9 = processor ID
 */
	.type __mmap_switched, %function
__mmap_switched:
	adr r3, __switch_data + 4

@ -59,6 +58,7 @@ __mmap_switched:
	bic r4, r0, #CR_A @ Clear 'A' bit
	stmia r7, {r0, r4} @ Save control register values
	b start_kernel
ENDPROC(__mmap_switched)

/*
 * Exception handling. Something went wrong and we can't proceed. We
@ -69,8 +69,6 @@ __mmap_switched:
 * and hope for the best (useful if bootloader fails to pass a proper
 * machine ID for example).
 */

	.type __error_p, %function
__error_p:
#ifdef CONFIG_DEBUG_LL
	adr r0, str_p1
@ -84,8 +82,8 @@ str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x"
str_p2: .asciz ").\n"
	.align
#endif
ENDPROC(__error_p)

	.type __error_a, %function
__error_a:
#ifdef CONFIG_DEBUG_LL
	mov r4, r1 @ preserve machine ID
@ -115,13 +113,14 @@ __error_a:
	adr r0, str_a3
	bl printascii
	b __error
ENDPROC(__error_a)

str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
	.align
#endif

	.type __error, %function
__error:
#ifdef CONFIG_ARCH_RPC
/*
@ -138,6 +137,7 @@ __error:
#endif
1:	mov r0, r0
	b 1b
ENDPROC(__error)

/*
@ -153,7 +153,6 @@ __error:
 * r5 = proc_info pointer in physical address space
 * r9 = cpuid (preserved)
 */
	.type __lookup_processor_type, %function
__lookup_processor_type:
	adr r3, 3f
	ldmda r3, {r5 - r7}
@ -169,6 +168,7 @@ __lookup_processor_type:
	blo 1b
	mov r5, #0 @ unknown processor
2:	mov pc, lr
ENDPROC(__lookup_processor_type)

/*
 * This provides a C-API version of the above function.
@ -179,6 +179,7 @@ ENTRY(lookup_processor_type)
	bl __lookup_processor_type
	mov r0, r5
	ldmfd sp!, {r4 - r7, r9, pc}
ENDPROC(lookup_processor_type)

/*
 * Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for
@ -201,7 +202,6 @@ ENTRY(lookup_processor_type)
 * r3, r4, r6 corrupted
 * r5 = mach_info pointer in physical address space
 */
	.type __lookup_machine_type, %function
__lookup_machine_type:
	adr r3, 3b
	ldmia r3, {r4, r5, r6}
@ -216,6 +216,7 @@ __lookup_machine_type:
	blo 1b
	mov r5, #0 @ unknown machine
2:	mov pc, lr
ENDPROC(__lookup_machine_type)

/*
 * This provides a C-API version of the above function.
@ -226,6 +227,7 @@ ENTRY(lookup_machine_type)
	bl __lookup_machine_type
	mov r0, r5
	ldmfd sp!, {r4 - r6, pc}
ENDPROC(lookup_machine_type)

/* Determine validity of the r2 atags pointer. The heuristic requires
 * that the pointer be aligned, in the first 16k of physical RAM and
@ -239,8 +241,6 @@ ENTRY(lookup_machine_type)
 * r2 either valid atags pointer, or zero
 * r5, r6 corrupted
 */

	.type __vet_atags, %function
__vet_atags:
	tst r2, #0x3 @ aligned?
	bne 1f
@ -257,3 +257,4 @@ __vet_atags:

1:	mov r2, #0
	mov pc, lr
ENDPROC(__vet_atags)
@ -33,7 +33,6 @@
 *
 */
	.section ".text.head", "ax"
	.type stext, %function
ENTRY(stext)
	msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
						      @ and irqs disabled
@ -53,11 +52,11 @@ ENTRY(stext)
					@ the initialization is done
	adr lr, __after_proc_init @ return (PIC) address
	add pc, r10, #PROCINFO_INITFUNC
ENDPROC(stext)

/*
 * Set the Control Register and Read the process ID.
 */
	.type __after_proc_init, %function
__after_proc_init:
#ifdef CONFIG_CPU_CP15
	mrc p15, 0, r0, c1, c0, 0 @ read control reg
@ -85,6 +84,7 @@ __after_proc_init:

	mov pc, r13 @ clear the BSS and jump
		    @ to start_kernel
ENDPROC(__after_proc_init)
	.ltorg

#include "head-common.S"

@ -75,7 +75,6 @@
 * circumstances, zImage) is for.
 */
	.section ".text.head", "ax"
	.type stext, %function
ENTRY(stext)
	msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
						      @ and irqs disabled
@ -100,9 +99,9 @@ ENTRY(stext)
					@ mmu has been enabled
	adr lr, __enable_mmu @ return (PIC) address
	add pc, r10, #PROCINFO_INITFUNC
ENDPROC(stext)

#if defined(CONFIG_SMP)
	.type secondary_startup, #function
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
@ -128,6 +127,7 @@ ENTRY(secondary_startup)
	adr lr, __enable_mmu @ return address
	add pc, r10, #PROCINFO_INITFUNC @ initialise processor
					@ (return control reg)
ENDPROC(secondary_startup)

/*
 * r6 = &secondary_data
@ -136,6 +136,7 @@ ENTRY(__secondary_switched)
	ldr sp, [r7, #4] @ get secondary_data.stack
	mov fp, #0
	b secondary_start_kernel
ENDPROC(__secondary_switched)

	.type __secondary_data, %object
__secondary_data:
@ -151,7 +152,6 @@ __secondary_data:
 * this is just loading the page table pointer and domain access
 * registers.
 */
	.type __enable_mmu, %function
__enable_mmu:
#ifdef CONFIG_ALIGNMENT_TRAP
	orr r0, r0, #CR_A
@ -174,6 +174,7 @@ __enable_mmu:
	mcr p15, 0, r5, c3, c0, 0 @ load domain access register
	mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
	b __turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU. This completely changes the structure of the visible
@ -187,7 +188,6 @@ __enable_mmu:
 * other registers depend on the function called upon completion
 */
	.align 5
	.type __turn_mmu_on, %function
__turn_mmu_on:
	mov r0, r0
	mcr p15, 0, r0, c1, c0, 0 @ write control reg
@ -195,7 +195,7 @@ __turn_mmu_on:
	mov r3, r3
	mov r3, r3
	mov pc, r13

ENDPROC(__turn_mmu_on)

/*
@ -211,7 +211,6 @@ __turn_mmu_on:
 * r0, r3, r6, r7 corrupted
 * r4 = physical page table address
 */
	.type __create_page_tables, %function
__create_page_tables:
	pgtbl r4 @ page table address

@ -325,6 +324,7 @@ __create_page_tables:
#endif
#endif
	mov pc, lr
ENDPROC(__create_page_tables)
	.ltorg

#include "head-common.S"
@ -8,8 +8,8 @@
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/uaccess.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

static struct fs_struct init_fs = INIT_FS;

@ -1,7 +1,6 @@
#include <linux/module.h>
#include <linux/types.h>

#include <asm/io.h>
#include <linux/io.h>

/*
 * Copy data from IO memory space to "real" memory space.
@ -488,7 +488,7 @@ static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)

	if (!ubit)
		addr -= reg_count;
	addr += (!pbit ^ !ubit);
	addr += (!pbit == !ubit);

	reg_bit_vector = insn & 0xffff;
	while (reg_bit_vector) {
@ -503,7 +503,7 @@ static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
	if (wbit) {
		if (!ubit)
			addr -= reg_count;
		addr -= (!pbit == !ubit);
		regs->uregs[rn] = (long)addr;
	}
}
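The one-token change above is a real fix, not a cleanup. With the P/U mask bits normalised to 0/1, the lowest transfer address sits one word above the (possibly decremented) base exactly when P equals U (the IB and DA modes), not when they differ. A standalone truth-table check:

#include <stdio.h>

int main(void)
{
	/* LDM/STM addressing modes: DA p=0,u=0  IA p=0,u=1
	 *                           DB p=1,u=0  IB p=1,u=1 */
	static const char *name[] = { "DA", "IA", "DB", "IB" };
	int p, u;

	for (p = 0; p <= 1; p++)
		for (u = 0; u <= 1; u++)
			printf("%s: old xor=%d new eq=%d\n",
			       name[2 * p + u], (!p ^ !u), (!p == !u));
	return 0;
}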
@ -200,9 +200,12 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
	}
}

int kprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
static int __kprobes kprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
{
	unsigned long flags;
	local_irq_save(flags);
	kprobe_handler(regs);
	local_irq_restore(flags);
	return 0;
}

@ -6,10 +6,10 @@
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/io.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
@ -28,12 +28,12 @@
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>

#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/thread_notify.h>
#include <asm/uaccess.h>
#include <asm/mach/time.h>

static const char *processor_modes[] = {
@ -267,35 +267,6 @@ void show_regs(struct pt_regs * regs)
	__backtrace();
}

void show_fpregs(struct user_fp *regs)
{
	int i;

	for (i = 0; i < 8; i++) {
		unsigned long *p;
		char type;

		p = (unsigned long *)(regs->fpregs + i);

		switch (regs->ftype[i]) {
		case 1: type = 'f'; break;
		case 2: type = 'd'; break;
		case 3: type = 'e'; break;
		default: type = '?'; break;
		}
		if (regs->init_flag)
			type = '?';

		printk(" f%d(%c): %08lx %08lx %08lx%c",
			i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' ');
	}

	printk("FPSR: %08lx FPCR: %08lx\n",
		(unsigned long)regs->fpsr,
		(unsigned long)regs->fpcr);
}

/*
 * Free current thread data structures etc..
 */
@ -414,7 +385,7 @@ unsigned long get_wchan(struct task_struct *p)
	do {
		if (fp < stack_start || fp > stack_end)
			return 0;
		lr = pc_pointer (((unsigned long *)fp)[-1]);
		lr = ((unsigned long *)fp)[-1];
		if (!in_sched_functions(lr))
			return lr;
		fp = *(unsigned long *) (fp - 12);
@ -18,8 +18,8 @@
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>
@ -126,7 +126,7 @@ ptrace_getrn(struct task_struct *child, unsigned long insn)

	val = get_user_reg(child, reg);
	if (reg == 15)
		val = pc_pointer(val + 8);
		val += 8;

	return val;
}
@ -278,8 +278,7 @@ get_branch_address(struct task_struct *child, unsigned long pc, unsigned long in
			else
				base -= aluop2;
		}
		if (read_u32(child, base, &alt) == 0)
			alt = pc_pointer(alt);
		read_u32(child, base, &alt);
	}
	break;

@ -305,8 +304,7 @@ get_branch_address(struct task_struct *child, unsigned long pc, unsigned long in

		base = ptrace_getrn(child, insn);

		if (read_u32(child, base + nr_regs, &alt) == 0)
			alt = pc_pointer(alt);
		read_u32(child, base + nr_regs, &alt);
		break;
	}
	break;
@ -26,11 +26,13 @@
#include <linux/fs.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
@ -59,13 +61,14 @@ __setup("fpe=", fpe_setup);

extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;
extern void _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

@ -81,8 +84,6 @@ EXPORT_SYMBOL(system_serial_high);
unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

unsigned long __initdata vmalloc_reserve = 128 << 20;


#ifdef MULTI_CPU
struct processor processor;
@ -111,9 +112,6 @@ static struct stack stacks[NR_CPUS];
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
|
||||
#define lp1 io_res[1]
|
||||
#define lp2 io_res[2]
|
||||
|
||||
static const char *cache_types[16] = {
|
||||
"write-through",
|
||||
"write-back",
|
||||
"write-back",
|
||||
"undefined 3",
|
||||
"undefined 4",
|
||||
"undefined 5",
|
||||
"write-back",
|
||||
"write-back",
|
||||
"undefined 8",
|
||||
"undefined 9",
|
||||
"undefined 10",
|
||||
"undefined 11",
|
||||
"undefined 12",
|
||||
"undefined 13",
|
||||
"write-back",
|
||||
"undefined 15",
|
||||
};
|
||||
|
||||
static const char *cache_clean[16] = {
|
||||
"not required",
|
||||
"read-block",
|
||||
"cp15 c7 ops",
|
||||
"undefined 3",
|
||||
"undefined 4",
|
||||
"undefined 5",
|
||||
"cp15 c7 ops",
|
||||
"cp15 c7 ops",
|
||||
"undefined 8",
|
||||
"undefined 9",
|
||||
"undefined 10",
|
||||
"undefined 11",
|
||||
"undefined 12",
|
||||
"undefined 13",
|
||||
"cp15 c7 ops",
|
||||
"undefined 15",
|
||||
};
|
||||
|
||||
static const char *cache_lockdown[16] = {
|
||||
"not supported",
|
||||
"not supported",
|
||||
"not supported",
|
||||
"undefined 3",
|
||||
"undefined 4",
|
||||
"undefined 5",
|
||||
"format A",
|
||||
"format B",
|
||||
"undefined 8",
|
||||
"undefined 9",
|
||||
"undefined 10",
|
||||
"undefined 11",
|
||||
"undefined 12",
|
||||
"undefined 13",
|
||||
"format C",
|
||||
"undefined 15",
|
||||
};
|
||||
|
||||
static const char *proc_arch[] = {
|
||||
"undefined/unknown",
|
||||
"3",
|
||||
@ -255,61 +196,19 @@ static const char *proc_arch[] = {
|
||||
"?(17)",
|
||||
};
|
||||
|
||||
#define CACHE_TYPE(x) (((x) >> 25) & 15)
|
||||
#define CACHE_S(x) ((x) & (1 << 24))
|
||||
#define CACHE_DSIZE(x) (((x) >> 12) & 4095) /* only if S=1 */
|
||||
#define CACHE_ISIZE(x) ((x) & 4095)
|
||||
|
||||
#define CACHE_SIZE(y) (((y) >> 6) & 7)
|
||||
#define CACHE_ASSOC(y) (((y) >> 3) & 7)
|
||||
#define CACHE_M(y) ((y) & (1 << 2))
|
||||
#define CACHE_LINE(y) ((y) & 3)
|
||||
|
||||
static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
|
||||
{
|
||||
unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);
|
||||
|
||||
printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
|
||||
cpu, prefix,
|
||||
mult << (8 + CACHE_SIZE(cache)),
|
||||
(mult << CACHE_ASSOC(cache)) >> 1,
|
||||
8 << CACHE_LINE(cache),
|
||||
1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
|
||||
CACHE_LINE(cache)));
|
||||
}
|
||||
|
||||
static void __init dump_cpu_info(int cpu)
|
||||
{
|
||||
unsigned int info = read_cpuid(CPUID_CACHETYPE);
|
||||
|
||||
if (info != processor_id) {
|
||||
printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
|
||||
cache_types[CACHE_TYPE(info)]);
|
||||
if (CACHE_S(info)) {
|
||||
dump_cache("I cache", cpu, CACHE_ISIZE(info));
|
||||
dump_cache("D cache", cpu, CACHE_DSIZE(info));
|
||||
} else {
|
||||
dump_cache("cache", cpu, CACHE_ISIZE(info));
|
||||
}
|
||||
}
|
||||
|
||||
if (arch_is_coherent())
|
||||
printk("Cache coherency enabled\n");
|
||||
}
|
||||
|
||||
int cpu_architecture(void)
|
||||
{
|
||||
int cpu_arch;
|
||||
|
||||
if ((processor_id & 0x0008f000) == 0) {
|
||||
if ((read_cpuid_id() & 0x0008f000) == 0) {
|
||||
cpu_arch = CPU_ARCH_UNKNOWN;
|
||||
} else if ((processor_id & 0x0008f000) == 0x00007000) {
|
||||
cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
|
||||
} else if ((processor_id & 0x00080000) == 0x00000000) {
|
||||
cpu_arch = (processor_id >> 16) & 7;
|
||||
} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
|
||||
cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
|
||||
} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
|
||||
cpu_arch = (read_cpuid_id() >> 16) & 7;
|
||||
if (cpu_arch)
|
||||
cpu_arch += CPU_ARCH_ARMv3;
|
||||
} else if ((processor_id & 0x000f0000) == 0x000f0000) {
|
||||
} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
|
||||
unsigned int mmfr0;
|
||||
|
||||
/* Revised CPUID format. Read the Memory Model Feature
|
||||
@ -330,6 +229,34 @@ int cpu_architecture(void)
|
||||
return cpu_arch;
|
||||
}
|
||||
|
||||
static void __init cacheid_init(void)
|
||||
{
|
||||
unsigned int cachetype = read_cpuid_cachetype();
|
||||
unsigned int arch = cpu_architecture();
|
||||
|
||||
if (arch >= CPU_ARCH_ARMv7) {
|
||||
cacheid = CACHEID_VIPT_NONALIASING;
|
||||
if ((cachetype & (3 << 14)) == 1 << 14)
|
||||
cacheid |= CACHEID_ASID_TAGGED;
|
||||
} else if (arch >= CPU_ARCH_ARMv6) {
|
||||
if (cachetype & (1 << 23))
|
||||
cacheid = CACHEID_VIPT_ALIASING;
|
||||
else
|
||||
cacheid = CACHEID_VIPT_NONALIASING;
|
||||
} else {
|
||||
cacheid = CACHEID_VIVT;
|
||||
}
|
||||
|
||||
printk("CPU: %s data cache, %s instruction cache\n",
|
||||
cache_is_vivt() ? "VIVT" :
|
||||
cache_is_vipt_aliasing() ? "VIPT aliasing" :
|
||||
cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
|
||||
cache_is_vivt() ? "VIVT" :
|
||||
icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
|
||||
cache_is_vipt_aliasing() ? "VIPT aliasing" :
|
||||
cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
|
||||
}
|
||||
|
||||
/*
|
||||
* These functions re-use the assembly code in head.S, which
|
||||
* already provide the required functionality.
|
||||
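A condensed restatement (sketch only, not the kernel function) of the policy cacheid_init() encodes: ARMv7 is always VIPT nonaliasing, with an optionally ASID-tagged instruction cache; ARMv6 picks aliasing versus nonaliasing from bit 23 of the cache type register; everything older is VIVT:

/* 'arch_v7'/'arch_v6' and 'cachetype' stand in for the values used
 * by cacheid_init(); returns a label instead of setting cacheid. */
static const char *classify_dcache(int arch_v7, int arch_v6,
				   unsigned int cachetype)
{
	if (arch_v7)
		return "VIPT nonaliasing";
	if (arch_v6)
		return (cachetype & (1 << 23)) ? "VIPT aliasing"
					       : "VIPT nonaliasing";
	return "VIVT";
}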
@ -346,10 +273,10 @@ static void __init setup_processor(void)
	 * types. The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

@ -369,7 +296,7 @@
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
@ -379,14 +306,14 @@
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	cacheid_init();
	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
@ -398,9 +325,6 @@ void cpu_init(void)
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 */
@ -443,20 +367,6 @@ static struct machine_desc * __init setup_machine(unsigned int nr)
	return list;
}

static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);

static void __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank;
@ -502,17 +412,6 @@ static void __init early_mem(char **p)
}
__early_param("mem=", early_mem);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
static void __init early_vmalloc(char **arg)
{
	vmalloc_reserve = memparse(*arg, arg);
}
__early_param("vmalloc=", early_vmalloc);

/*
 * Initial parsing of the command line.
 */
@ -527,12 +426,12 @@ static void __init parse_cmdline(char **cmdline_p, char *from)
	struct early_params *p;

	for (p = &__early_begin; p < &__early_end; p++) {
		int len = strlen(p->arg);
		int arglen = strlen(p->arg);

		if (memcmp(from, p->arg, len) == 0) {
		if (memcmp(from, p->arg, arglen) == 0) {
			if (to != command_line)
				to -= 1;
			from += len;
			from += arglen;
			p->fn(&from);

			while (*from != ' ' && *from != '\0')
@ -579,18 +478,13 @@ request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
	kernel_data.end = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end = __virt_to_phys(virt_end);
		res->start = mi->bank[i].start;
		res->end = mi->bank[i].start + mi->bank[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);
@ -694,26 +588,6 @@ static int __init parse_tag_ramdisk(const struct tag *tag)

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
	       "please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
@ -901,28 +775,12 @@ static const char *hwcap_str[] = {
	NULL
};

static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
@ -949,47 +807,26 @@ static int c_show(struct seq_file *m, void *v)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0008f000) == 0x00000000) {
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", processor_id >> 4);
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");
@ -11,11 +11,11 @@
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

#include "ptrace.h"

@ -27,8 +27,7 @@
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/ipc.h>

#include <asm/uaccess.h>
#include <linux/uaccess.h>

extern unsigned long do_mremap(unsigned long addr, unsigned long old_len,
			       unsigned long new_len, unsigned long flags,

@ -82,7 +82,7 @@
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/ipc.h>
#include <asm/uaccess.h>
#include <linux/uaccess.h>

struct oldabi_stat64 {
	unsigned long long st_dev;

@ -59,7 +59,7 @@ unsigned long profile_pc(struct pt_regs *regs)

	if (in_lock_functions(pc)) {
		fp = regs->ARM_fp;
		pc = pc_pointer(((unsigned long *)fp)[-1]);
		pc = ((unsigned long *)fp)[-1];
	}

	return pc;
@ -19,15 +19,13 @@
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/io.h>

#include "ptrace.h"
#include "signal.h"
@ -69,7 +67,8 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long
 */
static int verify_stack(unsigned long sp)
{
	if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != 0))
	if (sp < PAGE_OFFSET ||
	    (sp > (unsigned long)high_memory && high_memory != NULL))
		return -EFAULT;

	return 0;
@ -328,17 +327,6 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
		get_user(instr, (u32 __user *)pc);
	}

#ifdef CONFIG_KPROBES
	/*
	 * It is possible to have recursive kprobes, so we can't call
	 * the kprobe trap handler with the undef_lock held.
	 */
	if (instr == KPROBE_BREAKPOINT_INSTRUCTION && !user_mode(regs)) {
		kprobe_trap_handler(regs, instr);
		return;
	}
#endif

	if (call_undef_hook(regs, instr) == 0)
		return;

@ -14,8 +14,8 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/thread_notify.h>
#include <asm/io.h>

static inline void dsp_save_state(u32 *state)
{
@ -47,3 +47,5 @@ ENTRY(__aeabi_llsl)
|
||||
mov al, al, lsl r2
|
||||
mov pc, lr
|
||||
|
||||
ENDPROC(__ashldi3)
|
||||
ENDPROC(__aeabi_llsl)
|
||||
|
@ -47,3 +47,5 @@ ENTRY(__aeabi_lasr)
|
||||
mov ah, ah, asr r2
|
||||
mov pc, lr
|
||||
|
||||
ENDPROC(__ashrdi3)
|
||||
ENDPROC(__aeabi_lasr)
|
||||
|
@ -30,6 +30,8 @@ ENTRY(c_backtrace)
|
||||
|
||||
#if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
|
||||
mov pc, lr
|
||||
ENDPROC(__backtrace)
|
||||
ENDPROC(c_backtrace)
|
||||
#else
|
||||
stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location...
|
||||
movs frame, r0 @ if frame pointer is zero
|
||||
@ -103,6 +105,8 @@ for_each_frame: tst frame, mask @ Check for address exceptions
|
||||
mov r1, frame
|
||||
bl printk
|
||||
no_frame: ldmfd sp!, {r4 - r8, pc}
|
||||
ENDPROC(__backtrace)
|
||||
ENDPROC(c_backtrace)
|
||||
|
||||
.section __ex_table,"a"
|
||||
.align 3
|
||||
|
@ -19,3 +19,5 @@ ENTRY(_change_bit_be)
|
||||
eor r0, r0, #0x18 @ big endian byte ordering
|
||||
ENTRY(_change_bit_le)
|
||||
bitop eor
|
||||
ENDPROC(_change_bit_be)
|
||||
ENDPROC(_change_bit_le)
|
||||
|
@ -41,9 +41,10 @@ USER( strplt r2, [r0], #4)
|
||||
USER( strnebt r2, [r0], #1)
|
||||
USER( strnebt r2, [r0], #1)
|
||||
tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1
|
||||
USER( strnebt r2, [r0], #1)
|
||||
USER( strnebt r2, [r0])
|
||||
mov r0, #0
|
||||
ldmfd sp!, {r1, pc}
|
||||
ENDPROC(__clear_user)
|
||||
|
||||
.section .fixup,"ax"
|
||||
.align 0
|
||||
|
@ -20,3 +20,5 @@ ENTRY(_clear_bit_be)
|
||||
eor r0, r0, #0x18 @ big endian byte ordering
|
||||
ENTRY(_clear_bit_le)
|
||||
bitop bic
|
||||
ENDPROC(_clear_bit_be)
|
||||
ENDPROC(_clear_bit_le)
|
||||
|
@ -87,6 +87,8 @@ ENTRY(__copy_from_user)
|
||||
|
||||
#include "copy_template.S"
|
||||
|
||||
ENDPROC(__copy_from_user)
|
||||
|
||||
.section .fixup,"ax"
|
||||
.align 0
|
||||
copy_abort_preamble
|
||||
|
@ -44,3 +44,4 @@ ENTRY(copy_page)
|
||||
PLD( ldmeqia r1!, {r3, r4, ip, lr} )
|
||||
PLD( beq 2b )
|
||||
ldmfd sp!, {r4, pc} @ 3
|
||||
ENDPROC(copy_page)
|
||||
|
@ -90,6 +90,8 @@ ENTRY(__copy_to_user)
|
||||
|
||||
#include "copy_template.S"
|
||||
|
||||
ENDPROC(__copy_to_user)
|
||||
|
||||
.section .fixup,"ax"
|
||||
.align 0
|
||||
copy_abort_preamble
|
||||
|
@ -29,4 +29,5 @@ ENTRY(__csum_ipv6_magic)
|
||||
adcs r0, r0, r2
|
||||
adcs r0, r0, #0
|
||||
ldmfd sp!, {pc}
|
||||
ENDPROC(__csum_ipv6_magic)
|
||||
|
||||
|
@ -139,3 +139,4 @@ ENTRY(csum_partial)
|
||||
tst len, #0x1c
|
||||
bne 4b
|
||||
b .Lless4
|
||||
ENDPROC(csum_partial)
|
||||
|
@ -18,13 +18,11 @@
|
||||
*/
|
||||
|
||||
.macro save_regs
|
||||
mov ip, sp
|
||||
stmfd sp!, {r1, r4 - r8, fp, ip, lr, pc}
|
||||
sub fp, ip, #4
|
||||
stmfd sp!, {r1, r4 - r8, lr}
|
||||
.endm
|
||||
|
||||
.macro load_regs
|
||||
ldmfd sp, {r1, r4 - r8, fp, sp, pc}
|
||||
ldmfd sp!, {r1, r4 - r8, pc}
|
||||
.endm
|
||||
|
||||
.macro load1b, reg1
|
||||
@ -50,5 +48,6 @@
|
||||
.endm
|
||||
|
||||
#define FN_ENTRY ENTRY(csum_partial_copy_nocheck)
|
||||
#define FN_EXIT ENDPROC(csum_partial_copy_nocheck)
|
||||
|
||||
#include "csumpartialcopygeneric.S"
|
||||
|
@ -329,3 +329,4 @@ FN_ENTRY
|
||||
adcs sum, sum, r4, push #24
|
||||
mov r5, r4, get_byte_1
|
||||
b .Lexit
|
||||
FN_EXIT
|
||||
|
@ -18,13 +18,11 @@
|
||||
.text
|
||||
|
||||
.macro save_regs
|
||||
mov ip, sp
|
||||
stmfd sp!, {r1 - r2, r4 - r8, fp, ip, lr, pc}
|
||||
sub fp, ip, #4
|
||||
stmfd sp!, {r1, r2, r4 - r8, lr}
|
||||
.endm
|
||||
|
||||
.macro load_regs
|
||||
ldmfd sp, {r1, r2, r4-r8, fp, sp, pc}
|
||||
ldmfd sp!, {r1, r2, r4 - r8, pc}
|
||||
.endm
|
||||
|
||||
.macro load1b, reg1
|
||||
@ -82,6 +80,7 @@
|
||||
*/
|
||||
|
||||
#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
|
||||
#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
|
||||
|
||||
#include "csumpartialcopygeneric.S"
|
||||
|
||||
|
@ -60,3 +60,6 @@ ENTRY(__delay)
|
||||
#endif
|
||||
bhi __delay
|
||||
mov pc, lr
|
||||
ENDPROC(__udelay)
|
||||
ENDPROC(__const_udelay)
|
||||
ENDPROC(__delay)
|
||||
|
@ -198,3 +198,4 @@ ENTRY(__do_div64)
|
||||
mov xh, #0
|
||||
ldr pc, [sp], #8
|
||||
|
||||
ENDPROC(__do_div64)
|
||||
|
@ -33,6 +33,7 @@ ENTRY(_find_first_zero_bit_le)
|
||||
blo 1b
|
||||
3: mov r0, r1 @ no free bits
|
||||
mov pc, lr
|
||||
ENDPROC(_find_first_zero_bit_le)
|
||||
|
||||
/*
|
||||
* Purpose : Find next 'zero' bit
|
||||
@ -50,6 +51,7 @@ ENTRY(_find_next_zero_bit_le)
|
||||
orr r2, r2, #7 @ if zero, then no bits here
|
||||
add r2, r2, #1 @ align bit pointer
|
||||
b 2b @ loop for next bit
|
||||
ENDPROC(_find_next_zero_bit_le)
|
||||
|
||||
/*
|
||||
* Purpose : Find a 'one' bit
|
||||
@ -67,6 +69,7 @@ ENTRY(_find_first_bit_le)
|
||||
blo 1b
|
||||
3: mov r0, r1 @ no free bits
|
||||
mov pc, lr
|
||||
ENDPROC(_find_first_bit_le)
|
||||
|
||||
/*
|
||||
* Purpose : Find next 'one' bit
|
||||
@ -83,6 +86,7 @@ ENTRY(_find_next_bit_le)
|
||||
orr r2, r2, #7 @ if zero, then no bits here
|
||||
add r2, r2, #1 @ align bit pointer
|
||||
b 2b @ loop for next bit
|
||||
ENDPROC(_find_next_bit_le)
|
||||
|
||||
#ifdef __ARMEB__
|
||||
|
||||
@ -99,6 +103,7 @@ ENTRY(_find_first_zero_bit_be)
|
||||
blo 1b
|
||||
3: mov r0, r1 @ no free bits
|
||||
mov pc, lr
|
||||
ENDPROC(_find_first_zero_bit_be)
|
||||
|
||||
ENTRY(_find_next_zero_bit_be)
|
||||
teq r1, #0
|
||||
@ -113,6 +118,7 @@ ENTRY(_find_next_zero_bit_be)
|
||||
orr r2, r2, #7 @ if zero, then no bits here
|
||||
add r2, r2, #1 @ align bit pointer
|
||||
b 2b @ loop for next bit
|
||||
ENDPROC(_find_next_zero_bit_be)
|
||||
|
||||
ENTRY(_find_first_bit_be)
|
||||
teq r1, #0
|
||||
@ -127,6 +133,7 @@ ENTRY(_find_first_bit_be)
|
||||
blo 1b
|
||||
3: mov r0, r1 @ no free bits
|
||||
mov pc, lr
|
||||
ENDPROC(_find_first_bit_be)
|
||||
|
||||
ENTRY(_find_next_bit_be)
|
||||
teq r1, #0
|
||||
@ -140,6 +147,7 @@ ENTRY(_find_next_bit_be)
|
||||
orr r2, r2, #7 @ if zero, then no bits here
|
||||
add r2, r2, #1 @ align bit pointer
|
||||
b 2b @ loop for next bit
|
||||
ENDPROC(_find_next_bit_be)
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -26,16 +26,16 @@
|
||||
* Note that ADDR_LIMIT is either 0 or 0xc0000000.
|
||||
* Note also that it is intended that __get_user_bad is not global.
|
||||
*/
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/errno.h>
|
||||
|
||||
.global __get_user_1
|
||||
__get_user_1:
|
||||
ENTRY(__get_user_1)
|
||||
1: ldrbt r2, [r0]
|
||||
mov r0, #0
|
||||
mov pc, lr
|
||||
ENDPROC(__get_user_1)
|
||||
|
||||
.global __get_user_2
|
||||
__get_user_2:
|
||||
ENTRY(__get_user_2)
|
||||
2: ldrbt r2, [r0], #1
|
||||
3: ldrbt r3, [r0]
|
||||
#ifndef __ARMEB__
|
||||
@ -45,17 +45,19 @@ __get_user_2:
|
||||
#endif
|
||||
mov r0, #0
|
||||
mov pc, lr
|
||||
ENDPROC(__get_user_2)
|
||||
|
||||
.global __get_user_4
|
||||
__get_user_4:
|
||||
ENTRY(__get_user_4)
|
||||
4: ldrt r2, [r0]
|
||||
mov r0, #0
|
||||
mov pc, lr
|
||||
ENDPROC(__get_user_4)
|
||||
|
||||
__get_user_bad:
|
||||
mov r2, #0
|
||||
mov r0, #-EFAULT
|
||||
mov pc, lr
|
||||
ENDPROC(__get_user_bad)
|
||||
|
||||
.section __ex_table, "a"
|
||||
.long 1b, __get_user_bad
|
||||
|
@ -120,3 +120,4 @@ ENTRY(__raw_readsb)
        strgtb  r3, [r1]

        ldmfd   sp!, {r4 - r6, pc}
ENDPROC(__raw_readsb)

@ -76,3 +76,4 @@ ENTRY(__raw_readsl)
8:      mov     r3, ip, get_byte_0
        strb    r3, [r1, #0]
        mov     pc, lr
ENDPROC(__raw_readsl)

@ -128,3 +128,4 @@ ENTRY(__raw_readsw)
_BE_ONLY_(      movne   ip, ip, lsr #24 )
        strneb  ip, [r1]
        ldmfd   sp!, {r4, pc}
ENDPROC(__raw_readsw)

@ -91,3 +91,4 @@ ENTRY(__raw_writesb)
        strgtb  r3, [r0]

        ldmfd   sp!, {r4, r5, pc}
ENDPROC(__raw_writesb)

@ -64,3 +64,4 @@ ENTRY(__raw_writesl)
        str     ip, [r0]
        bne     6b
        mov     pc, lr
ENDPROC(__raw_writesl)

@ -94,3 +94,4 @@ ENTRY(__raw_writesw)
3:      movne   ip, r3, lsr #8
        strneh  ip, [r0]
        mov     pc, lr
ENDPROC(__raw_writesw)
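The I/O string accessors follow the same pattern; the only subtlety is placement. ENDPROC() has to sit after the routine's final return, whether that is a plain `mov pc, lr` or a pc loaded out of an ldmfd, because the `.size` it emits measures from the ENTRY label to the point where the macro expands. Schematically (body elided, names from the hunks above):

        ENTRY(__raw_readsb)
                ...                             @ body elided
                ldmfd   sp!, {r4 - r6, pc}      @ return: restore regs, jump
        ENDPROC(__raw_readsb)                   @ .size now covers the body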
@ -230,6 +230,8 @@ ENTRY(__aeabi_uidiv)
        mov     r0, r0, lsr r2
        mov     pc, lr

ENDPROC(__udivsi3)
ENDPROC(__aeabi_uidiv)

ENTRY(__umodsi3)

@ -245,6 +247,7 @@ ENTRY(__umodsi3)

        mov     pc, lr

ENDPROC(__umodsi3)

ENTRY(__divsi3)
ENTRY(__aeabi_idiv)
@ -284,6 +287,8 @@ ENTRY(__aeabi_idiv)
        rsbmi   r0, r0, #0
        mov     pc, lr

ENDPROC(__divsi3)
ENDPROC(__aeabi_idiv)

ENTRY(__modsi3)

@ -305,6 +310,8 @@ ENTRY(__modsi3)
        rsbmi   r0, r0, #0
        mov     pc, lr

ENDPROC(__modsi3)

#ifdef CONFIG_AEABI

ENTRY(__aeabi_uidivmod)
@ -316,6 +323,8 @@ ENTRY(__aeabi_uidivmod)
        sub     r1, r1, r3
        mov     pc, lr

ENDPROC(__aeabi_uidivmod)

ENTRY(__aeabi_idivmod)

        stmfd   sp!, {r0, r1, ip, lr}
@ -325,6 +334,8 @@ ENTRY(__aeabi_idivmod)
        sub     r1, r1, r3
        mov     pc, lr

ENDPROC(__aeabi_idivmod)

#endif

Ldiv0:

@ -47,3 +47,5 @@ ENTRY(__aeabi_llsr)
        mov     ah, ah, lsr r2
        mov     pc, lr

ENDPROC(__lshrdi3)
ENDPROC(__aeabi_llsr)
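The division and shift helpers show the aliased-entry case: one body is reachable under both its classic libgcc name and its ARM EABI name, so each public label gets its own ENDPROC(). A trimmed sketch using names from the hunks above:

        ENTRY(__udivsi3)
        ENTRY(__aeabi_uidiv)            @ second entry label, same body
                ...
                mov     pc, lr

        ENDPROC(__udivsi3)              @ one annotation per public label
        ENDPROC(__aeabi_uidiv)

Both .size values measure to the same end point, so either symbol resolves to the full routine.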
@ -23,3 +23,4 @@ ENTRY(memchr)
        sub     r0, r0, #1
2:      movne   r0, #0
        mov     pc, lr
ENDPROC(memchr)

@ -57,3 +57,4 @@ ENTRY(memcpy)

#include "copy_template.S"

ENDPROC(memcpy)

@ -196,3 +196,4 @@ ENTRY(memmove)

18:     backward_copy_shift push=24 pull=8

ENDPROC(memmove)

@ -124,3 +124,4 @@ ENTRY(memset)
        tst     r2, #1
        strneb  r1, [r0], #1
        mov     pc, lr
ENDPROC(memset)

@ -122,3 +122,4 @@ ENTRY(__memzero)
        tst     r1, #1                  @ 1 a byte left over
        strneb  r2, [r0], #1            @ 1
        mov     pc, lr                  @ 1
ENDPROC(__memzero)

@ -43,3 +43,5 @@ ENTRY(__aeabi_lmul)
        adc     xh, xh, ip, lsr #16
        mov     pc, lr

ENDPROC(__muldi3)
ENDPROC(__aeabi_lmul)
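memcpy is worth a note: its body is not written inline but pulled in via #include "copy_template.S", a shared ldm/stm copy loop that (with different accessor macros, as far as the arch/arm/lib layout goes) also backs the user-copy routines. The new ENDPROC(memcpy) therefore brackets the whole expanded template:

        ENTRY(memcpy)
                ...                     @ argument setup elided
        #include "copy_template.S"      @ body expands here
        ENDPROC(memcpy)                 @ .size spans the expanded template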
@ -26,16 +26,16 @@
 * Note that ADDR_LIMIT is either 0 or 0xc0000000
 * Note also that it is intended that __put_user_bad is not global.
 */
#include <linux/linkage.h>
#include <asm/errno.h>

        .global __put_user_1
__put_user_1:
ENTRY(__put_user_1)
1:      strbt   r2, [r0]
        mov     r0, #0
        mov     pc, lr
ENDPROC(__put_user_1)

        .global __put_user_2
__put_user_2:
ENTRY(__put_user_2)
        mov     ip, r2, lsr #8
#ifndef __ARMEB__
2:      strbt   r2, [r0], #1
@ -46,23 +46,25 @@ __put_user_2:
#endif
        mov     r0, #0
        mov     pc, lr
ENDPROC(__put_user_2)

        .global __put_user_4
__put_user_4:
ENTRY(__put_user_4)
4:      strt    r2, [r0]
        mov     r0, #0
        mov     pc, lr
ENDPROC(__put_user_4)

        .global __put_user_8
__put_user_8:
ENTRY(__put_user_8)
5:      strt    r2, [r0], #4
6:      strt    r3, [r0]
        mov     r0, #0
        mov     pc, lr
ENDPROC(__put_user_8)

__put_user_bad:
        mov     r0, #-EFAULT
        mov     pc, lr
ENDPROC(__put_user_bad)

        .section __ex_table, "a"
        .long   1b, __put_user_bad
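putuser.S mirrors getuser.S, including the unchanged exception-table tail: every numbered, potentially faulting strt/strbt gets a `.long` entry pairing its address with __put_user_bad, which lets the fault handler turn a bad user pointer into -EFAULT rather than an oops. A trimmed sketch of the mechanism, using symbols from the hunk above:

        1:      strbt   r2, [r0]                @ may fault on a bad pointer
        ...
        __put_user_bad:
                mov     r0, #-EFAULT            @ fixup: report failure
                mov     pc, lr
        ENDPROC(__put_user_bad)

                .section __ex_table, "a"
                .long   1b, __put_user_bad      @ faulting insn -> fixup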
@ -20,3 +20,5 @@ ENTRY(_set_bit_be)
        eor     r0, r0, #0x18           @ big endian byte ordering
ENTRY(_set_bit_le)
        bitop   orr
ENDPROC(_set_bit_be)
ENDPROC(_set_bit_le)

@ -185,6 +185,8 @@ ENTRY(sha_transform)

        ldmfd   sp!, {r4 - r8, pc}

ENDPROC(sha_transform)

.L_sha_K:
        .word   0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6

@ -204,3 +206,4 @@ ENTRY(sha_init)
        stmia   r0, {r1, r2, r3, ip, lr}
        ldr     pc, [sp], #4

ENDPROC(sha_init)

@ -24,3 +24,4 @@ ENTRY(strchr)
        movne   r0, #0
        subeq   r0, r0, #1
        mov     pc, lr
ENDPROC(strchr)

@ -31,6 +31,7 @@ USER( ldrplbt r3, [r1], #1)
        sub     r1, r1, #1              @ take NUL character out of count
2:      sub     r0, r1, ip
        mov     pc, lr
ENDPROC(__strncpy_from_user)

        .section .fixup,"ax"
        .align  0

@ -31,6 +31,7 @@ USER( ldrbt r3, [r0], #1)
        add     r0, r0, #1
2:      sub     r0, r0, r2
        mov     pc, lr
ENDPROC(__strnlen_user)

        .section .fixup,"ax"
        .align  0

@ -23,3 +23,4 @@ ENTRY(strrchr)
        bne     1b
        mov     r0, r3
        mov     pc, lr
ENDPROC(strrchr)

@ -16,3 +16,5 @@ ENTRY(_test_and_change_bit_be)
        eor     r0, r0, #0x18           @ big endian byte ordering
ENTRY(_test_and_change_bit_le)
        testop  eor, strb
ENDPROC(_test_and_change_bit_be)
ENDPROC(_test_and_change_bit_le)

@ -16,3 +16,5 @@ ENTRY(_test_and_clear_bit_be)
        eor     r0, r0, #0x18           @ big endian byte ordering
ENTRY(_test_and_clear_bit_le)
        testop  bicne, strneb
ENDPROC(_test_and_clear_bit_be)
ENDPROC(_test_and_clear_bit_le)
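The bit-operation files all share one template: the _be entry folds in the big-endian bit-number fixup (eor r0, r0, #0x18) and falls straight through into the _le entry, whose body is generated by the shared bitop/testop macros, so each of the two labels gets its own ENDPROC(). As a hedged sketch of roughly what `bitop orr` generates, with r0 holding the bit number and r1 the base address (the real macro lives in arch/arm/lib/bitops.h and additionally handles IRQ masking or ldrex/strex depending on the CPU):

        @ sketch only, not the verbatim bitops.h macro
                and     r3, r0, #7              @ bit index within its byte
                mov     r2, #1
                ldrb    ip, [r1, r0, lsr #3]    @ load the byte holding the bit
                orr     ip, ip, r2, lsl r3      @ the instruction passed to bitop
                strb    ip, [r1, r0, lsr #3]    @ store the byte back
                mov     pc, lr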
Some files were not shown because too many files have changed in this diff.