Merge branches 'arnd-fixes', 'clk', 'misc', 'v7' and 'fixes' into for-next

Russell King 2015-06-12 21:18:08 +01:00
74 changed files with 1102 additions and 531 deletions


@ -67,6 +67,11 @@ Optional properties:
disable if zero.
- arm,prefetch-offset : Override prefetch offset value. Valid values are
0-7, 15, 23, and 31.
- prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
(forcibly enable), property absent (retain settings set by firmware)
- prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
<1> (forcibly enable), property absent (retain settings set by
firmware)
Example:


@ -856,6 +856,10 @@ address which can extend beyond that limit.
name may clash with standard defined ones, you prefix them with your
vendor name and a comma.
Additional properties for the root node:
- serial-number : a string representing the device's serial number
b) The /cpus node
This node is the parent of all individual CPU nodes. It doesn't


@ -60,7 +60,7 @@ config ARM
select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_MEMBLOCK
select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_OPTPROBES if !THUMB2_KERNEL
select HAVE_PERF_EVENTS
@ -975,11 +975,6 @@ config PLAT_PXA
config PLAT_VERSATILE
bool
config ARM_TIMER_SP804
bool
select CLKSRC_MMIO
select CLKSRC_OF if OF
source "arch/arm/firmware/Kconfig"
source arch/arm/mm/Kconfig
@ -1682,6 +1677,21 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE
config ARCH_WANT_GENERAL_HUGETLB
def_bool y
config ARM_MODULE_PLTS
bool "Use PLTs to allow module memory to spill over into vmalloc area"
depends on MODULES
help
Allocate PLTs when loading modules so that jumps and calls whose
targets are too far away for their relative offsets to be encoded
in the instructions themselves can be bounced via veneers in the
module's PLT. This allows modules to be allocated in the generic
vmalloc area after the dedicated module memory area has been
exhausted. The modules will use slightly more memory, but after
rounding up to page size, the actual memory footprint is usually
the same.
Say y if you are getting out of memory errors while loading modules
source "mm/Kconfig"
config FORCE_MAX_ZONEORDER


@ -19,6 +19,10 @@ LDFLAGS_vmlinux += --be8
LDFLAGS_MODULE += --be8
endif
ifeq ($(CONFIG_ARM_MODULE_PLTS),y)
LDFLAGS_MODULE += -T $(srctree)/arch/arm/kernel/module.lds
endif
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe


@ -11,7 +11,6 @@ obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
CFLAGS_REMOVE_mcpm_entry.o = -pg
AFLAGS_mcpm_head.o := -march=armv7-a


@ -94,6 +94,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
break;
#endif
default:
/* Cause a link-time error, the xchg() size is not supported */
__bad_xchg(ptr, size), ret = 0;
break;
}
@ -102,8 +103,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
return ret;
}
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define xchg(ptr, x) ({ \
(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
sizeof(*(ptr))); \
})
#include <asm-generic/cmpxchg-local.h>
@ -118,14 +121,16 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg_local(ptr, o, n) ({ \
(__typeof(*ptr))__cmpxchg_local_generic((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))); \
})
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif
#else /* min ARCH >= ARMv6 */
@ -201,11 +206,12 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
return ret;
}
#define cmpxchg(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
#define cmpxchg(ptr,o,n) ({ \
(__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))); \
})
static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long old,
@ -227,6 +233,13 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret;
}
#define cmpxchg_local(ptr, o, n) ({ \
(__typeof(*ptr))__cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))); \
})
static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
unsigned long long old,
unsigned long long new)
@ -252,6 +265,14 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
return oldval;
}
#define cmpxchg64_relaxed(ptr, o, n) ({ \
(__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
})
#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
unsigned long long old,
unsigned long long new)
@ -265,23 +286,11 @@ static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
return ret;
}
#define cmpxchg_local(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)))
#define cmpxchg64_relaxed(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) ({ \
(__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
})
#endif /* __LINUX_ARM_ARCH__ >= 6 */
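Note: the conversion above swaps single-expression macro bodies for GNU C statement-expression form (({ ... })); both forms evaluate to a value, so call sites are unchanged. A hedged usage sketch, not from this patch ('ctr' is a hypothetical shared counter):

/* Lock-free increment built on cmpxchg(); kernel context assumed. */
#include <linux/atomic.h>
#include <linux/compiler.h>

static unsigned int ctr;

static void ctr_bump(void)
{
	unsigned int old;

	do {
		old = READ_ONCE(ctr);		/* snapshot current value */
	} while (cmpxchg(&ctr, old, old + 1) != old);	/* retry if raced */
}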


@ -25,7 +25,7 @@ struct dma_iommu_mapping {
};
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size);
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);


@ -23,6 +23,7 @@
#ifdef __KERNEL__
#include <linux/string.h>
#include <linux/types.h>
#include <linux/blk_types.h>
#include <asm/byteorder.h>
@ -73,17 +74,16 @@ void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %1, %0"
: "+Q" (*(volatile u16 __force *)addr)
: "r" (val));
: : "Q" (*(volatile u16 __force *)addr), "r" (val));
}
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 val;
asm volatile("ldrh %1, %0"
: "+Q" (*(volatile u16 __force *)addr),
"=r" (val));
asm volatile("ldrh %0, %1"
: "=r" (val)
: "Q" (*(volatile u16 __force *)addr));
return val;
}
#endif
@ -92,25 +92,23 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
asm volatile("strb %1, %0"
: "+Qo" (*(volatile u8 __force *)addr)
: "r" (val));
: : "Qo" (*(volatile u8 __force *)addr), "r" (val));
}
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
asm volatile("str %1, %0"
: "+Qo" (*(volatile u32 __force *)addr)
: "r" (val));
: : "Qo" (*(volatile u32 __force *)addr), "r" (val));
}
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 val;
asm volatile("ldrb %1, %0"
: "+Qo" (*(volatile u8 __force *)addr),
"=r" (val));
asm volatile("ldrb %0, %1"
: "=r" (val)
: "Qo" (*(volatile u8 __force *)addr));
return val;
}
@ -118,9 +116,9 @@ static inline u8 __raw_readb(const volatile void __iomem *addr)
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 val;
asm volatile("ldr %1, %0"
: "+Qo" (*(volatile u32 __force *)addr),
"=r" (val));
asm volatile("ldr %0, %1"
: "=r" (val)
: "Qo" (*(volatile u32 __force *)addr));
return val;
}
@ -319,9 +317,33 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define writesw(p,d,l) __raw_writesw(p,d,l)
#define writesl(p,d,l) __raw_writesl(p,d,l)
#ifndef __ARMBE__
static inline void memset_io(volatile void __iomem *dst, unsigned c,
size_t count)
{
memset((void __force *)dst, c, count);
}
#define memset_io(dst,c,count) memset_io(dst,c,count)
static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
size_t count)
{
memcpy(to, (const void __force *)from, count);
}
#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
static inline void memcpy_toio(volatile void __iomem *to, const void *from,
size_t count)
{
memcpy((void __force *)to, from, count);
}
#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
#else
#define memset_io(c,v,l) _memset_io(c,(v),(l))
#define memcpy_fromio(a,c,l) _memcpy_fromio((a),c,(l))
#define memcpy_toio(c,a,l) _memcpy_toio(c,(a),(l))
#endif
#endif /* readl */
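Note: on little-endian builds the new inline functions above forward straight to the optimized memset()/memcpy() with the __iomem qualifier cast away; big-endian keeps the byte-wise _memset_io() and friends. A hedged usage sketch, not from this patch (all names and offsets hypothetical):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sizes.h>

/* Stage a buffer through a device memory window; assumes len <= SZ_4K. */
static int stage_buffer(phys_addr_t phys, const void *buf, size_t len)
{
	void __iomem *win = ioremap(phys, SZ_4K);

	if (!win)
		return -ENOMEM;
	memset_io(win, 0, SZ_4K);	/* clear the whole window */
	memcpy_toio(win, buf, len);	/* CPU buffer -> device memory */
	iounmap(win);
	return 0;
}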


@ -51,7 +51,7 @@ struct machine_desc {
bool (*smp_init)(void);
void (*fixup)(struct tag *, char **);
void (*dt_fixup)(void);
void (*init_meminfo)(void);
long long (*pv_fixup)(void);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */
void (*init_early)(void);


@ -18,8 +18,6 @@
#include <linux/types.h>
#include <linux/sizes.h>
#include <asm/cache.h>
#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
@ -132,20 +130,6 @@
#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
/*
* Minimum guaranteed alignment in pgd_alloc(). The page table pointers passed
* around in head.S and proc-*.S are shifted by this amount, in order to
* leave spare high bits for systems with physical address extension. This
* does not fully accommodate the 40-bit addressing capability of ARM LPAE, but
* gives us about 38 bits or so.
*/
#ifdef CONFIG_ARM_LPAE
#define ARCH_PGD_SHIFT L1_CACHE_SHIFT
#else
#define ARCH_PGD_SHIFT 0
#endif
#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
/*
* PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
* memory. This is used for XIP and NoMMU kernels, and on platforms that don't


@ -16,11 +16,21 @@ enum {
ARM_SEC_UNLIKELY,
ARM_SEC_MAX,
};
#endif
struct mod_arch_specific {
#ifdef CONFIG_ARM_UNWIND
struct unwind_table *unwind[ARM_SEC_MAX];
};
#endif
#ifdef CONFIG_ARM_MODULE_PLTS
struct elf32_shdr *core_plt;
struct elf32_shdr *init_plt;
int core_plt_count;
int init_plt_count;
#endif
};
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val);
/*
* Add the ARM architecture version to the version magic string


@ -125,13 +125,6 @@ extern void cpu_resume(void);
ttbr; \
})
#define cpu_set_ttbr(nr, val) \
do { \
u64 ttbr = val; \
__asm__("mcrr p15, " #nr ", %Q0, %R0, c2" \
: : "r" (ttbr)); \
} while (0)
#define cpu_get_pgd() \
({ \
u64 pg = cpu_get_ttbr(0); \


@ -61,7 +61,7 @@ asmlinkage void secondary_start_kernel(void);
struct secondary_data {
union {
unsigned long mpu_rgn_szr;
unsigned long pgdir;
u64 pgdir;
};
unsigned long swapper_pg_dir;
void *stack;
@ -69,6 +69,7 @@ struct secondary_data {
extern struct secondary_data secondary_data;
extern volatile int pen_release;
extern void secondary_startup(void);
extern void secondary_startup_arm(void);
extern int __cpu_disable(void);
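Note: widening pgdir to u64 shifts the members that follow it, which is why the head.S hunks further down change their load offsets from #4/#8 to #8/#12. A standalone sketch of the layout arithmetic (the expected offsets are for 32-bit ARM; a 64-bit host prints different values for stack):

#include <stdio.h>
#include <stddef.h>

struct secondary_data {
	union {
		unsigned long mpu_rgn_szr;
		unsigned long long pgdir;	/* now 8 bytes wide */
	};
	unsigned long swapper_pg_dir;
	void *stack;
};

int main(void)
{
	/* expected on 32-bit ARM: pgdir @0, swapper_pg_dir @8, stack @12 */
	printf("pgdir @%zu, swapper_pg_dir @%zu, stack @%zu\n",
	       offsetof(struct secondary_data, pgdir),
	       offsetof(struct secondary_data, swapper_pg_dir),
	       offsetof(struct secondary_data, stack));
	return 0;
}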


@ -17,6 +17,7 @@
/* information about the system we're running on */
extern unsigned int system_rev;
extern const char *system_serial;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;


@ -34,6 +34,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o


@ -33,7 +33,9 @@ ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
disable_irq @ disable interrupts
ldr r1, [tsk, #TI_FLAGS]
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
tst r1, #_TIF_SYSCALL_WORK
bne __sys_trace_return
tst r1, #_TIF_WORK_MASK
bne fast_work_pending
asm_trace_hardirqs_on


@ -117,9 +117,14 @@ ENTRY(__switch_to)
ENDPROC(__switch_to)
.data
.align 8
#if CONFIG_CPU_V7M_NUM_IRQ <= 112
.align 9
#else
.align 10
#endif
/*
* Vector table (64 words => 256 bytes natural alignment)
* Vector table (natural alignment needs to be ensured)
*/
ENTRY(vector_table)
.long 0 @ 0 - Reset stack pointer
@ -138,6 +143,6 @@ ENTRY(vector_table)
.long __invalid_entry @ 13 - Reserved
.long __pendsv_entry @ 14 - PendSV
.long __invalid_entry @ 15 - SysTick
.rept 64 - 16
.long __irq_entry @ 16..64 - External Interrupts
.rept CONFIG_CPU_V7M_NUM_IRQ
.long __irq_entry @ External Interrupts
.endr
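Note: the new .align directives follow from the v7-M requirement that the vector table be aligned to a power of two no smaller than its size: 16 system vectors plus CONFIG_CPU_V7M_NUM_IRQ external entries, 4 bytes each. A standalone sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	for (int nr_irq = 112; nr_irq <= 113; nr_irq++) {
		unsigned int bytes = (16 + nr_irq) * 4;	/* 16 system vectors */
		int align = bytes <= 512 ? 9 : 10;	/* 2^9 vs 2^10 bytes */

		printf("NUM_IRQ=%d -> %u bytes -> .align %d\n",
		       nr_irq, bytes, align);
	}
	return 0;	/* prints 512 -> 9 and 516 -> 10, matching the #if */
}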


@ -123,7 +123,7 @@ ENTRY(secondary_startup)
ENDPROC(secondary_startup)
ENTRY(__secondary_switched)
ldr sp, [r7, #8] @ set up the stack pointer
ldr sp, [r7, #12] @ set up the stack pointer
mov fp, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)


@ -131,13 +131,30 @@ ENTRY(stext)
* The following calls CPU specific code in a position independent
* manner. See arch/arm/mm/proc-*.S for details. r10 = base of
* xxx_proc_info structure selected by __lookup_processor_type
* above. On return, the CPU will be ready for the MMU to be
* turned on, and r0 will hold the CPU control register value.
* above.
*
* The processor init function will be called with:
* r1 - machine type
* r2 - boot data (atags/dt) pointer
* r4 - translation table base (low word)
* r5 - translation table base (high word, if LPAE)
* r8 - translation table base 1 (pfn if LPAE)
* r9 - cpuid
* r13 - virtual address for __enable_mmu -> __turn_mmu_on
*
* On return, the CPU will be ready for the MMU to be turned on,
* r0 will hold the CPU control register value, r1, r2, r4, and
* r9 will be preserved. r5 will also be preserved if LPAE.
*/
ldr r13, =__mmap_switched @ address to jump to after
@ mmu has been enabled
adr lr, BSYM(1f) @ return (PIC) address
#ifdef CONFIG_ARM_LPAE
mov r5, #0 @ high TTBR0
mov r8, r4, lsr #12 @ TTBR1 is swapper_pg_dir pfn
#else
mov r8, r4 @ set TTBR1 to swapper_pg_dir
#endif
ldr r12, [r10, #PROCINFO_INITFUNC]
add r12, r12, r10
ret r12
@ -158,7 +175,7 @@ ENDPROC(stext)
*
* Returns:
* r0, r3, r5-r7 corrupted
* r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
* r4 = physical page table address
*/
__create_page_tables:
pgtbl r4, r8 @ page table address
@ -333,7 +350,6 @@ __create_page_tables:
#endif
#ifdef CONFIG_ARM_LPAE
sub r4, r4, #0x1000 @ point to the PGD table
mov r4, r4, lsr #ARCH_PGD_SHIFT
#endif
ret lr
ENDPROC(__create_page_tables)
@ -346,8 +362,8 @@ __turn_mmu_on_loc:
#if defined(CONFIG_SMP)
.text
ENTRY(secondary_startup_arm)
.arm
ENTRY(secondary_startup_arm)
THUMB( adr r9, BSYM(1f) ) @ Kernel is entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
THUMB( .thumb ) @ switch to Thumb now.
@ -381,9 +397,9 @@ ENTRY(secondary_startup)
adr r4, __secondary_data
ldmia r4, {r5, r7, r12} @ address to jump to after
sub lr, r4, r5 @ mmu has been enabled
ldr r4, [r7, lr] @ get secondary_data.pgdir
add r7, r7, #4
ldr r8, [r7, lr] @ get secondary_data.swapper_pg_dir
add r3, r7, lr
ldrd r4, [r3, #0] @ get secondary_data.pgdir
ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
adr lr, BSYM(__enable_mmu) @ return address
mov r13, r12 @ __secondary_switched address
ldr r12, [r10, #PROCINFO_INITFUNC]
@ -397,7 +413,7 @@ ENDPROC(secondary_startup_arm)
* r6 = &secondary_data
*/
ENTRY(__secondary_switched)
ldr sp, [r7, #4] @ get secondary_data.stack
ldr sp, [r7, #12] @ get secondary_data.stack
mov fp, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
@ -416,12 +432,14 @@ __secondary_data:
/*
* Setup common bits before finally enabling the MMU. Essentially
* this is just loading the page table pointer and domain access
* registers.
* registers. All these registers need to be preserved by the
* processor setup function (or set in the case of r0)
*
* r0 = cp#15 control register
* r1 = machine ID
* r2 = atags or dtb pointer
* r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
* r4 = TTBR pointer (low word)
* r5 = TTBR pointer (high word if LPAE)
* r9 = processor ID
* r13 = *virtual* address to jump to upon completion
*/
@ -440,7 +458,9 @@ __enable_mmu:
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I
#endif
#ifndef CONFIG_ARM_LPAE
#ifdef CONFIG_ARM_LPAE
mcrr p15, 0, r4, r5, c2 @ load TTBR0
#else
mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \


@ -0,0 +1,183 @@
/*
* Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/cache.h>
#include <asm/opcodes.h>
#define PLT_ENT_STRIDE L1_CACHE_BYTES
#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32))
#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)
#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR __opcode_to_mem_thumb32(0xf8dff000 | \
(PLT_ENT_STRIDE - 4))
#else
#define PLT_ENT_LDR __opcode_to_mem_arm(0xe59ff000 | \
(PLT_ENT_STRIDE - 8))
#endif
struct plt_entries {
u32 ldr[PLT_ENT_COUNT];
u32 lit[PLT_ENT_COUNT];
};
static bool in_init(const struct module *mod, u32 addr)
{
return addr - (u32)mod->module_init < mod->init_size;
}
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
struct plt_entries *plt, *plt_end;
int c, *count;
if (in_init(mod, loc)) {
plt = (void *)mod->arch.init_plt->sh_addr;
plt_end = (void *)plt + mod->arch.init_plt->sh_size;
count = &mod->arch.init_plt_count;
} else {
plt = (void *)mod->arch.core_plt->sh_addr;
plt_end = (void *)plt + mod->arch.core_plt->sh_size;
count = &mod->arch.core_plt_count;
}
/* Look for an existing entry pointing to 'val' */
for (c = *count; plt < plt_end; c -= PLT_ENT_COUNT, plt++) {
int i;
if (!c) {
/* Populate a new set of entries */
*plt = (struct plt_entries){
{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
{ val, }
};
++*count;
return (u32)plt->ldr;
}
for (i = 0; i < PLT_ENT_COUNT; i++) {
if (!plt->lit[i]) {
plt->lit[i] = val;
++*count;
}
if (plt->lit[i] == val)
return (u32)&plt->ldr[i];
}
}
BUG();
}
static int duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num,
u32 mask)
{
u32 *loc1, *loc2;
int i;
for (i = 0; i < num; i++) {
if (rel[i].r_info != rel[num].r_info)
continue;
/*
* Identical relocation types against identical symbols can
* still result in different PLT entries if the addend in the
* place is different. So resolve the target of the relocation
* to compare the values.
*/
loc1 = (u32 *)(base + rel[i].r_offset);
loc2 = (u32 *)(base + rel[num].r_offset);
if (((*loc1 ^ *loc2) & mask) == 0)
return 1;
}
return 0;
}
/* Count how many PLT entries we may need */
static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
{
unsigned int ret = 0;
int i;
/*
* Sure, this is order(n^2), but it's usually short, and not
* time critical
*/
for (i = 0; i < num; i++)
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_ARM_CALL:
case R_ARM_PC24:
case R_ARM_JUMP24:
if (!duplicate_rel(base, rel, i,
__opcode_to_mem_arm(0x00ffffff)))
ret++;
break;
#ifdef CONFIG_THUMB2_KERNEL
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
if (!duplicate_rel(base, rel, i,
__opcode_to_mem_thumb32(0x07ff2fff)))
ret++;
#endif
}
return ret;
}
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
unsigned long core_plts = 0, init_plts = 0;
Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
/*
* To store the PLTs, we expand the .text section for core module code
* and the .init.text section for initialization code.
*/
for (s = sechdrs; s < sechdrs_end; ++s)
if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
mod->arch.core_plt = s;
else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
mod->arch.init_plt = s;
if (!mod->arch.core_plt || !mod->arch.init_plt) {
pr_err("%s: sections missing\n", mod->name);
return -ENOEXEC;
}
for (s = sechdrs + 1; s < sechdrs_end; ++s) {
const Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
int numrels = s->sh_size / sizeof(Elf32_Rel);
Elf32_Shdr *dstsec = sechdrs + s->sh_info;
if (s->sh_type != SHT_REL)
continue;
if (strstr(secstrings + s->sh_name, ".init"))
init_plts += count_plts(dstsec->sh_addr, rels, numrels);
else
core_plts += count_plts(dstsec->sh_addr, rels, numrels);
}
mod->arch.core_plt->sh_type = SHT_NOBITS;
mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
mod->arch.core_plt->sh_addralign = L1_CACHE_BYTES;
mod->arch.core_plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
sizeof(struct plt_entries));
mod->arch.core_plt_count = 0;
mod->arch.init_plt->sh_type = SHT_NOBITS;
mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
mod->arch.init_plt->sh_addralign = L1_CACHE_BYTES;
mod->arch.init_plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
sizeof(struct plt_entries));
mod->arch.init_plt_count = 0;
pr_debug("%s: core.plt=%x, init.plt=%x\n", __func__,
mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size);
return 0;
}
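Note: each veneer emitted above is a single ldr pc, [pc, #imm] (the 0xe59ff000 opcode) whose immediate, once the ARM +8 PC bias is added, reaches exactly PLT_ENT_STRIDE bytes ahead: the slot with the same index in the following lit[] array. A standalone check, assuming a 32-byte L1_CACHE_BYTES:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int stride = 32;	/* assumed PLT_ENT_STRIDE */
	unsigned int insn = 0xe59ff000 | (stride - 8);	/* ldr pc, [pc, #24] */
	unsigned int imm12 = insn & 0xfff;

	/* load address = &insn + 8 (PC bias) + imm12 = &insn + stride,
	 * i.e. the literal sharing this entry's index in the next array */
	assert(8 + imm12 == stride);
	printf("veneer loads pc from +%u bytes\n", 8 + imm12);
	return 0;
}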


@ -40,7 +40,12 @@
#ifdef CONFIG_MMU
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
return p;
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
@ -110,6 +115,20 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
offset -= 0x04000000;
offset += sym->st_value - loc;
/*
* Route through a PLT entry if 'offset' exceeds the
* supported range. Note that 'offset + loc + 8'
* contains the absolute jump target, i.e.,
* @sym + addend, corrected for the +8 PC bias.
*/
if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) &&
(offset <= (s32)0xfe000000 ||
offset >= (s32)0x02000000))
offset = get_module_plt(module, loc,
offset + loc + 8)
- loc - 8;
if (offset <= (s32)0xfe000000 ||
offset >= (s32)0x02000000) {
pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
@ -203,6 +222,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
offset -= 0x02000000;
offset += sym->st_value - loc;
/*
* Route through a PLT entry if 'offset' exceeds the
* supported range.
*/
if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) &&
(offset <= (s32)0xff000000 ||
offset >= (s32)0x01000000))
offset = get_module_plt(module, loc,
offset + loc + 4)
- loc - 4;
if (offset <= (s32)0xff000000 ||
offset >= (s32)0x01000000) {
pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",


@ -0,0 +1,4 @@
SECTIONS {
.core.plt : { BYTE(0) }
.init.plt : { BYTE(0) }
}


@ -303,9 +303,15 @@ static int probe_current_pmu(struct arm_pmu *pmu)
static int of_pmu_irq_cfg(struct platform_device *pdev)
{
int i;
int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
int i, irq;
int *irqs;
/* Don't bother with PPIs; they're already affine */
irq = platform_get_irq(pdev, 0);
if (irq >= 0 && irq_is_percpu(irq))
return 0;
irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs)
return -ENOMEM;
@ -317,7 +323,7 @@ static int of_pmu_irq_cfg(struct platform_device *pdev)
i);
if (!dn) {
pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
of_node_full_name(dn), i);
of_node_full_name(pdev->dev.of_node), i);
break;
}


@ -75,8 +75,7 @@ __setup("fpe=", fpe_setup);
extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
struct proc_info_list *);
extern void early_paging_init(const struct machine_desc *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);
@ -93,6 +92,9 @@ unsigned int __atags_pointer __initdata;
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);
const char *system_serial;
EXPORT_SYMBOL(system_serial);
unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);
@ -839,8 +841,25 @@ arch_initcall(customize_machine);
static int __init init_machine_late(void)
{
struct device_node *root;
int ret;
if (machine_desc->init_late)
machine_desc->init_late();
root = of_find_node_by_path("/");
if (root) {
ret = of_property_read_string(root, "serial-number",
&system_serial);
if (ret)
system_serial = NULL;
}
if (!system_serial)
system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
system_serial_high,
system_serial_low);
return 0;
}
late_initcall(init_machine_late);
@ -936,7 +955,9 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
#ifdef CONFIG_MMU
early_paging_init(mdesc);
#endif
setup_dma_zone(mdesc);
sanity_check_meminfo();
arm_memblock_init(mdesc);
@ -1109,8 +1130,7 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "Hardware\t: %s\n", machine_name);
seq_printf(m, "Revision\t: %04x\n", system_rev);
seq_printf(m, "Serial\t\t: %08x%08x\n",
system_serial_high, system_serial_low);
seq_printf(m, "Serial\t\t: %s\n", system_serial);
return 0;
}
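Note: when the DT provides no serial-number, the kasprintf() fallback above reproduces the legacy two-word format, so existing /proc/cpuinfo consumers see no change. A standalone sketch with made-up register values:

#include <stdio.h>

int main(void)
{
	unsigned int high = 0x0123abcd, low = 0x4567ef01;	/* examples */
	char buf[17];	/* 16 hex digits + NUL */

	snprintf(buf, sizeof(buf), "%08x%08x", high, low);
	printf("Serial\t\t: %s\n", buf);	/* 0123abcd4567ef01 */
	return 0;
}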


@ -86,9 +86,11 @@ void __init smp_set_ops(struct smp_operations *ops)
static unsigned long get_arch_pgd(pgd_t *pgd)
{
phys_addr_t pgdir = virt_to_idmap(pgd);
BUG_ON(pgdir & ARCH_PGD_MASK);
return pgdir >> ARCH_PGD_SHIFT;
#ifdef CONFIG_ARM_LPAE
return __phys_to_pfn(virt_to_phys(pgd));
#else
return virt_to_phys(pgd);
#endif
}
int __cpu_up(unsigned int cpu, struct task_struct *idle)
@ -108,7 +110,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
#endif
#ifdef CONFIG_MMU
secondary_data.pgdir = get_arch_pgd(idmap_pgd);
secondary_data.pgdir = virt_to_phys(idmap_pgd);
secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
sync_cache_w(&secondary_data);


@ -17,6 +17,9 @@
#include <asm/mach/map.h>
#include <asm/memory.h>
#include <asm/system_info.h>
#include <asm/traps.h>
#define TCMTR_FORMAT_MASK 0xe0000000U
static struct gen_pool *tcm_pool;
static bool dtcm_present;
@ -175,6 +178,77 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
return 0;
}
/*
* When we are running in the non-secure world and the secure world
* has not explicitly given us access to the TCM we will get an
* undefined error when reading the TCM region register in the
* setup_tcm_bank function (above).
*
* There are two variants of this register read that we need to trap,
* the read for the data TCM and the read for the instruction TCM:
* c0370628: ee196f11 mrc 15, 0, r6, cr9, cr1, {0}
* c0370674: ee196f31 mrc 15, 0, r6, cr9, cr1, {1}
*
* Our undef hook mask explicitly matches all fields of the encoded
* instruction other than the destination register. The mask also
* only allows operand 2 to have the values 0 or 1.
*
* The undefined hook is defined as __init and __initdata, and therefore
* must be removed before tcm_init returns.
*
* In this particular case (MRC with ARM condition code ALways) the
* Thumb-2 and ARM instruction encoding are identical, so this hook
* will work on a Thumb-2 kernel.
*
* See A8.8.107, DDI0406C_C ARM Architecture Reference Manual, Encoding
* T1/A1 for the bit-by-bit details.
*
* mrc p15, 0, XX, c9, c1, 0
* mrc p15, 0, XX, c9, c1, 1
* | | | | | | | +---- opc2 0|1 = 000|001
* | | | | | | +------- CRm 0 = 0001
* | | | | | +----------- CRn 0 = 1001
* | | | | +--------------- Rt ? = ????
* | | | +------------------- opc1 0 = 000
* | | +----------------------- coproc 15 = 1111
* | +-------------------------- condition ALways = 1110
* +----------------------------- instruction MRC = 1110
*
* Encoding this as per A8.8.107 of DDI0406C, Encoding T1/A1, yields:
* 1111 1111 1111 1111 0000 1111 1101 1111 Required Mask
* 1110 1110 0001 1001 ???? 1111 0001 0001 mrc p15, 0, XX, c9, c1, 0
* 1110 1110 0001 1001 ???? 1111 0011 0001 mrc p15, 0, XX, c9, c1, 1
* [ ] [ ] [ ]| [ ] [ ] [ ] [ ]| +--- CRm
* | | | | | | | | +----- SBO
* | | | | | | | +------- opc2
* | | | | | | +----------- coproc
* | | | | | +---------------- Rt
* | | | | +--------------------- CRn
* | | | +------------------------- SBO
* | | +--------------------------- opc1
* | +------------------------------- instruction
* +------------------------------------ condition
*/
#define TCM_REGION_READ_MASK 0xffff0fdf
#define TCM_REGION_READ_INSTR 0xee190f11
#define DEST_REG_SHIFT 12
#define DEST_REG_MASK 0xf
static int __init tcm_handler(struct pt_regs *regs, unsigned int instr)
{
regs->uregs[(instr >> DEST_REG_SHIFT) & DEST_REG_MASK] = 0;
regs->ARM_pc += 4;
return 0;
}
static struct undef_hook tcm_hook __initdata = {
.instr_mask = TCM_REGION_READ_MASK,
.instr_val = TCM_REGION_READ_INSTR,
.cpsr_mask = MODE_MASK,
.cpsr_val = SVC_MODE,
.fn = tcm_handler
};
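A standalone check that the mask/value pair really matches both MRC encodings worked out in the comment above, with the Rt field a don't-care:

#include <assert.h>

int main(void)
{
	unsigned int mask = 0xffff0fdf;	/* TCM_REGION_READ_MASK  */
	unsigned int val  = 0xee190f11;	/* TCM_REGION_READ_INSTR */
	unsigned int mrc0 = 0xee196f11;	/* mrc p15, 0, r6, c9, c1, 0 */
	unsigned int mrc1 = 0xee196f31;	/* mrc p15, 0, r6, c9, c1, 1 */

	assert((mrc0 & mask) == val);	/* opc2 == 0 matches */
	assert((mrc1 & mask) == val);	/* opc2 == 1 matches */
	return 0;
}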
/*
* This initializes the TCM memory
*/
@ -204,9 +278,18 @@ void __init tcm_init(void)
}
tcm_status = read_cpuid_tcmstatus();
/*
* This code only supports v6-compatible TCMTR implementations.
*/
if (tcm_status & TCMTR_FORMAT_MASK)
return;
dtcm_banks = (tcm_status >> 16) & 0x03;
itcm_banks = (tcm_status & 0x03);
register_undef_hook(&tcm_hook);
/* Values greater than 2 for D/ITCM banks are "reserved" */
if (dtcm_banks > 2)
dtcm_banks = 0;
@ -218,7 +301,7 @@ void __init tcm_init(void)
for (i = 0; i < dtcm_banks; i++) {
ret = setup_tcm_bank(0, i, dtcm_banks, &dtcm_end);
if (ret)
return;
goto unregister;
}
/* This means you compiled more code than fits into DTCM */
if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) {
@ -227,6 +310,12 @@ void __init tcm_init(void)
dtcm_code_sz, (dtcm_end - DTCM_OFFSET));
goto no_dtcm;
}
/*
* This means that the DTCM sizes were 0 or the DTCM banks
* were inaccessible due to TrustZone configuration.
*/
if (!(dtcm_end - DTCM_OFFSET))
goto no_dtcm;
dtcm_res.end = dtcm_end - 1;
request_resource(&iomem_resource, &dtcm_res);
dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET;
@ -250,15 +339,21 @@ no_dtcm:
for (i = 0; i < itcm_banks; i++) {
ret = setup_tcm_bank(1, i, itcm_banks, &itcm_end);
if (ret)
return;
goto unregister;
}
/* This means you compiled more code than fits into ITCM */
if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) {
pr_info("CPU ITCM: %u bytes of code compiled to "
"ITCM but only %lu bytes of ITCM present\n",
itcm_code_sz, (itcm_end - ITCM_OFFSET));
return;
goto unregister;
}
/*
* This means that the ITCM sizes were 0 or the ITCM banks
* were inaccessible due to TrustZone configuration.
*/
if (!(itcm_end - ITCM_OFFSET))
goto unregister;
itcm_res.end = itcm_end - 1;
request_resource(&iomem_resource, &itcm_res);
itcm_iomap[0].length = itcm_end - ITCM_OFFSET;
@ -275,6 +370,9 @@ no_dtcm:
pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no "
"ITCM banks present in CPU\n", itcm_code_sz);
}
unregister:
unregister_undef_hook(&tcm_hook);
}
/*


@ -749,14 +749,6 @@ late_initcall(arm_mrc_hook_init);
#endif
void __bad_xchg(volatile void *ptr, int size)
{
pr_err("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
__builtin_return_address(0), ptr, size);
BUG();
}
EXPORT_SYMBOL(__bad_xchg);
/*
* A data abort trap was taken, but we did not handle the instruction.
* Try to abort the user program, or panic if it was the kernel.


@ -11,6 +11,7 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/clkdev.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/clk.h>


@ -37,7 +37,6 @@
#include <linux/stat.h>
#include <linux/termios.h>
#include <asm/hardware/arm_timer.h>
#include <asm/setup.h>
#include <asm/param.h> /* HZ */
#include <asm/mach-types.h>


@ -27,7 +27,6 @@
#include "keystone.h"
static struct notifier_block platform_nb;
static unsigned long keystone_dma_pfn_offset __read_mostly;
static int keystone_platform_notifier(struct notifier_block *nb,
@ -49,11 +48,18 @@ static int keystone_platform_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
static struct notifier_block platform_nb = {
.notifier_call = keystone_platform_notifier,
};
static void __init keystone_init(void)
{
keystone_pm_runtime_init();
if (platform_nb.notifier_call)
if (PHYS_OFFSET >= KEYSTONE_HIGH_PHYS_START) {
keystone_dma_pfn_offset = PFN_DOWN(KEYSTONE_HIGH_PHYS_START -
KEYSTONE_LOW_PHYS_START);
bus_register_notifier(&platform_bus_type, &platform_nb);
}
keystone_pm_runtime_init();
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
@ -62,11 +68,9 @@ static phys_addr_t keystone_virt_to_idmap(unsigned long x)
return (phys_addr_t)(x) - CONFIG_PAGE_OFFSET + KEYSTONE_LOW_PHYS_START;
}
static void __init keystone_init_meminfo(void)
static long long __init keystone_pv_fixup(void)
{
bool lpae = IS_ENABLED(CONFIG_ARM_LPAE);
bool pvpatch = IS_ENABLED(CONFIG_ARM_PATCH_PHYS_VIRT);
phys_addr_t offset = PHYS_OFFSET - KEYSTONE_LOW_PHYS_START;
long long offset;
phys_addr_t mem_start, mem_end;
mem_start = memblock_start_of_DRAM();
@ -75,32 +79,21 @@ static void __init keystone_init_meminfo(void)
/* nothing to do if we are running out of the <32-bit space */
if (mem_start >= KEYSTONE_LOW_PHYS_START &&
mem_end <= KEYSTONE_LOW_PHYS_END)
return;
if (!lpae || !pvpatch) {
pr_crit("Enable %s%s%s to run outside 32-bit space\n",
!lpae ? __stringify(CONFIG_ARM_LPAE) : "",
(!lpae && !pvpatch) ? " and " : "",
!pvpatch ? __stringify(CONFIG_ARM_PATCH_PHYS_VIRT) : "");
}
return 0;
if (mem_start < KEYSTONE_HIGH_PHYS_START ||
mem_end > KEYSTONE_HIGH_PHYS_END) {
pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
(u64)mem_start, (u64)mem_end);
(u64)mem_start, (u64)mem_end);
return 0;
}
offset += KEYSTONE_HIGH_PHYS_START;
__pv_phys_pfn_offset = PFN_DOWN(offset);
__pv_offset = (offset - PAGE_OFFSET);
offset = KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START;
/* Populate the arch idmap hook */
arch_virt_to_idmap = keystone_virt_to_idmap;
platform_nb.notifier_call = keystone_platform_notifier;
keystone_dma_pfn_offset = PFN_DOWN(KEYSTONE_HIGH_PHYS_START -
KEYSTONE_LOW_PHYS_START);
pr_info("Switching to high address space at 0x%llx\n", (u64)offset);
return offset;
}
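Note: the value handed back to early_paging_init() is simply the distance between the two DRAM aliases. A standalone sketch; the base addresses are assumed from Keystone 2 documentation rather than taken from this hunk:

#include <stdio.h>

int main(void)
{
	unsigned long long low  = 0x80000000ULL;	/* assumed LOW_PHYS_START  */
	unsigned long long high = 0x800000000ULL;	/* assumed HIGH_PHYS_START */

	/* what keystone_pv_fixup() would return in this configuration */
	printf("pv offset: %#llx\n", high - low);	/* 0x780000000 */
	return 0;
}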
static const char *const keystone_match[] __initconst = {
@ -115,5 +108,5 @@ DT_MACHINE_START(KEYSTONE, "Keystone")
.smp = smp_ops(keystone_smp_ops),
.init_machine = keystone_init,
.dt_compat = keystone_match,
.init_meminfo = keystone_init_meminfo,
.pv_fixup = keystone_pv_fixup,
MACHINE_END


@ -39,19 +39,6 @@ static int keystone_smp_boot_secondary(unsigned int cpu,
return error;
}
#ifdef CONFIG_ARM_LPAE
static void __cpuinit keystone_smp_secondary_initmem(unsigned int cpu)
{
pgd_t *pgd0 = pgd_offset_k(0);
cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
local_flush_tlb_all();
}
#else
static inline void __cpuinit keystone_smp_secondary_initmem(unsigned int cpu)
{}
#endif
struct smp_operations keystone_smp_ops __initdata = {
.smp_boot_secondary = keystone_smp_boot_secondary,
.smp_secondary_init = keystone_smp_secondary_initmem,
};


@ -1238,10 +1238,7 @@ static struct clk_lookup lookups[] = {
static int __init clk_init(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(lookups); i++)
clkdev_add(&lookups[i]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
/*
* Setup muxed SYSCLK for HCLK PLL base -this selects the


@ -22,8 +22,6 @@
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/hardware/timer-sp.h>
#include "mmio.h"
#include "clcd.h"


@ -7,6 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clkdev.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
@ -14,7 +15,6 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/clk.h>
#include <linux/omapfb.h>
#include <linux/spi/spi.h>


@ -232,14 +232,12 @@ void omap2xxx_clkt_vps_init(void)
struct clk_hw_omap *hw = NULL;
struct clk *clk;
const char *parent_name = "mpu_ck";
struct clk_lookup *lookup = NULL;
omap2xxx_clkt_vps_late_init();
omap2xxx_clkt_vps_check_bootloader_rates();
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
lookup = kzalloc(sizeof(*lookup), GFP_KERNEL);
if (!hw || !lookup)
if (!hw)
goto cleanup;
init.name = "virt_prcm_set";
init.ops = &virt_prcm_set_ops;
@ -249,15 +247,9 @@ void omap2xxx_clkt_vps_init(void)
hw->hw.init = &init;
clk = clk_register(NULL, &hw->hw);
lookup->dev_id = NULL;
lookup->con_id = "cpufreq_ck";
lookup->clk = clk;
clkdev_add(lookup);
clkdev_create(clk, "cpufreq_ck", NULL);
return;
cleanup:
kfree(hw);
kfree(lookup);
}
#endif


@ -47,7 +47,7 @@ static void _add_clkdev(struct omap_device *od, const char *clk_alias,
const char *clk_name)
{
struct clk *r;
struct clk_lookup *l;
int rc;
if (!clk_alias || !clk_name)
return;
@ -62,21 +62,15 @@ static void _add_clkdev(struct omap_device *od, const char *clk_alias,
return;
}
r = clk_get(NULL, clk_name);
if (IS_ERR(r)) {
dev_err(&od->pdev->dev,
"clk_get for %s failed\n", clk_name);
return;
rc = clk_add_alias(clk_alias, dev_name(&od->pdev->dev), clk_name, NULL);
if (rc) {
if (rc == -ENODEV || rc == -ENOMEM)
dev_err(&od->pdev->dev,
"clkdev_alloc for %s failed\n", clk_alias);
else
dev_err(&od->pdev->dev,
"clk_get for %s failed\n", clk_name);
}
l = clkdev_alloc(r, clk_alias, dev_name(&od->pdev->dev));
if (!l) {
dev_err(&od->pdev->dev,
"clkdev_alloc for %s failed\n", clk_alias);
return;
}
clkdev_add(l);
}
/**


@ -10,6 +10,7 @@
*
*/
#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>


@ -11,6 +11,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clkdev.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/kernel.h>


@ -12,6 +12,7 @@
*
*/
#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>


@ -35,20 +35,19 @@
#include <linux/mtd/physmap.h>
#include <linux/memblock.h>
#include <clocksource/timer-sp804.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/icst.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <mach/platform.h>
#include <mach/irqs.h>
#include <asm/hardware/timer-sp.h>
#include <plat/sched_clock.h>
@ -381,10 +380,10 @@ void __init realview_timer_init(unsigned int timer_irq)
/*
* Initialise to a known state (all timers off)
*/
writel(0, timer0_va_base + TIMER_CTRL);
writel(0, timer1_va_base + TIMER_CTRL);
writel(0, timer2_va_base + TIMER_CTRL);
writel(0, timer3_va_base + TIMER_CTRL);
sp804_timer_disable(timer0_va_base);
sp804_timer_disable(timer1_va_base);
sp804_timer_disable(timer2_va_base);
sp804_timer_disable(timer3_va_base);
sp804_clocksource_init(timer3_va_base, "timer3");
sp804_clockevents_init(timer0_va_base, timer_irq, "timer0");


@ -73,7 +73,7 @@ static int sa1100_normal_irqdomain_map(struct irq_domain *d,
return 0;
}
static struct irq_domain_ops sa1100_normal_irqdomain_ops = {
static const struct irq_domain_ops sa1100_normal_irqdomain_ops = {
.map = sa1100_normal_irqdomain_map,
.xlate = irq_domain_xlate_onetwocell,
};


@ -41,8 +41,9 @@
#include <linux/bitops.h>
#include <linux/reboot.h>
#include <clocksource/timer-sp804.h>
#include <asm/irq.h>
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/icst.h>
#include <asm/mach-types.h>
@ -52,7 +53,6 @@
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <mach/platform.h>
#include <asm/hardware/timer-sp.h>
#include <plat/sched_clock.h>
@ -798,10 +798,10 @@ void __init versatile_timer_init(void)
/*
* Initialise to a known state (all timers off)
*/
writel(0, TIMER0_VA_BASE + TIMER_CTRL);
writel(0, TIMER1_VA_BASE + TIMER_CTRL);
writel(0, TIMER2_VA_BASE + TIMER_CTRL);
writel(0, TIMER3_VA_BASE + TIMER_CTRL);
sp804_timer_disable(TIMER0_VA_BASE);
sp804_timer_disable(TIMER1_VA_BASE);
sp804_timer_disable(TIMER2_VA_BASE);
sp804_timer_disable(TIMER3_VA_BASE);
sp804_clocksource_init(TIMER3_VA_BASE, "timer3");
sp804_clockevents_init(TIMER0_VA_BASE, IRQ_TIMERINT0_1, "timer0");


@ -604,6 +604,22 @@ config CPU_USE_DOMAINS
This option enables or disables the use of domain switching
via the set_fs() function.
config CPU_V7M_NUM_IRQ
int "Number of external interrupts connected to the NVIC"
depends on CPU_V7M
default 90 if ARCH_STM32
default 38 if ARCH_EFM32
default 112 if SOC_VF610
default 240
help
This option indicates the number of interrupts connected to the NVIC.
The value can be larger than the real number of interrupts supported
by the system, but must not be lower.
The default value is 240, corresponding to the maximum number of
interrupts supported by the NVIC on the Cortex-M family.
If unsure, keep the default value.
#
# CPU supports 36-bit I/O
#
@ -624,6 +640,10 @@ config ARM_LPAE
If unsure, say N.
config ARM_PV_FIXUP
def_bool y
depends on ARM_LPAE && ARM_PATCH_PHYS_VIRT && ARCH_KEYSTONE
config ARCH_PHYS_ADDR_T_64BIT
def_bool ARM_LPAE


@ -18,6 +18,7 @@ obj-$(CONFIG_MODULES) += proc-syms.o
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_ARM_PV_FIXUP) += pv-fixup-asm.o
obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o
obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o


@ -38,10 +38,11 @@ struct l2c_init_data {
unsigned way_size_0;
unsigned num_lock;
void (*of_parse)(const struct device_node *, u32 *, u32 *);
void (*enable)(void __iomem *, u32, unsigned);
void (*enable)(void __iomem *, unsigned);
void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
void (*save)(void __iomem *);
void (*configure)(void __iomem *);
void (*unlock)(void __iomem *, unsigned);
struct outer_cache_fns outer_cache;
};
@ -110,14 +111,6 @@ static inline void l2c_unlock(void __iomem *base, unsigned num)
static void l2c_configure(void __iomem *base)
{
if (outer_cache.configure) {
outer_cache.configure(&l2x0_saved_regs);
return;
}
if (l2x0_data->configure)
l2x0_data->configure(base);
l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}
@ -125,18 +118,16 @@ static void l2c_configure(void __iomem *base)
* Enable the L2 cache controller. This function must only be
* called when the cache controller is known to be disabled.
*/
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
static void l2c_enable(void __iomem *base, unsigned num_lock)
{
unsigned long flags;
/* Do not touch the controller if already enabled. */
if (readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)
return;
if (outer_cache.configure)
outer_cache.configure(&l2x0_saved_regs);
else
l2x0_data->configure(base);
l2x0_saved_regs.aux_ctrl = aux;
l2c_configure(base);
l2c_unlock(base, num_lock);
l2x0_data->unlock(base, num_lock);
local_irq_save(flags);
__l2c_op_way(base + L2X0_INV_WAY);
@ -163,7 +154,11 @@ static void l2c_save(void __iomem *base)
static void l2c_resume(void)
{
l2c_enable(l2x0_base, l2x0_saved_regs.aux_ctrl, l2x0_data->num_lock);
void __iomem *base = l2x0_base;
/* Do not touch the controller if already enabled. */
if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
l2c_enable(base, l2x0_data->num_lock);
}
/*
@ -252,6 +247,8 @@ static const struct l2c_init_data l2c210_data __initconst = {
.num_lock = 1,
.enable = l2c_enable,
.save = l2c_save,
.configure = l2c_configure,
.unlock = l2c_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@ -391,16 +388,22 @@ static void l2c220_sync(void)
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
static void l2c220_enable(void __iomem *base, unsigned num_lock)
{
/*
* Always enable non-secure access to the lockdown registers -
* we write to them as part of the L2C enable sequence so they
* need to be accessible.
*/
aux |= L220_AUX_CTRL_NS_LOCKDOWN;
l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;
l2c_enable(base, aux, num_lock);
l2c_enable(base, num_lock);
}
static void l2c220_unlock(void __iomem *base, unsigned num_lock)
{
if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
l2c_unlock(base, num_lock);
}
static const struct l2c_init_data l2c220_data = {
@ -409,6 +412,8 @@ static const struct l2c_init_data l2c220_data = {
.num_lock = 1,
.enable = l2c220_enable,
.save = l2c_save,
.configure = l2c_configure,
.unlock = l2c220_unlock,
.outer_cache = {
.inv_range = l2c220_inv_range,
.clean_range = l2c220_clean_range,
@ -569,6 +574,8 @@ static void l2c310_configure(void __iomem *base)
{
unsigned revision;
l2c_configure(base);
/* restore pl310 setup */
l2c_write_sec(l2x0_saved_regs.tag_latency, base,
L310_TAG_LATENCY_CTRL);
@ -603,10 +610,11 @@ static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, v
return NOTIFY_OK;
}
static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
{
unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
u32 aux = l2x0_saved_regs.aux_ctrl;
if (rev >= L310_CACHE_ID_RTL_R2P0) {
if (cortex_a9) {
@ -649,9 +657,9 @@ static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
* we write to them as part of the L2C enable sequence so they
* need to be accessible.
*/
aux |= L310_AUX_CTRL_NS_LOCKDOWN;
l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;
l2c_enable(base, aux, num_lock);
l2c_enable(base, num_lock);
/* Read back resulting AUX_CTRL value as it could have been altered. */
aux = readl_relaxed(base + L2X0_AUX_CTRL);
@ -755,6 +763,12 @@ static void l2c310_resume(void)
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
}
static void l2c310_unlock(void __iomem *base, unsigned num_lock)
{
if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
l2c_unlock(base, num_lock);
}
static const struct l2c_init_data l2c310_init_fns __initconst = {
.type = "L2C-310",
.way_size_0 = SZ_8K,
@ -763,6 +777,7 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
.unlock = l2c310_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@ -856,8 +871,11 @@ static int __init __l2c_init(const struct l2c_init_data *data,
* Check if l2x0 controller is already enabled. If we are booting
* in non-secure mode accessing the below registers will fault.
*/
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
data->enable(l2x0_base, aux, data->num_lock);
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
l2x0_saved_regs.aux_ctrl = aux;
data->enable(l2x0_base, data->num_lock);
}
outer_cache = fns;
@ -1066,6 +1084,8 @@ static const struct l2c_init_data of_l2c210_data __initconst = {
.of_parse = l2x0_of_parse,
.enable = l2c_enable,
.save = l2c_save,
.configure = l2c_configure,
.unlock = l2c_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@ -1084,6 +1104,8 @@ static const struct l2c_init_data of_l2c220_data __initconst = {
.of_parse = l2x0_of_parse,
.enable = l2c220_enable,
.save = l2c_save,
.configure = l2c_configure,
.unlock = l2c220_unlock,
.outer_cache = {
.inv_range = l2c220_inv_range,
.clean_range = l2c220_clean_range,
@ -1199,6 +1221,26 @@ static void __init l2c310_of_parse(const struct device_node *np,
pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
}
ret = of_property_read_u32(np, "prefetch-data", &val);
if (ret == 0) {
if (val)
prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
else
prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF prefetch-data property value is missing\n");
}
ret = of_property_read_u32(np, "prefetch-instr", &val);
if (ret == 0) {
if (val)
prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
else
prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF prefetch-instr property value is missing\n");
}
l2x0_saved_regs.prefetch_ctrl = prefetch;
}
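Note: both hunks implement the tri-state semantics from the binding update at the top of this commit: property absent keeps the firmware setting, <0> forces the feature off, <1> forces it on. A hedged sketch of the pattern as a hypothetical helper (not in the patch):

#include <linux/of.h>

/* Apply one tri-state DT property to a control-register image. */
static u32 apply_tristate(const struct device_node *np, const char *prop,
			  u32 ctrl, u32 bit)
{
	u32 val;
	int ret = of_property_read_u32(np, prop, &val);

	if (ret == -EINVAL)	/* absent: keep the firmware setting */
		return ctrl;
	if (ret)		/* malformed: also leave untouched */
		return ctrl;
	return val ? ctrl | bit : ctrl & ~bit;
}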
@ -1211,6 +1253,7 @@ static const struct l2c_init_data of_l2c310_data __initconst = {
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
.unlock = l2c310_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@ -1240,6 +1283,7 @@ static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
.unlock = l2c310_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@ -1366,7 +1410,7 @@ static void aurora_save(void __iomem *base)
* For Aurora cache in no outer mode, enable via the CP15 coprocessor
* broadcasting of cache commands to L2.
*/
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
static void __init aurora_enable_no_outer(void __iomem *base,
unsigned num_lock)
{
u32 u;
@ -1377,7 +1421,7 @@ static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
isb();
l2c_enable(base, aux, num_lock);
l2c_enable(base, num_lock);
}
static void __init aurora_fixup(void __iomem *base, u32 cache_id,
@ -1416,6 +1460,8 @@ static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
.enable = l2c_enable,
.fixup = aurora_fixup,
.save = aurora_save,
.configure = l2c_configure,
.unlock = l2c_unlock,
.outer_cache = {
.inv_range = aurora_inv_range,
.clean_range = aurora_clean_range,
@ -1435,6 +1481,8 @@ static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
.enable = aurora_enable_no_outer,
.fixup = aurora_fixup,
.save = aurora_save,
.configure = l2c_configure,
.unlock = l2c_unlock,
.outer_cache = {
.resume = l2c_resume,
},
@ -1585,6 +1633,7 @@ static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
.enable = l2c310_enable,
.save = l2c310_save,
.configure = l2c310_configure,
.unlock = l2c310_unlock,
.outer_cache = {
.inv_range = bcm_inv_range,
.clean_range = bcm_clean_range,
@ -1608,6 +1657,7 @@ static void __init tauros3_save(void __iomem *base)
static void tauros3_configure(void __iomem *base)
{
l2c_configure(base);
writel_relaxed(l2x0_saved_regs.aux2_ctrl,
base + TAUROS3_AUX2_CTRL);
writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
@ -1621,6 +1671,7 @@ static const struct l2c_init_data of_tauros3_data __initconst = {
.enable = l2c_enable,
.save = tauros3_save,
.configure = tauros3_configure,
.unlock = l2c_unlock,
/* Tauros3 broadcasts L1 cache operations to L2 */
.outer_cache = {
.resume = l2c_resume,


@ -148,11 +148,14 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs);
struct dma_map_ops arm_coherent_dma_ops = {
.alloc = arm_coherent_dma_alloc,
.free = arm_coherent_dma_free,
.mmap = arm_dma_mmap,
.mmap = arm_coherent_dma_mmap,
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_coherent_dma_map_page,
.map_sg = arm_dma_map_sg,
@ -690,10 +693,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
attrs, __builtin_return_address(0));
}
/*
* Create userspace mapping for the DMA-coherent memory.
*/
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{
@ -704,8 +704,6 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long pfn = dma_to_pfn(dev, dma_addr);
unsigned long off = vma->vm_pgoff;
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
@ -720,6 +718,26 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
return ret;
}
/*
* Create userspace mapping for the DMA-coherent memory.
*/
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{
return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{
#ifdef CONFIG_MMU
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
#endif /* CONFIG_MMU */
return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
/*
* Free a buffer as defined by the above mapping.
*/
@ -1878,7 +1896,7 @@ struct dma_map_ops iommu_coherent_ops = {
* arm_iommu_attach_device function.
*/
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
unsigned int bits = size >> PAGE_SHIFT;
unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
@ -1886,6 +1904,10 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
int extensions = 1;
int err = -ENOMEM;
/* currently only 32-bit DMA address space is supported */
if (size > DMA_BIT_MASK(32) + 1)
return ERR_PTR(-ERANGE);
if (!bitmap_size)
return ERR_PTR(-EINVAL);
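Note: the prototype change from size_t to u64 matters because a full 4 GiB mapping (DMA_BIT_MASK(32) + 1) cannot be represented in a 32-bit size_t, which is also why the SIZE_MAX check below becomes redundant and is removed. A standalone sketch of the truncation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t size = 0xffffffffULL + 1;	/* DMA_BIT_MASK(32) + 1 */
	uint32_t as32 = (uint32_t)size;		/* 32-bit size_t holds 0 */

	printf("u64: %#llx, truncated: %#x\n",
	       (unsigned long long)size, as32);
	return 0;
}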
@ -2057,13 +2079,6 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
if (!iommu)
return false;
/*
* currently arm_iommu_create_mapping() takes a max of size_t
* for size param. So check this limit for now.
*/
if (size > SIZE_MAX)
return false;
mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
if (IS_ERR(mapping)) {
pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",


@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void)
}
/*
* Find the first non-section-aligned page, and point
* Find the first non-pmd-aligned page, and point
* memblock_limit at it. This relies on rounding the
* limit down to be section-aligned, which happens at
* the end of this function.
* limit down to be pmd-aligned, which happens at the
* end of this function.
*
* With this algorithm, the start or end of almost any
* bank can be non-section-aligned. The only exception
* is that the start of the bank 0 must be section-
* bank can be non-pmd-aligned. The only exception is
* that the start of the bank 0 must be section-
* aligned, since otherwise memory would need to be
* allocated when mapping the start of bank 0, which
* occurs before any free memory is mapped.
*/
if (!memblock_limit) {
if (!IS_ALIGNED(block_start, SECTION_SIZE))
if (!IS_ALIGNED(block_start, PMD_SIZE))
memblock_limit = block_start;
else if (!IS_ALIGNED(block_end, SECTION_SIZE))
else if (!IS_ALIGNED(block_end, PMD_SIZE))
memblock_limit = arm_lowmem_limit;
}
@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void)
high_memory = __va(arm_lowmem_limit - 1) + 1;
/*
* Round the memblock limit down to a section size. This
* Round the memblock limit down to a pmd size. This
* helps to ensure that we will allocate memory from the
* last full section, which should be mapped.
* last full pmd, which should be mapped.
*/
if (memblock_limit)
memblock_limit = round_down(memblock_limit, SECTION_SIZE);
memblock_limit = round_down(memblock_limit, PMD_SIZE);
if (!memblock_limit)
memblock_limit = arm_lowmem_limit;
@ -1387,123 +1387,98 @@ static void __init map_lowmem(void)
}
}
#ifdef CONFIG_ARM_LPAE
#ifdef CONFIG_ARM_PV_FIXUP
extern unsigned long __atags_pointer;
typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
pgtables_remap lpae_pgtables_remap_asm;
/*
* early_paging_init() recreates boot time page table setup, allowing machines
* to switch over to a high (>4G) address space on LPAE systems
*/
void __init early_paging_init(const struct machine_desc *mdesc,
struct proc_info_list *procinfo)
void __init early_paging_init(const struct machine_desc *mdesc)
{
pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
unsigned long map_start, map_end;
pgd_t *pgd0, *pgdk;
pud_t *pud0, *pudk, *pud_start;
pmd_t *pmd0, *pmdk;
phys_addr_t phys;
int i;
pgtables_remap *lpae_pgtables_remap;
unsigned long pa_pgd;
unsigned int cr, ttbcr;
long long offset;
void *boot_data;
if (!(mdesc->init_meminfo))
if (!mdesc->pv_fixup)
return;
/* remap kernel code and data */
map_start = init_mm.start_code & PMD_MASK;
map_end = ALIGN(init_mm.brk, PMD_SIZE);
offset = mdesc->pv_fixup();
if (offset == 0)
return;
/* get a handle on things... */
pgd0 = pgd_offset_k(0);
pud_start = pud0 = pud_offset(pgd0, 0);
pmd0 = pmd_offset(pud0, 0);
/*
* Get the address of the remap function in the 1:1 identity
* mapping setup by the early page table assembly code. We
* must get this prior to the pv update. The following barrier
* ensures that this is complete before we fixup any P:V offsets.
*/
lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
pa_pgd = __pa(swapper_pg_dir);
boot_data = __va(__atags_pointer);
barrier();
pgdk = pgd_offset_k(map_start);
pudk = pud_offset(pgdk, map_start);
pmdk = pmd_offset(pudk, map_start);
pr_info("Switching physical address space to 0x%08llx\n",
(u64)PHYS_OFFSET + offset);
mdesc->init_meminfo();
/* Re-set the phys pfn offset, and the pv offset */
__pv_offset += offset;
__pv_phys_pfn_offset += PFN_DOWN(offset);
/* Run the patch stub to update the constants */
fixup_pv_table(&__pv_table_begin,
(&__pv_table_end - &__pv_table_begin) << 2);
/*
* Cache cleaning operations for self-modifying code
* We should clean the entries by MVA but running a
* for loop over every pv_table entry pointer would
* just complicate the code.
*/
flush_cache_louis();
dsb(ishst);
isb();
/*
* FIXME: This code is not architecturally compliant: we modify
* the mappings in-place, indeed while they are in use by this
* very same code. This may lead to unpredictable behaviour of
* the CPU.
*
* Even modifying the mappings in a separate page table does
* not resolve this.
*
* The architecture strongly recommends that when a mapping is
* changed, that it is changed by first going via an invalid
* mapping and back to the new mapping. This is to ensure that
* no TLB conflicts (caused by the TLB having more than one TLB
* entry match a translation) can occur. However, doing that
* here will result in unmapping the code we are running.
*/
pr_warn("WARNING: unsafe modification of in-place page tables - tainting kernel\n");
add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
/*
* Remap level 1 table. This changes the physical addresses
* used to refer to the level 2 page tables to the high
* physical address alias, leaving everything else the same.
*/
for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
set_pud(pud0,
__pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
pmd0 += PTRS_PER_PMD;
}
/*
* Remap the level 2 table, pointing the mappings at the high
* physical address alias of these pages.
*/
phys = __pa(map_start);
do {
*pmdk++ = __pmd(phys | pmdprot);
phys += PMD_SIZE;
} while (phys < map_end);
/*
* Ensure that the above updates are flushed out of the cache.
* This is not strictly correct; on a system where the caches
* are coherent with each other, but the MMU page table walks
* may not be coherent, flush_cache_all() may be a no-op, and
* this will fail.
* We are changing not only the virtual to physical mapping, but also
* the physical addresses used to access memory. We need to flush
* all levels of cache in the system with caching disabled to
* ensure that all data is written back, and nothing is prefetched
* into the caches. We also need to prevent the TLB walkers
* allocating into the caches too. Note that this is ARMv7 LPAE
* specific.
*/
cr = get_cr();
set_cr(cr & ~(CR_I | CR_C));
asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
asm volatile("mcr p15, 0, %0, c2, c0, 2"
: : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
flush_cache_all();
/*
* Re-write the TTBR values to point them at the high physical
* alias of the page tables. We expect __va() will work on
* cpu_get_pgd(), which returns the value of TTBR0.
* Fixup the page tables - this must be in the idmap region as
* we need to disable the MMU to do this safely, and hence it
* needs to be assembly. It's fairly simple, as we're using the
* temporary tables setup by the initial assembly code.
*/
cpu_switch_mm(pgd0, &init_mm);
cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
lpae_pgtables_remap(offset, pa_pgd, boot_data);
/* Finally flush any stale TLB values. */
local_flush_bp_all();
local_flush_tlb_all();
/* Re-enable the caches and cacheable TLB walks */
asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
set_cr(cr);
}
#else
void __init early_paging_init(const struct machine_desc *mdesc,
struct proc_info_list *procinfo)
void __init early_paging_init(const struct machine_desc *mdesc)
{
if (mdesc->init_meminfo)
mdesc->init_meminfo();
long long offset;
if (!mdesc->pv_fixup)
return;
offset = mdesc->pv_fixup();
if (offset == 0)
return;
pr_crit("Physical address space modification is only to support Keystone2.\n");
pr_crit("Please enable ARM_LPAE and ARM_PATCH_PHYS_VIRT support to use this\n");
pr_crit("feature. Your kernel may crash now, have a good day.\n");
add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
}
#endif
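For reference, the new hook is supplied by a machine descriptor. A sketch in the style of the Keystone 2 user (names and constants below are illustrative, not the verbatim platform code):

/* Sketch: machine descriptor opting in to the physical address fixup. */
static long long __init my_soc_pv_fixup(void)
{
	/* Offset from the low (<4G) alias to the high (>4G) alias. */
	return MY_SOC_HIGH_PHYS_START - MY_SOC_LOW_PHYS_START;
}

DT_MACHINE_START(MY_SOC, "My SoC (Flattened Device Tree)")
	.pv_fixup	= my_soc_pv_fixup,
MACHINE_END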


@ -303,15 +303,6 @@ void __init sanity_check_meminfo(void)
memblock_set_current_limit(end);
}
/*
* early_paging_init() recreates boot time page table setup, allowing machines
* to switch over to a high (>4G) address space on LPAE systems
*/
void __init early_paging_init(const struct machine_desc *mdesc,
struct proc_info_list *procinfo)
{
}
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.


@ -22,8 +22,6 @@
*
* These are the low level assembler for performing cache and TLB
* functions on the arm1020.
*
* CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
*/
#include <linux/linkage.h>
#include <linux/init.h>


@ -22,8 +22,6 @@
*
* These are the low level assembler for performing cache and TLB
* functions on the arm1020e.
*
* CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
*/
#include <linux/linkage.h>
#include <linux/init.h>


@ -441,9 +441,6 @@ ENTRY(cpu_arm925_set_pte_ext)
.type __arm925_setup, #function
__arm925_setup:
mov r0, #0
#if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE)
orr r0,r0,#1 << 7
#endif
/* Transparent on, D-cache clean & flush mode. See NOTE2 above */
orr r0,r0,#1 << 1 @ transparent mode on


@ -602,7 +602,6 @@ __\name\()_proc_info:
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
initfn __feroceon_setup, __\name\()_proc_info
.long __feroceon_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP


@ -36,14 +36,16 @@
*
* It is assumed that:
* - we are not using split page tables
*
* Note that we always need to flush BTAC/BTB if IBE is set
* even on Cortex-A8 revisions not affected by 430973.
* If IBE is not set, the BTAC/BTB flush won't do anything.
*/
ENTRY(cpu_ca8_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
#endif
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r1, r1 @ get mm->context.id
@ -148,10 +150,10 @@ ENDPROC(cpu_v7_set_pte_ext)
* Macro for setting up the TTBRx and TTBCR registers.
* - \ttb0 and \ttb1 updated with the corresponding flags.
*/
.macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp
.macro v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp
mcr p15, 0, \zero, c2, c0, 2 @ TTB control register
ALT_SMP(orr \ttbr0, \ttbr0, #TTB_FLAGS_SMP)
ALT_UP(orr \ttbr0, \ttbr0, #TTB_FLAGS_UP)
ALT_SMP(orr \ttbr0l, \ttbr0l, #TTB_FLAGS_SMP)
ALT_UP(orr \ttbr0l, \ttbr0l, #TTB_FLAGS_UP)
ALT_SMP(orr \ttbr1, \ttbr1, #TTB_FLAGS_SMP)
ALT_UP(orr \ttbr1, \ttbr1, #TTB_FLAGS_UP)
mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1


@ -126,11 +126,10 @@ ENDPROC(cpu_v7_set_pte_ext)
* Macro for setting up the TTBRx and TTBCR registers.
* - \ttbr1 updated.
*/
.macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp
.macro v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp
ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address
mov \tmp, \tmp, lsr #ARCH_PGD_SHIFT
cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET?
mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register
cmp \ttbr1, \tmp, lsr #12 @ PHYS_OFFSET > PAGE_OFFSET?
mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register
orr \tmp, \tmp, #TTB_EAE
ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP)
ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP)
@ -143,13 +142,10 @@ ENDPROC(cpu_v7_set_pte_ext)
*/
orrls \tmp, \tmp, #TTBR1_SIZE @ TTBCR.T1SZ
mcr p15, 0, \tmp, c2, c0, 2 @ TTBCR
mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits
mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits
mov \tmp, \ttbr1, lsr #20
mov \ttbr1, \ttbr1, lsl #12
addls \ttbr1, \ttbr1, #TTBR1_OFFSET
mcrr p15, 1, \ttbr1, \tmp, c2 @ load TTBR1
mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits
mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits
mcrr p15, 0, \ttbr0, \tmp, c2 @ load TTBR0
.endm
/*


@ -252,6 +252,12 @@ ENDPROC(cpu_pj4b_do_resume)
* Initialise TLB, Caches, and MMU state ready to switch the MMU
* on. Return in r0 the new CP15 C1 control register setting.
*
* r1, r2, r4, r5, r9, r13 must be preserved - r13 is not a stack
* r4: TTBR0 (low word)
* r5: TTBR0 (high word if LPAE)
* r8: TTBR1
* r9: Main ID register
*
* This should be able to cover all ARMv7 cores.
*
* It is assumed that:
@ -279,6 +285,78 @@ __v7_ca17mp_setup:
#endif
b __v7_setup
/*
* Errata:
* r0, r10 available for use
* r1, r2, r4, r5, r9, r13: must be preserved
* r3: contains MIDR rX number in bits 23-20
* r6: contains MIDR rXpY as 8-bit XY number
* r9: MIDR
*/
__ca8_errata:
#if defined(CONFIG_ARM_ERRATA_430973) && !defined(CONFIG_ARCH_MULTIPLATFORM)
teq r3, #0x00100000 @ only present in r1p*
mrceq p15, 0, r0, c1, c0, 1 @ read aux control register
orreq r0, r0, #(1 << 6) @ set IBE to 1
mcreq p15, 0, r0, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_458693
teq r6, #0x20 @ only present in r2p0
mrceq p15, 0, r0, c1, c0, 1 @ read aux control register
orreq r0, r0, #(1 << 5) @ set L1NEON to 1
orreq r0, r0, #(1 << 9) @ set PLDNOP to 1
mcreq p15, 0, r0, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_460075
teq r6, #0x20 @ only present in r2p0
mrceq p15, 1, r0, c9, c0, 2 @ read L2 cache aux ctrl register
tsteq r0, #1 << 22
orreq r0, r0, #(1 << 22) @ set the Write Allocate disable bit
mcreq p15, 1, r0, c9, c0, 2 @ write the L2 cache aux ctrl register
#endif
b __errata_finish
__ca9_errata:
#ifdef CONFIG_ARM_ERRATA_742230
cmp r6, #0x22 @ only present up to r2p2
mrcle p15, 0, r0, c15, c0, 1 @ read diagnostic register
orrle r0, r0, #1 << 4 @ set bit #4
mcrle p15, 0, r0, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_742231
teq r6, #0x20 @ present in r2p0
teqne r6, #0x21 @ present in r2p1
teqne r6, #0x22 @ present in r2p2
mrceq p15, 0, r0, c15, c0, 1 @ read diagnostic register
orreq r0, r0, #1 << 12 @ set bit #12
orreq r0, r0, #1 << 22 @ set bit #22
mcreq p15, 0, r0, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_743622
teq r3, #0x00200000 @ only present in r2p*
mrceq p15, 0, r0, c15, c0, 1 @ read diagnostic register
orreq r0, r0, #1 << 6 @ set bit #6
mcreq p15, 0, r0, c15, c0, 1 @ write diagnostic register
#endif
#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP)
ALT_SMP(cmp r6, #0x30) @ present prior to r3p0
ALT_UP_B(1f)
mrclt p15, 0, r0, c15, c0, 1 @ read diagnostic register
orrlt r0, r0, #1 << 11 @ set bit #11
mcrlt p15, 0, r0, c15, c0, 1 @ write diagnostic register
1:
#endif
b __errata_finish
__ca15_errata:
#ifdef CONFIG_ARM_ERRATA_773022
cmp r6, #0x4 @ only present up to r0p4
mrcle p15, 0, r0, c1, c0, 1 @ read aux control register
orrle r0, r0, #1 << 1 @ disable loop buffer
mcrle p15, 0, r0, c1, c0, 1 @ write aux control register
#endif
b __errata_finish
__v7_pj4b_setup:
#ifdef CONFIG_CPU_PJ4B
@ -339,96 +417,38 @@ __v7_setup:
bl v7_flush_dcache_louis
ldmia r12, {r0-r5, r7, r9, r11, lr}
mrc p15, 0, r0, c0, c0, 0 @ read main ID register
and r10, r0, #0xff000000 @ ARM?
teq r10, #0x41000000
bne 3f
and r5, r0, #0x00f00000 @ variant
and r6, r0, #0x0000000f @ revision
orr r6, r6, r5, lsr #20-4 @ combine variant and revision
ubfx r0, r0, #4, #12 @ primary part number
and r0, r9, #0xff000000 @ ARM?
teq r0, #0x41000000
bne __errata_finish
and r3, r9, #0x00f00000 @ variant
and r6, r9, #0x0000000f @ revision
orr r6, r6, r3, lsr #20-4 @ combine variant and revision
ubfx r0, r9, #4, #12 @ primary part number
/* Cortex-A8 Errata */
ldr r10, =0x00000c08 @ Cortex-A8 primary part number
teq r0, r10
bne 2f
#if defined(CONFIG_ARM_ERRATA_430973) && !defined(CONFIG_ARCH_MULTIPLATFORM)
teq r5, #0x00100000 @ only present in r1p*
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
orreq r10, r10, #(1 << 6) @ set IBE to 1
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_458693
teq r6, #0x20 @ only present in r2p0
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
orreq r10, r10, #(1 << 5) @ set L1NEON to 1
orreq r10, r10, #(1 << 9) @ set PLDNOP to 1
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_460075
teq r6, #0x20 @ only present in r2p0
mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register
tsteq r10, #1 << 22
orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit
mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register
#endif
b 3f
beq __ca8_errata
/* Cortex-A9 Errata */
2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number
ldr r10, =0x00000c09 @ Cortex-A9 primary part number
teq r0, r10
bne 3f
#ifdef CONFIG_ARM_ERRATA_742230
cmp r6, #0x22 @ only present up to r2p2
mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
orrle r10, r10, #1 << 4 @ set bit #4
mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_742231
teq r6, #0x20 @ present in r2p0
teqne r6, #0x21 @ present in r2p1
teqne r6, #0x22 @ present in r2p2
mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
orreq r10, r10, #1 << 12 @ set bit #12
orreq r10, r10, #1 << 22 @ set bit #22
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_743622
teq r5, #0x00200000 @ only present in r2p*
mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
orreq r10, r10, #1 << 6 @ set bit #6
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP)
ALT_SMP(cmp r6, #0x30) @ present prior to r3p0
ALT_UP_B(1f)
mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register
orrlt r10, r10, #1 << 11 @ set bit #11
mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
1:
#endif
beq __ca9_errata
/* Cortex-A15 Errata */
3: ldr r10, =0x00000c0f @ Cortex-A15 primary part number
ldr r10, =0x00000c0f @ Cortex-A15 primary part number
teq r0, r10
bne 4f
beq __ca15_errata
#ifdef CONFIG_ARM_ERRATA_773022
cmp r6, #0x4 @ only present up to r0p4
mrcle p15, 0, r10, c1, c0, 1 @ read aux control register
orrle r10, r10, #1 << 1 @ disable loop buffer
mcrle p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
4: mov r10, #0
__errata_finish:
mov r10, #0
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
ldr r5, =PRRR @ PRRR
v7_ttb_setup r10, r4, r5, r8, r3 @ TTBCR, TTBRx setup
ldr r3, =PRRR @ PRRR
ldr r6, =NMRR @ NMRR
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r3, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
dsb @ Complete invalidations
@ -437,22 +457,22 @@ __v7_setup:
and r0, r0, #(0xf << 12) @ ThumbEE enabled field
teq r0, #(1 << 12) @ check if ThumbEE is present
bne 1f
mov r5, #0
mcr p14, 6, r5, c1, c0, 0 @ Initialize TEEHBR to 0
mov r3, #0
mcr p14, 6, r3, c1, c0, 0 @ Initialize TEEHBR to 0
mrc p14, 6, r0, c0, c0, 0 @ load TEECR
orr r0, r0, #1 @ set the 1st bit in order to
mcr p14, 6, r0, c0, c0, 0 @ stop userspace TEEHBR access
1:
#endif
adr r5, v7_crval
ldmia r5, {r5, r6}
adr r3, v7_crval
ldmia r3, {r3, r6}
ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
#ifdef CONFIG_SWP_EMULATE
orr r5, r5, #(1 << 10) @ set SW bit in "clear"
orr r3, r3, #(1 << 10) @ set SW bit in "clear"
bic r6, r6, #(1 << 10) @ clear it in "mmuset"
#endif
mrc p15, 0, r0, c1, c0, 0 @ read control register
bic r0, r0, r5 @ clear these bits
bic r0, r0, r3 @ clear these bits
orr r0, r0, r6 @ set them
THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions
ret lr @ return to head.S:__ret


@ -0,0 +1,88 @@
/*
* Copyright (C) 2015 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This assembly is required to safely remap the physical address space
* for Keystone 2
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/cp15.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
.section ".idmap.text", "ax"
#define L1_ORDER 3
#define L2_ORDER 3
ENTRY(lpae_pgtables_remap_asm)
stmfd sp!, {r4-r8, lr}
mrc p15, 0, r8, c1, c0, 0 @ read control reg
bic ip, r8, #CR_M @ disable caches and MMU
mcr p15, 0, ip, c1, c0, 0
dsb
isb
/* Update level 2 entries covering the kernel */
ldr r6, =(_end - 1)
add r7, r2, #0x1000
add r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
add r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
1: ldrd r4, [r7]
adds r4, r4, r0
adc r5, r5, r1
strd r4, [r7], #1 << L2_ORDER
cmp r7, r6
bls 1b
/* Update level 2 entries for the boot data */
add r7, r2, #0x1000
add r7, r7, r3, lsr #SECTION_SHIFT - L2_ORDER
bic r7, r7, #(1 << L2_ORDER) - 1
ldrd r4, [r7]
adds r4, r4, r0
adc r5, r5, r1
strd r4, [r7], #1 << L2_ORDER
ldrd r4, [r7]
adds r4, r4, r0
adc r5, r5, r1
strd r4, [r7]
/* Update level 1 entries */
mov r6, #4
mov r7, r2
2: ldrd r4, [r7]
adds r4, r4, r0
adc r5, r5, r1
strd r4, [r7], #1 << L1_ORDER
subs r6, r6, #1
bne 2b
mrrc p15, 0, r4, r5, c2 @ read TTBR0
adds r4, r4, r0 @ update physical address
adc r5, r5, r1
mcrr p15, 0, r4, r5, c2 @ write back TTBR0
mrrc p15, 1, r4, r5, c2 @ read TTBR1
adds r4, r4, r0 @ update physical address
adc r5, r5, r1
mcrr p15, 1, r4, r5, c2 @ write back TTBR1
dsb
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ I+BTB cache invalidate
mcr p15, 0, ip, c8, c7, 0 @ local_flush_tlb_all()
dsb
isb
mcr p15, 0, r8, c1, c0, 0 @ re-enable MMU
dsb
isb
ldmfd sp!, {r4-r8, pc}
ENDPROC(lpae_pgtables_remap_asm)
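The shift arithmetic above is dense; a hedged restatement in C of the descriptor indexing (the helper name is hypothetical):

/* One 8-byte level 2 descriptor maps one 2 MiB section, so the
 * descriptor for 'addr' lives at table + (addr >> (21 - 3)). */
#define SECTION_SHIFT	21	/* 2 MiB sections under LPAE */
#define L2_ORDER	3	/* descriptors are 1 << 3 bytes */

static inline unsigned long l2_desc_addr(unsigned long table,
					 unsigned long addr)
{
	return table + (addr >> (SECTION_SHIFT - L2_ORDER));
}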


@ -28,11 +28,7 @@
void __init orion_clkdev_add(const char *con_id, const char *dev_id,
struct clk *clk)
{
struct clk_lookup *cl;
cl = clkdev_alloc(clk, con_id, dev_id);
if (cl)
clkdev_add(cl);
clkdev_create(clk, con_id, "%s", dev_id);
}
/* Create clkdev entries for all orion platforms except kirkwood.


@ -6,9 +6,15 @@ obj-vdso := vgettimeofday.o datapage.o
targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.so.raw vdso.lds
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
ccflags-y := -shared -fPIC -fno-common -fno-builtin -fno-stack-protector
ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 -DDISABLE_BRANCH_PROFILING
ccflags-y += -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
ccflags-y := -fPIC -fno-common -fno-builtin -fno-stack-protector
ccflags-y += -DDISABLE_BRANCH_PROFILING
VDSO_LDFLAGS := -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
VDSO_LDFLAGS += -nostdlib -shared
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds
@ -40,10 +46,8 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
# Actual build commands
quiet_cmd_vdsold = VDSO $@
cmd_vdsold = $(CC) $(c_flags) -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) \
$(call cc-ldoption, -Wl$(comma)--build-id) \
-Wl,-Bsymbolic -Wl,-z,max-page-size=4096 \
-Wl,-z,common-page-size=4096 -o $@
cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \
-Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
quiet_cmd_vdsomunge = MUNGE $@
cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@


@ -246,8 +246,7 @@ int __init arch_clk_init(void)
for (i = 0; i < ARRAY_SIZE(main_clks); i++)
ret |= clk_register(main_clks[i]);
for (i = 0; i < ARRAY_SIZE(lookups); i++)
clkdev_add(&lookups[i]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),


@ -141,8 +141,8 @@ int __init arch_clk_init(void)
for (i = 0; i < ARRAY_SIZE(clks); i++)
ret |= clk_register(clks[i]);
for (i = 0; i < ARRAY_SIZE(lookups); i++)
clkdev_add(&lookups[i]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),


@ -164,8 +164,8 @@ int __init arch_clk_init(void)
for (i = 0; i < ARRAY_SIZE(clks); i++)
ret |= clk_register(clks[i]);
for (i = 0; i < ARRAY_SIZE(lookups); i++)
clkdev_add(&lookups[i]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),


@ -179,8 +179,8 @@ int __init arch_clk_init(void)
for (i = 0; i < ARRAY_SIZE(clks); i++)
ret |= clk_register(clks[i]);
for (i = 0; i < ARRAY_SIZE(lookups); i++)
clkdev_add(&lookups[i]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),


@ -138,8 +138,8 @@ int __init arch_clk_init(void)
for (i = 0; i < ARRAY_SIZE(clks); i++)
ret |= clk_register(clks[i]);
for (i = 0; i < ARRAY_SIZE(lookups); i++)
clkdev_add(&lookups[i]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),


@ -242,14 +242,12 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
goto err_reg;
}
s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk,
s2mps11_clk->lookup = clkdev_create(s2mps11_clk->clk,
s2mps11_name(s2mps11_clk), NULL);
if (!s2mps11_clk->lookup) {
ret = -ENOMEM;
goto err_lup;
}
clkdev_add(s2mps11_clk->lookup);
}
for (i = 0; i < S2MPS11_CLKS_NUM; i++) {


@ -177,7 +177,7 @@ struct clk *clk_get_sys(const char *dev_id, const char *con_id)
if (!cl)
goto out;
clk = __clk_create_clk(__clk_get_hw(cl->clk), dev_id, con_id);
clk = __clk_create_clk(cl->clk_hw, dev_id, con_id);
if (IS_ERR(clk))
goto out;
@ -215,18 +215,26 @@ void clk_put(struct clk *clk)
}
EXPORT_SYMBOL(clk_put);
void clkdev_add(struct clk_lookup *cl)
static void __clkdev_add(struct clk_lookup *cl)
{
mutex_lock(&clocks_mutex);
list_add_tail(&cl->node, &clocks);
mutex_unlock(&clocks_mutex);
}
void clkdev_add(struct clk_lookup *cl)
{
if (!cl->clk_hw)
cl->clk_hw = __clk_get_hw(cl->clk);
__clkdev_add(cl);
}
EXPORT_SYMBOL(clkdev_add);
void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
void clkdev_add_table(struct clk_lookup *cl, size_t num)
{
mutex_lock(&clocks_mutex);
while (num--) {
cl->clk_hw = __clk_get_hw(cl->clk);
list_add_tail(&cl->node, &clocks);
cl++;
}
@ -243,7 +251,7 @@ struct clk_lookup_alloc {
};
static struct clk_lookup * __init_refok
vclkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt,
vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
struct clk_lookup_alloc *cla;
@ -252,7 +260,7 @@ vclkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt,
if (!cla)
return NULL;
cla->cl.clk = clk;
cla->cl.clk_hw = hw;
if (con_id) {
strlcpy(cla->con_id, con_id, sizeof(cla->con_id));
cla->cl.con_id = cla->con_id;
@ -266,6 +274,19 @@ vclkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt,
return &cla->cl;
}
static struct clk_lookup *
vclkdev_create(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
struct clk_lookup *cl;
cl = vclkdev_alloc(hw, con_id, dev_fmt, ap);
if (cl)
__clkdev_add(cl);
return cl;
}
struct clk_lookup * __init_refok
clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
{
@ -273,28 +294,49 @@ clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
va_list ap;
va_start(ap, dev_fmt);
cl = vclkdev_alloc(clk, con_id, dev_fmt, ap);
cl = vclkdev_alloc(__clk_get_hw(clk), con_id, dev_fmt, ap);
va_end(ap);
return cl;
}
EXPORT_SYMBOL(clkdev_alloc);
int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
struct device *dev)
/**
* clkdev_create - allocate and add a clkdev lookup structure
* @clk: struct clk to associate with the lookup
* @con_id: connection ID string on device
* @dev_fmt: format string describing device name
*
* Returns a clk_lookup structure, which can later be unregistered and
* freed.
*/
struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
const char *dev_fmt, ...)
{
struct clk *r = clk_get(dev, id);
struct clk_lookup *cl;
va_list ap;
va_start(ap, dev_fmt);
cl = vclkdev_create(__clk_get_hw(clk), con_id, dev_fmt, ap);
va_end(ap);
return cl;
}
EXPORT_SYMBOL_GPL(clkdev_create);
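A minimal usage sketch of the new helper, which folds the old clkdev_alloc() + clkdev_add() pair into one call (the clock and device names are made up):

/* Sketch: let clk_get(dev, "fck") on "mmc.0" resolve to an existing clk. */
struct clk_lookup *cl;

cl = clkdev_create(clk, "fck", "mmc.%d", 0);
if (!cl)
	return -ENOMEM;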
int clk_add_alias(const char *alias, const char *alias_dev_name,
const char *con_id, struct device *dev)
{
struct clk *r = clk_get(dev, con_id);
struct clk_lookup *l;
if (IS_ERR(r))
return PTR_ERR(r);
l = clkdev_alloc(r, alias, alias_dev_name);
l = clkdev_create(r, alias, "%s", alias_dev_name);
clk_put(r);
if (!l)
return -ENODEV;
clkdev_add(l);
return 0;
return l ? 0 : -ENODEV;
}
EXPORT_SYMBOL(clk_add_alias);
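Only the con_id parameter type changes (now const); calls read as before. A sketch with illustrative names:

/* Sketch: expose the clock dev0 knows as "apb" to "uart.1" as well. */
int ret = clk_add_alias("apb", "uart.1", "apb", dev0);

if (ret)
	dev_warn(dev0, "no apb clock alias for uart.1\n");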
@ -334,15 +376,10 @@ int clk_register_clkdev(struct clk *clk, const char *con_id,
return PTR_ERR(clk);
va_start(ap, dev_fmt);
cl = vclkdev_alloc(clk, con_id, dev_fmt, ap);
cl = vclkdev_create(__clk_get_hw(clk), con_id, dev_fmt, ap);
va_end(ap);
if (!cl)
return -ENOMEM;
clkdev_add(cl);
return 0;
return cl ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(clk_register_clkdev);
@ -365,8 +402,8 @@ int clk_register_clkdevs(struct clk *clk, struct clk_lookup *cl, size_t num)
return PTR_ERR(clk);
for (i = 0; i < num; i++, cl++) {
cl->clk = clk;
clkdev_add(cl);
cl->clk_hw = __clk_get_hw(clk);
__clkdev_add(cl);
}
return 0;


@ -132,6 +132,12 @@ config ARM_GLOBAL_TIMER
help
This options enables support for the ARM global timer unit
config ARM_TIMER_SP804
bool "Support for Dual Timer SP804 module"
depends on GENERIC_SCHED_CLOCK
select CLKSRC_MMIO
select CLKSRC_OF if OF
config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
bool
depends on ARM_GLOBAL_TIMER


@ -45,6 +45,7 @@ obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o
obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o


@ -26,7 +26,8 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>
#include <asm/hardware/arm_timer.h>
#include "timer-sp.h"
static void __iomem * sched_clk_base;


@ -1,6 +1,3 @@
#ifndef __ASM_ARM_HARDWARE_ARM_TIMER_H
#define __ASM_ARM_HARDWARE_ARM_TIMER_H
/*
* ARM timer implementation, found in Integrator, Versatile and Realview
* platforms. Not all platforms support all registers and bits in these
@ -31,5 +28,3 @@
#define TIMER_RIS 0x10 /* CVR ro */
#define TIMER_MIS 0x14 /* CVR ro */
#define TIMER_BGLOAD 0x18 /* CVR rw */
#endif


@ -1,5 +1,5 @@
/*
* linux/arch/arm/common/timer-sp.c
* linux/drivers/clocksource/timer-sp.c
*
* Copyright (C) 1999 - 2003 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
@ -30,8 +30,9 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/timer-sp.h>
#include <clocksource/timer-sp804.h>
#include "timer-sp.h"
static long __init sp804_get_clock_rate(struct clk *clk)
{
@ -71,6 +72,11 @@ static u64 notrace sp804_read(void)
return ~readl_relaxed(sched_clock_base + TIMER_VALUE);
}
void __init sp804_timer_disable(void __iomem *base)
{
writel(0, base + TIMER_CTRL);
}
void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
const char *name,
struct clk *clk,


@ -1,9 +1,13 @@
#ifndef __CLKSOURCE_TIMER_SP804_H
#define __CLKSOURCE_TIMER_SP804_H
struct clk;
void __sp804_clocksource_and_sched_clock_init(void __iomem *,
const char *, struct clk *, int);
void __sp804_clockevents_init(void __iomem *, unsigned int,
struct clk *, const char *);
void sp804_timer_disable(void __iomem *);
static inline void sp804_clocksource_init(void __iomem *base, const char *name)
{
@ -21,3 +25,4 @@ static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq,
__sp804_clockevents_init(base, irq, NULL, name);
}
#endif
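Board code consumes these wrappers roughly as follows; a sketch assuming 'base' maps an SP804 dual timer (whose second timer sits at offset 0x20) and 'irq' comes from the platform:

/* Sketch: one half as clocksource, the other as clockevent.
 * TIMER_PHYS is a hypothetical platform-specific address. */
void __iomem *base = ioremap(TIMER_PHYS, SZ_4K);

sp804_clocksource_init(base, "timer0");
sp804_clockevents_init(base + 0x20, irq, "timer1");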


@ -306,6 +306,20 @@ void devm_clk_put(struct device *dev, struct clk *clk);
* @clk: clock source
* @rate: desired clock rate in Hz
*
* This answers the question "if I were to pass @rate to clk_set_rate(),
* what clock rate would I end up with?" without changing the hardware
* in any way. In other words:
*
* rate = clk_round_rate(clk, r);
*
* and:
*
* clk_set_rate(clk, r);
* rate = clk_get_rate(clk);
*
* are equivalent except the former does not modify the clock hardware
* in any way.
*
* Returns rounded clock rate in Hz, or negative errno.
*/
long clk_round_rate(struct clk *clk, unsigned long rate);
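In code, the equivalence above lets a consumer probe a rate before committing to it; a small sketch:

	/* Sketch: see what 48 MHz would become, then apply it. */
	long rounded = clk_round_rate(clk, 48000000);

	if (rounded > 0)
		clk_set_rate(clk, rounded);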
@ -471,19 +485,6 @@ static inline void clk_disable_unprepare(struct clk *clk)
clk_unprepare(clk);
}
/**
* clk_add_alias - add a new clock alias
* @alias: name for clock alias
* @alias_dev_name: device name
* @id: platform specific clock name
* @dev: device
*
* Allows using generic clock names for drivers by adding a new alias.
* Assumes clkdev, see clkdev.h for more info.
*/
int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
struct device *dev);
struct device_node;
struct of_phandle_args;


@ -22,6 +22,7 @@ struct clk_lookup {
const char *dev_id;
const char *con_id;
struct clk *clk;
struct clk_hw *clk_hw;
};
#define CLKDEV_INIT(d, n, c) \
@ -37,8 +38,11 @@ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
const char *dev_fmt, ...);
void clkdev_add_table(struct clk_lookup *, size_t);
int clk_add_alias(const char *, const char *, char *, struct device *);
int clk_add_alias(const char *, const char *, const char *, struct device *);
int clk_register_clkdev(struct clk *, const char *, const char *, ...);
int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);


@ -162,12 +162,11 @@ static int __init migor_init(void)
if (ret < 0)
return ret;
siumckb_lookup = clkdev_alloc(&siumckb_clk, "siumckb_clk", NULL);
siumckb_lookup = clkdev_create(&siumckb_clk, "siumckb_clk", NULL);
if (!siumckb_lookup) {
ret = -ENOMEM;
goto eclkdevalloc;
}
clkdev_add(siumckb_lookup);
/* Port number used on this machine: port B */
migor_snd_device = platform_device_alloc("soc-audio", 1);