Merge branches 'devel-stable', 'entry', 'fixes', 'mach-types', 'misc' and 'smp-hotplug' into for-linus

@@ -462,6 +462,7 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
sys->busnr = busnr;
sys->swizzle = hw->swizzle;
sys->map_irq = hw->map_irq;
sys->align_resource = hw->align_resource;
INIT_LIST_HEAD(&sys->resources);

if (hw->private_data)
@@ -574,6 +575,8 @@ char * __init pcibios_setup(char *str)
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
struct pci_sys_data *sys = dev->sysdata;
resource_size_t start = res->start;

if (res->flags & IORESOURCE_IO && start & 0x300)
@@ -581,6 +584,9 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,

start = (start + align - 1) & ~(align - 1);

if (sys->align_resource)
return sys->align_resource(dev, res, start, size, align);

return start;
}

@@ -192,18 +192,6 @@ __dabt_svc:
svc_entry
mov r2, sp
dabt_helper

@
@ IRQs off again before pulling preserved data off the stack
@
disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
tst r5, #PSR_I_BIT
bleq trace_hardirqs_on
tst r5, #PSR_I_BIT
blne trace_hardirqs_off
#endif
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__dabt_svc)
@@ -223,12 +211,7 @@ __irq_svc:
blne svc_preempt
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
@ The parent context IRQs must have been enabled to get here in
@ the first place, so there's no point checking the PSR I bit.
bl trace_hardirqs_on
#endif
svc_exit r5 @ return from exception
svc_exit r5, irq = 1 @ return from exception
UNWIND(.fnend )
ENDPROC(__irq_svc)

@@ -295,22 +278,8 @@ __und_svc_fault:
mov r0, sp @ struct pt_regs *regs
bl __und_fault

@
@ IRQs off again before pulling preserved data off the stack
@
__und_svc_finish:
disable_irq_notrace

@
@ restore SPSR and restart the instruction
@
ldr r5, [sp, #S_PSR] @ Get SVC cpsr
#ifdef CONFIG_TRACE_IRQFLAGS
tst r5, #PSR_I_BIT
bleq trace_hardirqs_on
tst r5, #PSR_I_BIT
blne trace_hardirqs_off
#endif
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__und_svc)
@@ -320,18 +289,6 @@ __pabt_svc:
svc_entry
mov r2, sp @ regs
pabt_helper

@
@ IRQs off again before pulling preserved data off the stack
@
disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
tst r5, #PSR_I_BIT
bleq trace_hardirqs_on
tst r5, #PSR_I_BIT
blne trace_hardirqs_off
#endif
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__pabt_svc)
@@ -396,6 +353,7 @@ ENDPROC(__pabt_svc)
#ifdef CONFIG_IRQSOFF_TRACER
bl trace_hardirqs_off
#endif
ct_user_exit save = 0
.endm

.macro kuser_cmpxchg_check

@@ -35,12 +35,11 @@ ret_fast_syscall:
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
asm_trace_hardirqs_on
#endif

/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
ct_user_enter

restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend )
@@ -71,11 +70,11 @@ ENTRY(ret_to_user_from_irq)
tst r1, #_TIF_WORK_MASK
bne work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
asm_trace_hardirqs_on
#endif

/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
ct_user_enter save = 0

restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
@@ -276,7 +275,13 @@ ENDPROC(ftrace_graph_caller_old)
*/

.macro mcount_enter
/*
* This pad compensates for the push {lr} at the call site. Note that we are
* unable to unwind through a function which does not otherwise save its lr.
*/
UNWIND(.pad #4)
stmdb sp!, {r0-r3, lr}
UNWIND(.save {r0-r3, lr})
.endm

.macro mcount_get_lr reg
@@ -289,6 +294,7 @@ ENDPROC(ftrace_graph_caller_old)
.endm

ENTRY(__gnu_mcount_nc)
UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
mov ip, lr
ldmia sp!, {lr}
@@ -296,17 +302,22 @@ ENTRY(__gnu_mcount_nc)
#else
__mcount
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
UNWIND(.fnstart)
__ftrace_caller
UNWIND(.fnend)
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
UNWIND(.fnstart)
__ftrace_graph_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
#endif

@@ -394,6 +405,7 @@ ENTRY(vector_swi)
mcr p15, 0, ip, c1, c0 @ update control register
#endif
enable_irq
ct_user_exit

get_thread_info tsk
adr tbl, sys_call_table @ load syscall table pointer

@@ -74,7 +74,24 @@
.endm

#ifndef CONFIG_THUMB2_KERNEL
.macro svc_exit, rpsr
.macro svc_exit, rpsr, irq = 0
.if \irq != 0
@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
@ The parent context IRQs must have been enabled to get here in
@ the first place, so there's no point checking the PSR I bit.
bl trace_hardirqs_on
#endif
.else
@ IRQs off again before pulling preserved data off the stack
disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
tst \rpsr, #PSR_I_BIT
bleq trace_hardirqs_on
tst \rpsr, #PSR_I_BIT
blne trace_hardirqs_off
#endif
.endif
msr spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6)
ldr r0, [sp]
@@ -120,7 +137,24 @@
mov pc, \reg
.endm
#else /* CONFIG_THUMB2_KERNEL */
.macro svc_exit, rpsr
.macro svc_exit, rpsr, irq = 0
.if \irq != 0
@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
@ The parent context IRQs must have been enabled to get here in
@ the first place, so there's no point checking the PSR I bit.
bl trace_hardirqs_on
#endif
.else
@ IRQs off again before pulling preserved data off the stack
disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
tst \rpsr, #PSR_I_BIT
bleq trace_hardirqs_on
tst \rpsr, #PSR_I_BIT
blne trace_hardirqs_off
#endif
.endif
ldr lr, [sp, #S_SP] @ top of the stack
ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
clrex @ clear the exclusive monitor
@@ -163,6 +197,34 @@
.endm
#endif /* !CONFIG_THUMB2_KERNEL */

/*
* Context tracking subsystem. Used to instrument transitions
* between user and kernel mode.
*/
.macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
.if \save
stmdb sp!, {r0-r3, ip, lr}
bl user_exit
ldmia sp!, {r0-r3, ip, lr}
.else
bl user_exit
.endif
#endif
.endm

.macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
.if \save
stmdb sp!, {r0-r3, ip, lr}
bl user_enter
ldmia sp!, {r0-r3, ip, lr}
.else
bl user_enter
.endif
#endif
.endm

/*
* These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - r0 to r6.

@@ -267,7 +267,7 @@ __create_page_tables:
addne r6, r6, #1 << SECTION_SHIFT
strne r6, [r3]

#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
sub r4, r4, #4 @ Fixup page table pointer
@ for 64-bit descriptors
#endif

@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)
}

if (err) {
pr_warning("CPU %d debug is powered down!\n", cpu);
pr_warn_once("CPU %d debug is powered down!\n", cpu);
cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
return;
}
@@ -987,7 +987,7 @@ clear_vcr:
isb();

if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
pr_warning("CPU %d failed to disable vector catch\n", cpu);
pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
return;
}

@@ -1007,7 +1007,7 @@ clear_vcr:
}

if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
return;
}

@@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
static struct notifier_block dbg_cpu_pm_nb = {
.notifier_call = dbg_cpu_pm_notify,
};

@@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events,
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;

if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;

if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;

return armpmu->get_event_idx(hw_events, event) >= 0;

@@ -26,7 +26,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
struct return_address_data *data = d;

if (!data->level) {
data->addr = (void *)frame->lr;
data->addr = (void *)frame->pc;

return 1;
} else {
@@ -41,7 +41,8 @@ void *return_address(unsigned int level)
struct stackframe frame;
register unsigned long current_sp asm ("sp");

data.level = level + 1;
data.level = level + 2;
data.addr = NULL;

frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;

@@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
return (cyc * mult) >> shift;
}

static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
{
u64 epoch_ns;
u32 epoch_cyc;

@@ -56,7 +56,6 @@
#include <asm/virt.h>

#include "atags.h"
#include "tcm.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
@@ -353,6 +352,23 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
unsigned int divide_instrs;

if (cpu_architecture() < CPU_ARCH_ARMv7)
return;

divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

switch (divide_instrs) {
case 2:
elf_hwcap |= HWCAP_IDIVA;
case 1:
elf_hwcap |= HWCAP_IDIVT;
}
}

static void __init feat_v6_fixup(void)
{
int id = read_cpuid_id();
@@ -373,7 +389,7 @@ static void __init feat_v6_fixup(void)
*
* cpu_init sets up the per-CPU stacks.
*/
void cpu_init(void)
void notrace cpu_init(void)
{
unsigned int cpu = smp_processor_id();
struct stack *stk = &stacks[cpu];
@@ -483,8 +499,11 @@ static void __init setup_processor(void)
snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
list->elf_name, ENDIANNESS);
elf_hwcap = list->elf_hwcap;

cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
elf_hwcap &= ~HWCAP_THUMB;
elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

feat_v6_fixup();
@@ -524,7 +543,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
size -= start & ~PAGE_MASK;
bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_LPAE
#ifndef CONFIG_ARM_LPAE
if (bank->start + size < bank->start) {
printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
"32-bit physical address space\n", (long long)start);
@@ -778,8 +797,6 @@ void __init setup_arch(char **cmdline_p)

reserve_crashkernel();

tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif

@@ -211,6 +211,13 @@ void __cpuinit __cpu_die(unsigned int cpu)
}
printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

/*
* platform_cpu_kill() is generally expected to do the powering off
* and/or cutting of clocks to the dying CPU. Optionally, this may
* be done by the CPU which is dying in preference to supporting
* this call, but that means there is _no_ synchronisation between
* the requesting CPU and the dying CPU actually losing power.
*/
if (!platform_cpu_kill(cpu))
printk("CPU%u: unable to kill\n", cpu);
}
@@ -230,14 +237,41 @@ void __ref cpu_die(void)
idle_task_exit();

local_irq_disable();
mb();

/* Tell __cpu_die() that this CPU is now safe to dispose of */
/*
* Flush the data out of the L1 cache for this CPU. This must be
* before the completion to ensure that data is safely written out
* before platform_cpu_kill() gets called - which may disable
* *this* CPU and power down its cache.
*/
flush_cache_louis();

/*
* Tell __cpu_die() that this CPU is now safe to dispose of. Once
* this returns, power and/or clocks can be removed at any point
* from this CPU and its cache by platform_cpu_kill().
*/
RCU_NONIDLE(complete(&cpu_died));

/*
* actual CPU shutdown procedure is at least platform (if not
* CPU) specific.
* Ensure that the cache lines associated with that completion are
* written out. This covers the case where _this_ CPU is doing the
* powering down, to ensure that the completion is visible to the
* CPU waiting for this one.
*/
flush_cache_louis();

/*
* The actual CPU shutdown procedure is at least platform (if not
* CPU) specific. This may remove power, or it may simply spin.
*
* Platforms are generally expected *NOT* to return from this call,
* although there are some which do because they have no way to
* power down the CPU. These platforms are the _only_ reason we
* have a return path which uses the fragment of assembly below.
*
* The return path should not be used for platforms which can
* power off the CPU.
*/
if (smp_ops.cpu_die)
smp_ops.cpu_die(cpu);
@@ -673,9 +707,6 @@ static int cpufreq_callback(struct notifier_block *nb,
if (freq->flags & CPUFREQ_CONST_LOOPS)
return NOTIFY_OK;

if (arm_delay_ops.const_clock)
return NOTIFY_OK;

if (!per_cpu(l_p_j_ref, cpu)) {
per_cpu(l_p_j_ref, cpu) =
per_cpu(cpu_data, cpu).loops_per_jiffy;

@@ -12,6 +12,7 @@

#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/**********************************************************************/

@@ -69,12 +70,73 @@ static inline void ipi_flush_bp_all(void *ignored)
local_flush_bp_all();
}

#ifdef CONFIG_ARM_ERRATA_798181
static int erratum_a15_798181(void)
{
unsigned int midr = read_cpuid_id();

/* Cortex-A15 r0p0..r3p2 affected */
if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
return 0;
return 1;
}
#else
static int erratum_a15_798181(void)
{
return 0;
}
#endif

static void ipi_flush_tlb_a15_erratum(void *arg)
{
dmb();
}

static void broadcast_tlb_a15_erratum(void)
{
if (!erratum_a15_798181())
return;

dummy_flush_tlb_a15_erratum();
smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
}

static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
int cpu, this_cpu;
cpumask_t mask = { CPU_BITS_NONE };

if (!erratum_a15_798181())
return;

dummy_flush_tlb_a15_erratum();
this_cpu = get_cpu();
for_each_online_cpu(cpu) {
if (cpu == this_cpu)
continue;
/*
* We only need to send an IPI if the other CPUs are running
* the same ASID as the one being invalidated. There is no
* need for locking around the active_asids check since the
* switch_mm() function has at least one dmb() (as required by
* this workaround) in case a context switch happens on
* another CPU after the condition below.
*/
if (atomic64_read(&mm->context.id) ==
atomic64_read(&per_cpu(active_asids, cpu)))
cpumask_set_cpu(cpu, &mask);
}
smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
put_cpu();
}

void flush_tlb_all(void)
{
if (tlb_ops_need_broadcast())
on_each_cpu(ipi_flush_tlb_all, NULL, 1);
else
local_flush_tlb_all();
broadcast_tlb_a15_erratum();
}

void flush_tlb_mm(struct mm_struct *mm)
@@ -83,6 +145,7 @@ void flush_tlb_mm(struct mm_struct *mm)
on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
else
local_flush_tlb_mm(mm);
broadcast_tlb_mm_a15_erratum(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -95,6 +158,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
&ta, 1);
} else
local_flush_tlb_page(vma, uaddr);
broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}

void flush_tlb_kernel_page(unsigned long kaddr)
@@ -105,6 +169,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
} else
local_flush_tlb_kernel_page(kaddr);
broadcast_tlb_a15_erratum();
}

void flush_tlb_range(struct vm_area_struct *vma,
@@ -119,6 +184,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
&ta, 1);
} else
local_flush_tlb_range(vma, start, end);
broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -130,6 +196,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
} else
local_flush_tlb_kernel_range(start, end);
broadcast_tlb_a15_erratum();
}

void flush_bp_all(void)

@@ -17,7 +17,6 @@
#include <asm/mach/map.h>
#include <asm/memory.h>
#include <asm/system_info.h>
#include "tcm.h"

static struct gen_pool *tcm_pool;
static bool dtcm_present;

@@ -1,17 +0,0 @@
/*
* Copyright (C) 2008-2009 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
* TCM memory handling for ARM systems
*
* Author: Linus Walleij <linus.walleij@stericsson.com>
* Author: Rickard Andersson <rickard.andersson@stericsson.com>
*/

#ifdef CONFIG_HAVE_TCM
void __init tcm_init(void);
#else
/* No TCM support, just blank inlines to be optimized out */
inline void tcm_init(void)
{
}
#endif