From 905cdf9dda5d89d843667b2f11da2308d1fd1c34 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 17 Mar 2015 23:37:58 -0400 Subject: [PATCH 01/75] ARM: hisi/hip04: remove the MCPM overhead This platform is currently relying on the MCPM infrastructure for no apparent reason. The MCPM concurrency handling brings no benefits here as there is no asynchronous CPU wake-ups to be concerned about (this is used for CPU hotplug and secondary boot only, not for CPU idle). This platform is also different from the other MCPM users because a given CPU can't shut itself down completely without the assistance of another CPU. This is at odds with the on-going MCPM backend refactoring. To simplify things, this is converted to hook directly into the smp_operations callbacks, bypassing the MCPM infrastructure. Tested-by: Wei Xu Cc: Haojian Zhuang Signed-off-by: Nicolas Pitre --- arch/arm/mach-hisi/platmcpm.c | 127 +++++++++++----------------------- 1 file changed, 42 insertions(+), 85 deletions(-) diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c index 280f3f14f77c..880cbfa9c343 100644 --- a/arch/arm/mach-hisi/platmcpm.c +++ b/arch/arm/mach-hisi/platmcpm.c @@ -6,6 +6,8 @@ * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. */ +#include +#include #include #include #include @@ -13,7 +15,9 @@ #include #include -#include +#include +#include +#include #include "core.h" @@ -94,11 +98,16 @@ static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on) } while (data != readl_relaxed(fabric + FAB_SF_MODE)); } -static int hip04_mcpm_power_up(unsigned int cpu, unsigned int cluster) +static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle) { + unsigned int mpidr, cpu, cluster; unsigned long data; void __iomem *sys_dreq, *sys_status; + mpidr = cpu_logical_map(l_cpu); + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); + if (!sysctrl) return -ENODEV; if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) @@ -118,6 +127,7 @@ static int hip04_mcpm_power_up(unsigned int cpu, unsigned int cluster) cpu_relax(); data = readl_relaxed(sys_status); } while (data & CLUSTER_DEBUG_RESET_STATUS); + hip04_set_snoop_filter(cluster, 1); } data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \ @@ -126,11 +136,15 @@ static int hip04_mcpm_power_up(unsigned int cpu, unsigned int cluster) do { cpu_relax(); } while (data == readl_relaxed(sys_status)); + /* * We may fail to power up core again without this delay. * It's not mentioned in document. It's found by test. */ udelay(20); + + arch_send_wakeup_ipi_mask(cpumask_of(l_cpu)); + out: hip04_cpu_table[cluster][cpu]++; spin_unlock_irq(&boot_lock); @@ -138,31 +152,29 @@ out: return 0; } -static void hip04_mcpm_power_down(void) +static void hip04_cpu_die(unsigned int l_cpu) { unsigned int mpidr, cpu, cluster; - bool skip_wfi = false, last_man = false; + bool last_man; - mpidr = read_cpuid_mpidr(); + mpidr = cpu_logical_map(l_cpu); cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); - __mcpm_cpu_going_down(cpu, cluster); - spin_lock(&boot_lock); - BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP); hip04_cpu_table[cluster][cpu]--; if (hip04_cpu_table[cluster][cpu] == 1) { /* A power_up request went ahead of us. 
*/ - skip_wfi = true; + spin_unlock(&boot_lock); + return; } else if (hip04_cpu_table[cluster][cpu] > 1) { pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu); BUG(); } last_man = hip04_cluster_is_down(cluster); - if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { - spin_unlock(&boot_lock); + spin_unlock(&boot_lock); + if (last_man) { /* Since it's Cortex A15, disable L2 prefetching. */ asm volatile( "mcr p15, 1, %0, c15, c0, 3 \n\t" @@ -170,34 +182,30 @@ static void hip04_mcpm_power_down(void) "dsb " : : "r" (0x400) ); v7_exit_coherency_flush(all); - hip04_set_snoop_filter(cluster, 0); - __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN); } else { - spin_unlock(&boot_lock); v7_exit_coherency_flush(louis); } - __mcpm_cpu_down(cpu, cluster); - - if (!skip_wfi) + for (;;) wfi(); } -static int hip04_mcpm_wait_for_powerdown(unsigned int cpu, unsigned int cluster) +static int hip04_cpu_kill(unsigned int l_cpu) { + unsigned int mpidr, cpu, cluster; unsigned int data, tries, count; - int ret = -ETIMEDOUT; + mpidr = cpu_logical_map(l_cpu); + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); BUG_ON(cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER); count = TIMEOUT_MSEC / POLL_MSEC; spin_lock_irq(&boot_lock); for (tries = 0; tries < count; tries++) { - if (hip04_cpu_table[cluster][cpu]) { - ret = -EBUSY; + if (hip04_cpu_table[cluster][cpu]) goto err; - } cpu_relax(); data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster)); if (data & CORE_WFI_STATUS(cpu)) @@ -220,64 +228,19 @@ static int hip04_mcpm_wait_for_powerdown(unsigned int cpu, unsigned int cluster) } if (tries >= count) goto err; + if (hip04_cluster_is_down(cluster)) + hip04_set_snoop_filter(cluster, 0); spin_unlock_irq(&boot_lock); - return 0; + return 1; err: spin_unlock_irq(&boot_lock); - return ret; + return 0; } -static void hip04_mcpm_powered_up(void) -{ - unsigned int mpidr, cpu, cluster; - - mpidr = read_cpuid_mpidr(); - cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); - - spin_lock(&boot_lock); - if (!hip04_cpu_table[cluster][cpu]) - hip04_cpu_table[cluster][cpu] = 1; - spin_unlock(&boot_lock); -} - -static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level) -{ - asm volatile (" \n" -" cmp r0, #0 \n" -" bxeq lr \n" - /* calculate fabric phys address */ -" adr r2, 2f \n" -" ldmia r2, {r1, r3} \n" -" sub r0, r2, r1 \n" -" ldr r2, [r0, r3] \n" - /* get cluster id from MPIDR */ -" mrc p15, 0, r0, c0, c0, 5 \n" -" ubfx r1, r0, #8, #8 \n" - /* 1 << cluster id */ -" mov r0, #1 \n" -" mov r3, r0, lsl r1 \n" -" ldr r0, [r2, #"__stringify(FAB_SF_MODE)"] \n" -" tst r0, r3 \n" -" bxne lr \n" -" orr r1, r0, r3 \n" -" str r1, [r2, #"__stringify(FAB_SF_MODE)"] \n" -"1: ldr r0, [r2, #"__stringify(FAB_SF_MODE)"] \n" -" tst r0, r3 \n" -" beq 1b \n" -" bx lr \n" - -" .align 2 \n" -"2: .word . 
\n" -" .word fabric_phys_addr \n" - ); -} - -static const struct mcpm_platform_ops hip04_mcpm_ops = { - .power_up = hip04_mcpm_power_up, - .power_down = hip04_mcpm_power_down, - .wait_for_powerdown = hip04_mcpm_wait_for_powerdown, - .powered_up = hip04_mcpm_powered_up, +static struct smp_operations __initdata hip04_smp_ops = { + .smp_boot_secondary = hip04_boot_secondary, + .cpu_die = hip04_cpu_die, + .cpu_kill = hip04_cpu_kill, }; static bool __init hip04_cpu_table_init(void) @@ -298,7 +261,7 @@ static bool __init hip04_cpu_table_init(void) return true; } -static int __init hip04_mcpm_init(void) +static int __init hip04_smp_init(void) { struct device_node *np, *np_sctl, *np_fab; struct resource fab_res; @@ -353,10 +316,6 @@ static int __init hip04_mcpm_init(void) ret = -EINVAL; goto err_table; } - ret = mcpm_platform_register(&hip04_mcpm_ops); - if (ret) { - goto err_table; - } /* * Fill the instruction address that is used after secondary core @@ -364,13 +323,11 @@ static int __init hip04_mcpm_init(void) */ writel_relaxed(hip04_boot_method[0], relocation); writel_relaxed(0xa5a5a5a5, relocation + 4); /* magic number */ - writel_relaxed(virt_to_phys(mcpm_entry_point), relocation + 8); + writel_relaxed(virt_to_phys(secondary_startup), relocation + 8); writel_relaxed(0, relocation + 12); iounmap(relocation); - mcpm_sync_init(hip04_mcpm_power_up_setup); - mcpm_smp_set_ops(); - pr_info("HiP04 MCPM initialized\n"); + smp_set_ops(&hip04_smp_ops); return ret; err_table: iounmap(fabric); @@ -383,4 +340,4 @@ err_reloc: err: return ret; } -early_initcall(hip04_mcpm_init); +early_initcall(hip04_smp_init); From 77404d81cadf192cc1261d6269f622a06b83cdd5 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 28 Apr 2015 13:44:00 -0400 Subject: [PATCH 02/75] ARM: MCPM: remove backward compatibility code Now that no one uses the old callbacks anymore, let's remove them and associated support code. Signed-off-by: Nicolas Pitre Acked-by: Dave Martin --- arch/arm/common/mcpm_entry.c | 47 +++++------------------------------- arch/arm/include/asm/mcpm.h | 6 ----- 2 files changed, 6 insertions(+), 47 deletions(-) diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c index 5f8a52ac7edf..0908f96278c4 100644 --- a/arch/arm/common/mcpm_entry.c +++ b/arch/arm/common/mcpm_entry.c @@ -78,16 +78,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster) bool cpu_is_down, cluster_is_down; int ret = 0; + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); if (!platform_ops) return -EUNATCH; /* try not to shadow power_up errors */ might_sleep(); - /* backward compatibility callback */ - if (platform_ops->power_up) - return platform_ops->power_up(cpu, cluster); - - pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); - /* * Since this is called with IRQs enabled, and no arch_spin_lock_irq * variant exists, we need to disable IRQs manually here. @@ -128,29 +123,17 @@ void mcpm_cpu_power_down(void) bool cpu_going_down, last_man; phys_reset_t phys_reset; - if (WARN_ON_ONCE(!platform_ops)) - return; - BUG_ON(!irqs_disabled()); - - /* - * Do this before calling into the power_down method, - * as it might not always be safe to do afterwards. 
- */ - setup_mm_for_reboot(); - - /* backward compatibility callback */ - if (platform_ops->power_down) { - platform_ops->power_down(); - goto not_dead; - } - mpidr = read_cpuid_mpidr(); cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); + if (WARN_ON_ONCE(!platform_ops)) + return; + BUG_ON(!irqs_disabled()); + + setup_mm_for_reboot(); __mcpm_cpu_going_down(cpu, cluster); - arch_spin_lock(&mcpm_lock); BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP); @@ -187,7 +170,6 @@ void mcpm_cpu_power_down(void) if (cpu_going_down) wfi(); -not_dead: /* * It is possible for a power_up request to happen concurrently * with a power_down request for the same CPU. In this case the @@ -224,17 +206,6 @@ void mcpm_cpu_suspend(u64 expected_residency) if (WARN_ON_ONCE(!platform_ops)) return; - /* backward compatibility callback */ - if (platform_ops->suspend) { - phys_reset_t phys_reset; - BUG_ON(!irqs_disabled()); - setup_mm_for_reboot(); - platform_ops->suspend(expected_residency); - phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); - phys_reset(virt_to_phys(mcpm_entry_point)); - BUG(); - } - /* Some platforms might have to enable special resume modes, etc. */ if (platform_ops->cpu_suspend_prepare) { unsigned int mpidr = read_cpuid_mpidr(); @@ -256,12 +227,6 @@ int mcpm_cpu_powered_up(void) if (!platform_ops) return -EUNATCH; - /* backward compatibility callback */ - if (platform_ops->powered_up) { - platform_ops->powered_up(); - return 0; - } - mpidr = read_cpuid_mpidr(); cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h index 50b378f59e08..e2118c941dbf 100644 --- a/arch/arm/include/asm/mcpm.h +++ b/arch/arm/include/asm/mcpm.h @@ -234,12 +234,6 @@ struct mcpm_platform_ops { void (*cpu_is_up)(unsigned int cpu, unsigned int cluster); void (*cluster_is_up)(unsigned int cluster); int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster); - - /* deprecated callbacks */ - int (*power_up)(unsigned int cpu, unsigned int cluster); - void (*power_down)(void); - void (*suspend)(u64); - void (*powered_up)(void); }; /** From 7cc8b991cdc985aaa73bf9c429c810cd442fb74d Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 28 Apr 2015 14:11:07 -0400 Subject: [PATCH 03/75] ARM: MCPM: make internal helpers private to the core code This concerns the following helpers: __mcpm_cpu_going_down() __mcpm_cpu_down() __mcpm_outbound_enter_critical() __mcpm_outbound_leave_critical() __mcpm_cluster_state() They are and should only be used by the core code now. Therefore their declarations are removed from mcpm.h and their definitions are made static, hence the need to move them before their users which accounts for the bulk of this patch. This left the mcpm_sync_struct definition at an odd location, therefore it is moved as well with some comment clarifications. Signed-off-by: Nicolas Pitre Acked-by: Dave Martin --- arch/arm/common/mcpm_entry.c | 229 ++++++++++++++++++----------------- arch/arm/include/asm/mcpm.h | 52 ++++---- 2 files changed, 138 insertions(+), 143 deletions(-) diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c index 0908f96278c4..c5fe2e33e6c3 100644 --- a/arch/arm/common/mcpm_entry.c +++ b/arch/arm/common/mcpm_entry.c @@ -20,6 +20,121 @@ #include #include + +struct sync_struct mcpm_sync; + +/* + * __mcpm_cpu_going_down: Indicates that the cpu is being torn down. 
+ * This must be called at the point of committing to teardown of a CPU. + * The CPU cache (SCTRL.C bit) is expected to still be active. + */ +static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) +{ + mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; + sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); +} + +/* + * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the + * cluster can be torn down without disrupting this CPU. + * To avoid deadlocks, this must be called before a CPU is powered down. + * The CPU cache (SCTRL.C bit) is expected to be off. + * However L2 cache might or might not be active. + */ +static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) +{ + dmb(); + mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; + sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); + sev(); +} + +/* + * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section. + * @state: the final state of the cluster: + * CLUSTER_UP: no destructive teardown was done and the cluster has been + * restored to the previous state (CPU cache still active); or + * CLUSTER_DOWN: the cluster has been torn-down, ready for power-off + * (CPU cache disabled, L2 cache either enabled or disabled). + */ +static void __mcpm_outbound_leave_critical(unsigned int cluster, int state) +{ + dmb(); + mcpm_sync.clusters[cluster].cluster = state; + sync_cache_w(&mcpm_sync.clusters[cluster].cluster); + sev(); +} + +/* + * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section. + * This function should be called by the last man, after local CPU teardown + * is complete. CPU cache expected to be active. + * + * Returns: + * false: the critical section was not entered because an inbound CPU was + * observed, or the cluster is already being set up; + * true: the critical section was entered: it is now safe to tear down the + * cluster. + */ +static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster) +{ + unsigned int i; + struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster]; + + /* Warn inbound CPUs that the cluster is being torn down: */ + c->cluster = CLUSTER_GOING_DOWN; + sync_cache_w(&c->cluster); + + /* Back out if the inbound cluster is already in the critical region: */ + sync_cache_r(&c->inbound); + if (c->inbound == INBOUND_COMING_UP) + goto abort; + + /* + * Wait for all CPUs to get out of the GOING_DOWN state, so that local + * teardown is complete on each CPU before tearing down the cluster. + * + * If any CPU has been woken up again from the DOWN state, then we + * shouldn't be taking the cluster down at all: abort in that case. 
+ */ + sync_cache_r(&c->cpus); + for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) { + int cpustate; + + if (i == cpu) + continue; + + while (1) { + cpustate = c->cpus[i].cpu; + if (cpustate != CPU_GOING_DOWN) + break; + + wfe(); + sync_cache_r(&c->cpus[i].cpu); + } + + switch (cpustate) { + case CPU_DOWN: + continue; + + default: + goto abort; + } + } + + return true; + +abort: + __mcpm_outbound_leave_critical(cluster, CLUSTER_UP); + return false; +} + +static int __mcpm_cluster_state(unsigned int cluster) +{ + sync_cache_r(&mcpm_sync.clusters[cluster].cluster); + return mcpm_sync.clusters[cluster].cluster; +} + extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER]; void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr) @@ -299,120 +414,6 @@ int __init mcpm_loopback(void (*cache_disable)(void)) #endif -struct sync_struct mcpm_sync; - -/* - * __mcpm_cpu_going_down: Indicates that the cpu is being torn down. - * This must be called at the point of committing to teardown of a CPU. - * The CPU cache (SCTRL.C bit) is expected to still be active. - */ -void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) -{ - mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; - sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); -} - -/* - * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the - * cluster can be torn down without disrupting this CPU. - * To avoid deadlocks, this must be called before a CPU is powered down. - * The CPU cache (SCTRL.C bit) is expected to be off. - * However L2 cache might or might not be active. - */ -void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) -{ - dmb(); - mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; - sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); - sev(); -} - -/* - * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section. - * @state: the final state of the cluster: - * CLUSTER_UP: no destructive teardown was done and the cluster has been - * restored to the previous state (CPU cache still active); or - * CLUSTER_DOWN: the cluster has been torn-down, ready for power-off - * (CPU cache disabled, L2 cache either enabled or disabled). - */ -void __mcpm_outbound_leave_critical(unsigned int cluster, int state) -{ - dmb(); - mcpm_sync.clusters[cluster].cluster = state; - sync_cache_w(&mcpm_sync.clusters[cluster].cluster); - sev(); -} - -/* - * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section. - * This function should be called by the last man, after local CPU teardown - * is complete. CPU cache expected to be active. - * - * Returns: - * false: the critical section was not entered because an inbound CPU was - * observed, or the cluster is already being set up; - * true: the critical section was entered: it is now safe to tear down the - * cluster. - */ -bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster) -{ - unsigned int i; - struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster]; - - /* Warn inbound CPUs that the cluster is being torn down: */ - c->cluster = CLUSTER_GOING_DOWN; - sync_cache_w(&c->cluster); - - /* Back out if the inbound cluster is already in the critical region: */ - sync_cache_r(&c->inbound); - if (c->inbound == INBOUND_COMING_UP) - goto abort; - - /* - * Wait for all CPUs to get out of the GOING_DOWN state, so that local - * teardown is complete on each CPU before tearing down the cluster. 
- * - * If any CPU has been woken up again from the DOWN state, then we - * shouldn't be taking the cluster down at all: abort in that case. - */ - sync_cache_r(&c->cpus); - for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) { - int cpustate; - - if (i == cpu) - continue; - - while (1) { - cpustate = c->cpus[i].cpu; - if (cpustate != CPU_GOING_DOWN) - break; - - wfe(); - sync_cache_r(&c->cpus[i].cpu); - } - - switch (cpustate) { - case CPU_DOWN: - continue; - - default: - goto abort; - } - } - - return true; - -abort: - __mcpm_outbound_leave_critical(cluster, CLUSTER_UP); - return false; -} - -int __mcpm_cluster_state(unsigned int cluster) -{ - sync_cache_r(&mcpm_sync.clusters[cluster].cluster); - return mcpm_sync.clusters[cluster].cluster; -} - extern unsigned long mcpm_power_up_setup_phys; int __init mcpm_sync_init( diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h index e2118c941dbf..6a40d5f8db60 100644 --- a/arch/arm/include/asm/mcpm.h +++ b/arch/arm/include/asm/mcpm.h @@ -245,35 +245,6 @@ struct mcpm_platform_ops { */ int __init mcpm_platform_register(const struct mcpm_platform_ops *ops); -/* Synchronisation structures for coordinating safe cluster setup/teardown: */ - -/* - * When modifying this structure, make sure you update the MCPM_SYNC_ defines - * to match. - */ -struct mcpm_sync_struct { - /* individual CPU states */ - struct { - s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE); - } cpus[MAX_CPUS_PER_CLUSTER]; - - /* cluster state */ - s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE); - - /* inbound-side state */ - s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE); -}; - -struct sync_struct { - struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; -}; - -void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster); -void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster); -void __mcpm_outbound_leave_critical(unsigned int cluster, int state); -bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster); -int __mcpm_cluster_state(unsigned int cluster); - /** * mcpm_sync_init - Initialize the cluster synchronization support * @@ -312,6 +283,29 @@ int __init mcpm_loopback(void (*cache_disable)(void)); void __init mcpm_smp_set_ops(void); +/* + * Synchronisation structures for coordinating safe cluster setup/teardown. + * This is private to the MCPM core code and shared between C and assembly. + * When modifying this structure, make sure you update the MCPM_SYNC_ defines + * to match. + */ +struct mcpm_sync_struct { + /* individual CPU states */ + struct { + s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE); + } cpus[MAX_CPUS_PER_CLUSTER]; + + /* cluster state */ + s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE); + + /* inbound-side state */ + s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE); +}; + +struct sync_struct { + struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; +}; + #else /* From 1c2c7d51c8101ab3c5d8585713d2dcd52d77d33e Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 28 Apr 2015 14:51:36 -0400 Subject: [PATCH 04/75] ARM: MCPM: add references to the available documentation in the code Signed-off-by: Nicolas Pitre Acked-by: Dave Martin --- arch/arm/common/mcpm_entry.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c index c5fe2e33e6c3..492467587c58 100644 --- a/arch/arm/common/mcpm_entry.c +++ b/arch/arm/common/mcpm_entry.c @@ -20,6 +20,11 @@ #include #include +/* + * The public API for this code is documented in arch/arm/include/asm/mcpm.h. 
+ * For a comprehensive description of the main algorithm used here, please + * see Documentation/arm/cluster-pm-race-avoidance.txt. + */ struct sync_struct mcpm_sync; From 7895f73169ade9a74940ae6b0b4ee82faf286861 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 28 Apr 2015 15:51:19 -0400 Subject: [PATCH 05/75] ARM: MCPM: remove residency argument from mcpm_cpu_suspend() This is currently unused. If a suspend must be limited to CPU level only by preventing the last man from triggering a cluster level suspend then this should be determined according to many other criteria the MCPM layer is currently not aware of. It is unlikely that mcpm_cpu_suspend() would be the proper conduit for that information anyway. Signed-off-by: Nicolas Pitre Acked-by: Dave Martin --- arch/arm/common/mcpm_entry.c | 2 +- arch/arm/include/asm/mcpm.h | 15 +++++---------- arch/arm/mach-exynos/suspend.c | 8 +------- drivers/cpuidle/cpuidle-big_little.c | 8 +------- 4 files changed, 8 insertions(+), 25 deletions(-) diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c index 492467587c58..a923524d1040 100644 --- a/arch/arm/common/mcpm_entry.c +++ b/arch/arm/common/mcpm_entry.c @@ -321,7 +321,7 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster) return ret; } -void mcpm_cpu_suspend(u64 expected_residency) +void mcpm_cpu_suspend(void) { if (WARN_ON_ONCE(!platform_ops)) return; diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h index 6a40d5f8db60..acd4983d9b1f 100644 --- a/arch/arm/include/asm/mcpm.h +++ b/arch/arm/include/asm/mcpm.h @@ -137,17 +137,12 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster); /** * mcpm_cpu_suspend - bring the calling CPU in a suspended state * - * @expected_residency: duration in microseconds the CPU is expected - * to remain suspended, or 0 if unknown/infinity. - * - * The calling CPU is suspended. The expected residency argument is used - * as a hint by the platform specific backend to implement the appropriate - * sleep state level according to the knowledge it has on wake-up latency - * for the given hardware. + * The calling CPU is suspended. This is similar to mcpm_cpu_power_down() + * except for possible extra platform specific configuration steps to allow + * an asynchronous wake-up e.g. with a pending interrupt. * * If this CPU is found to be the "last man standing" in the cluster - * then the cluster may be prepared for power-down too, if the expected - * residency makes it worthwhile. + * then the cluster may be prepared for power-down too. * * This must be called with interrupts disabled. * @@ -157,7 +152,7 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster); * This will return if mcpm_platform_register() has not been called * previously in which case the caller should take appropriate action. */ -void mcpm_cpu_suspend(u64 expected_residency); +void mcpm_cpu_suspend(void); /** * mcpm_cpu_powered_up - housekeeping workafter a CPU has been powered up diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 3e6aea7f83af..372bd0b099c1 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -311,13 +311,7 @@ static int exynos5420_cpu_suspend(unsigned long arg) if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) { mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume); - - /* - * Residency value passed to mcpm_cpu_suspend back-end - * has to be given clear semantics. Set to 0 as a - * temporary value. 
- */ - mcpm_cpu_suspend(0); + mcpm_cpu_suspend(); } pr_info("Failed to suspend the system\n"); diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c index 40c34faffe59..db2ede565f1a 100644 --- a/drivers/cpuidle/cpuidle-big_little.c +++ b/drivers/cpuidle/cpuidle-big_little.c @@ -108,13 +108,7 @@ static int notrace bl_powerdown_finisher(unsigned long arg) unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); mcpm_set_entry_vector(cpu, cluster, cpu_resume); - - /* - * Residency value passed to mcpm_cpu_suspend back-end - * has to be given clear semantics. Set to 0 as a - * temporary value. - */ - mcpm_cpu_suspend(0); + mcpm_cpu_suspend(); /* return value != 0 means failure */ return 1; From 6b1814cde5c79c6aa4d02c9aedc14a709c2c0737 Mon Sep 17 00:00:00 2001 From: Maxime Coquelin stm32 Date: Fri, 10 Apr 2015 09:46:46 +0100 Subject: [PATCH 06/75] ARM: 8340/1: ARMv7-M: Enlarge vector table up to 256 entries MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit From Cortex-M reference manuals, the nvic supports up to 240 interrupts. So the number of entries in vectors table is up to 256. This patch adds a new config flag to specify the number of external interrupts. Some ifdeferies are added in order to respect the natural alignment without wasting too much space on smaller systems. Acked-by: Uwe Kleine-König Acked-by: Stefan Agner Tested-by: Chanwoo Choi Signed-off-by: Maxime Coquelin Signed-off-by: Russell King --- arch/arm/kernel/entry-v7m.S | 13 +++++++++---- arch/arm/mm/Kconfig | 15 +++++++++++++++ 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S index 8944f4991c3c..b6c8bb9315e7 100644 --- a/arch/arm/kernel/entry-v7m.S +++ b/arch/arm/kernel/entry-v7m.S @@ -117,9 +117,14 @@ ENTRY(__switch_to) ENDPROC(__switch_to) .data - .align 8 +#if CONFIG_CPU_V7M_NUM_IRQ <= 112 + .align 9 +#else + .align 10 +#endif + /* - * Vector table (64 words => 256 bytes natural alignment) + * Vector table (Natural alignment need to be ensured) */ ENTRY(vector_table) .long 0 @ 0 - Reset stack pointer @@ -138,6 +143,6 @@ ENTRY(vector_table) .long __invalid_entry @ 13 - Reserved .long __pendsv_entry @ 14 - PendSV .long __invalid_entry @ 15 - SysTick - .rept 64 - 16 - .long __irq_entry @ 16..64 - External Interrupts + .rept CONFIG_CPU_V7M_NUM_IRQ + .long __irq_entry @ External Interrupts .endr diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index b4f92b9a13ac..6173aa3b7f44 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -604,6 +604,21 @@ config CPU_USE_DOMAINS This option enables or disables the use of domain switching via the set_fs() function. +config CPU_V7M_NUM_IRQ + int "Number of external interrupts connected to the NVIC" + depends on CPU_V7M + default 90 if ARCH_STM32 + default 38 if ARCH_EFM32 + default 240 + help + This option indicates the number of interrupts connected to the NVIC. + The value can be larger than the real number of interrupts supported + by the system, but must not be lower. + The default value is 240, corresponding to the maximum number of + interrupts supported by the NVIC on Cortex-M family. + + If unsure, keep default value. 
+ # # CPU supports 36-bit I/O # From 5bb5d66d89041b7891cb42617343b1e8067cc3fa Mon Sep 17 00:00:00 2001 From: Peter Hurley Date: Mon, 13 Apr 2015 14:18:50 +0100 Subject: [PATCH 07/75] ARM: 8341/1: io: Unpessimize relaxed io accessors commit 195bbcac2e5c12f7fb ("ARM: 7500/1: io: avoid writeback addressing modes for __raw_ accessors") disables writeback addressing modes for raw i/o. However, the "+Q" output constraint forces the compiler to disable load hoist optimizations (because the output constraint informs the compiler of memory stores which the compiler assumes may alias other memory). Since the relaxed accessors only guarantee ordering wrt i/o accesses to the same device and not to main memory, there's never a possibility of an accessor invalidating a hoisted load (because only non-i/o loads would have been hoisted). The effect is especially noticable with complex address inputs in loops. For example, the following code: #include #include static const int *remap; void wr_loop(void __iomem *base, int c, int val) { int i; for (i = 0; i < c; i++) writew_relaxed(val, base + remap[c >> 2]); } generates current master | this patch 0: e3510000 cmp r1, #0 | 0: e3510000 cmp r1, #0 4: d12fff1e bxle lr | 4: d12fff1e bxle lr 8: e3003000 movw r3, #0 | 8: e3c1c003 bic ip, r1, #3 c: e3403000 movt r3, #0 | c: e6ff2072 uxth r2, r2 10: e92d4010 push {r4, lr} | 10: e3a03000 mov r3, #0 14: e6ff2072 uxth r2, r2 | 14: e59cc000 ldr ip, [ip] 18: e3c14003 bic r4, r1, #3 | 18: e080000c add r0, r0, ip 1c: e593e000 ldr lr, [r3] | 20: e3a03000 mov r3, #0 | 1c: e1c020b0 strh r2, [r0] | 20: e2833001 add r3, r3, #1 24: e79ec004 ldr ip, [lr, r4] | 24: e1530001 cmp r3, r1 28: e080c00c add ip, r0, ip | 28: 1afffffb bne 1c 2c: e1cc20b0 strh r2, [ip] | 2c: e12fff1e bx lr 30: e2833001 add r3, r3, #1 | 34: e1530001 cmp r3, r1 | 38: 1afffff9 bne 24 | | 3c: e8bd8010 pop {r4, pc} | Acked-by: Will Deacon Signed-off-by: Peter Hurley Signed-off-by: Russell King --- arch/arm/include/asm/io.h | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index db58deb00aa7..7744e58bca38 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -73,17 +73,16 @@ void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen); static inline void __raw_writew(u16 val, volatile void __iomem *addr) { asm volatile("strh %1, %0" - : "+Q" (*(volatile u16 __force *)addr) - : "r" (val)); + : : "Q" (*(volatile u16 __force *)addr), "r" (val)); } #define __raw_readw __raw_readw static inline u16 __raw_readw(const volatile void __iomem *addr) { u16 val; - asm volatile("ldrh %1, %0" - : "+Q" (*(volatile u16 __force *)addr), - "=r" (val)); + asm volatile("ldrh %0, %1" + : "=r" (val) + : "Q" (*(volatile u16 __force *)addr)); return val; } #endif @@ -92,25 +91,23 @@ static inline u16 __raw_readw(const volatile void __iomem *addr) static inline void __raw_writeb(u8 val, volatile void __iomem *addr) { asm volatile("strb %1, %0" - : "+Qo" (*(volatile u8 __force *)addr) - : "r" (val)); + : : "Qo" (*(volatile u8 __force *)addr), "r" (val)); } #define __raw_writel __raw_writel static inline void __raw_writel(u32 val, volatile void __iomem *addr) { asm volatile("str %1, %0" - : "+Qo" (*(volatile u32 __force *)addr) - : "r" (val)); + : : "Qo" (*(volatile u32 __force *)addr), "r" (val)); } #define __raw_readb __raw_readb static inline u8 __raw_readb(const volatile void __iomem *addr) { u8 val; - asm volatile("ldrb %1, %0" - : "+Qo" (*(volatile 
u8 __force *)addr), - "=r" (val)); + asm volatile("ldrb %0, %1" + : "=r" (val) + : "Qo" (*(volatile u8 __force *)addr)); return val; } @@ -118,9 +115,9 @@ static inline u8 __raw_readb(const volatile void __iomem *addr) static inline u32 __raw_readl(const volatile void __iomem *addr) { u32 val; - asm volatile("ldr %1, %0" - : "+Qo" (*(volatile u32 __force *)addr), - "=r" (val)); + asm volatile("ldr %0, %1" + : "=r" (val) + : "Qo" (*(volatile u32 __force *)addr)); return val; } From 9827e8e55532f5a7967d31da847fbd3185a5b5ed Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 27 Apr 2015 14:55:12 +0100 Subject: [PATCH 08/75] ARM: 8346/1: sa1100: Constify irq_domain_ops The irq_domain_ops are not modified by the driver and the irqdomain core code accepts pointer to a const data. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Russell King --- arch/arm/mach-sa1100/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c index 65aebfa66fe5..ea3eb4144b66 100644 --- a/arch/arm/mach-sa1100/irq.c +++ b/arch/arm/mach-sa1100/irq.c @@ -73,7 +73,7 @@ static int sa1100_normal_irqdomain_map(struct irq_domain *d, return 0; } -static struct irq_domain_ops sa1100_normal_irqdomain_ops = { +static const struct irq_domain_ops sa1100_normal_irqdomain_ops = { .map = sa1100_normal_irqdomain_map, .xlate = irq_domain_xlate_onetwocell, }; From e748994f5cc59e82ef28e31bae680f15fdadb26f Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 4 May 2015 15:22:41 +0100 Subject: [PATCH 09/75] ARM: 8353/1: mm: Fix Cortex-A8 erratum 430973 segfaults for bootloaders and multiarch Looks like apps can be made to segfault easily on armhf distros just by running cpuburn-a8 in the background, then starting apt get update unless erratum 430973 workaround is enabled. This happens on r3p2 also, which has 430973 fixed in hardware. Turns out the reason for this is some bootloaders incorrectly setting the auxilary register IBE bit, which probably causes us to hit erratum 687067 on Cortex-A8 later than r1p2. If the bootloader incorrectly sets the IBE bit in the auxilary control register for Cortex-A8 revisions with 430973 fixed in hardware, we need to call flush BTAC/BTB to avoid segfaults probably caused by erratum 687067. So let's flush BTAC/BTB unconditionally for Cortex-A8. It won't do anything unless the IBE bit is set. Note that we keep the erratum 430973 Kconfig option still around and disabled for multiarch as it may be unsafe to enable for some secure SoC. It is known safe to be enabled for n900, but won't do anything on n900 as the IBE bit needs to be set with SMC. Also note that SoCs probably should also add checks and print warnings for the misconfigured IBE bit depending on the Cortex-A8 revision so the bootloaders can be fixed Cortex-A8 revisions later than r1p2 to not set the IBE bit. Tested-by: Sebastian Reichel Signed-off-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/mm/proc-v7-2level.S | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S index 10405b8d31af..6f2f8f3cb33c 100644 --- a/arch/arm/mm/proc-v7-2level.S +++ b/arch/arm/mm/proc-v7-2level.S @@ -36,14 +36,16 @@ * * It is assumed that: * - we are not using split page tables + * + * Note that we always need to flush BTAC/BTB if IBE is set + * even on Cortex-A8 revisions not affected by 430973. + * If IBE is not set, the flush BTAC/BTB won't do anything. 
*/ ENTRY(cpu_ca8_switch_mm) #ifdef CONFIG_MMU mov r2, #0 -#ifdef CONFIG_ARM_ERRATA_430973 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB #endif -#endif ENTRY(cpu_v7_switch_mm) #ifdef CONFIG_MMU mmid r1, r1 @ get mm->context.id From 7d485f647c1f4a6976264c90447fb0dbf07b111d Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 24 Nov 2014 16:54:35 +0100 Subject: [PATCH 10/75] ARM: 8220/1: allow modules outside of bl range Loading modules far away from the kernel in memory is problematic because the 'bl' instruction only has limited reach, and modules are not built with PLTs. Instead of using the -mlong-calls option (which affects all compiler emitted bl instructions, but not the ones in assembler), this patch allocates some additional space at module load time, and populates it with PLT like veneers when encountering relocations that are out of range. This should work with all relocations against symbols exported by the kernel, including those resulting from GCC generated implicit function calls for ftrace etc. The module memory size increases by about 5% on average, regardless of whether any PLT entries were actually needed. However, due to the page based rounding that occurs when allocating module memory, the average memory footprint increase is negligible. Reviewed-by: Nicolas Pitre Signed-off-by: Ard Biesheuvel Signed-off-by: Russell King --- arch/arm/Kconfig | 17 +++- arch/arm/Makefile | 4 + arch/arm/include/asm/module.h | 12 ++- arch/arm/kernel/Makefile | 1 + arch/arm/kernel/module-plts.c | 181 ++++++++++++++++++++++++++++++++++ arch/arm/kernel/module.c | 32 +++++- arch/arm/kernel/module.lds | 4 + 7 files changed, 248 insertions(+), 3 deletions(-) create mode 100644 arch/arm/kernel/module-plts.c create mode 100644 arch/arm/kernel/module.lds diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 45df48ba0b12..d0950ce75f3e 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -60,7 +60,7 @@ config ARM select HAVE_KPROBES if !XIP_KERNEL select HAVE_KRETPROBES if (HAVE_KPROBES) select HAVE_MEMBLOCK - select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND + select HAVE_MOD_ARCH_SPECIFIC select HAVE_OPROFILE if (HAVE_PERF_EVENTS) select HAVE_OPTPROBES if !THUMB2_KERNEL select HAVE_PERF_EVENTS @@ -1681,6 +1681,21 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE config ARCH_WANT_GENERAL_HUGETLB def_bool y +config ARM_MODULE_PLTS + bool "Use PLTs to allow module memory to spill over into vmalloc area" + depends on MODULES + help + Allocate PLTs when loading modules so that jumps and calls whose + targets are too far away for their relative offsets to be encoded + in the instructions themselves can be bounced via veneers in the + module's PLT. This allows modules to be allocated in the generic + vmalloc area after the dedicated module memory area has been + exhausted. The modules will use slightly more memory, but after + rounding up to page size, the actual memory footprint is usually + the same. 
+ + Say y if you are getting out of memory errors while loading modules + source "mm/Kconfig" config FORCE_MAX_ZONEORDER diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 985227cbbd1b..ffb53e86599e 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -19,6 +19,10 @@ LDFLAGS_vmlinux += --be8 LDFLAGS_MODULE += --be8 endif +ifeq ($(CONFIG_ARM_MODULE_PLTS),y) +LDFLAGS_MODULE += -T $(srctree)/arch/arm/kernel/module.lds +endif + OBJCOPYFLAGS :=-O binary -R .comment -S GZFLAGS :=-9 #KBUILD_CFLAGS +=-pipe diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index ed690c49ef93..e358b7966c06 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -16,11 +16,21 @@ enum { ARM_SEC_UNLIKELY, ARM_SEC_MAX, }; +#endif struct mod_arch_specific { +#ifdef CONFIG_ARM_UNWIND struct unwind_table *unwind[ARM_SEC_MAX]; -}; #endif +#ifdef CONFIG_ARM_MODULE_PLTS + struct elf32_shdr *core_plt; + struct elf32_shdr *init_plt; + int core_plt_count; + int init_plt_count; +#endif +}; + +u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val); /* * Add the ARM architecture version to the version magic string diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 752725dcbf42..32c0990d1968 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle.o obj-$(CONFIG_ISA_DMA_API) += dma.o obj-$(CONFIG_FIQ) += fiq.o fiqasm.o obj-$(CONFIG_MODULES) += armksyms.o module.o +obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_PCI) += bios32.o isa.o obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c new file mode 100644 index 000000000000..71a65c49871d --- /dev/null +++ b/arch/arm/kernel/module-plts.c @@ -0,0 +1,181 @@ +/* + * Copyright (C) 2014 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include +#include + +#define PLT_ENT_STRIDE L1_CACHE_BYTES +#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32)) +#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT) + +#ifdef CONFIG_THUMB2_KERNEL +#define PLT_ENT_LDR __opcode_to_mem_thumb32(0xf8dff000 | \ + (PLT_ENT_STRIDE - 4)) +#else +#define PLT_ENT_LDR __opcode_to_mem_arm(0xe59ff000 | \ + (PLT_ENT_STRIDE - 8)) +#endif + +struct plt_entries { + u32 ldr[PLT_ENT_COUNT]; + u32 lit[PLT_ENT_COUNT]; +}; + +static bool in_init(const struct module *mod, u32 addr) +{ + return addr - (u32)mod->module_init < mod->init_size; +} + +u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) +{ + struct plt_entries *plt, *plt_end; + int c, *count; + + if (in_init(mod, loc)) { + plt = (void *)mod->arch.init_plt->sh_addr; + plt_end = (void *)plt + mod->arch.init_plt->sh_size; + count = &mod->arch.init_plt_count; + } else { + plt = (void *)mod->arch.core_plt->sh_addr; + plt_end = (void *)plt + mod->arch.core_plt->sh_size; + count = &mod->arch.core_plt_count; + } + + /* Look for an existing entry pointing to 'val' */ + for (c = *count; plt < plt_end; c -= PLT_ENT_COUNT, plt++) { + int i; + + if (!c) { + /* Populate a new set of entries */ + *plt = (struct plt_entries){ + { [0 ... 
PLT_ENT_COUNT - 1] = PLT_ENT_LDR, }, + { val, } + }; + ++*count; + return (u32)plt->ldr; + } + for (i = 0; i < PLT_ENT_COUNT; i++) { + if (!plt->lit[i]) { + plt->lit[i] = val; + ++*count; + } + if (plt->lit[i] == val) + return (u32)&plt->ldr[i]; + } + } + BUG(); +} + +static int duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num, + u32 mask) +{ + u32 *loc1, *loc2; + int i; + + for (i = 0; i < num; i++) { + if (rel[i].r_info != rel[num].r_info) + continue; + + /* + * Identical relocation types against identical symbols can + * still result in different PLT entries if the addend in the + * place is different. So resolve the target of the relocation + * to compare the values. + */ + loc1 = (u32 *)(base + rel[i].r_offset); + loc2 = (u32 *)(base + rel[num].r_offset); + if (((*loc1 ^ *loc2) & mask) == 0) + return 1; + } + return 0; +} + +/* Count how many PLT entries we may need */ +static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num) +{ + unsigned int ret = 0; + int i; + + /* + * Sure, this is order(n^2), but it's usually short, and not + * time critical + */ + for (i = 0; i < num; i++) + switch (ELF32_R_TYPE(rel[i].r_info)) { + case R_ARM_CALL: + case R_ARM_PC24: + case R_ARM_JUMP24: + if (!duplicate_rel(base, rel, i, + __opcode_to_mem_arm(0x00ffffff))) + ret++; + break; + case R_ARM_THM_CALL: + case R_ARM_THM_JUMP24: + if (!duplicate_rel(base, rel, i, + __opcode_to_mem_thumb32(0x07ff2fff))) + ret++; + } + return ret; +} + +int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod) +{ + unsigned long core_plts = 0, init_plts = 0; + Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum; + + /* + * To store the PLTs, we expand the .text section for core module code + * and the .init.text section for initialization code. 
+ */ + for (s = sechdrs; s < sechdrs_end; ++s) + if (strcmp(".core.plt", secstrings + s->sh_name) == 0) + mod->arch.core_plt = s; + else if (strcmp(".init.plt", secstrings + s->sh_name) == 0) + mod->arch.init_plt = s; + + if (!mod->arch.core_plt || !mod->arch.init_plt) { + pr_err("%s: sections missing\n", mod->name); + return -ENOEXEC; + } + + for (s = sechdrs + 1; s < sechdrs_end; ++s) { + const Elf32_Rel *rels = (void *)ehdr + s->sh_offset; + int numrels = s->sh_size / sizeof(Elf32_Rel); + Elf32_Shdr *dstsec = sechdrs + s->sh_info; + + if (s->sh_type != SHT_REL) + continue; + + if (strstr(secstrings + s->sh_name, ".init")) + init_plts += count_plts(dstsec->sh_addr, rels, numrels); + else + core_plts += count_plts(dstsec->sh_addr, rels, numrels); + } + + mod->arch.core_plt->sh_type = SHT_NOBITS; + mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; + mod->arch.core_plt->sh_addralign = L1_CACHE_BYTES; + mod->arch.core_plt->sh_size = round_up(core_plts * PLT_ENT_SIZE, + sizeof(struct plt_entries)); + mod->arch.core_plt_count = 0; + + mod->arch.init_plt->sh_type = SHT_NOBITS; + mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; + mod->arch.init_plt->sh_addralign = L1_CACHE_BYTES; + mod->arch.init_plt->sh_size = round_up(init_plts * PLT_ENT_SIZE, + sizeof(struct plt_entries)); + mod->arch.init_plt_count = 0; + pr_debug("%s: core.plt=%x, init.plt=%x\n", __func__, + mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size); + return 0; +} diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index af791f4a6205..efdddcb97dd1 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -40,7 +40,12 @@ #ifdef CONFIG_MMU void *module_alloc(unsigned long size) { - return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, + void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, + GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, + __builtin_return_address(0)); + if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p) + return p; + return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, __builtin_return_address(0)); } @@ -110,6 +115,20 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, offset -= 0x04000000; offset += sym->st_value - loc; + + /* + * Route through a PLT entry if 'offset' exceeds the + * supported range. Note that 'offset + loc + 8' + * contains the absolute jump target, i.e., + * @sym + addend, corrected for the +8 PC bias. + */ + if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && + (offset <= (s32)0xfe000000 || + offset >= (s32)0x02000000)) + offset = get_module_plt(module, loc, + offset + loc + 8) + - loc - 8; + if (offset <= (s32)0xfe000000 || offset >= (s32)0x02000000) { pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", @@ -203,6 +222,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, offset -= 0x02000000; offset += sym->st_value - loc; + /* + * Route through a PLT entry if 'offset' exceeds the + * supported range. 
+ */ + if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && + (offset <= (s32)0xff000000 || + offset >= (s32)0x01000000)) + offset = get_module_plt(module, loc, + offset + loc + 4) + - loc - 4; + if (offset <= (s32)0xff000000 || offset >= (s32)0x01000000) { pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", diff --git a/arch/arm/kernel/module.lds b/arch/arm/kernel/module.lds new file mode 100644 index 000000000000..3682fa107918 --- /dev/null +++ b/arch/arm/kernel/module.lds @@ -0,0 +1,4 @@ +SECTIONS { + .core.plt : { BYTE(0) } + .init.plt : { BYTE(0) } +} From 7ddfe625cbc14c83153c78aacd52a20c5805920e Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 7 May 2015 14:22:40 +0100 Subject: [PATCH 11/75] ARM: optimize memset_io()/memcpy_fromio()/memcpy_toio() If we are building for a LE platform, and we haven't overriden the MMIO ops, then we can optimize the mem*io operations using the standard string functions. Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/io.h | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 7744e58bca38..addfb3dd095f 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -23,6 +23,7 @@ #ifdef __KERNEL__ +#include #include #include #include @@ -316,9 +317,33 @@ extern void _memset_io(volatile void __iomem *, int, size_t); #define writesw(p,d,l) __raw_writesw(p,d,l) #define writesl(p,d,l) __raw_writesl(p,d,l) +#ifndef __ARMBE__ +static inline void memset_io(volatile void __iomem *dst, unsigned c, + size_t count) +{ + memset((void __force *)dst, c, count); +} +#define memset_io(dst,c,count) memset_io(dst,c,count) + +static inline void memcpy_fromio(void *to, const volatile void __iomem *from, + size_t count) +{ + memcpy(to, (const void __force *)from, count); +} +#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count) + +static inline void memcpy_toio(volatile void __iomem *to, const void *from, + size_t count) +{ + memcpy((void __force *)to, from, count); +} +#define memcpy_toio(to,from,count) memcpy_toio(to,from,count) + +#else #define memset_io(c,v,l) _memset_io(c,(v),(l)) #define memcpy_fromio(a,c,l) _memcpy_fromio((a),c,(l)) #define memcpy_toio(c,a,l) _memcpy_toio(c,(a),(l)) +#endif #endif /* readl */ From 13dd92bb4599b5655cafe1f2c0365396a096b94a Mon Sep 17 00:00:00 2001 From: Paul Kocialkowski Date: Wed, 6 May 2015 15:22:36 +0100 Subject: [PATCH 12/75] ARM: 8354/1: Documentation: devicetree: root node serial-number property documentation Open firmware is already using the serial-number property for passing the device's serial number from the bootloader to the kernel. In addition, lshw already has support for scanning this property. The serial number is a string that somewhat represents the device's serial number. It might come from some form of storage (e.g. an eeprom) and be programmed at factory-time by the manufacturer or come from identification bits available in e.g. the SoC (note that the soc_id property in the SoC bus should hold a full account of those bits). The serial number is taken as-is from the bootloader, so it is up to the bootloader to define where the serial number comes from and what length it should be. Some use cases for the serial number require it to have a maximum length (e.g. for USB serial number) and some other cases imply more restrictions on what the serial number should look like (e.g. 
in Android, the ro.serialno property is usually a 16-bytes (plus one null byte) representation of a 64 bit number). Signed-off-by: Paul Kocialkowski Acked-by: Rob Herring Signed-off-by: Russell King --- Documentation/devicetree/booting-without-of.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt index e49e423268c0..04d34f6a58f3 100644 --- a/Documentation/devicetree/booting-without-of.txt +++ b/Documentation/devicetree/booting-without-of.txt @@ -856,6 +856,10 @@ address which can extend beyond that limit. name may clash with standard defined ones, you prefix them with your vendor name and a comma. + Additional properties for the root node: + + - serial-number : a string representing the device's serial number + b) The /cpus node This node is the parent of all individual CPU nodes. It doesn't From 3f599875e5202986b350618a617527ab441bf206 Mon Sep 17 00:00:00 2001 From: Paul Kocialkowski Date: Wed, 6 May 2015 15:23:56 +0100 Subject: [PATCH 13/75] ARM: 8355/1: arch: Show the serial number from devicetree in cpuinfo This grabs the serial number shown in cpuinfo from the serial-number device-tree property in priority. When booting with ATAGs (and without device-tree), the provided number is still shown instead. Signed-off-by: Paul Kocialkowski Signed-off-by: Russell King --- arch/arm/include/asm/system_info.h | 1 + arch/arm/kernel/setup.c | 23 +++++++++++++++++++++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h index 720ea0320a6d..3860cbd401ec 100644 --- a/arch/arm/include/asm/system_info.h +++ b/arch/arm/include/asm/system_info.h @@ -17,6 +17,7 @@ /* information about the system we're running on */ extern unsigned int system_rev; +extern const char *system_serial; extern unsigned int system_serial_low; extern unsigned int system_serial_high; extern unsigned int mem_fclk_21285; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 6c777e908a24..ee3e329ecf58 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -93,6 +93,9 @@ unsigned int __atags_pointer __initdata; unsigned int system_rev; EXPORT_SYMBOL(system_rev); +const char *system_serial; +EXPORT_SYMBOL(system_serial); + unsigned int system_serial_low; EXPORT_SYMBOL(system_serial_low); @@ -839,8 +842,25 @@ arch_initcall(customize_machine); static int __init init_machine_late(void) { + struct device_node *root; + int ret; + if (machine_desc->init_late) machine_desc->init_late(); + + root = of_find_node_by_path("/"); + if (root) { + ret = of_property_read_string(root, "serial-number", + &system_serial); + if (ret) + system_serial = NULL; + } + + if (!system_serial) + system_serial = kasprintf(GFP_KERNEL, "%08x%08x", + system_serial_high, + system_serial_low); + return 0; } late_initcall(init_machine_late); @@ -1109,8 +1129,7 @@ static int c_show(struct seq_file *m, void *v) seq_printf(m, "Hardware\t: %s\n", machine_name); seq_printf(m, "Revision\t: %04x\n", system_rev); - seq_printf(m, "Serial\t\t: %08x%08x\n", - system_serial_high, system_serial_low); + seq_printf(m, "Serial\t\t: %s\n", system_serial); return 0; } From 5890298a834c04aaa9b5fb576e5f2b77e79ab38d Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Apr 2015 14:16:05 +0100 Subject: [PATCH 14/75] ARM: kvm: fix a bad BSYM() usage BSYM() should only be used when refering to local symbols in the same assembly file which are resolved by the assembler, and 
not for linker-fixed up symbols. The use of BSYM() with panic is incorrect as the linker is involved in fixing up this relocation, and it knows whether panic() is ARM or Thumb. Acked-by: Nicolas Pitre Acked-by: Dave Martin Signed-off-by: Russell King --- arch/arm/kvm/interrupts.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 79caf79b304a..87847d2c5f99 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -309,7 +309,7 @@ ENTRY(kvm_call_hyp) THUMB( orr r2, r2, #PSR_T_BIT ) msr spsr_cxsf, r2 mrs r1, ELR_hyp - ldr r2, =BSYM(panic) + ldr r2, =panic msr ELR_hyp, r2 ldr r0, =\panic_str clrex @ Clear exclusive monitor From 14327c662822e5e874cb971a7162067519300ca8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Apr 2015 14:17:25 +0100 Subject: [PATCH 15/75] ARM: replace BSYM() with badr assembly macro BSYM() was invented to allow us to work around a problem with the assembler, where local symbols resolved by the assembler for the 'adr' instruction did not take account of their ISA. Since we don't want BSYM() used elsewhere, replace BSYM() with a new macro 'badr', which is like the 'adr' pseudo-op, but with the BSYM() mechanics integrated into it. This ensures that the BSYM()-ification is only used in conjunction with 'adr'. Acked-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/boot/compressed/head.S | 4 ++-- arch/arm/common/mcpm_head.S | 2 +- arch/arm/include/asm/assembler.h | 17 ++++++++++++++++- arch/arm/include/asm/entry-macro-multi.S | 4 ++-- arch/arm/include/asm/unified.h | 2 -- arch/arm/kernel/entry-armv.S | 12 ++++++------ arch/arm/kernel/entry-common.S | 6 +++--- arch/arm/kernel/entry-ftrace.S | 2 +- arch/arm/kernel/head-nommu.S | 6 +++--- arch/arm/kernel/head.S | 8 ++++---- arch/arm/kernel/sleep.S | 2 +- arch/arm/lib/call_with_stack.S | 2 +- arch/arm/mm/proc-v7m.S | 2 +- 13 files changed, 41 insertions(+), 28 deletions(-) diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 2c45b5709fa4..06e983f59980 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -130,7 +130,7 @@ start: .endr ARM( mov r0, r0 ) ARM( b 1f ) - THUMB( adr r12, BSYM(1f) ) + THUMB( badr r12, 1f ) THUMB( bx r12 ) .word _magic_sig @ Magic numbers to help the loader @@ -447,7 +447,7 @@ dtb_check_done: bl cache_clean_flush - adr r0, BSYM(restart) + badr r0, restart add r0, r0, r6 mov pc, r0 diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S index e02db4b81a66..08b3bb9bc6a2 100644 --- a/arch/arm/common/mcpm_head.S +++ b/arch/arm/common/mcpm_head.S @@ -49,7 +49,7 @@ ENTRY(mcpm_entry_point) ARM_BE8(setend be) - THUMB( adr r12, BSYM(1f) ) + THUMB( badr r12, 1f ) THUMB( bx r12 ) THUMB( .thumb ) 1: diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 186270b3e194..4abe57279c66 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -177,6 +177,21 @@ restore_irqs_notrace \oldcpsr .endm +/* + * Assembly version of "adr rd, BSYM(sym)". This should only be used to + * reference local symbols in the same assembly file which are to be + * resolved by the assembler. Other usage is undefined. + */ + .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo + .macro badr\c, rd, sym +#ifdef CONFIG_THUMB2_KERNEL + adr\c \rd, \sym + 1 +#else + adr\c \rd, \sym +#endif + .endm + .endr + /* * Get current thread_info. 
*/ @@ -326,7 +341,7 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) bne 1f orr \reg, \reg, #PSR_A_BIT - adr lr, BSYM(2f) + badr lr, 2f msr spsr_cxsf, \reg __MSR_ELR_HYP(14) __ERET diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S index 469a2b30fa27..609184f522ee 100644 --- a/arch/arm/include/asm/entry-macro-multi.S +++ b/arch/arm/include/asm/entry-macro-multi.S @@ -10,7 +10,7 @@ @ @ routine called with r0 = irq number, r1 = struct pt_regs * @ - adrne lr, BSYM(1b) + badrne lr, 1b bne asm_do_IRQ #ifdef CONFIG_SMP @@ -23,7 +23,7 @@ ALT_SMP(test_for_ipi r0, r2, r6, lr) ALT_UP_B(9997f) movne r1, sp - adrne lr, BSYM(1b) + badrne lr, 1b bne do_IPI #endif 9997: diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h index 200f9a7cd623..a91ae499614c 100644 --- a/arch/arm/include/asm/unified.h +++ b/arch/arm/include/asm/unified.h @@ -45,7 +45,6 @@ #define THUMB(x...) x #ifdef __ASSEMBLY__ #define W(instr) instr.w -#define BSYM(sym) sym + 1 #else #define WASM(instr) #instr ".w" #endif @@ -59,7 +58,6 @@ #define THUMB(x...) #ifdef __ASSEMBLY__ #define W(instr) instr -#define BSYM(sym) sym #else #define WASM(instr) #instr #endif diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 570306c49406..f8f7398c74c2 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -40,7 +40,7 @@ #ifdef CONFIG_MULTI_IRQ_HANDLER ldr r1, =handle_arch_irq mov r0, sp - adr lr, BSYM(9997f) + badr lr, 9997f ldr pc, [r1] #else arch_irq_handler_default @@ -273,7 +273,7 @@ __und_svc: str r4, [sp, #S_PC] orr r0, r9, r0, lsl #16 #endif - adr r9, BSYM(__und_svc_finish) + badr r9, __und_svc_finish mov r2, r4 bl call_fpe @@ -469,7 +469,7 @@ __und_usr: @ instruction, or the more conventional lr if we are to treat @ this as a real undefined instruction @ - adr r9, BSYM(ret_from_exception) + badr r9, ret_from_exception @ IRQs must be enabled before attempting to read the instruction from @ user space since that could cause a page/translation fault if the @@ -486,7 +486,7 @@ __und_usr: @ r2 = PC value for the following instruction (:= regs->ARM_pc) @ r4 = PC value for the faulting instruction @ lr = 32-bit undefined instruction function - adr lr, BSYM(__und_usr_fault_32) + badr lr, __und_usr_fault_32 b call_fpe __und_usr_thumb: @@ -522,7 +522,7 @@ ARM_BE8(rev16 r0, r0) @ little endian instruction add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update orr r0, r0, r5, lsl #16 - adr lr, BSYM(__und_usr_fault_32) + badr lr, __und_usr_fault_32 @ r0 = the two 16-bit Thumb instructions which caused the exception @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc) @ r4 = PC value for the first 16-bit Thumb instruction @@ -716,7 +716,7 @@ __und_usr_fault_32: __und_usr_fault_16: mov r1, #2 1: mov r0, sp - adr lr, BSYM(ret_from_exception) + badr lr, ret_from_exception b __und_fault ENDPROC(__und_usr_fault_32) ENDPROC(__und_usr_fault_16) diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index f8ccc21fa032..6ab159384667 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -88,7 +88,7 @@ ENTRY(ret_from_fork) bl schedule_tail cmp r5, #0 movne r0, r4 - adrne lr, BSYM(1f) + badrne lr, 1f retne r5 1: get_thread_info tsk b ret_slow_syscall @@ -196,7 +196,7 @@ local_restart: bne __sys_trace cmp scno, #NR_syscalls @ check upper syscall limit - adr lr, BSYM(ret_fast_syscall) @ return address + badr lr, ret_fast_syscall @ return address 
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine add r1, sp, #S_OFF @@ -231,7 +231,7 @@ __sys_trace: add r0, sp, #S_OFF bl syscall_trace_enter - adr lr, BSYM(__sys_trace_return) @ return address + badr lr, __sys_trace_return @ return address mov scno, r0 @ syscall number (possibly new) add r1, sp, #S_R0 + S_OFF @ pointer to regs cmp scno, #NR_syscalls @ check upper syscall limit diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S index fe57c73e70a4..c73c4030ca5d 100644 --- a/arch/arm/kernel/entry-ftrace.S +++ b/arch/arm/kernel/entry-ftrace.S @@ -87,7 +87,7 @@ 1: mcount_get_lr r1 @ lr of instrumented func mcount_adjust_addr r0, lr @ instrumented function - adr lr, BSYM(2f) + badr lr, 2f mov pc, r2 2: mcount_exit .endm diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index aebfbf79a1a3..b6f3cb6333e4 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -46,7 +46,7 @@ ENTRY(stext) .arm ENTRY(stext) - THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. + THUMB( badr r9, 1f ) @ Kernel is always entered in ARM. THUMB( bx r9 ) @ If this is a Thumb-2 kernel, THUMB( .thumb ) @ switch to Thumb now. THUMB(1: ) @@ -79,7 +79,7 @@ ENTRY(stext) #endif ldr r13, =__mmap_switched @ address to jump to after @ initialising sctlr - adr lr, BSYM(1f) @ return (PIC) address + badr lr, 1f @ return (PIC) address ldr r12, [r10, #PROCINFO_INITFUNC] add r12, r12, r10 ret r12 @@ -115,7 +115,7 @@ ENTRY(secondary_startup) bl __setup_mpu @ Initialize the MPU #endif - adr lr, BSYM(__after_proc_init) @ return address + badr lr, __after_proc_init @ return address mov r13, r12 @ __secondary_switched address ldr r12, [r10, #PROCINFO_INITFUNC] add r12, r12, r10 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 3637973a9708..ab3c478aaced 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -80,7 +80,7 @@ ENTRY(stext) ARM_BE8(setend be ) @ ensure we are in BE8 mode - THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. + THUMB( badr r9, 1f ) @ Kernel is always entered in ARM. THUMB( bx r9 ) @ If this is a Thumb-2 kernel, THUMB( .thumb ) @ switch to Thumb now. THUMB(1: ) @@ -136,7 +136,7 @@ ENTRY(stext) */ ldr r13, =__mmap_switched @ address to jump to after @ mmu has been enabled - adr lr, BSYM(1f) @ return (PIC) address + badr lr, 1f @ return (PIC) address mov r8, r4 @ set TTBR1 to swapper_pg_dir ldr r12, [r10, #PROCINFO_INITFUNC] add r12, r12, r10 @@ -348,7 +348,7 @@ __turn_mmu_on_loc: .text ENTRY(secondary_startup_arm) .arm - THUMB( adr r9, BSYM(1f) ) @ Kernel is entered in ARM. + THUMB( badr r9, 1f ) @ Kernel is entered in ARM. THUMB( bx r9 ) @ If this is a Thumb-2 kernel, THUMB( .thumb ) @ switch to Thumb now. 
THUMB(1: ) @@ -384,7 +384,7 @@ ENTRY(secondary_startup) ldr r4, [r7, lr] @ get secondary_data.pgdir add r7, r7, #4 ldr r8, [r7, lr] @ get secondary_data.swapper_pg_dir - adr lr, BSYM(__enable_mmu) @ return address + badr lr, __enable_mmu @ return address mov r13, r12 @ __secondary_switched address ldr r12, [r10, #PROCINFO_INITFUNC] add r12, r12, r10 @ initialise processor diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 7d37bfc50830..76bb3128e135 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -81,7 +81,7 @@ ENTRY(__cpu_suspend) mov r1, r4 @ size of save block add r0, sp, #8 @ pointer to save block bl __cpu_suspend_save - adr lr, BSYM(cpu_suspend_abort) + badr lr, cpu_suspend_abort ldmfd sp!, {r0, pc} @ call suspend fn ENDPROC(__cpu_suspend) .ltorg diff --git a/arch/arm/lib/call_with_stack.S b/arch/arm/lib/call_with_stack.S index ed1a421813cb..bf3a40889205 100644 --- a/arch/arm/lib/call_with_stack.S +++ b/arch/arm/lib/call_with_stack.S @@ -35,7 +35,7 @@ ENTRY(call_with_stack) mov r2, r0 mov r0, r1 - adr lr, BSYM(1f) + badr lr, 1f ret r2 1: ldr lr, [sp] diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S index e08e1f2bab76..67d9209077c6 100644 --- a/arch/arm/mm/proc-v7m.S +++ b/arch/arm/mm/proc-v7m.S @@ -98,7 +98,7 @@ __v7m_setup: str r5, [r0, V7M_SCB_SHPR3] @ set PendSV priority @ SVC to run the kernel in this mode - adr r1, BSYM(1f) + badr r1, 1f ldr r5, [r12, #11 * 4] @ read the SVC vector entry str r1, [r12, #11 * 4] @ write the temporary SVC vector entry mov r6, lr @ save LR From d965b0fca7dcde3f82c982e0bf1631069fdeb8c9 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 15 May 2015 11:56:45 +0100 Subject: [PATCH 16/75] ARM: l2c: restore the behaviour documented above l2c_enable() l2c_enable() is documented that it must not be called if the cache has already been enabled. Unfortunately, commit 6b49241ac252 ("ARM: 8259/1: l2c: Refactor the driver to use commit-like interface") changed this without updating the comment, for very little reason. Revert this change and restore the expected behaviour. Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index e309c8f35af5..1471c0f29bd3 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -129,10 +129,6 @@ static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock) { unsigned long flags; - /* Do not touch the controller if already enabled. */ - if (readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN) - return; - l2x0_saved_regs.aux_ctrl = aux; l2c_configure(base); @@ -163,7 +159,11 @@ static void l2c_save(void __iomem *base) static void l2c_resume(void) { - l2c_enable(l2x0_base, l2x0_saved_regs.aux_ctrl, l2x0_data->num_lock); + void __iomem *base = l2x0_base; + + /* Do not touch the controller if already enabled. */ + if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) + l2c_enable(base, l2x0_saved_regs.aux_ctrl, l2x0_data->num_lock); } /* From 7705dd256ce363f8b01429efb2f0dc4d1ee23c89 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 15 May 2015 11:07:14 +0100 Subject: [PATCH 17/75] ARM: l2c: write auxiliary control register first Before calling the controller specific configuration function, write the auxiliary control register first, so that bits shared with other registers (such as the prefetch control register) are not overwritten by the later write to the auxctrl register. 
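For clarity, a minimal sketch of the ordering this patch establishes. It is illustrative only: the function name is hypothetical, the body simply restates the hunk below using the driver's existing helpers, and the note about shared prefetch bits comes from this commit text rather than from the code.

static void l2c_configure_sketch(void __iomem *base)
{
	/* 1. Program AUX_CTRL first, from the saved value ... */
	l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);

	/*
	 * 2. ... then let the variant-specific hook program registers
	 * such as the prefetch control register, whose bits may be
	 * mirrored into AUX_CTRL; writing AUX_CTRL last would clobber
	 * what the hook just set up.
	 */
	if (l2x0_data->configure)
		l2x0_data->configure(base);
}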
Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 1471c0f29bd3..977eb9f4f77e 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -115,10 +115,10 @@ static void l2c_configure(void __iomem *base) return; } + l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL); + if (l2x0_data->configure) l2x0_data->configure(base); - - l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL); } /* From 50beefde30224888d6d63224405ace4bdd4b32a0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 15 May 2015 11:05:54 +0100 Subject: [PATCH 18/75] ARM: l2c: clean up l2c_configure() l2c_configure() does not follow the pattern of other l2c_* functions. Fix this so that it does to avoid future confusion. Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 977eb9f4f77e..2864a7bcc24b 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -110,15 +110,7 @@ static inline void l2c_unlock(void __iomem *base, unsigned num) static void l2c_configure(void __iomem *base) { - if (outer_cache.configure) { - outer_cache.configure(&l2x0_saved_regs); - return; - } - l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL); - - if (l2x0_data->configure) - l2x0_data->configure(base); } /* @@ -130,7 +122,11 @@ static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock) unsigned long flags; l2x0_saved_regs.aux_ctrl = aux; - l2c_configure(base); + + if (outer_cache.configure) + outer_cache.configure(&l2x0_saved_regs); + else + l2x0_data->configure(base); l2c_unlock(base, num_lock); @@ -252,6 +248,7 @@ static const struct l2c_init_data l2c210_data __initconst = { .num_lock = 1, .enable = l2c_enable, .save = l2c_save, + .configure = l2c_configure, .outer_cache = { .inv_range = l2c210_inv_range, .clean_range = l2c210_clean_range, @@ -409,6 +406,7 @@ static const struct l2c_init_data l2c220_data = { .num_lock = 1, .enable = l2c220_enable, .save = l2c_save, + .configure = l2c_configure, .outer_cache = { .inv_range = l2c220_inv_range, .clean_range = l2c220_clean_range, @@ -569,6 +567,8 @@ static void l2c310_configure(void __iomem *base) { unsigned revision; + l2c_configure(base); + /* restore pl310 setup */ l2c_write_sec(l2x0_saved_regs.tag_latency, base, L310_TAG_LATENCY_CTRL); @@ -1066,6 +1066,7 @@ static const struct l2c_init_data of_l2c210_data __initconst = { .of_parse = l2x0_of_parse, .enable = l2c_enable, .save = l2c_save, + .configure = l2c_configure, .outer_cache = { .inv_range = l2c210_inv_range, .clean_range = l2c210_clean_range, @@ -1084,6 +1085,7 @@ static const struct l2c_init_data of_l2c220_data __initconst = { .of_parse = l2x0_of_parse, .enable = l2c220_enable, .save = l2c_save, + .configure = l2c_configure, .outer_cache = { .inv_range = l2c220_inv_range, .clean_range = l2c220_clean_range, @@ -1416,6 +1418,7 @@ static const struct l2c_init_data of_aurora_with_outer_data __initconst = { .enable = l2c_enable, .fixup = aurora_fixup, .save = aurora_save, + .configure = l2c_configure, .outer_cache = { .inv_range = aurora_inv_range, .clean_range = aurora_clean_range, @@ -1435,6 +1438,7 @@ static const struct l2c_init_data of_aurora_no_outer_data __initconst = { .enable = aurora_enable_no_outer, .fixup = aurora_fixup, .save = aurora_save, + .configure = l2c_configure, .outer_cache = 
{ .resume = l2c_resume, }, @@ -1608,6 +1612,7 @@ static void __init tauros3_save(void __iomem *base) static void tauros3_configure(void __iomem *base) { + l2c_configure(base); writel_relaxed(l2x0_saved_regs.aux2_ctrl, base + TAUROS3_AUX2_CTRL); writel_relaxed(l2x0_saved_regs.prefetch_ctrl, From e946a8cbe4a47a7c2615ffb0d45712e72c7d0f3a Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 15 May 2015 11:51:51 +0100 Subject: [PATCH 19/75] ARM: l2c: only unlock caches if NS_LOCKDOWN bit is set Some L2C caches have a bit which allows non-secure software to control the cache lockdown. Some platforms are unable to set this bit. To avoid receiving an abort while trying to unlock the cache lines, check the state of this bit before unlocking. We do this by providing a new method in the l2c_init_data to perform the unlocking. Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 2864a7bcc24b..95f33620353b 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -42,6 +42,7 @@ struct l2c_init_data { void (*fixup)(void __iomem *, u32, struct outer_cache_fns *); void (*save)(void __iomem *); void (*configure)(void __iomem *); + void (*unlock)(void __iomem *, unsigned); struct outer_cache_fns outer_cache; }; @@ -128,7 +129,7 @@ static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock) else l2x0_data->configure(base); - l2c_unlock(base, num_lock); + l2x0_data->unlock(base, num_lock); local_irq_save(flags); __l2c_op_way(base + L2X0_INV_WAY); @@ -249,6 +250,7 @@ static const struct l2c_init_data l2c210_data __initconst = { .enable = l2c_enable, .save = l2c_save, .configure = l2c_configure, + .unlock = l2c_unlock, .outer_cache = { .inv_range = l2c210_inv_range, .clean_range = l2c210_clean_range, @@ -400,6 +402,12 @@ static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock) l2c_enable(base, aux, num_lock); } +static void l2c220_unlock(void __iomem *base, unsigned num_lock) +{ + if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN) + l2c_unlock(base, num_lock); +} + static const struct l2c_init_data l2c220_data = { .type = "L2C-220", .way_size_0 = SZ_8K, @@ -407,6 +415,7 @@ static const struct l2c_init_data l2c220_data = { .enable = l2c220_enable, .save = l2c_save, .configure = l2c_configure, + .unlock = l2c220_unlock, .outer_cache = { .inv_range = l2c220_inv_range, .clean_range = l2c220_clean_range, @@ -755,6 +764,12 @@ static void l2c310_resume(void) set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); } +static void l2c310_unlock(void __iomem *base, unsigned num_lock) +{ + if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN) + l2c_unlock(base, num_lock); +} + static const struct l2c_init_data l2c310_init_fns __initconst = { .type = "L2C-310", .way_size_0 = SZ_8K, @@ -763,6 +778,7 @@ static const struct l2c_init_data l2c310_init_fns __initconst = { .fixup = l2c310_fixup, .save = l2c310_save, .configure = l2c310_configure, + .unlock = l2c310_unlock, .outer_cache = { .inv_range = l2c210_inv_range, .clean_range = l2c210_clean_range, @@ -1067,6 +1083,7 @@ static const struct l2c_init_data of_l2c210_data __initconst = { .enable = l2c_enable, .save = l2c_save, .configure = l2c_configure, + .unlock = l2c_unlock, .outer_cache = { .inv_range = l2c210_inv_range, .clean_range = l2c210_clean_range, @@ -1086,6 +1103,7 @@ static const struct l2c_init_data of_l2c220_data __initconst = { .enable = 
l2c220_enable, .save = l2c_save, .configure = l2c_configure, + .unlock = l2c220_unlock, .outer_cache = { .inv_range = l2c220_inv_range, .clean_range = l2c220_clean_range, @@ -1213,6 +1231,7 @@ static const struct l2c_init_data of_l2c310_data __initconst = { .fixup = l2c310_fixup, .save = l2c310_save, .configure = l2c310_configure, + .unlock = l2c310_unlock, .outer_cache = { .inv_range = l2c210_inv_range, .clean_range = l2c210_clean_range, @@ -1242,6 +1261,7 @@ static const struct l2c_init_data of_l2c310_coherent_data __initconst = { .fixup = l2c310_fixup, .save = l2c310_save, .configure = l2c310_configure, + .unlock = l2c310_unlock, .outer_cache = { .inv_range = l2c210_inv_range, .clean_range = l2c210_clean_range, @@ -1419,6 +1439,7 @@ static const struct l2c_init_data of_aurora_with_outer_data __initconst = { .fixup = aurora_fixup, .save = aurora_save, .configure = l2c_configure, + .unlock = l2c_unlock, .outer_cache = { .inv_range = aurora_inv_range, .clean_range = aurora_clean_range, @@ -1439,6 +1460,7 @@ static const struct l2c_init_data of_aurora_no_outer_data __initconst = { .fixup = aurora_fixup, .save = aurora_save, .configure = l2c_configure, + .unlock = l2c_unlock, .outer_cache = { .resume = l2c_resume, }, @@ -1589,6 +1611,7 @@ static const struct l2c_init_data of_bcm_l2x0_data __initconst = { .enable = l2c310_enable, .save = l2c310_save, .configure = l2c310_configure, + .unlock = l2c310_unlock, .outer_cache = { .inv_range = bcm_inv_range, .clean_range = bcm_clean_range, @@ -1626,6 +1649,7 @@ static const struct l2c_init_data of_tauros3_data __initconst = { .enable = l2c_enable, .save = tauros3_save, .configure = tauros3_configure, + .unlock = l2c_unlock, /* Tauros3 broadcasts L1 cache operations to L2 */ .outer_cache = { .resume = l2c_resume, From 5b290ec2074c68b9f4f8f8789fa9b3e1782869e7 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 15 May 2015 12:03:29 +0100 Subject: [PATCH 20/75] ARM: l2c: avoid passing auxiliary control register through enable method Avoid passing the auxiliary control register value through the enable method. In the resume path, we have to read the value stored in l2x0_saved_regs.aux_ctrl, only to have it immediately written back by l2c_enable(). We can avoid this if we have __l2c_init() save the value directly to l2x0_saved_regs.aux_ctrl before calling the specific enable method. Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 95f33620353b..90599f60ff02 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -38,7 +38,7 @@ struct l2c_init_data { unsigned way_size_0; unsigned num_lock; void (*of_parse)(const struct device_node *, u32 *, u32 *); - void (*enable)(void __iomem *, u32, unsigned); + void (*enable)(void __iomem *, unsigned); void (*fixup)(void __iomem *, u32, struct outer_cache_fns *); void (*save)(void __iomem *); void (*configure)(void __iomem *); @@ -118,12 +118,10 @@ static void l2c_configure(void __iomem *base) * Enable the L2 cache controller. This function must only be * called when the cache controller is known to be disabled. 
*/ -static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock) +static void l2c_enable(void __iomem *base, unsigned num_lock) { unsigned long flags; - l2x0_saved_regs.aux_ctrl = aux; - if (outer_cache.configure) outer_cache.configure(&l2x0_saved_regs); else @@ -160,7 +158,7 @@ static void l2c_resume(void) /* Do not touch the controller if already enabled. */ if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) - l2c_enable(base, l2x0_saved_regs.aux_ctrl, l2x0_data->num_lock); + l2c_enable(base, l2x0_data->num_lock); } /* @@ -390,16 +388,16 @@ static void l2c220_sync(void) raw_spin_unlock_irqrestore(&l2x0_lock, flags); } -static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock) +static void l2c220_enable(void __iomem *base, unsigned num_lock) { /* * Always enable non-secure access to the lockdown registers - * we write to them as part of the L2C enable sequence so they * need to be accessible. */ - aux |= L220_AUX_CTRL_NS_LOCKDOWN; + l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN; - l2c_enable(base, aux, num_lock); + l2c_enable(base, num_lock); } static void l2c220_unlock(void __iomem *base, unsigned num_lock) @@ -612,10 +610,11 @@ static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, v return NOTIFY_OK; } -static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock) +static void __init l2c310_enable(void __iomem *base, unsigned num_lock) { unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK; bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9; + u32 aux = l2x0_saved_regs.aux_ctrl; if (rev >= L310_CACHE_ID_RTL_R2P0) { if (cortex_a9) { @@ -658,9 +657,9 @@ static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock) * we write to them as part of the L2C enable sequence so they * need to be accessible. */ - aux |= L310_AUX_CTRL_NS_LOCKDOWN; + l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN; - l2c_enable(base, aux, num_lock); + l2c_enable(base, num_lock); /* Read back resulting AUX_CTRL value as it could have been altered. */ aux = readl_relaxed(base + L2X0_AUX_CTRL); @@ -872,8 +871,11 @@ static int __init __l2c_init(const struct l2c_init_data *data, * Check if l2x0 controller is already enabled. If we are booting * in non-secure mode accessing the below registers will fault. */ - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) - data->enable(l2x0_base, aux, data->num_lock); + if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { + l2x0_saved_regs.aux_ctrl = aux; + + data->enable(l2x0_base, data->num_lock); + } outer_cache = fns; @@ -1388,7 +1390,7 @@ static void aurora_save(void __iomem *base) * For Aurora cache in no outer mode, enable via the CP15 coprocessor * broadcasting of cache commands to L2. */ -static void __init aurora_enable_no_outer(void __iomem *base, u32 aux, +static void __init aurora_enable_no_outer(void __iomem *base, unsigned num_lock) { u32 u; @@ -1399,7 +1401,7 @@ static void __init aurora_enable_no_outer(void __iomem *base, u32 aux, isb(); - l2c_enable(base, aux, num_lock); + l2c_enable(base, num_lock); } static void __init aurora_fixup(void __iomem *base, u32 cache_id, From 982b465a2f7c90a11761a483d78594566619e405 Mon Sep 17 00:00:00 2001 From: Dmitry Eremin-Solenikov Date: Mon, 18 May 2015 15:58:57 +0100 Subject: [PATCH 21/75] ARM: 8361/1: sa1100: add platform functions to handle PWER settings PWER settings logically belong neither to GPIO nor to system IRQ code.
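For illustration only, the platform interface added by this patch could be declared roughly as follows; these prototypes are an editor's guess at the shape of the new include/soc/sa1100/pwer.h (names and signatures assumed, not quoted from the patch):

/* assumed prototypes, for illustration; the real header is added below */
int sa11x0_gpio_set_wake(unsigned int gpio, unsigned int on);	/* PWER bit for a GPIO wake-up source */
int sa11x0_sc_set_wake(unsigned int irq, unsigned int on);	/* PWER bit for a system (non-GPIO) IRQ wake-up source */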
Add special functions to handle PWER (for GPIO and for system IRQs) from platform code. Signed-off-by: Dmitry Eremin-Solenikov Signed-off-by: Russell King --- arch/arm/mach-sa1100/generic.c | 24 ++++++++++++++++++++++++ include/soc/sa1100/pwer.h | 15 +++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 include/soc/sa1100/pwer.h diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 40e0d8619a2d..c651f6eb863c 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -23,6 +23,8 @@ #include