linux/arch/arm/mach-hisi/platsmp.c

/*
 * Copyright (c) 2013 Linaro Ltd.
 * Copyright (c) 2013 Hisilicon Limited.
 * Based on arch/arm/mach-vexpress/platsmp.c, Copyright (C) 2002 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/mach/map.h>

#include "core.h"

#define HIX5HD2_BOOT_ADDRESS		0xffff0000

static void __iomem *ctrl_base;
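
/*
 * hi3xxx keeps one 32-bit jump-address slot per secondary CPU in the system
 * controller, at ctrl_base + (cpu - 1) * 4.  Physical CPU 0 has no slot, so
 * both helpers bail out for it (or when ctrl_base has not been mapped yet).
 */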
void hi3xxx_set_cpu_jump(int cpu, void *jump_addr)
{
	cpu = cpu_logical_map(cpu);
	if (!cpu || !ctrl_base)
		return;
	writel_relaxed(virt_to_phys(jump_addr), ctrl_base + ((cpu - 1) << 2));
}

int hi3xxx_get_cpu_jump(int cpu)
{
	cpu = cpu_logical_map(cpu);
	if (!cpu || !ctrl_base)
		return 0;
	return readl_relaxed(ctrl_base + ((cpu - 1) << 2));
}
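
/*
 * If the Cortex-A9 SCU reports a base address, map it just long enough to
 * enable it, then drop the temporary mapping again.
 */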
static void __init hisi_enable_scu_a9(void)
{
	unsigned long base = 0;
	void __iomem *scu_base = NULL;

	if (scu_a9_has_base()) {
		base = scu_a9_get_base();
		scu_base = ioremap(base, SZ_4K);
		if (!scu_base) {
			pr_err("ioremap(scu_base) failed\n");
			return;
		}
		scu_enable(scu_base);
		iounmap(scu_base);
	}
}
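
/*
 * Enable the SCU and map the registers holding the secondary-CPU jump
 * addresses: the base comes from the "hisilicon,sysctrl" node, shifted by
 * its "smp-offset" property.
 */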
static void __init hi3xxx_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *np = NULL;
	u32 offset = 0;

	hisi_enable_scu_a9();
	if (!ctrl_base) {
		np = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
		if (!np) {
			pr_err("failed to find hisilicon,sysctrl node\n");
			return;
		}
		ctrl_base = of_iomap(np, 0);
		if (!ctrl_base) {
			pr_err("failed to map address\n");
			return;
		}
		if (of_property_read_u32(np, "smp-offset", &offset) < 0) {
			pr_err("failed to find smp-offset property\n");
			return;
		}
		ctrl_base += offset;
	}
}
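
/*
 * Bring a hi3xxx secondary online: power the core up, point its jump slot
 * at secondary_startup and wake it with an IPI.
 */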
static int hi3xxx_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	hi3xxx_set_cpu(cpu, true);
	hi3xxx_set_cpu_jump(cpu, secondary_startup);
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
	return 0;
}

static const struct smp_operations hi3xxx_smp_ops __initconst = {
	.smp_prepare_cpus	= hi3xxx_smp_prepare_cpus,
	.smp_boot_secondary	= hi3xxx_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= hi3xxx_cpu_die,
	.cpu_kill		= hi3xxx_cpu_kill,
#endif
};
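
/*
 * Shared .smp_prepare_cpus for hix5hd2 and hip01: they only need the SCU
 * enabled here; the per-CPU boot address is set up in their
 * .smp_boot_secondary hooks instead.
 */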
static void __init hisi_common_smp_prepare_cpus(unsigned int max_cpus)
{
	hisi_enable_scu_a9();
}
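
/*
 * Write a tiny boot stub at the hix5hd2 boot address: 0xe51ff004 is
 * "ldr pc, [pc, #-4]" and the following word holds the physical address it
 * loads, so a waking core jumps straight to jump_addr.
 */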
void hix5hd2_set_scu_boot_addr(phys_addr_t start_addr, phys_addr_t jump_addr)
{
	void __iomem *virt;

	virt = ioremap(start_addr, PAGE_SIZE);

	writel_relaxed(0xe51ff004, virt);	/* ldr pc, [pc, #-4] */
	writel_relaxed(jump_addr, virt + 4);	/* pc jump phy address */
	iounmap(virt);
}
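
/*
 * Boot a hix5hd2 secondary: install the boot stub pointing at
 * secondary_startup, power the core up and wake it with an IPI.
 */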
static int hix5hd2_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	phys_addr_t jumpaddr;

	jumpaddr = virt_to_phys(secondary_startup);
	hix5hd2_set_scu_boot_addr(HIX5HD2_BOOT_ADDRESS, jumpaddr);
	hix5hd2_set_cpu(cpu, true);
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
	return 0;
}

static const struct smp_operations hix5hd2_smp_ops __initconst = {
	.smp_prepare_cpus	= hisi_common_smp_prepare_cpus,
	.smp_boot_secondary	= hix5hd2_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= hix5hd2_cpu_die,
#endif
};

#define SC_SCTL_REMAP_CLR	0x00000100
#define HIP01_BOOT_ADDRESS	0x80000000
#define REG_SC_CTRL		0x000
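
/*
 * Same "ldr pc, [pc, #-4]" + target stub as hix5hd2, but the hip01 boot
 * address is expected to sit in the kernel's linear RAM mapping, so it is
 * written through phys_to_virt() rather than a temporary ioremap().
 */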
void hip01_set_boot_addr(phys_addr_t start_addr, phys_addr_t jump_addr)
{
	void __iomem *virt;

	virt = phys_to_virt(start_addr);

	writel_relaxed(0xe51ff004, virt);
	writel_relaxed(jump_addr, virt + 4);
}
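
/*
 * Boot a hip01 secondary: install the stub, map the hip01 system controller
 * and set SC_SCTL_REMAP_CLR in REG_SC_CTRL so the core boots from DDR, then
 * power the core up.
 */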
static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	phys_addr_t jumpaddr;
	unsigned int remap_reg_value = 0;
	struct device_node *node;

	jumpaddr = virt_to_phys(secondary_startup);
	hip01_set_boot_addr(HIP01_BOOT_ADDRESS, jumpaddr);

	node = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl");
	if (WARN_ON(!node))
		return -1;
	ctrl_base = of_iomap(node, 0);

	/* set the secondary core boot from DDR */
	remap_reg_value = readl_relaxed(ctrl_base + REG_SC_CTRL);
	barrier();
	remap_reg_value |= SC_SCTL_REMAP_CLR;
	barrier();
	writel_relaxed(remap_reg_value, ctrl_base + REG_SC_CTRL);

	hip01_set_cpu(cpu, true);

	return 0;
}

static const struct smp_operations hip01_smp_ops __initconst = {
	.smp_prepare_cpus	= hisi_common_smp_prepare_cpus,
	.smp_boot_secondary	= hip01_boot_secondary,
};
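
/* Match these ops against the DT "enable-method" strings of each SoC. */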
CPU_METHOD_OF_DECLARE(hi3xxx_smp, "hisilicon,hi3620-smp", &hi3xxx_smp_ops);
CPU_METHOD_OF_DECLARE(hix5hd2_smp, "hisilicon,hix5hd2-smp", &hix5hd2_smp_ops);
CPU_METHOD_OF_DECLARE(hip01_smp, "hisilicon,hip01-smp", &hip01_smp_ops);