forked from Minki/linux
166aaf3966
The name "power_down_finish" seems to be causing some confusion, because it suggests that this function is responsible for taking some action to cause the specified CPU to complete its power down. This patch renames the affected functions to "wait_for_powerdown" and similar, since this function's intended purpose is just to wait for the hardware to finish a powerdown initiated by a previous cpu_power_down. Signed-off-by: Dave Martin <Dave.Martin@arm.com> Acked-by: Nicolas Pitre <nico@linaro.org> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
104 lines
2.4 KiB
C
/*
 * linux/arch/arm/mach-vexpress/mcpm_platsmp.c
 *
 * Created by: Nicolas Pitre, November 2012
 * Copyright:  (C) 2012-2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Code to handle secondary CPU bringup and hotplug for the cluster power API.
 */
|
|
|
|
#include <linux/init.h>
|
|
#include <linux/smp.h>
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <asm/mcpm.h>
|
|
#include <asm/smp.h>
|
|
#include <asm/smp_plat.h>
|
|
|
|
/*
 * Translate a logical CPU number into its physical CPU and cluster
 * identifiers, taken from affinity levels 0 and 1 of that CPU's MPIDR.
 */
static void cpu_to_pcpu(unsigned int cpu,
			unsigned int *pcpu, unsigned int *pcluster)
{
	unsigned int mpidr = cpu_logical_map(cpu);

	*pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	*pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}
|
|
|
|
static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
|
{
|
|
unsigned int pcpu, pcluster, ret;
|
|
extern void secondary_startup(void);
|
|
|
|
cpu_to_pcpu(cpu, &pcpu, &pcluster);
|
|
|
|
pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
|
|
__func__, cpu, pcpu, pcluster);
|
|
|
|
mcpm_set_entry_vector(pcpu, pcluster, NULL);
|
|
ret = mcpm_cpu_power_up(pcpu, pcluster);
|
|
if (ret)
|
|
return ret;
|
|
mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
|
|
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
|
|
dsb_sev();
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Runs on the freshly booted secondary CPU: tell the MCPM layer that
 * this CPU is now fully up.
 */
static void mcpm_secondary_init(unsigned int cpu)
{
	mcpm_cpu_powered_up();
}
|
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
|
|
/*
 * Wait for a dying CPU's powerdown (initiated by mcpm_cpu_die on that
 * CPU) to complete.  The cpu_kill hook must return nonzero on success,
 * hence the inverted result of mcpm_wait_for_cpu_powerdown().
 */
static int mcpm_cpu_kill(unsigned int cpu)
{
	unsigned int phys_cpu, phys_cluster;

	cpu_to_pcpu(cpu, &phys_cpu, &phys_cluster);

	return mcpm_wait_for_cpu_powerdown(phys_cpu, phys_cluster) == 0;
}
|
|
|
|
static int mcpm_cpu_disable(unsigned int cpu)
{
	/*
	 * Every CPU is assumed hot-unpluggable here.  Should Secure OS
	 * migration requests (as described in the PSCI specification)
	 * ever need handling, this is the place to hook them in.
	 */
	return 0;
}
|
|
|
|
static void mcpm_cpu_die(unsigned int cpu)
|
|
{
|
|
unsigned int mpidr, pcpu, pcluster;
|
|
mpidr = read_cpuid_mpidr();
|
|
pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
|
|
pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
|
|
mcpm_set_entry_vector(pcpu, pcluster, NULL);
|
|
mcpm_cpu_power_down();
|
|
}
|
|
|
|
#endif
|
|
|
|
/* SMP operations backed by the MCPM cluster power management layer. */
static struct smp_operations __initdata mcpm_smp_ops = {
	.smp_boot_secondary	= mcpm_boot_secondary,
	.smp_secondary_init	= mcpm_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= mcpm_cpu_kill,
	.cpu_disable		= mcpm_cpu_disable,
	.cpu_die		= mcpm_cpu_die,
#endif
};
|
|
|
|
/* Install the MCPM-backed SMP operations as the platform's smp_ops. */
void __init mcpm_smp_set_ops(void)
{
	smp_set_ops(&mcpm_smp_ops);
}
|