/*
 *  linux/arch/arm/kernel/process.c
 *
 *  Copyright (C) 1996-2000 Russell King - Converted to ARM.
 *  Original Copyright (C) 1995  Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <stdarg.h>

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/leds.h>

#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/processor.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/mach/time.h>
#include <asm/tls.h>

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

static const char *processor_modes[] __maybe_unused = {
  "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
  "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
  "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
  "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};

static const char *isa_modes[] __maybe_unused = {
  "ARM" , "Thumb" , "Jazelle", "ThumbEE"
};

extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
typedef void (*phys_reset_t)(unsigned long);

/*
 * A temporary stack to use for CPU reset. This is static so that we
 * don't clobber it with the identity mapping. When running with this
 * stack, any references to the current task *will not work* so you
 * should really do as little as possible before jumping to your reset
 * code.
 */
static u64 soft_restart_stack[16];

static void __soft_restart(void *addr)
{
	phys_reset_t phys_reset;

	/* Take out a flat memory mapping. */
	setup_mm_for_reboot();

	/* Clean and invalidate caches */
	flush_cache_all();

	/* Turn off caching */
	cpu_proc_fin();

	/* Push out any further dirty data, and ensure cache is empty */
	flush_cache_all();

	/* Switch to the identity mapping. */
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset((unsigned long)addr);

	/* Should never get here. */
	BUG();
}
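
/*
 * Reset the CPU by jumping to 'addr': interrupts and FIQs are masked,
 * the outer (L2) cache is disabled if this is the last online CPU, and
 * execution continues on the temporary stack above via __soft_restart().
 */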
void soft_restart(unsigned long addr)
{
	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);

	/* Disable interrupts first */
	raw_local_irq_disable();
	local_fiq_disable();

	/* Disable the L2 if we're the last man standing. */
	if (num_online_cpus() == 1)
		outer_disable();

	/* Change to the new stack and continue with the reset. */
	call_with_stack(__soft_restart, (void *)addr, (void *)stack);

	/* Should never get here. */
	BUG();
}
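
/*
 * Default restart hook: does nothing, so machine_restart() falls through
 * to its "Reboot failed" path unless a machine registers its own handler.
 */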
static void null_restart(enum reboot_mode reboot_mode, const char *cmd)
{
}

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd) = null_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);

/*
 * This is our default idle handler.
 */

void (*arm_pm_idle)(void);

/*
 * Called from the core idle loop.
 */

void arch_cpu_idle(void)
{
	if (arm_pm_idle)
		arm_pm_idle();
	else
		cpu_do_idle();
	local_irq_enable();
}

void arch_cpu_idle_prepare(void)
{
	local_fiq_enable();
}
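
/*
 * Entering idle: signal the CPU idle LED trigger and, where the PL310
 * errata 769419 workaround is enabled, issue a write barrier first.
 */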
void arch_cpu_idle_enter(void)
{
	ledtrig_cpu(CPU_LED_IDLE_START);
#ifdef CONFIG_PL310_ERRATA_769419
	wmb();
#endif
}

void arch_cpu_idle_exit(void)
{
	ledtrig_cpu(CPU_LED_IDLE_END);
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() achieves this.
 */
void machine_shutdown(void)
{
	disable_nonboot_cpus();
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();

	local_irq_disable();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();

	if (pm_power_off)
		pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with a single CPU can
 * use soft_restart() as their machine descriptor's .restart hook, since that
 * will cause the only available CPU to reset. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	local_irq_disable();
	smp_send_stop();

	arm_pm_restart(reboot_mode, cmd);

	/* Give a grace period for failure to restart of 1s */
	mdelay(1000);

	/* Whoops - the platform was unable to reboot. Tell the user! */
	printk("Reboot failed -- System halted\n");
	local_irq_disable();
	while (1);
}
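
/*
 * Dump the saved register state: PC/LR symbols, the general purpose
 * registers, the decoded CPSR flags and, where CP15 is present, the
 * translation table base, DAC and control register values.
 */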
void __show_regs(struct pt_regs *regs)
{
	unsigned long flags;
	char buf[64];

	show_regs_print_info(KERN_DEFAULT);

	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", regs->ARM_lr);
	printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
	       "sp : %08lx ip : %08lx fp : %08lx\n",
		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
		regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);

	flags = regs->ARM_cpsr;
	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
	buf[4] = '\0';

#ifndef CONFIG_CPU_V7M
	printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
		buf, interrupts_enabled(regs) ? "n" : "ff",
		fast_interrupts_enabled(regs) ? "n" : "ff",
		processor_modes[processor_mode(regs)],
		isa_modes[isa_mode(regs)],
		get_fs() == get_ds() ? "kernel" : "user");
#else
	printk("xPSR: %08lx\n", regs->ARM_cpsr);
#endif

#ifdef CONFIG_CPU_CP15
	{
		unsigned int ctrl;

		buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
		{
			unsigned int transbase, dac;
			asm("mrc p15, 0, %0, c2, c0\n\t"
			    "mrc p15, 0, %1, c3, c0\n"
			    : "=r" (transbase), "=r" (dac));
			snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
				transbase, dac);
		}
#endif
		asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));

		printk("Control: %08x%s\n", ctrl, buf);
	}
#endif
}

void show_regs(struct pt_regs * regs)
{
	printk("\n");
	__show_regs(regs);
	dump_stack();
}

ATOMIC_NOTIFIER_HEAD(thread_notify_head);

EXPORT_SYMBOL_GPL(thread_notify_head);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	thread_notify(THREAD_NOTIFY_EXIT, current_thread_info());
}
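
/*
 * Clear any ptrace hardware breakpoints and reset the coprocessor usage,
 * debug and FP state of the current thread, then notify
 * THREAD_NOTIFY_FLUSH subscribers.
 */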
void flush_thread(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);

	memset(thread->used_cp, 0, sizeof(thread->used_cp));
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
	memset(&thread->fpstate, 0, sizeof(union fp_state));

	thread_notify(THREAD_NOTIFY_FLUSH, thread);
}

void release_thread(struct task_struct *dead_task)
{
}

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
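
/*
 * Set up the saved CPU context and child pt_regs for a new task: user
 * threads get a copy of the parent's registers with r0 forced to 0
 * (and sp replaced if a new stack pointer was supplied), while kernel
 * threads start in SVC mode with stk_sz and stack_start stashed in
 * r4/r5 for ret_from_fork to pick up.
 */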
int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
	    unsigned long stk_sz, struct task_struct *p)
{
	struct thread_info *thread = task_thread_info(p);
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->ARM_r0 = 0;
		if (stack_start)
			childregs->ARM_sp = stack_start;
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		thread->cpu_context.r4 = stk_sz;
		thread->cpu_context.r5 = stack_start;
		childregs->ARM_cpsr = SVC_MODE;
	}
	thread->cpu_context.pc = (unsigned long)ret_from_fork;
	thread->cpu_context.sp = (unsigned long)childregs;

	clear_ptrace_hw_breakpoint(p);

	if (clone_flags & CLONE_SETTLS)
		thread->tp_value[0] = childregs->ARM_r3;
	thread->tp_value[1] = get_tpuser();

	thread_notify(THREAD_NOTIFY_COPY, thread);

	return 0;
}

/*
 * Fill in the task's elfregs structure for a core dump.
 */
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
{
	elf_core_copy_regs(elfregs, task_pt_regs(t));
	return 1;
}

/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
{
	struct thread_info *thread = current_thread_info();
	int used_math = thread->used_cp[1] | thread->used_cp[2];

	if (used_math)
		memcpy(fp, &thread->fpstate.soft, sizeof (*fp));

	return used_math != 0;
}
EXPORT_SYMBOL(dump_fpu);
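
/*
 * Walk the saved stack frames of a sleeping task and return the first
 * PC found outside the scheduler, i.e. where the task is waiting.
 */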
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.sp = thread_saved_sp(p);
	frame.lr = 0;			/* recovered from the stack */
	frame.pc = thread_saved_pc(p);
	stack_page = (unsigned long)task_stack_page(p);
	do {
		if (frame.sp < stack_page ||
		    frame.sp >= stack_page + THREAD_SIZE ||
		    unwind_frame(&frame) < 0)
			return 0;
		if (!in_sched_functions(frame.pc))
			return frame.pc;
	} while (count ++ < 16);
	return 0;
}
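
/*
 * Randomise the heap start: place brk at a random offset within a 32MB
 * window above the current break, falling back to mm->brk on failure.
 */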
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}

#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
 * The vectors page is always readable from user space for the
 * atomic helpers. Insert it into the gate_vma so that it is visible
 * through ptrace and /proc/<pid>/mem.
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= 0xffff0000,
	.vm_end		= 0xffff0000 + PAGE_SIZE,
	.vm_flags	= VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
};

static int __init gate_vma_init(void)
{
	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
	return 0;
}
arch_initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
}

int in_gate_area_no_mm(unsigned long addr)
{
	return in_gate_area(NULL, addr);
}
#define is_gate_vma(vma)	((vma) == &gate_vma)
#else
#define is_gate_vma(vma)	0
#endif

const char *arch_vma_name(struct vm_area_struct *vma)
{
	return is_gate_vma(vma) ? "[vectors]" : NULL;
}

static struct page *signal_page;
extern struct page *get_signal_page(void);

static const struct vm_special_mapping sigpage_mapping = {
	.name = "[sigpage]",
	.pages = &signal_page,
};
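
/*
 * Map the signal return page ("[sigpage]") into a new user process at an
 * address chosen by get_unmapped_area(), and record it in the mm context
 * so the signal code can locate it.
 */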
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr;
	int ret = 0;

	if (!signal_page)
		signal_page = get_signal_page();
	if (!signal_page)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
		&sigpage_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	mm->context.sigpage = addr;

 up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}
#endif