mirror of
https://github.com/torvalds/linux.git
synced 2024-12-26 04:42:12 +00:00
c02433dd6d
This patch moves arm64's struct thread_info from the task stack into task_struct. This protects thread_info from corruption in the case of stack overflows, and makes its address harder to determine if stack addresses are leaked, making a number of attacks more difficult. Precise detection and handling of overflow is left for subsequent patches. Largely, this involves changing code to store the task_struct in sp_el0, and acquire the thread_info from the task struct. Core code now implements current_thread_info(), and as noted in <linux/sched.h> this relies on offsetof(task_struct, thread_info) == 0, enforced by core code. This change means that the 'tsk' register used in entry.S now points to a task_struct, rather than a thread_info as it used to. To make this clear, the TI_* field offsets are renamed to TSK_TI_*, with asm-offsets appropriately updated to account for the structural change. Userspace clobbers sp_el0, and we can no longer restore this from the stack. Instead, the current task is cached in a per-cpu variable that we can safely access from early assembly as interrupts are disabled (and we are thus not preemptible). Both secondary entry and idle are updated to stash the sp and task pointer separately. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Tested-by: Laura Abbott <labbott@redhat.com> Cc: AKASHI Takahiro <takahiro.akashi@linaro.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org> Cc: James Morse <james.morse@arm.com> Cc: Kees Cook <keescook@chromium.org> Cc: Suzuki K Poulose <suzuki.poulose@arm.com> Cc: Will Deacon <will.deacon@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
152 lines
4.0 KiB
C
152 lines
4.0 KiB
C
/*
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
* published by the Free Software Foundation.
|
|
*
|
|
* This program is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
* GNU General Public License for more details.
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
*/
|
|
#ifndef __ASM_SMP_H
|
|
#define __ASM_SMP_H
|
|
|
|
/* Values for secondary_data.status */
|
|
|
|
#define CPU_MMU_OFF (-1)
|
|
#define CPU_BOOT_SUCCESS (0)
|
|
/* The cpu invoked ops->cpu_die, synchronise it with cpu_kill */
|
|
#define CPU_KILL_ME (1)
|
|
/* The cpu couldn't die gracefully and is looping in the kernel */
|
|
#define CPU_STUCK_IN_KERNEL (2)
|
|
/* Fatal system error detected by secondary CPU, crash the system */
|
|
#define CPU_PANIC_KERNEL (3)
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
#include <asm/percpu.h>
|
|
|
|
#include <linux/threads.h>
|
|
#include <linux/cpumask.h>
|
|
#include <linux/thread_info.h>
|
|
|
|
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
|
|
|
|
/*
|
|
* We don't use this_cpu_read(cpu_number) as that has implicit writes to
|
|
* preempt_count, and associated (compiler) barriers, that we'd like to avoid
|
|
* the expense of. If we're preemptible, the value can be stale at use anyway.
|
|
*/
|
|
#define raw_smp_processor_id() (*this_cpu_ptr(&cpu_number))
|
|
|
|
struct seq_file;
|
|
|
|
/*
|
|
* generate IPI list text
|
|
*/
|
|
extern void show_ipi_list(struct seq_file *p, int prec);
|
|
|
|
/*
|
|
* Called from C code, this handles an IPI.
|
|
*/
|
|
extern void handle_IPI(int ipinr, struct pt_regs *regs);
|
|
|
|
/*
|
|
* Discover the set of possible CPUs and determine their
|
|
* SMP operations.
|
|
*/
|
|
extern void smp_init_cpus(void);
|
|
|
|
/*
|
|
* Provide a function to raise an IPI cross call on CPUs in callmap.
|
|
*/
|
|
extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
|
|
|
|
extern void (*__smp_cross_call)(const struct cpumask *, unsigned int);
|
|
|
|
/*
|
|
* Called from the secondary holding pen, this is the secondary CPU entry point.
|
|
*/
|
|
asmlinkage void secondary_start_kernel(void);
|
|
|
|
/*
 * Initial data for bringing up a secondary CPU.
 * @stack  - sp for the secondary CPU
 * @task   - the task the secondary CPU will run (cached so early entry
 *           code can install it in sp_el0)
 * @status - Result passed back from the secondary CPU to
 *           indicate failure.
 */
|
|
struct secondary_data {
	void *stack;			/* sp for the secondary CPU */
	struct task_struct *task;	/* task for the secondary CPU to run */
	long status;			/* boot outcome, one of the CPU_* values above */
};
|
|
|
|
extern struct secondary_data secondary_data;
|
|
extern long __early_cpu_boot_status;
|
|
extern void secondary_entry(void);
|
|
|
|
extern void arch_send_call_function_single_ipi(int cpu);
|
|
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
|
|
|
|
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
|
|
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
|
|
#else
|
|
/*
 * Stub for !CONFIG_ARM64_ACPI_PARKING_PROTOCOL builds: BUILD_BUG()
 * turns any reachable call to this function into a compile-time
 * failure, so wakeup IPIs cannot be sent when the ACPI parking
 * protocol is not configured.
 */
static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	BUILD_BUG();
}
|
|
#endif
|
|
|
|
extern int __cpu_disable(void);
|
|
|
|
extern void __cpu_die(unsigned int cpu);
|
|
extern void cpu_die(void);
|
|
extern void cpu_die_early(void);
|
|
|
|
/*
 * Park the calling CPU: spin forever, alternating wait-for-event and
 * wait-for-interrupt so the core idles in a low-power state between
 * wakeups. This function never returns.
 */
static inline void cpu_park_loop(void)
{
	while (1) {
		wfe();
		wfi();
	}
}
|
|
|
|
/*
 * Publish @val (one of the CPU_* status values above) to
 * secondary_data.status so another CPU can observe this CPU's boot
 * progress or failure.
 */
static inline void update_cpu_boot_status(int val)
{
	WRITE_ONCE(secondary_data.status, val);
	/* Ensure the visibility of the status update */
	dsb(ishst);
}
|
|
|
|
/*
|
|
* The calling secondary CPU has detected serious configuration mismatch,
|
|
* which calls for a kernel panic. Update the boot status and park the calling
|
|
* CPU.
|
|
*/
|
|
static inline void cpu_panic_kernel(void)
{
	/* Record the fatal outcome first, so other CPUs can observe it... */
	update_cpu_boot_status(CPU_PANIC_KERNEL);
	/* ...then park this CPU forever; this call never returns */
	cpu_park_loop();
}
|
|
|
|
/*
|
|
* If a secondary CPU enters the kernel but fails to come online,
|
|
* (e.g. due to mismatched features), and cannot exit the kernel,
|
|
* we increment cpus_stuck_in_kernel and leave the CPU in a
|
|
* quiescent loop within the kernel text. The memory containing
|
|
* this loop must not be re-used for anything else as the 'stuck'
|
|
* core is executing it.
|
|
*
|
|
* This function is used to inhibit features like kexec and hibernate.
|
|
*/
|
|
bool cpus_are_stuck_in_kernel(void);
|
|
|
|
#endif /* ifndef __ASSEMBLY__ */
|
|
|
|
#endif /* ifndef __ASM_SMP_H */
|