mirror of https://github.com/torvalds/linux.git (synced 2024-11-26 14:12:06 +00:00)
3fb0fdb3bb
On 32-bit kernels, the stackprotector canary is quite nasty -- it is stored at %gs:(20), which is nasty because 32-bit kernels use %fs for percpu storage. It's even nastier because it means that whether %gs contains userspace state or kernel state while running kernel code depends on whether stackprotector is enabled (this is CONFIG_X86_32_LAZY_GS), and this setting radically changes the way that segment selectors work. Supporting both variants is a maintenance and testing mess.

Merely rearranging so that percpu and the stack canary share the same segment would be messy, as the 32-bit percpu address layout isn't currently compatible with putting a variable at a fixed offset.

Fortunately, GCC 8.1 added options that allow the stack canary to be accessed as %fs:__stack_chk_guard, effectively turning it into an ordinary percpu variable. This lets us get rid of all of the code to manage the stack canary GDT descriptor and the CONFIG_X86_32_LAZY_GS mess. (That name is special. We could use any symbol we want for the %fs-relative mode, but for CONFIG_SMP=n, gcc refuses to let us use any name other than __stack_chk_guard.)

Forcibly disable stackprotector on older compilers that don't support the new options and turn the stack canary into a percpu variable. The "lazy GS" approach is now used for all 32-bit configurations.

This also makes load_gs_index() work on 32-bit kernels. On 64-bit kernels, it loads the GS selector and updates the user GSBASE accordingly. (This is unchanged.) On 32-bit kernels, it loads the GS selector and updates GSBASE, which is now always the user base. This means that the overall effect is the same on 32-bit and 64-bit, which avoids some ifdeffery.

[ bp: Massage commit message. ]

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/c0ff7dba14041c7e5d1cae5d4df052f03759bef3.1613243844.git.luto@kernel.org
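The GCC options the message refers to are -mstack-protector-guard-reg= and -mstack-protector-guard-symbol= (available for x86 since GCC 8.1); they let the compiler fetch the canary from a named, segment-relative symbol instead of the hard-coded %gs:20. The following is a minimal, userspace-only sketch of what the compiler-emitted prologue/epilogue check amounts to once the guard lives behind a single named symbol; the names __stack_chk_guard_demo and protected_function are hypothetical stand-ins, not code from the patch.

/*
 * Conceptual, standalone demo of a stack-canary check against one guard
 * symbol.  In the patched kernel the guard is a per-CPU variable that the
 * compiler addresses as %fs:__stack_chk_guard; the names below are
 * hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long __stack_chk_guard_demo = 0xdeadbeefUL;     /* stand-in guard value */

static void protected_function(const char *msg)
{
        unsigned long canary = __stack_chk_guard_demo;  /* prologue: copy guard onto the stack */
        char buf[64];

        snprintf(buf, sizeof(buf), "work: %s", msg);    /* body: ordinary stack usage */
        puts(buf);

        if (canary != __stack_chk_guard_demo)           /* epilogue: guard must be intact */
                abort();                                /* real code calls __stack_chk_fail() */
}

int main(void)
{
        protected_function("hello");
        return 0;
}

In the kernel itself the guard symbol becomes an ordinary per-CPU variable, so each CPU checks against its own canary through the %fs-based percpu segment.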
59 lines · 1.8 KiB · C
// SPDX-License-Identifier: GPL-2.0
#ifndef __LINUX_KBUILD_H
# error "Please do not build this file directly, build asm-offsets.c instead"
#endif

#include <linux/efi.h>

#include <asm/ucontext.h>

/* workaround for a warning with -Wmissing-prototypes */
void foo(void);

void foo(void)
{
	OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
	OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
	OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
	OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
	OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
	OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
	OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
	BLANK();

	OFFSET(PT_EBX, pt_regs, bx);
	OFFSET(PT_ECX, pt_regs, cx);
	OFFSET(PT_EDX, pt_regs, dx);
	OFFSET(PT_ESI, pt_regs, si);
	OFFSET(PT_EDI, pt_regs, di);
	OFFSET(PT_EBP, pt_regs, bp);
	OFFSET(PT_EAX, pt_regs, ax);
	OFFSET(PT_DS, pt_regs, ds);
	OFFSET(PT_ES, pt_regs, es);
	OFFSET(PT_FS, pt_regs, fs);
	OFFSET(PT_GS, pt_regs, gs);
	OFFSET(PT_ORIG_EAX, pt_regs, orig_ax);
	OFFSET(PT_EIP, pt_regs, ip);
	OFFSET(PT_CS, pt_regs, cs);
	OFFSET(PT_EFLAGS, pt_regs, flags);
	OFFSET(PT_OLDESP, pt_regs, sp);
	OFFSET(PT_OLDSS, pt_regs, ss);
	BLANK();

	OFFSET(saved_context_gdt_desc, saved_context, gdt_desc);
	BLANK();

	/*
	 * Offset from the entry stack to task stack stored in TSS. Kernel entry
	 * happens on the per-cpu entry-stack, and the asm code switches to the
	 * task-stack pointer stored in x86_tss.sp1, which is a copy of
	 * task->thread.sp0 where entry code can find it.
	 */
	DEFINE(TSS_entry2task_stack,
	       offsetof(struct cpu_entry_area, tss.x86_tss.sp1) -
	       offsetofend(struct cpu_entry_area, entry_stack_page.stack));

	BLANK();
	DEFINE(EFI_svam, offsetof(efi_runtime_services_t, set_virtual_address_map));
}
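For context on what this file produces: OFFSET(), DEFINE() and BLANK() do not emit executable code; they plant "->NAME value" marker strings in the compiler's assembly output, and the kbuild asm-offsets step rewrites those markers into #define lines that entry assembly can include. The standalone sketch below reproduces the trick with a hypothetical demo_regs structure and DEMO_* symbols; it illustrates the technique and is not the kernel's include/linux/kbuild.h.

/*
 * Standalone illustration of the asm-offsets trick (hypothetical names).
 * Build with:  gcc -S offsets_demo.c -o - | grep -- '->'
 * Each marker line carries a symbol name and a compile-time constant that
 * a build script can rewrite as "#define SYMBOL value".
 */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))
#define BLANK() \
	asm volatile("\n.ascii \"->\"" : :)

struct demo_regs {				/* hypothetical stand-in for pt_regs */
	unsigned long bx, cx, dx;
};

void demo(void);
void demo(void)
{
	OFFSET(DEMO_EBX, demo_regs, bx);	/* marker: ->DEMO_EBX <offset of bx> */
	OFFSET(DEMO_ECX, demo_regs, cx);
	BLANK();				/* blank separator line in the output */
	DEFINE(DEMO_REGS_SIZE, sizeof(struct demo_regs));
}

Generating the offsets this way keeps assembly code in sync with the C structure layouts without hand-maintaining magic numbers.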