mirror of
https://github.com/torvalds/linux.git
synced 2024-12-02 09:01:34 +00:00
bf904d2762
The SYSCALL64 trampoline has a couple of nice properties: - The usual sequence of SWAPGS followed by two GS-relative accesses to set up RSP is somewhat slow because the GS-relative accesses need to wait for SWAPGS to finish. The trampoline approach allows RIP-relative accesses to set up RSP, which avoids the stall. - The trampoline avoids any percpu access before CR3 is set up, which means that no percpu memory needs to be mapped in the user page tables. This prevents using Meltdown to read any percpu memory outside the cpu_entry_area and prevents using timing leaks to directly locate the percpu areas. The downsides of using a trampoline may outweigh the upsides, however. It adds an extra non-contiguous I$ cache line to system calls, and it forces an indirect jump to transfer control back to the normal kernel text after CR3 is set up. The latter is because x86 lacks a 64-bit direct jump instruction that could jump from the trampoline to the entry text. With retpolines enabled, the indirect jump is extremely slow. Change the code to map the percpu TSS into the user page tables to allow the non-trampoline SYSCALL64 path to work under PTI. This does not add a new direct information leak, since the TSS is readable by Meltdown from the cpu_entry_area alias regardless. It does allow a timing attack to locate the percpu area, but KASLR is more or less a lost cause against local attack on CPUs vulnerable to Meltdown regardless. As far as I'm concerned, on current hardware, KASLR is only useful to mitigate remote attacks that try to attack the kernel without first gaining RCE against a vulnerable user process. On Skylake, with CONFIG_RETPOLINE=y and KPTI on, this reduces syscall overhead from ~237ns to ~228ns. There is a possible alternative approach: Move the trampoline within 2G of the entry text and make a separate copy for each CPU. This would allow a direct jump to rejoin the normal entry path. 
There are pros and cons to this approach: + It avoids a pipeline stall - It executes from an extra page and reads from another extra page during the syscall. The latter is because it needs to use a relative addressing mode to find sp1 -- it's the same *cacheline*, but accessed using an alias, so it's an extra TLB entry. - Slightly more memory. This would be one page per CPU for a simple implementation and 64-ish bytes per CPU or one page per node for a more complex implementation. - More code complexity. The current approach is chosen for simplicity and because the alternative does not provide a significant benefit, which makes the simpler approach worthwhile. [ tglx: Added the alternative discussion to the changelog ] Signed-off-by: Andy Lutomirski <luto@kernel.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Borislav Petkov <bp@suse.de> Cc: Borislav Petkov <bp@alien8.de> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Arnaldo Carvalho de Melo <acme@kernel.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Joerg Roedel <joro@8bytes.org> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Andi Kleen <ak@linux.intel.com> Cc: Peter Zijlstra <peterz@infradead.org> Link: https://lkml.kernel.org/r/8c7c6e483612c3e4e10ca89495dc160b1aa66878.1536015544.git.luto@kernel.org
111 lines
3.4 KiB
C
111 lines
3.4 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* Generate definitions needed by assembly language modules.
|
|
* This code generates raw asm output which is post-processed to extract
|
|
* and format the required data.
|
|
*/
|
|
#define COMPILE_OFFSETS
|
|
|
|
#include <linux/crypto.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/stddef.h>
|
|
#include <linux/hardirq.h>
|
|
#include <linux/suspend.h>
|
|
#include <linux/kbuild.h>
|
|
#include <asm/processor.h>
|
|
#include <asm/thread_info.h>
|
|
#include <asm/sigframe.h>
|
|
#include <asm/bootparam.h>
|
|
#include <asm/suspend.h>
|
|
#include <asm/tlbflush.h>
|
|
|
|
#ifdef CONFIG_XEN
|
|
#include <xen/interface/xen.h>
|
|
#endif
|
|
|
|
#ifdef CONFIG_X86_32
|
|
# include "asm-offsets_32.c"
|
|
#else
|
|
# include "asm-offsets_64.c"
|
|
#endif
|
|
|
|
/*
 * common(): emit the OFFSET()/DEFINE() directives shared by the 32-bit
 * and 64-bit variants (asm-offsets_32.c / asm-offsets_64.c, included
 * above).  This function is never called at runtime; the compiler's raw
 * asm output is post-processed by kbuild into asm-offsets.h, which
 * assembly code then includes.  The exact set and member paths of these
 * directives are the contract with the entry/suspend/boot asm code.
 */
void common(void) {
	BLANK();
	/* Saved kernel stack pointer, used by the context-switch asm. */
	OFFSET(TASK_threadsp, task_struct, thread.sp);
#ifdef CONFIG_STACKPROTECTOR
	/* Per-task stack canary value. */
	OFFSET(TASK_stack_canary, task_struct, stack_canary);
#endif

	BLANK();
	/* thread_info is embedded in task_struct (CONFIG_THREAD_INFO_IN_TASK). */
	OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
	OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);

	BLANK();
	/* Offset of the transform context within struct crypto_tfm. */
	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);

	BLANK();
	/* Hibernation page-backup entries (struct pbe, linux/suspend.h). */
	OFFSET(pbe_address, pbe, address);
	OFFSET(pbe_orig_address, pbe, orig_address);
	OFFSET(pbe_next, pbe, next);

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
	BLANK();
	/* 32-bit sigcontext register slots, for compat signal delivery asm. */
	OFFSET(IA32_SIGCONTEXT_ax, sigcontext_32, ax);
	OFFSET(IA32_SIGCONTEXT_bx, sigcontext_32, bx);
	OFFSET(IA32_SIGCONTEXT_cx, sigcontext_32, cx);
	OFFSET(IA32_SIGCONTEXT_dx, sigcontext_32, dx);
	OFFSET(IA32_SIGCONTEXT_si, sigcontext_32, si);
	OFFSET(IA32_SIGCONTEXT_di, sigcontext_32, di);
	OFFSET(IA32_SIGCONTEXT_bp, sigcontext_32, bp);
	OFFSET(IA32_SIGCONTEXT_sp, sigcontext_32, sp);
	OFFSET(IA32_SIGCONTEXT_ip, sigcontext_32, ip);

	BLANK();
	/* Location of the machine context inside the 32-bit rt signal frame. */
	OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
#endif

#ifdef CONFIG_PARAVIRT
	BLANK();
	/* Paravirt op-table slots patched/called directly from asm. */
	OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
	OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
	OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
#endif

#ifdef CONFIG_XEN
	BLANK();
	/* Xen per-vcpu event-channel upcall state (xen/interface/xen.h). */
	OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
	OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
#endif

	BLANK();
	/* Fields of struct boot_params / its setup_header, for boot asm. */
	OFFSET(BP_scratch, boot_params, scratch);
	OFFSET(BP_secure_boot, boot_params, secure_boot);
	OFFSET(BP_loadflags, boot_params, hdr.loadflags);
	OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
	OFFSET(BP_version, boot_params, hdr.version);
	OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
	OFFSET(BP_init_size, boot_params, hdr.init_size);
	OFFSET(BP_pref_address, boot_params, hdr.pref_address);
	OFFSET(BP_code32_start, boot_params, hdr.code32_start);

	BLANK();
	/* Total size of the saved-register frame. */
	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));

	/* TLB state for the entry code */
	OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);

	/* Layout info for cpu_entry_area */
	OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
	DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
	/* Alignment mask derived from the entry-stack size (power of two). */
	DEFINE(MASK_entry_stack, (~(sizeof(struct entry_stack) - 1)));

	/* Offset for fields in tss_struct */
	OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
	/* sp1 is referenced by the SYSCALL64 entry path (per changelog above). */
	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
	OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
}
|