Merge tag 'v5.12-rc3' into x86/core
Pick up dependent SEV-ES urgent changes to base new work on top.

Signed-off-by: Borislav Petkov <bp@suse.de>
@@ -268,21 +268,20 @@ static void __init kvmclock_init_mem(void)
 
 static int __init kvm_setup_vsyscall_timeinfo(void)
 {
-#ifdef CONFIG_X86_64
-        u8 flags;
-
-        if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
-                return 0;
-
-        flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
-        if (!(flags & PVCLOCK_TSC_STABLE_BIT))
-                return 0;
-
-        kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
-#endif
-
         kvmclock_init_mem();
 
+#ifdef CONFIG_X86_64
+        if (per_cpu(hv_clock_per_cpu, 0) && kvmclock_vsyscall) {
+                u8 flags;
+
+                flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
+                if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+                        return 0;
+
+                kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
+        }
+#endif
+
         return 0;
 }
 early_initcall(kvm_setup_vsyscall_timeinfo);

@@ -121,8 +121,18 @@ static void __init setup_vc_stacks(int cpu)
         cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
 }
 
-static __always_inline bool on_vc_stack(unsigned long sp)
+static __always_inline bool on_vc_stack(struct pt_regs *regs)
 {
+        unsigned long sp = regs->sp;
+
+        /* User-mode RSP is not trusted */
+        if (user_mode(regs))
+                return false;
+
+        /* SYSCALL gap still has user-mode RSP */
+        if (ip_within_syscall_gap(regs))
+                return false;
+
         return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
 }
 
@@ -144,7 +154,7 @@ void noinstr __sev_es_ist_enter(struct pt_regs *regs)
         old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
 
         /* Make room on the IST stack */
-        if (on_vc_stack(regs->sp))
+        if (on_vc_stack(regs))
                 new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
         else
                 new_ist = old_ist - sizeof(old_ist);

@@ -248,7 +258,7 @@ static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
         int res;
 
         if (user_mode(ctxt->regs)) {
-                res = insn_fetch_from_user(ctxt->regs, buffer);
+                res = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
                 if (!res) {
                         ctxt->fi.vector = X86_TRAP_PF;
                         ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;

@@ -1248,13 +1258,12 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
 DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
 {
         struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
+        irqentry_state_t irq_state;
         struct ghcb_state state;
         struct es_em_ctxt ctxt;
         enum es_result result;
         struct ghcb *ghcb;
 
-        lockdep_assert_irqs_disabled();
-
         /*
          * Handle #DB before calling into !noinstr code to avoid recursive #DB.
          */
@@ -1263,6 +1272,8 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
                 return;
         }
 
+        irq_state = irqentry_nmi_enter(regs);
+        lockdep_assert_irqs_disabled();
         instrumentation_begin();
 
         /*
@@ -1325,6 +1336,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
 
 out:
         instrumentation_end();
+        irqentry_nmi_exit(regs, irq_state);
 
         return;
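
The three exc_vmm_communication hunks above belong together: the NMI-like entry is moved past the #DB short-circuit, and the matching exit is added on the common out path. A condensed sketch of the resulting control flow, derived only from the lines shown above, with the GHCB setup, instruction emulation and error handling elided:

        /* after the early #DB hand-off has been dealt with */
        irq_state = irqentry_nmi_enter(regs);
        lockdep_assert_irqs_disabled();
        instrumentation_begin();

        /* ... decode and emulate the intercepted instruction via the GHCB ... */

out:
        instrumentation_end();
        irqentry_nmi_exit(regs, irq_state);

        return;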

@@ -694,8 +694,7 @@ asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *r
          * In the SYSCALL entry path the RSP value comes from user-space - don't
          * trust it and switch to the current kernel stack
          */
-        if (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
-            regs->ip < (unsigned long)entry_SYSCALL_64_safe_stack) {
+        if (ip_within_syscall_gap(regs)) {
                 sp = this_cpu_read(cpu_current_top_of_stack);
                 goto sync;
         }
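
The two removed lines above are the open-coded form of the check that the new ip_within_syscall_gap() helper performs, which is why the same helper can also be called from on_vc_stack() earlier in this diff. A minimal sketch of what such a helper presumably looks like, reconstructed only from the removed condition (its actual location and any compat-syscall handling are not visible in this diff):

static __always_inline bool ip_within_syscall_gap(struct pt_regs *regs)
{
        /* Between entry_SYSCALL_64 and entry_SYSCALL_64_safe_stack, RSP still
         * holds the user-space value and must not be trusted. */
        return regs->ip >= (unsigned long)entry_SYSCALL_64 &&
               regs->ip <  (unsigned long)entry_SYSCALL_64_safe_stack;
}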

@@ -13,7 +13,7 @@
 
 #define orc_warn_current(args...)                                       \
 ({                                                                      \
-        if (state->task == current)                                     \
+        if (state->task == current && !state->error)                    \
                 orc_warn(args);                                         \
 })

@@ -367,8 +367,8 @@ static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
         if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
                 return false;
 
-        *ip = regs->ip;
-        *sp = regs->sp;
+        *ip = READ_ONCE_NOCHECK(regs->ip);
+        *sp = READ_ONCE_NOCHECK(regs->sp);
         return true;
 }

@@ -380,8 +380,8 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
         if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
                 return false;
 
-        *ip = regs->ip;
-        *sp = regs->sp;
+        *ip = READ_ONCE_NOCHECK(regs->ip);
+        *sp = READ_ONCE_NOCHECK(regs->sp);
         return true;
 }

@@ -402,12 +402,12 @@ static bool get_reg(struct unwind_state *state, unsigned int reg_off,
                 return false;
 
         if (state->full_regs) {
-                *val = ((unsigned long *)state->regs)[reg];
+                *val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
                 return true;
         }
 
         if (state->prev_regs) {
-                *val = ((unsigned long *)state->prev_regs)[reg];
+                *val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
                 return true;
         }