x86: Remove force_iret()
force_iret() was originally intended to prevent the return to user mode with the SYSRET or SYSEXIT instructions, in cases where the register state could have been changed to be incompatible with those instructions. The entry code has been significantly reworked since then, and register state is validated before SYSRET or SYSEXIT are used. force_iret() no longer serves its original purpose and can be eliminated.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Link: https://lkml.kernel.org/r/20191219115812.102620-1-brgerst@gmail.com
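For context before the hunks: the mechanism being removed is small. force_iret() set TIF_NOTIFY_RESUME, so the syscall exit code saw pending work and returned to user mode through the slow but fully capable IRET path instead of SYSRET/SYSEXIT. The sketch below only illustrates that old arrangement; the macro is the definition deleted in the thread-info hunk further down, while example_exit_to_usermode(), return_via_iret() and return_via_sysret() are made-up names, not kernel functions.

#include <linux/thread_info.h>	/* TIF_NOTIFY_RESUME, test/set_thread_flag() */
#include <asm/ptrace.h>		/* struct pt_regs */

/* The definition this commit deletes (see the thread-info hunk below): */
#define force_iret()	set_thread_flag(TIF_NOTIFY_RESUME)

/* Hypothetical helpers standing in for the real assembly return paths. */
static void return_via_iret(struct pt_regs *regs);
static void return_via_sysret(struct pt_regs *regs);

/*
 * Illustration only: the exit path tests the thread flags, and any
 * pending-work bit (TIF_NOTIFY_RESUME included) diverts the return
 * through IRET, which can restore SS, CS and EFLAGS bits that the
 * fast SYSRET/SYSEXIT instructions cannot.
 */
static void example_exit_to_usermode(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_NOTIFY_RESUME))
		return_via_iret(regs);		/* slow, complete restore */
	else
		return_via_sysret(regs);	/* fast path */
}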
commit 2b10906f2d
parent f444a5ff95
@@ -114,8 +114,6 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 	err |= fpu__restore_sig(buf, 1);
 
-	force_iret();
-
 	return err;
 }
@@ -339,22 +339,6 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
 #define ARCH_HAS_USER_SINGLE_STEP_REPORT
 
-/*
- * When hitting ptrace_stop(), we cannot return using SYSRET because
- * that does not restore the full CPU state, only a minimal set. The
- * ptracer can change arbitrary register values, which is usually okay
- * because the usual ptrace stops run off the signal delivery path which
- * forces IRET; however, ptrace_event() stops happen in arbitrary places
- * in the kernel and don't force IRET path.
- *
- * So force IRET path after a ptrace stop.
- */
-#define arch_ptrace_stop_needed(code, info)				\
-({									\
-	force_iret();							\
-	false;								\
-})
-
 struct user_desc;
 extern int do_get_thread_area(struct task_struct *p, int idx,
 			      struct user_desc __user *info);
@@ -239,15 +239,6 @@ static inline int arch_within_stack_frames(const void * const stack,
 			   current_thread_info()->status & TS_COMPAT)
 #endif
 
-/*
- * Force syscall return via IRET by making it look as if there was
- * some work pending. IRET is our most capable (but slowest) syscall
- * return path, which is able to restore modified SS, CS and certain
- * EFLAGS values that other (fast) syscall return instructions
- * are not able to restore properly.
- */
-#define force_iret() set_thread_flag(TIF_NOTIFY_RESUME)
-
 extern void arch_task_cache_init(void);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 extern void arch_release_task_struct(struct task_struct *tsk);
@@ -124,7 +124,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 	regs->ip = new_ip;
 	regs->sp = new_sp;
 	regs->flags = X86_EFLAGS_IF;
-	force_iret();
 }
 EXPORT_SYMBOL_GPL(start_thread);
@@ -394,7 +394,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
 	regs->cs = _cs;
 	regs->ss = _ss;
 	regs->flags = X86_EFLAGS_IF;
-	force_iret();
 }
 
 void
@@ -151,8 +151,6 @@ static int restore_sigcontext(struct pt_regs *regs,
 	err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
 
-	force_iret();
-
 	return err;
 }
@@ -381,7 +381,6 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
 		mark_screen_rdonly(tsk->mm);
 
 	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
-	force_iret();
 	return regs->ax;
 }
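A closing note on the other half of the commit message: the register-state validation that makes this removal safe lives in the assembly exit path, not in C, so the following is only a rough sketch under the assumption that it mirrors the architectural SYSRET constraints. can_return_via_sysret() is a hypothetical name used for illustration; any state it would reject is returned to with IRET instead.

#include <linux/types.h>		/* bool */
#include <asm/ptrace.h>			/* struct pt_regs */
#include <asm/segment.h>		/* __USER_CS, __USER_DS */
#include <asm/processor-flags.h>	/* X86_EFLAGS_RF, X86_EFLAGS_TF */

/*
 * Hypothetical C rendering of the kind of checks the 64-bit exit code
 * performs before choosing SYSRET; the real logic is assembly.  It also
 * rejects non-canonical return addresses, omitted here for brevity.
 */
static bool can_return_via_sysret(const struct pt_regs *regs)
{
	if (regs->cx != regs->ip)		/* SYSRET reloads RIP from RCX */
		return false;
	if (regs->r11 != regs->flags)		/* ... and RFLAGS from R11 */
		return false;
	if (regs->cs != __USER_CS || regs->ss != __USER_DS)
		return false;			/* SYSRET forces fixed selectors */
	if (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF))
		return false;			/* trap/resume flags need IRET */
	return true;				/* fast path is safe */
}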