/*
 * Common signal handling code for both 32 and 64 bits
 *
 *    Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
 *    Extracted from signal_32.c and signal_64.c
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file README.legal in the main directory of
 * this archive for more details.
 */

#include <linux/tracehook.h>
#include <linux/signal.h>
#include <linux/uprobes.h>
#include <linux/key.h>
#include <linux/context_tracking.h>
#include <asm/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/debug.h>
#include <asm/tm.h>

#include "signal.h"

/* Log an error when sending an unhandled signal to a process. Controlled
 * through debug.exception-trace sysctl.
 */

int show_unhandled_signals = 1;

/*
 * Allocate space for the signal frame
 */
void __user *get_sigframe(struct ksignal *ksig, unsigned long sp,
                          size_t frame_size, int is_32)
{
        unsigned long oldsp, newsp;

        /* Default to using normal stack */
        oldsp = get_clean_sp(sp, is_32);
        oldsp = sigsp(oldsp, ksig);
        newsp = (oldsp - frame_size) & ~0xFUL;

        /* Check access */
        if (!access_ok(VERIFY_WRITE, (void __user *)newsp, oldsp - newsp))
                return NULL;

        return (void __user *)newsp;
}

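/*
 * Decide whether an interrupted system call should be restarted now that a
 * signal is being taken.  On powerpc a failed syscall leaves its error code
 * in r3 with CR0.SO set; depending on that error code and on whether a
 * handler was found, we either arrange for the sc instruction to be
 * re-executed or make the syscall return -EINTR.
 */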
static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
                                  int has_handler)
{
        unsigned long ret = regs->gpr[3];
        int restart = 1;

        /* syscall ? */
        if (TRAP(regs) != 0x0C00)
                return;

        /* error signalled ? */
        if (!(regs->ccr & 0x10000000))
                return;

        switch (ret) {
        case ERESTART_RESTARTBLOCK:
        case ERESTARTNOHAND:
                /* ERESTARTNOHAND means that the syscall should only be
                 * restarted if there was no handler for the signal, and since
                 * we only get here if there is a handler, we don't restart.
                 */
                restart = !has_handler;
                break;
        case ERESTARTSYS:
                /* ERESTARTSYS means to restart the syscall if there is no
                 * handler or the handler was registered with SA_RESTART
                 */
                restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0;
                break;
        case ERESTARTNOINTR:
                /* ERESTARTNOINTR means that the syscall should be
                 * called again after the signal handler returns.
                 */
                break;
        default:
                return;
        }
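        /*
         * To restart, rewind the NIP by one (4-byte) instruction so that
         * the sc is re-executed: r0 is loaded with __NR_restart_syscall
         * for ERESTART_RESTARTBLOCK, otherwise r3 is restored to the
         * original first argument.  If not restarting, make the syscall
         * fail with EINTR (error flagged by setting CR0.SO).
         */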
        if (restart) {
                if (ret == ERESTART_RESTARTBLOCK)
                        regs->gpr[0] = __NR_restart_syscall;
                else
                        regs->gpr[3] = regs->orig_gpr3;
                regs->nip -= 4;
                regs->result = 0;
        } else {
                regs->result = -EINTR;
                regs->gpr[3] = EINTR;
                regs->ccr |= 0x10000000;
        }
}

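/*
 * Deliver one pending signal, if there is one.  get_signal() picks the
 * signal and its handler; we then sort out any syscall restart and hand
 * off to the 32-bit or 64-bit frame-building code.
 */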
static void do_signal(struct pt_regs *regs)
{
        sigset_t *oldset = sigmask_to_save();
        struct ksignal ksig;
        int ret;
        int is32 = is_32bit_task();

        get_signal(&ksig);

        /* Is there any syscall restart business here ? */
        check_syscall_restart(regs, &ksig.ka, ksig.sig > 0);

        if (ksig.sig <= 0) {
                /* No signal to deliver -- put the saved sigmask back */
                restore_saved_sigmask();
                regs->trap = 0;
                return;         /* no signals delivered */
        }

#ifndef CONFIG_PPC_ADV_DEBUG_REGS
        /*
         * Reenable the DABR before delivering the signal to
         * user space. The DABR will have been cleared if it
         * triggered inside the kernel.
         */
        if (current->thread.hw_brk.address &&
            current->thread.hw_brk.type)
                __set_breakpoint(&current->thread.hw_brk);
#endif
        /* Re-enable the breakpoints for the signal stack */
        thread_change_pc(current, regs);

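        /*
         * Build the signal frame: 32-bit tasks get either an RT or a
         * "classic" frame depending on SA_SIGINFO; 64-bit tasks always
         * use the RT frame.
         */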
        if (is32) {
                if (ksig.ka.sa.sa_flags & SA_SIGINFO)
                        ret = handle_rt_signal32(&ksig, oldset, regs);
                else
                        ret = handle_signal32(&ksig, oldset, regs);
        } else {
                ret = handle_rt_signal64(&ksig, oldset, regs);
        }

        regs->trap = 0;
        signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP));
}

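/*
 * Called on return to user space when one of the TIF work flags is set:
 * handle uprobes, deliver pending signals and run any deferred
 * TIF_NOTIFY_RESUME work before re-entering user mode.
 */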
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
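        /* Tell the context tracking code that we have left user mode */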
        user_exit();

        if (thread_info_flags & _TIF_UPROBE)
                uprobe_notify_resume(regs);

        if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(regs);

        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
        }

        user_enter();
}

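/*
 * Return the stack pointer under which a signal frame should be written,
 * taking any active hardware transaction into account (see the comment
 * below).
 */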
unsigned long get_tm_stackpointer(struct pt_regs *regs)
{
        /* When in an active transaction that takes a signal, we need to be
         * careful with the stack.  It's possible that the stack has moved back
         * up after the tbegin.  The obvious case here is when the tbegin is
         * called inside a function that returns before a tend.  In this case,
         * the stack is part of the checkpointed transactional memory state.
         * If we write over this non transactionally or in suspend, we are in
         * trouble because if we get a tm abort, the program counter and stack
         * pointer will be back at the tbegin but our in memory stack won't be
         * valid anymore.
         *
         * To avoid this, when taking a signal in an active transaction, we
         * need to use the stack pointer from the checkpointed state, rather
         * than the speculated state.  This ensures that the signal context
         * (written tm suspended) will be written below the stack required for
         * the rollback.  The transaction is aborted because of the treclaim,
         * so any memory written between the tbegin and the signal will be
         * rolled back anyway.
         *
         * For signals taken in non-TM or suspended mode, we use the
         * normal/non-checkpointed stack pointer.
         */

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (MSR_TM_ACTIVE(regs->msr)) {
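                /*
                 * treclaim aborts the transaction and saves the checkpointed
                 * register state into the thread struct, making the
                 * pre-transactional stack pointer available in ckpt_regs.
                 */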
                tm_reclaim_current(TM_CAUSE_SIGNAL);
                if (MSR_TM_TRANSACTIONAL(regs->msr))
                        return current->thread.ckpt_regs.gpr[1];
        }
#endif
        return regs->gpr[1];
}