signal: protect SIGNAL_UNKILLABLE from unintentional clearing.
Since commit 00cd5c37af ("ptrace: permit ptracing of /sbin/init") we
can now trace init processes. init is initially protected with
SIGNAL_UNKILLABLE, which makes it ignore fatal signals as well as
SIGSTOP, but there are a number of paths during tracing where
SIGNAL_UNKILLABLE can be implicitly cleared.
This can result in init becoming stoppable/killable after tracing. For
example, running:
while true; do kill -STOP 1; done &
strace -p 1
and then stopping strace and the kill loop will result in init being
left in state TASK_STOPPED. Sending SIGCONT to init will resume it, but
init will now respond to future SIGSTOP signals rather than ignoring
them.
Make sure that when setting SIGNAL_STOP_CONTINUED/SIGNAL_STOP_STOPPED
we don't clear SIGNAL_UNKILLABLE.
Link: http://lkml.kernel.org/r/20170104122017.25047-1-jamie.iles@oracle.com
Signed-off-by: Jamie Iles <jamie.iles@oracle.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 2d39b3cd34
parent 20f664aabe
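Before the diff itself, a minimal sketch of the failure mode. This is ordinary userspace C, not kernel code; the flag values are copied from include/linux/sched.h of this era purely for illustration. It shows how a plain assignment to the shared flags word, which is what the stop paths did before this patch, silently drops SIGNAL_UNKILLABLE:

/*
 * Illustration only -- userspace C, not kernel code.  Flag values copied
 * from include/linux/sched.h just for this demo.
 */
#include <stdio.h>

#define SIGNAL_STOP_STOPPED	0x00000001	/* job control stop in effect */
#define SIGNAL_UNKILLABLE	0x00000040	/* for init: ignore fatal signals */

int main(void)
{
	unsigned int flags = SIGNAL_UNKILLABLE;	/* init starts out unkillable */

	/* Pre-patch behaviour: the whole word is overwritten on a group stop. */
	flags = SIGNAL_STOP_STOPPED;

	/* SIGNAL_UNKILLABLE is gone -- this prints "no". */
	printf("unkillable? %s\n", (flags & SIGNAL_UNKILLABLE) ? "yes" : "no");
	return 0;
}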
include/linux/sched.h
@@ -854,6 +854,16 @@ struct signal_struct {
 
 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
 
+#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
+			  SIGNAL_STOP_CONTINUED)
+
+static inline void signal_set_stop_flags(struct signal_struct *sig,
+					 unsigned int flags)
+{
+	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
+}
+
 /* If true, all threads except ->group_exit_task have pending SIGKILL */
 static inline int signal_group_exit(const struct signal_struct *sig)
 {

kernel/signal.c
@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
 	 * fresh group stop. Read comment in do_signal_stop() for details.
 	 */
 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
-		sig->flags = SIGNAL_STOP_STOPPED;
+		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
 		return true;
 	}
 	return false;
@@ -843,7 +843,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
 		 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 		 * notify its parent. See get_signal_to_deliver().
 		 */
-		signal->flags = why | SIGNAL_STOP_CONTINUED;
+		signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
 		signal->group_stop_count = 0;
 		signal->group_exit_code = 0;
 	}
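The helper introduced above can be exercised the same way. Below is a userspace re-statement of signal_set_stop_flags() (again with the flag values copied locally for illustration, and without the WARN_ON), checking that SIGNAL_UNKILLABLE survives a simulated group stop followed by a SIGCONT notification:

/* Illustration only -- a userspace re-statement of the helper added above. */
#include <assert.h>

#define SIGNAL_STOP_STOPPED	0x00000001
#define SIGNAL_STOP_CONTINUED	0x00000002
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED | SIGNAL_CLD_CONTINUED)
#define SIGNAL_UNKILLABLE	0x00000040

#define SIGNAL_STOP_MASK	(SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
				 SIGNAL_STOP_CONTINUED)

/* Same update rule as signal_set_stop_flags(): only stop-related bits move. */
static void set_stop_flags(unsigned int *flags, unsigned int new_flags)
{
	*flags = (*flags & ~SIGNAL_STOP_MASK) | new_flags;
}

int main(void)
{
	unsigned int flags = SIGNAL_UNKILLABLE;

	set_stop_flags(&flags, SIGNAL_STOP_STOPPED);		/* group stop */
	set_stop_flags(&flags, SIGNAL_CLD_CONTINUED |
			       SIGNAL_STOP_CONTINUED);		/* SIGCONT seen */

	assert(flags & SIGNAL_UNKILLABLE);	/* still unkillable */
	return 0;
}

The kernel helper additionally WARN_ONs if SIGNAL_GROUP_EXIT or SIGNAL_GROUP_COREDUMP is set in the same word, flagging the unexpected case of a stop being recorded while a group exit or coredump is already in progress.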