exit: Factor coredump_exit_mm out of exit_mm
Separate the coredump logic from the ordinary exit_mm logic by moving the coredump logic out of exit_mm into its own function, coredump_exit_mm.

Link: https://lkml.kernel.org/r/87a6k2x277.fsf@disp2133
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -404,8 +404,8 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
 	 *
 	 * do_exit:
 	 *	The caller holds mm->mmap_lock. This means that the task which
-	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
-	 *	its ->mm.
+	 *	uses this mm can't pass coredump_exit_mm(), so it can't exit or
+	 *	clear its ->mm.
 	 *
 	 * de_thread:
 	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
@@ -500,7 +500,7 @@ static void coredump_finish(struct mm_struct *mm, bool core_dumped)
 		next = curr->next;
 		task = curr->task;
 		/*
-		 * see exit_mm(), curr->task must not see
+		 * see coredump_exit_mm(), curr->task must not see
 		 * ->task == NULL before we read ->next.
 		 */
 		smp_mb();
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -339,6 +339,46 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
 	}
 }
 
+static void coredump_exit_mm(struct mm_struct *mm)
+{
+	struct core_state *core_state;
+
+	/*
+	 * Serialize with any possible pending coredump.
+	 * We must hold mmap_lock around checking core_state
+	 * and clearing tsk->mm.  The core-inducing thread
+	 * will increment ->nr_threads for each thread in the
+	 * group with ->mm != NULL.
+	 */
+	core_state = mm->core_state;
+	if (core_state) {
+		struct core_thread self;
+
+		mmap_read_unlock(mm);
+
+		self.task = current;
+		if (self.task->flags & PF_SIGNALED)
+			self.next = xchg(&core_state->dumper.next, &self);
+		else
+			self.task = NULL;
+		/*
+		 * Implies mb(), the result of xchg() must be visible
+		 * to core_state->dumper.
+		 */
+		if (atomic_dec_and_test(&core_state->nr_threads))
+			complete(&core_state->startup);
+
+		for (;;) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (!self.task) /* see coredump_finish() */
+				break;
+			freezable_schedule();
+		}
+		__set_current_state(TASK_RUNNING);
+		mmap_read_lock(mm);
+	}
+}
+
 #ifdef CONFIG_MEMCG
 /*
  * A task is exiting. If it owned this mm, find a new owner for the mm.
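[Editor's note] The handshake the comment in the hunk above describes is easy to lose in the diff: each exiting thread decrements core_state->nr_threads, the last one to do so completes core_state->startup so the dumping thread can proceed, and each queued thread then parks until coredump_finish() clears its ->task pointer. Below is a minimal userspace model of that counter-plus-release pattern, for illustration only: the names mirror the patch, but the kernel uses xchg(), completions, and freezable_schedule() rather than pthreads, and the model ignores the PF_SIGNALED distinction (in the kernel, only threads killed by the core-inducing signal queue themselves on core_state->dumper and wait).

/* Build with: cc -pthread model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NTHREADS 4

static atomic_int nr_threads = NTHREADS;	/* like core_state->nr_threads */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t startup = PTHREAD_COND_INITIALIZER; /* like core_state->startup */
static pthread_cond_t finish = PTHREAD_COND_INITIALIZER;  /* like coredump_finish()'s wakeup */
static bool dump_done;

static void *exiting_thread(void *arg)
{
	/* Last thread to check in wakes the dumper,
	 * like complete(&core_state->startup). */
	if (atomic_fetch_sub(&nr_threads, 1) == 1) {
		pthread_mutex_lock(&lock);
		pthread_cond_signal(&startup);
		pthread_mutex_unlock(&lock);
	}

	/* Park until released, like the for (;;) loop in the patch. */
	pthread_mutex_lock(&lock);
	while (!dump_done)
		pthread_cond_wait(&finish, &lock);
	pthread_mutex_unlock(&lock);
	printf("thread %ld released\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];

	for (long i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, exiting_thread, (void *)i);

	/* Dumper side: wait for every thread to quiesce. */
	pthread_mutex_lock(&lock);
	while (atomic_load(&nr_threads) > 0)
		pthread_cond_wait(&startup, &lock);
	printf("all threads quiesced, dumping core...\n");

	/* coredump_finish() analog: release every parked thread. */
	dump_done = true;
	pthread_cond_broadcast(&finish);
	pthread_mutex_unlock(&lock);

	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}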
@@ -434,47 +474,13 @@ assign_new_owner:
 static void exit_mm(void)
 {
 	struct mm_struct *mm = current->mm;
-	struct core_state *core_state;
 
 	exit_mm_release(current, mm);
 	if (!mm)
 		return;
 	sync_mm_rss(mm);
-	/*
-	 * Serialize with any possible pending coredump.
-	 * We must hold mmap_lock around checking core_state
-	 * and clearing tsk->mm.  The core-inducing thread
-	 * will increment ->nr_threads for each thread in the
-	 * group with ->mm != NULL.
-	 */
 	mmap_read_lock(mm);
-	core_state = mm->core_state;
-	if (core_state) {
-		struct core_thread self;
-
-		mmap_read_unlock(mm);
-
-		self.task = current;
-		if (self.task->flags & PF_SIGNALED)
-			self.next = xchg(&core_state->dumper.next, &self);
-		else
-			self.task = NULL;
-		/*
-		 * Implies mb(), the result of xchg() must be visible
-		 * to core_state->dumper.
-		 */
-		if (atomic_dec_and_test(&core_state->nr_threads))
-			complete(&core_state->startup);
-
-		for (;;) {
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			if (!self.task) /* see coredump_finish() */
-				break;
-			freezable_schedule();
-		}
-		__set_current_state(TASK_RUNNING);
-		mmap_read_lock(mm);
-	}
+	coredump_exit_mm(mm);
 	mmgrab(mm);
 	BUG_ON(mm != current->active_mm);
 	/* more a memory barrier than a real lock */
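[Editor's note] One detail of the hunk above worth calling out (my reading of the diff, not something the changelog states): exit_mm() still takes mmap_read_lock(mm) before calling the new helper, and coredump_exit_mm() returns with that lock held even though it drops and retakes it internally while the thread is parked. The caller-side contract, sketched as a non-compilable fragment:

	mmap_read_lock(mm);
	coredump_exit_mm(mm);	/* may drop and retake mmap_lock internally;
				 * always returns with it read-held */
	mmgrab(mm);		/* still under mmap_read_lock here */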
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -787,9 +787,9 @@ static inline bool __task_will_free_mem(struct task_struct *task)
 	struct signal_struct *sig = task->signal;
 
 	/*
-	 * A coredumping process may sleep for an extended period in exit_mm(),
-	 * so the oom killer cannot assume that the process will promptly exit
-	 * and release memory.
+	 * A coredumping process may sleep for an extended period in
+	 * coredump_exit_mm(), so the oom killer cannot assume that
+	 * the process will promptly exit and release memory.
 	 */
 	if (sig->flags & SIGNAL_GROUP_COREDUMP)
 		return false;