exit: Factor coredump_exit_mm out of exit_mm
Separate the coredump logic from the ordinary exit_mm logic by moving
the coredump logic out of exit_mm into its own function,
coredump_exit_mm.

Link: https://lkml.kernel.org/r/87a6k2x277.fsf@disp2133
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
@@ -339,6 +339,46 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
 	}
 }
 
+static void coredump_exit_mm(struct mm_struct *mm)
+{
+	struct core_state *core_state;
+
+	/*
+	 * Serialize with any possible pending coredump.
+	 * We must hold mmap_lock around checking core_state
+	 * and clearing tsk->mm. The core-inducing thread
+	 * will increment ->nr_threads for each thread in the
+	 * group with ->mm != NULL.
+	 */
+	core_state = mm->core_state;
+	if (core_state) {
+		struct core_thread self;
+
+		mmap_read_unlock(mm);
+
+		self.task = current;
+		if (self.task->flags & PF_SIGNALED)
+			self.next = xchg(&core_state->dumper.next, &self);
+		else
+			self.task = NULL;
+		/*
+		 * Implies mb(), the result of xchg() must be visible
+		 * to core_state->dumper.
+		 */
+		if (atomic_dec_and_test(&core_state->nr_threads))
+			complete(&core_state->startup);
+
+		for (;;) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (!self.task) /* see coredump_finish() */
+				break;
+			freezable_schedule();
+		}
+		__set_current_state(TASK_RUNNING);
+		mmap_read_lock(mm);
+	}
+}
+
 #ifdef CONFIG_MEMCG
 /*
  * A task is exiting. If it owned this mm, find a new owner for the mm.
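A note on the synchronization in the hunk above: self.next = xchg(&core_state->dumper.next, &self) is a lock-free push of the exiting thread onto the dumper's wait list, and the "Implies mb()" comment records that the exchange itself supplies the barrier making the node visible to the dump-inducing thread. A minimal user-space analogue of the same push, with hypothetical names (struct waiter, dumper_head, push_waiter) -- a sketch for illustration only, not kernel code:

	#include <stdatomic.h>

	struct waiter {
		struct waiter *next;	/* older entries on the wait list */
	};

	/* List head shared with the thread that will walk the list. */
	static _Atomic(struct waiter *) dumper_head;

	static void push_waiter(struct waiter *self)
	{
		/*
		 * Swap ourselves in as the new head, remembering the old
		 * head as our ->next. Note the node is published before
		 * ->next is written; the kernel can afford the same order
		 * because the dumper only walks the list after every
		 * exiting thread has also gone through complete(), which
		 * orders the ->next stores before the walk.
		 */
		self->next = atomic_exchange(&dumper_head, self);
	}

As in the kernel code, the exchange is a full-barrier read-modify-write, so no separate fence is needed before the counter decrement that follows it.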
@@ -434,47 +474,13 @@ assign_new_owner:
 static void exit_mm(void)
 {
 	struct mm_struct *mm = current->mm;
-	struct core_state *core_state;
 
 	exit_mm_release(current, mm);
 	if (!mm)
 		return;
 	sync_mm_rss(mm);
-	/*
-	 * Serialize with any possible pending coredump.
-	 * We must hold mmap_lock around checking core_state
-	 * and clearing tsk->mm. The core-inducing thread
-	 * will increment ->nr_threads for each thread in the
-	 * group with ->mm != NULL.
-	 */
 	mmap_read_lock(mm);
-	core_state = mm->core_state;
-	if (core_state) {
-		struct core_thread self;
-
-		mmap_read_unlock(mm);
-
-		self.task = current;
-		if (self.task->flags & PF_SIGNALED)
-			self.next = xchg(&core_state->dumper.next, &self);
-		else
-			self.task = NULL;
-		/*
-		 * Implies mb(), the result of xchg() must be visible
-		 * to core_state->dumper.
-		 */
-		if (atomic_dec_and_test(&core_state->nr_threads))
-			complete(&core_state->startup);
-
-		for (;;) {
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			if (!self.task) /* see coredump_finish() */
-				break;
-			freezable_schedule();
-		}
-		__set_current_state(TASK_RUNNING);
-		mmap_read_lock(mm);
-	}
+	coredump_exit_mm(mm);
 	mmgrab(mm);
 	BUG_ON(mm != current->active_mm);
 	/* more a memory barrier than a real lock */
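Read together, the two hunks leave the caller as a straight-line sequence. Reconstructed from the context and marker lines above (lines of exit_mm beyond the shown context are elided):

	static void exit_mm(void)
	{
		struct mm_struct *mm = current->mm;

		exit_mm_release(current, mm);
		if (!mm)
			return;
		sync_mm_rss(mm);
		mmap_read_lock(mm);
		coredump_exit_mm(mm);	/* may sleep waiting for a dumper */
		mmgrab(mm);
		BUG_ON(mm != current->active_mm);
		/* more a memory barrier than a real lock */
		...
	}

The locking contract carries over unchanged: coredump_exit_mm() is entered with mmap_lock held for read, drops it while the thread sleeps waiting for coredump_finish(), and retakes it before returning, so exit_mm() still holds the lock at the mmgrab() that follows.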