mirror of
https://github.com/torvalds/linux.git
synced 2024-11-28 07:01:32 +00:00
oom: don't assume that a coredumping thread will exit soon
oom_kill.c assumes that PF_EXITING task should exit and free the memory soon. This is wrong in many ways and one important case is the coredump. A task can sleep in exit_mm() "forever" while the coredumping sub-thread can need more memory. Change the PF_EXITING checks to take SIGNAL_GROUP_COREDUMP into account, we add the new trivial helper for that. Note: this is only the first step, this patch doesn't try to solve other problems. The SIGNAL_GROUP_COREDUMP check is obviously racy, a task can participate in coredump after it was already observed in PF_EXITING state, so TIF_MEMDIE (which also blocks oom-killer) still can be wrongly set. fatal_signal_pending() can be true because of SIGNAL_GROUP_COREDUMP so out_of_memory() and mem_cgroup_out_of_memory() shouldn't blindly trust it. And even the name/usage of the new helper is confusing, an exiting thread can only free its ->mm if it is the only/last task in thread group. [akpm@linux-foundation.org: add comment] Signed-off-by: Oleg Nesterov <oleg@redhat.com> Cc: Cong Wang <xiyou.wangcong@gmail.com> Acked-by: David Rientjes <rientjes@google.com> Acked-by: Michal Hocko <mhocko@suse.cz> Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net> Cc: Tejun Heo <tj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
ba914f4815
commit
d003f371b2
@@ -92,6 +92,17 @@ static inline bool oom_gfp_allowed(gfp_t gfp_mask)
 
 extern struct task_struct *find_lock_task_mm(struct task_struct *p);
 
+static inline bool task_will_free_mem(struct task_struct *task)
+{
+	/*
+	 * A coredumping process may sleep for an extended period in exit_mm(),
+	 * so the oom killer cannot assume that the process will promptly exit
+	 * and release memory.
+	 */
+	return (task->flags & PF_EXITING) &&
+		!(task->signal->flags & SIGNAL_GROUP_COREDUMP);
+}
+
 /* sysctls */
 extern int sysctl_oom_dump_tasks;
 extern int sysctl_oom_kill_allocating_task;
@@ -1559,7 +1559,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	 * select it. The goal is to allow it to allocate so that it may
 	 * quickly exit and free its memory.
 	 */
-	if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
+	if (fatal_signal_pending(current) || task_will_free_mem(current)) {
 		set_thread_flag(TIF_MEMDIE);
 		return;
 	}
@@ -281,7 +281,7 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
 	if (oom_task_origin(task))
 		return OOM_SCAN_SELECT;
 
-	if (task->flags & PF_EXITING && !force_kill) {
+	if (task_will_free_mem(task) && !force_kill) {
 		/*
 		 * If this task is not being ptraced on exit, then wait for it
 		 * to finish before killing some other task unnecessarily.
@@ -443,7 +443,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	 * If the task is already exiting, don't alarm the sysadmin or kill
 	 * its children or threads, just set TIF_MEMDIE so it can die quickly
 	 */
-	if (p->flags & PF_EXITING) {
+	if (task_will_free_mem(p)) {
 		set_tsk_thread_flag(p, TIF_MEMDIE);
 		put_task_struct(p);
 		return;
@@ -649,7 +649,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	 * select it. The goal is to allow it to allocate so that it may
 	 * quickly exit and free its memory.
 	 */
-	if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
+	if (fatal_signal_pending(current) || task_will_free_mem(current)) {
 		set_thread_flag(TIF_MEMDIE);
 		return;
 	}
Loading…
Reference in New Issue
Block a user