Merge tag 'sched-core-2020-06-02' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
"The changes in this cycle are:
- Optimize the task wakeup CPU selection logic, to improve
scalability and reduce wakeup latency spikes
- PELT enhancements
- CFS bandwidth handling fixes
- Optimize the wakeup path by removing rq->wake_list and replacing it
with ->ttwu_pending (a sketch of the idea follows this list)
- Optimize IPI cross-calls by making flush_smp_call_function_queue()
process sync callbacks first (sketched below, after the commit list)
- Misc fixes and enhancements"
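For readers outside sched/core, a rough userspace model of the
->ttwu_pending shape (all names and types below are hypothetical
stand-ins, not the kernel's actual task_struct/rq fields; the real
implementation uses llist and an IPI to the target CPU): the waking
CPU pushes the task onto the target CPU's lock-free pending list and
sets a flag, instead of taking the remote runqueue lock; the target
CPU then drains the list itself on its next scheduler entry.

/* Illustrative userspace model of the rq->ttwu_pending idea. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct task {                         /* stand-in for struct task_struct */
        const char *name;
        struct task *wake_next;       /* list link, like wake_entry */
};

struct runqueue {                     /* stand-in for struct rq */
        _Atomic(struct task *) wake_head; /* lock-free pending wakeups */
        atomic_int ttwu_pending;          /* cheap "work to do" flag */
};

/* Remote side: queue a wakeup without touching the target's rq lock. */
static void queue_wakeup(struct runqueue *rq, struct task *p)
{
        struct task *old = atomic_load(&rq->wake_head);
        do {
                p->wake_next = old;   /* Treiber-style lock-free push */
        } while (!atomic_compare_exchange_weak(&rq->wake_head, &old, p));
        atomic_store(&rq->ttwu_pending, 1);
        /* the kernel would send an IPI here if the target CPU is idle */
}

/* Local side: the owning CPU drains its own list on scheduler entry. */
static void drain_wakeups(struct runqueue *rq)
{
        if (!atomic_exchange(&rq->ttwu_pending, 0))
                return;               /* fast path: nothing queued */
        struct task *p = atomic_exchange(&rq->wake_head, NULL);
        while (p) {                   /* drain order is LIFO here */
                struct task *next = p->wake_next;
                printf("activating %s on local runqueue\n", p->name);
                p = next;
        }
}

int main(void)
{
        struct runqueue rq = { .wake_head = NULL, .ttwu_pending = 0 };
        struct task a = { .name = "a" }, b = { .name = "b" };
        queue_wakeup(&rq, &a);        /* would run on the waking CPU */
        queue_wakeup(&rq, &b);
        drain_wakeups(&rq);           /* would run on the target CPU */
        return 0;
}

The point of the design is that the expensive part of the wakeup
(activating the task) runs on the CPU that owns the runqueue, so the
remote runqueue lock no longer bounces between CPUs.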
* tag 'sched-core-2020-06-02' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
irq_work: Define irq_work_single() on !CONFIG_IRQ_WORK too
sched/headers: Split out open-coded prototypes into kernel/sched/smp.h
sched: Replace rq::wake_list
sched: Add rq::ttwu_pending
irq_work, smp: Allow irq_work on call_single_queue
smp: Optimize send_call_function_single_ipi()
smp: Move irq_work_run() out of flush_smp_call_function_queue()
smp: Optimize flush_smp_call_function_queue()
sched: Fix smp_call_function_single_async() usage for ILB
sched/core: Offload wakee task activation if the wakee is descheduling
sched/core: Optimize ttwu() spinning on p->on_cpu
sched: Defend cfs and rt bandwidth quota against overflow
sched/cpuacct: Fix charge cpuacct.usage_sys
sched/fair: Replace zero-length array with flexible-array
sched/pelt: Sync util/runnable_sum with PELT window when propagating
sched/cpuacct: Use __this_cpu_add() instead of this_cpu_ptr()
sched/fair: Optimize enqueue_task_fair()
sched: Make scheduler_ipi inline
sched: Clean up scheduler_ipi()
sched/core: Simplify sched_init()
...
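On the flush_smp_call_function_queue() bullet above: the optimization
is an ordering one; entries whose senders are spin-waiting
(synchronous CSDs) run before the fire-and-forget ones, so waiting
CPUs are released as early as possible. A minimal, hypothetical
two-pass model of that ordering (illustrative only, not the kernel's
real csd/llist machinery):

/* Model of "sync callbacks first" when flushing a call queue. */
#include <stdbool.h>
#include <stdio.h>

struct call_entry {
        void (*func)(void *);
        void *arg;
        bool sync;                    /* sender spin-waits on completion */
        struct call_entry *next;
};

static void flush_call_queue(struct call_entry *head)
{
        /* First pass: run entries with a waiting sender, so remote
         * CPUs spinning on them unblock as early as possible. */
        for (struct call_entry *e = head; e; e = e->next)
                if (e->sync)
                        e->func(e->arg);
        /* Second pass: run the asynchronous, fire-and-forget entries. */
        for (struct call_entry *e = head; e; e = e->next)
                if (!e->sync)
                        e->func(e->arg);
}

static void hello(void *arg) { printf("%s\n", (const char *)arg); }

int main(void)
{
        struct call_entry async = { hello, "async (runs second)", false, NULL };
        struct call_entry sync  = { hello, "sync (runs first)",   true, &async };
        flush_call_queue(&sync);
        return 0;
}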
@@ -708,8 +708,12 @@ void __noreturn do_exit(long code)
        struct task_struct *tsk = current;
        int group_dead;

-       profile_task_exit(tsk);
-       kcov_task_exit(tsk);
+       /*
+        * We can get here from a kernel oops, sometimes with preemption off.
+        * Start by checking for critical errors.
+        * Then fix up important state like USER_DS and preemption.
+        * Then do everything else.
+        */

        WARN_ON(blk_needs_flush_plug(tsk));

@@ -727,6 +731,16 @@ void __noreturn do_exit(long code)
         */
        set_fs(USER_DS);

+       if (unlikely(in_atomic())) {
+               pr_info("note: %s[%d] exited with preempt_count %d\n",
+                       current->comm, task_pid_nr(current),
+                       preempt_count());
+               preempt_count_set(PREEMPT_ENABLED);
+       }
+
+       profile_task_exit(tsk);
+       kcov_task_exit(tsk);
+
        ptrace_event(PTRACE_EVENT_EXIT, code);

        validate_creds_for_do_exit(tsk);
@@ -744,13 +758,6 @@ void __noreturn do_exit(long code)

        exit_signals(tsk);      /* sets PF_EXITING */

-       if (unlikely(in_atomic())) {
-               pr_info("note: %s[%d] exited with preempt_count %d\n",
-                       current->comm, task_pid_nr(current),
-                       preempt_count());
-               preempt_count_set(PREEMPT_ENABLED);
-       }
-
        /* sync mm's RSS info before statistics gathering */
        if (tsk->mm)
                sync_mm_rss(tsk->mm);
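For context on the hunks above: this is the do_exit() reordering that
came in through this pull. The in_atomic() preempt_count fixup moves
up, ahead of profile_task_exit() and kcov_task_exit(), which may block
and so should not run while the preemption state is still broken. That
matches the new comment: check for critical errors first, fix up
USER_DS and preemption, then do everything else.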