mirror of
https://github.com/torvalds/linux.git
sched: Move mmdrop to RCU on RT
mmdrop() is invoked from finish_task_switch() by the incoming task to drop
the mm which was handed over by the previous task. mmdrop() can be quite
expensive, which prevents an incoming real-time task from getting useful
work done.

Provide mmdrop_sched(), which maps to mmdrop() on !RT kernels. On RT kernels
it delegates the eventually required invocation of __mmdrop() to RCU.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210928122411.648582026@linutronix.de
This commit is contained in:
parent d07b2eee45
commit 8d491de6ed
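For context, the patch applies the common kernel idiom of deferring the final
teardown of a refcounted object to an RCU callback instead of doing it in the
caller. Below is a minimal sketch of that idiom; the names struct foo,
foo_put() and __foo_free() are hypothetical and illustrative only, not part
of this commit. The actual changes follow.

/*
 * Illustrative sketch: defer the final free of a refcounted object to an
 * RCU callback, mirroring what mmdrop_sched() does for struct mm_struct
 * on RT. struct foo, foo_put() and __foo_free() are made-up names.
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        atomic_t refcount;
        struct rcu_head rcu;    /* anchors the deferred free */
};

/* RCU callback: recover the object from the embedded rcu_head and free it. */
static void __foo_free(struct rcu_head *rhp)
{
        struct foo *f = container_of(rhp, struct foo, rcu);

        kfree(f);
}

static void foo_put(struct foo *f)
{
        /*
         * atomic_dec_and_test() provides the full barrier; the expensive
         * teardown runs later from RCU callback context rather than in the
         * caller, so e.g. an incoming real-time task is not delayed by it.
         */
        if (atomic_dec_and_test(&f->refcount))
                call_rcu(&f->rcu, __foo_free);
}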
include/linux/mm_types.h
@@ -12,6 +12,7 @@
 #include <linux/completion.h>
 #include <linux/cpumask.h>
 #include <linux/uprobes.h>
+#include <linux/rcupdate.h>
 #include <linux/page-flags-layout.h>
 #include <linux/workqueue.h>
 #include <linux/seqlock.h>
@@ -572,6 +573,9 @@ struct mm_struct {
                bool tlb_flush_batched;
 #endif
                struct uprobes_state uprobes_state;
+#ifdef CONFIG_PREEMPT_RT
+               struct rcu_head delayed_drop;
+#endif
 #ifdef CONFIG_HUGETLB_PAGE
                atomic_long_t hugetlb_usage;
 #endif
include/linux/sched/mm.h
@@ -49,6 +49,35 @@ static inline void mmdrop(struct mm_struct *mm)
                __mmdrop(mm);
 }
 
+#ifdef CONFIG_PREEMPT_RT
+/*
+ * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
+ * by far the least expensive way to do that.
+ */
+static inline void __mmdrop_delayed(struct rcu_head *rhp)
+{
+       struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
+       __mmdrop(mm);
+}
+
+/*
+ * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
+ * kernels via RCU.
+ */
+static inline void mmdrop_sched(struct mm_struct *mm)
+{
+       /* Provides a full memory barrier. See mmdrop() */
+       if (atomic_dec_and_test(&mm->mm_count))
+               call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+static inline void mmdrop_sched(struct mm_struct *mm)
+{
+       mmdrop(mm);
+}
+#endif
+
 /**
  * mmget() - Pin the address space associated with a &struct mm_struct.
  * @mm: The address space to pin.
kernel/sched/core.c
@@ -4836,7 +4836,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
         */
        if (mm) {
                membarrier_mm_sync_core_before_usermode(mm);
-               mmdrop(mm);
+               mmdrop_sched(mm);
        }
        if (unlikely(prev_state == TASK_DEAD)) {
                if (prev->sched_class->task_dead)