sched: membarrier: cover kthread_use_mm (v4)
Add comments and memory barrier to kthread_use_mm and kthread_unuse_mm
to allow the effect of membarrier(2) to apply to kthreads accessing
user-space memory as well.

Given that no prior kthread uses this guarantee and that it only affects
kthreads, adding this guarantee does not affect user-space ABI.

Refine the check in membarrier_global_expedited to exclude from the IPI
cpumask only runqueues running the idle thread, rather than all kthreads.

Now that membarrier_global_expedited can IPI kthreads, the scheduler
also needs to update the runqueue's membarrier_state when entering lazy
TLB state.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20201020134715.13909-3-mathieu.desnoyers@efficios.com
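For a sense of the user-facing side, here is a minimal sketch of a
membarrier(2) GLOBAL_EXPEDITED caller (editor's illustration; the wrapper
and error handling are not from this commit). After this patch, the
barrier it requests also covers kthreads that have adopted a user mm
through kthread_use_mm():

    #include <linux/membarrier.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* glibc provides no wrapper for membarrier(2); call it directly. */
    static int membarrier(int cmd, unsigned int flags)
    {
            return syscall(__NR_membarrier, cmd, flags);
    }

    int main(void)
    {
            /* A process must register before using the expedited command. */
            if (membarrier(MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED, 0)) {
                    perror("membarrier register");
                    return 1;
            }

            /*
             * On return, all running threads of processes registered for
             * GLOBAL_EXPEDITED, and (after this patch) kthreads currently
             * using such a process's mm, have executed a full memory
             * barrier.
             */
            if (membarrier(MEMBARRIER_CMD_GLOBAL_EXPEDITED, 0)) {
                    perror("membarrier");
                    return 1;
            }
            return 0;
    }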
commit 618758ed3a
parent 5bc7850232
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1248,6 +1248,7 @@ void kthread_use_mm(struct mm_struct *mm)
 		tsk->active_mm = mm;
 	}
 	tsk->mm = mm;
+	membarrier_update_current_mm(mm);
 	switch_mm_irqs_off(active_mm, mm, tsk);
 	local_irq_enable();
 	task_unlock(tsk);
@@ -1255,8 +1256,19 @@ void kthread_use_mm(struct mm_struct *mm)
 	finish_arch_post_lock_switch();
 #endif
 
+	/*
+	 * When a kthread starts operating on an address space, the loop
+	 * in membarrier_{private,global}_expedited() may not observe
+	 * that tsk->mm, and not issue an IPI. Membarrier requires a
+	 * memory barrier after storing to tsk->mm, before accessing
+	 * user-space memory. A full memory barrier for membarrier
+	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
+	 * mmdrop(), or explicitly with smp_mb().
+	 */
 	if (active_mm != mm)
 		mmdrop(active_mm);
+	else
+		smp_mb();
 
 	to_kthread(tsk)->oldfs = force_uaccess_begin();
 }
@@ -1276,9 +1288,18 @@ void kthread_unuse_mm(struct mm_struct *mm)
 	force_uaccess_end(to_kthread(tsk)->oldfs);
 
 	task_lock(tsk);
+	/*
+	 * When a kthread stops operating on an address space, the loop
+	 * in membarrier_{private,global}_expedited() may not observe
+	 * that tsk->mm, and not issue an IPI. Membarrier requires a
+	 * memory barrier after accessing user-space memory, before
+	 * clearing tsk->mm.
+	 */
+	smp_mb__after_spinlock();
 	sync_mm_rss(mm);
 	local_irq_disable();
 	tsk->mm = NULL;
+	membarrier_update_current_mm(NULL);
 	/* active_mm is still 'mm' */
 	enter_lazy_tlb(mm, tsk);
 	local_irq_enable();
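To make the new guarantee concrete, a hypothetical consumer of
kthread_use_mm() might look like the sketch below (borrow_mm_work,
borrow_mm_worker and uaddr are illustrative names, not part of this
commit); the user-space accesses between the two calls are what
membarrier(2) can now order against. The remaining hunks follow.

    #include <linux/kthread.h>
    #include <linux/mm.h>
    #include <linux/printk.h>
    #include <linux/sched/mm.h>
    #include <linux/uaccess.h>

    /*
     * Hypothetical worker: the caller pinned 'mm' with mmget() and chose
     * a user address to read on the process's behalf.
     */
    struct borrow_mm_work {
            struct mm_struct *mm;
            int __user *uaddr;
    };

    static int borrow_mm_worker(void *data)
    {
            struct borrow_mm_work *work = data;
            int val = 0;

            kthread_use_mm(work->mm);   /* full barrier after tsk->mm store */

            /* User-space accesses here are now covered by membarrier(2). */
            if (get_user(val, work->uaddr))
                    pr_warn("borrow_mm_worker: fault\n");

            kthread_unuse_mm(work->mm); /* barrier before clearing tsk->mm */
            mmput(work->mm);
            return val;
    }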
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -338,6 +338,7 @@ void play_idle_precise(u64 duration_ns, u64 latency_ns)
 	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
 	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
 	WARN_ON_ONCE(!duration_ns);
+	WARN_ON_ONCE(current->mm);
 
 	rcu_sleep_check();
 	preempt_disable();
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -126,12 +126,11 @@ static int membarrier_global_expedited(void)
 			continue;
 
 		/*
-		 * Skip the CPU if it runs a kernel thread. The scheduler
-		 * leaves the prior task mm in place as an optimization when
-		 * scheduling a kthread.
+		 * Skip the CPU if it runs a kernel thread which is not using
+		 * a task mm.
 		 */
 		p = rcu_dereference(cpu_rq(cpu)->curr);
-		if (p->flags & PF_KTHREAD)
+		if (!p->mm)
 			continue;
 
 		__cpumask_set_cpu(cpu, tmpmask);
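Read together, the kthread.c and membarrier.c sides pair up roughly as
follows (an informal editor's sketch of the ordering argument,
paraphrasing the comments added above):

    /*
     *   kthread_use_mm() on CPU0          membarrier_global_expedited()
     *   ------------------------          -----------------------------
     *   tsk->mm = mm;                     p = rcu_dereference(rq->curr);
     *   smp_mb() (or mmdrop());           if (!p->mm)
     *   <user-space accesses>                     continue;  (no IPI)
     *                                     otherwise: the IPI executes a
     *                                     full barrier on CPU0
     *
     * Either the loop observes the tsk->mm store and IPIs CPU0, or it
     * misses the store, in which case the kthread's own barrier, placed
     * after the store and before any user-space access, supplies the
     * ordering membarrier requires.
     */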