mm: lru_cache_disable: replace work queue synchronization with synchronize_rcu
On systems that run SCHED_FIFO priority-1 (FIFO:1) applications that
busy loop, any SCHED_OTHER task that attempts to execute on such a CPU
(such as a workqueue worker thread) will not be scheduled, which leads
to system hangs.
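For illustration only (not part of this patch), a minimal userspace
sketch of the problematic setup; the CPU number is arbitrary and the
program needs CAP_SYS_NICE or root:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 1 };
	cpu_set_t mask;

	/* Pin ourselves to CPU 1. */
	CPU_ZERO(&mask);
	CPU_SET(1, &mask);
	if (sched_setaffinity(0, sizeof(mask), &mask)) {
		perror("sched_setaffinity");
		exit(1);
	}

	/* Become a FIFO:1 task. */
	if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
		perror("sched_setscheduler");
		exit(1);
	}

	/*
	 * Busy loop: from here on, no SCHED_OTHER task (including the
	 * per-CPU kworkers that drain LRU pagevecs) runs on this CPU.
	 */
	for (;;)
		;
}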
Commit d479960e44 ("mm: disable LRU pagevec during the migration
temporarily") relies on queueing work items on all online CPUs to
ensure visibility of lru_disable_count.
To fix this, replace the usage of work items with synchronize_rcu,
which provides the same guarantees.
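For reference, the work-item scheme being removed looked roughly like
this (condensed from __lru_add_drain_all(); sync_via_workqueue() is a
made-up name for the pattern). The flush_work() loop is exactly what
never completes when a kworker is starved:

static void lru_add_drain_per_cpu(struct work_struct *dummy);

static void sync_via_workqueue(void)
{
	int cpu;

	/* Queue a drain work item on every online CPU... */
	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		INIT_WORK(work, lru_add_drain_per_cpu);
		queue_work_on(cpu, mm_percpu_wq, work);
	}

	/* ...and wait for all of them; hangs if any kworker never runs. */
	for_each_online_cpu(cpu)
		flush_work(&per_cpu(lru_add_drain_work, cpu));
}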
Readers of lru_disable_count are protected by either disabling
preemption or rcu_read_lock:

preempt_disable, local_irq_disable  [bh_lru_lock()]
rcu_read_lock                       [rt_spin_lock CONFIG_PREEMPT_RT]
preempt_disable                     [local_lock !CONFIG_PREEMPT_RT]
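A reader-side sketch (simplified; pagevec_reader() is a made-up caller
standing in for the real pagevec paths in mm/swap.c):

/*
 * lru_disable_count is only sampled inside a section that
 * synchronize_rcu() waits for: local_lock() is preempt_disable()
 * on !CONFIG_PREEMPT_RT and an rt_spin_lock (an RCU read-side
 * critical section) on CONFIG_PREEMPT_RT.
 */
static bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static void pagevec_reader(struct page *page)	/* hypothetical caller */
{
	local_lock(&lru_pvecs.lock);
	if (lru_cache_disabled())
		;	/* bypass the per-CPU pagevec, release directly */
	else
		;	/* batch the page in this CPU's pagevec */
	local_unlock(&lru_pvecs.lock);
}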
Since the v5.1 kernel, synchronize_rcu() is guaranteed to wait on
preempt_disable() regions of code. So any CPU which sees
lru_disable_count = 0 will have exited the critical section when
synchronize_rcu() returns.
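Concretely, the writer side after this patch reduces to the following
(a sketch matching the diff below, with the long in-code comment
elided):

void lru_cache_disable(void)
{
	atomic_inc(&lru_disable_count);
	/*
	 * Waits for every in-flight preempt_disable()/RCU read-side
	 * section; any reader that sampled lru_disable_count == 0 has
	 * left its critical section by the time this returns.
	 */
	synchronize_rcu();
#ifdef CONFIG_SMP
	__lru_add_drain_all(true);	/* drain pages already batched */
#else
	lru_add_and_bh_lrus_drain();
#endif
}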
Link: https://lkml.kernel.org/r/Yin7hDxdt0s/x+fp@fuller.cnet
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Reviewed-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 405cc51fc1
commit ff042f4a9b

mm/swap.c (23 lines changed)
@@ -831,8 +831,7 @@ inline void __lru_add_drain_all(bool force_all_cpus)
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

-		if (force_all_cpus ||
-		    pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
+		if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
 		    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
@@ -876,15 +875,21 @@ atomic_t lru_disable_count = ATOMIC_INIT(0);
 void lru_cache_disable(void)
 {
 	atomic_inc(&lru_disable_count);
-#ifdef CONFIG_SMP
 	/*
-	 * lru_add_drain_all in the force mode will schedule draining on
-	 * all online CPUs so any calls of lru_cache_disabled wrapped by
-	 * local_lock or preemption disabled would be ordered by that.
-	 * The atomic operation doesn't need to have stronger ordering
-	 * requirements because that is enforced by the scheduling
-	 * guarantees.
+	 * Readers of lru_disable_count are protected by either disabling
+	 * preemption or rcu_read_lock:
+	 *
+	 * preempt_disable, local_irq_disable  [bh_lru_lock()]
+	 * rcu_read_lock                       [rt_spin_lock CONFIG_PREEMPT_RT]
+	 * preempt_disable                     [local_lock !CONFIG_PREEMPT_RT]
+	 *
+	 * Since v5.1 kernel, synchronize_rcu() is guaranteed to wait on
+	 * preempt_disable() regions of code. So any CPU which sees
+	 * lru_disable_count = 0 will have exited the critical
+	 * section when synchronize_rcu() returns.
 	 */
+	synchronize_rcu();
+#ifdef CONFIG_SMP
 	__lru_add_drain_all(true);
 #else
 	lru_add_and_bh_lrus_drain();