rcu/kvfree: Remove useless monitor_todo flag
monitor_todo is not needed: the delayed_work struct already tracks whether the work is pending, and schedule_delayed_work() is a no-op when it is. Just rely on the schedule_delayed_work() helper to know whether work is pending.

Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Neeraj Upadhyay <quic_neeraju@quicinc.com>
commit 82d26c36cc
parent e2bb1288a3
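The change rests on one property of the workqueue API: schedule_delayed_work() tests the pending bit in the work item and, if the work is already queued, returns false without doing anything, so an open-coded monitor_todo flag duplicates state the work struct already owns. Below is a minimal kernel-module sketch of that behavior; it is illustrative only, not part of this commit, and the "demo" names are made up:

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static void demo_fn(struct work_struct *work)
	{
		pr_info("demo: delayed work ran\n");
	}

	static DECLARE_DELAYED_WORK(demo_work, demo_fn);

	static int __init demo_init(void)
	{
		/* First call queues the work and returns true. */
		bool first = schedule_delayed_work(&demo_work, 10 * HZ);

		/*
		 * Second call sees the pending bit already set, does
		 * nothing, and returns false; this is exactly the state
		 * that monitor_todo used to mirror by hand.
		 */
		bool again = schedule_delayed_work(&demo_work, 10 * HZ);

		pr_info("demo: first=%d again=%d\n", first, again);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		cancel_delayed_work_sync(&demo_work);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");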
@@ -3216,7 +3216,6 @@ struct kfree_rcu_cpu_work {
  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
  * @lock: Synchronize access to this structure
  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
- * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
  * @initialized: The @rcu_work fields have been initialized
  * @count: Number of objects for which GP not started
  * @bkvcache:
@@ -3241,7 +3240,6 @@ struct kfree_rcu_cpu {
 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
 	raw_spinlock_t lock;
 	struct delayed_work monitor_work;
-	bool monitor_todo;
 	bool initialized;
 	int count;
 
@@ -3421,6 +3419,18 @@ static void kfree_rcu_work(struct work_struct *work)
 	}
 }
 
+static bool
+need_offload_krc(struct kfree_rcu_cpu *krcp)
+{
+	int i;
+
+	for (i = 0; i < FREE_N_CHANNELS; i++)
+		if (krcp->bkvhead[i])
+			return true;
+
+	return !!krcp->head;
+}
+
 /*
  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
  */
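The new need_offload_krc() helper centralizes a check that was previously open-coded in two places: it returns true when any bkvhead[] channel or the head list still holds objects awaiting a grace period, i.e. when the monitor work still has something to do.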
@@ -3477,9 +3487,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
 	// of the channels that is still busy we should rearm the
 	// work to repeat an attempt. Because previous batches are
 	// still in progress.
-	if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
-		krcp->monitor_todo = false;
-	else
+	if (need_offload_krc(krcp))
 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
 
 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
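Note the inverted sense of the test above: the old code cleared monitor_todo when every channel was empty and re-armed the work otherwise, whereas the new code simply re-arms the work while need_offload_krc() holds and lets it expire quietly otherwise, since kvfree_call_rcu() can now reschedule it unconditionally.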
@@ -3667,11 +3675,8 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 	WRITE_ONCE(krcp->count, krcp->count + 1);
 
 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
-	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
-	    !krcp->monitor_todo) {
-		krcp->monitor_todo = true;
+	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
-	}
 
 unlock_return:
 	krc_this_cpu_unlock(krcp, flags);
@@ -3746,14 +3751,8 @@ void __init kfree_rcu_scheduler_running(void)
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
 		raw_spin_lock_irqsave(&krcp->lock, flags);
-		if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
-				krcp->monitor_todo) {
-			raw_spin_unlock_irqrestore(&krcp->lock, flags);
-			continue;
-		}
-		krcp->monitor_todo = true;
-		schedule_delayed_work_on(cpu, &krcp->monitor_work,
-					 KFREE_DRAIN_JIFFIES);
+		if (need_offload_krc(krcp))
+			schedule_delayed_work_on(cpu, &krcp->monitor_work, KFREE_DRAIN_JIFFIES);
 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
 	}
 }