// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU segmented callback lists, function definitions
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "rcu_segcblist.h"

/* Initialize simple callback list. */
void rcu_cblist_init(struct rcu_cblist *rclp)
{
	rclp->head = NULL;
	rclp->tail = &rclp->head;
	rclp->len = 0;
}
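
/*
 * Note that an empty rcu_cblist has ->tail pointing at its own ->head
 * field, so the first enqueue writes the new rcu_head directly into
 * ->head via *rclp->tail, with no empty-list special case needed.
 */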

/*
 * Enqueue an rcu_head structure onto the specified callback list.
 */
void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp)
{
	*rclp->tail = rhp;
	rclp->tail = &rhp->next;
	WRITE_ONCE(rclp->len, rclp->len + 1);
}

/*
 * Flush the second rcu_cblist structure onto the first one, obliterating
 * any contents of the first. If rhp is non-NULL, enqueue it as the sole
 * element of the second rcu_cblist structure, but ensuring that the second
 * rcu_cblist structure, if initially non-empty, always appears non-empty
 * throughout the process. If rhp is NULL, the second rcu_cblist structure
 * is instead initialized to empty.
 */
void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
			      struct rcu_cblist *srclp,
			      struct rcu_head *rhp)
{
	drclp->head = srclp->head;
	if (drclp->head)
		drclp->tail = srclp->tail;
	else
		drclp->tail = &drclp->head;
	drclp->len = srclp->len;
	if (!rhp) {
		rcu_cblist_init(srclp);
	} else {
		rhp->next = NULL;
		srclp->head = rhp;
		srclp->tail = &rhp->next;
		WRITE_ONCE(srclp->len, 1);
	}
}

/*
 * Dequeue the oldest rcu_head structure from the specified callback
 * list.
 */
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp)
{
	struct rcu_head *rhp;

	rhp = rclp->head;
	if (!rhp)
		return NULL;
	rclp->len--;
	rclp->head = rhp->next;
	if (!rclp->head)
		rclp->tail = &rclp->head;
	return rhp;
}
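
/*
 * Illustrative sketch only (hypothetical caller, not code from this file):
 * a caller that owns an rcu_cblist can drain it with something like
 *
 *	struct rcu_head *rhp;
 *
 *	while ((rhp = rcu_cblist_dequeue(&mycbs)) != NULL)
 *		rhp->func(rhp);
 *
 * where "mycbs" is a placeholder for a local rcu_cblist.
 */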

/* Set the length of an rcu_segcblist structure. */
static void rcu_segcblist_set_len(struct rcu_segcblist *rsclp, long v)
{
#ifdef CONFIG_RCU_NOCB_CPU
	atomic_long_set(&rsclp->len, v);
#else
	WRITE_ONCE(rsclp->len, v);
#endif
}

/* Get the length of a segment of the rcu_segcblist structure. */
static long rcu_segcblist_get_seglen(struct rcu_segcblist *rsclp, int seg)
{
	return READ_ONCE(rsclp->seglen[seg]);
}

/* Return number of callbacks in segmented callback list by summing seglen. */
long rcu_segcblist_n_segment_cbs(struct rcu_segcblist *rsclp)
{
	long len = 0;
	int i;

	for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
		len += rcu_segcblist_get_seglen(rsclp, i);

	return len;
}

/* Set the length of a segment of the rcu_segcblist structure. */
static void rcu_segcblist_set_seglen(struct rcu_segcblist *rsclp, int seg, long v)
{
	WRITE_ONCE(rsclp->seglen[seg], v);
}

/* Increase the numeric length of a segment by a specified amount. */
static void rcu_segcblist_add_seglen(struct rcu_segcblist *rsclp, int seg, long v)
{
	WRITE_ONCE(rsclp->seglen[seg], rsclp->seglen[seg] + v);
}

/* Move from's segment length to to's segment. */
static void rcu_segcblist_move_seglen(struct rcu_segcblist *rsclp, int from, int to)
{
	long len;

	if (from == to)
		return;

	len = rcu_segcblist_get_seglen(rsclp, from);
	if (!len)
		return;

	rcu_segcblist_add_seglen(rsclp, to, len);
	rcu_segcblist_set_seglen(rsclp, from, 0);
}

/* Increment segment's length. */
static void rcu_segcblist_inc_seglen(struct rcu_segcblist *rsclp, int seg)
{
	rcu_segcblist_add_seglen(rsclp, seg, 1);
}

/*
 * Increase the numeric length of an rcu_segcblist structure by the
 * specified amount, which can be negative. This can cause the ->len
 * field to disagree with the actual number of callbacks on the structure.
 * This increase is fully ordered with respect to the callers accesses
 * both before and after.
 *
 * So why on earth is a memory barrier required both before and after
 * the update to the ->len field???
 *
 * The reason is that rcu_barrier() locklessly samples each CPU's ->len
 * field, and if a given CPU's field is zero, avoids IPIing that CPU.
 * This can of course race with both queuing and invoking of callbacks.
 * Failing to correctly handle either of these races could result in
 * rcu_barrier() failing to IPI a CPU that actually had callbacks queued
 * which rcu_barrier() was obligated to wait on. And if rcu_barrier()
 * failed to wait on such a callback, unloading certain kernel modules
 * would result in calls to functions whose code was no longer present in
 * the kernel, for but one example.
 *
 * Therefore, ->len transitions from 1->0 and 0->1 have to be carefully
 * ordered with respect to both list modifications and the rcu_barrier().
 *
 * The queuing case is CASE 1 and the invoking case is CASE 2.
 *
 * CASE 1: Suppose that CPU 0 has no callbacks queued, but invokes
 * call_rcu() just as CPU 1 invokes rcu_barrier(). CPU 0's ->len field
 * will transition from 0->1, which is one of the transitions that must
 * be handled carefully. Without the full memory barriers after the ->len
 * update and at the beginning of rcu_barrier(), the following could happen:
 *
 * CPU 0				CPU 1
 *
 * call_rcu().
 *					rcu_barrier() sees ->len as 0.
 * set ->len = 1.
 *					rcu_barrier() does nothing.
 *					module is unloaded.
 * callback invokes unloaded function!
 *
 * With the full barriers, any case where rcu_barrier() sees ->len as 0 will
 * have unambiguously preceded the return from the racing call_rcu(), which
 * means that this call_rcu() invocation is OK to not wait on. After all,
 * you are supposed to make sure that any problematic call_rcu() invocations
 * happen before the rcu_barrier().
 *
 * CASE 2: Suppose that CPU 0 is invoking its last callback just as
 * CPU 1 invokes rcu_barrier(). CPU 0's ->len field will transition from
 * 1->0, which is one of the transitions that must be handled carefully.
 * Without the full memory barriers before the ->len update and at the
 * end of rcu_barrier(), the following could happen:
 *
 * CPU 0				CPU 1
 *
 * start invoking last callback
 * set ->len = 0 (reordered)
 *					rcu_barrier() sees ->len as 0
 *					rcu_barrier() does nothing.
 *					module is unloaded
 * callback executing after unloaded!
 *
 * With the full barriers, any case where rcu_barrier() sees ->len as 0
 * will be fully ordered after the completion of the callback function,
 * so that the module unloading operation is completely safe.
 */
void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v)
{
#ifdef CONFIG_RCU_NOCB_CPU
	smp_mb__before_atomic(); // Read header comment above.
	atomic_long_add(v, &rsclp->len);
	smp_mb__after_atomic(); // Read header comment above.
#else
	smp_mb(); // Read header comment above.
	WRITE_ONCE(rsclp->len, rsclp->len + v);
	smp_mb(); // Read header comment above.
#endif
}

/*
 * Increase the numeric length of an rcu_segcblist structure by one.
 * This can cause the ->len field to disagree with the actual number of
 * callbacks on the structure. This increase is fully ordered with respect
 * to the callers accesses both before and after.
 */
void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp)
{
	rcu_segcblist_add_len(rsclp, 1);
}

/*
 * Initialize an rcu_segcblist structure.
 */
void rcu_segcblist_init(struct rcu_segcblist *rsclp)
{
	int i;

	BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq));
	BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq));
	rsclp->head = NULL;
	for (i = 0; i < RCU_CBLIST_NSEGS; i++) {
		rsclp->tails[i] = &rsclp->head;
		rcu_segcblist_set_seglen(rsclp, i, 0);
	}
	rcu_segcblist_set_len(rsclp, 0);
	rcu_segcblist_set_flags(rsclp, SEGCBLIST_ENABLED);
}
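
/*
 * For reference, the segments are, oldest first: RCU_DONE_TAIL (callbacks
 * whose grace period has elapsed and that are ready to invoke),
 * RCU_WAIT_TAIL (waiting for the current grace period),
 * RCU_NEXT_READY_TAIL (waiting for the next grace period), and
 * RCU_NEXT_TAIL (not yet associated with any grace period). A freshly
 * initialized structure has every ->tails[] pointer aimed at ->head, so
 * all segments start out empty.
 */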

/*
 * Disable the specified rcu_segcblist structure, so that callbacks can
 * no longer be posted to it. This structure must be empty.
 */
void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
{
	WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
	WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
	rcu_segcblist_clear_flags(rsclp, SEGCBLIST_ENABLED);
}

/*
 * Mark the specified rcu_segcblist structure as offloaded (or not).
 */
void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload)
{
	if (offload)
		rcu_segcblist_set_flags(rsclp, SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED);
	else
		rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED);
}

/*
 * Does the specified rcu_segcblist structure contain callbacks that
 * are ready to be invoked?
 */
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_is_enabled(rsclp) &&
	       &rsclp->head != READ_ONCE(rsclp->tails[RCU_DONE_TAIL]);
}

/*
 * Does the specified rcu_segcblist structure contain callbacks that
 * are still pending, that is, not yet ready to be invoked?
 */
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_is_enabled(rsclp) &&
	       !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL);
}

/*
 * Return a pointer to the first callback in the specified rcu_segcblist
 * structure. This is useful for diagnostics.
 */
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp)
{
	if (rcu_segcblist_is_enabled(rsclp))
		return rsclp->head;
	return NULL;
}

/*
 * Return a pointer to the first pending callback in the specified
 * rcu_segcblist structure. This is useful just after posting a given
 * callback -- if that callback is the first pending callback, then
 * you cannot rely on someone else having already started up the required
 * grace period.
 */
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp)
{
	if (rcu_segcblist_is_enabled(rsclp))
		return *rsclp->tails[RCU_DONE_TAIL];
	return NULL;
}

/*
 * Return false if there are no CBs awaiting grace periods, otherwise,
 * return true and store the nearest waited-upon grace period into *lp.
 */
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp)
{
	if (!rcu_segcblist_pend_cbs(rsclp))
		return false;
	*lp = rsclp->gp_seq[RCU_WAIT_TAIL];
	return true;
}

/*
 * Enqueue the specified callback onto the specified rcu_segcblist
 * structure, updating accounting as needed. Note that the ->len
 * field may be accessed locklessly, hence the WRITE_ONCE().
 * The ->len field is used by rcu_barrier() and friends to determine
 * if it must post a callback on this structure, and it is OK
 * for rcu_barrier() to sometimes post callbacks needlessly, but
 * absolutely not OK for it to ever miss posting a callback.
 */
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp)
{
	rcu_segcblist_inc_len(rsclp);
	rcu_segcblist_inc_seglen(rsclp, RCU_NEXT_TAIL);
	rhp->next = NULL;
	WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp);
	WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next);
}
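
/*
 * Illustrative sketch only (hypothetical caller, not code from this file):
 *
 *	static void my_cb(struct rcu_head *rhp)
 *	{
 *		// free or finish the enclosing object here
 *	}
 *
 *	rhp->func = my_cb;
 *	rcu_segcblist_enqueue(&my_rsclp, rhp);
 *
 * where "my_cb", "rhp", and "my_rsclp" are placeholders. The callback is
 * always appended to the RCU_NEXT_TAIL segment, so it becomes ready to
 * invoke only after a grace period is assigned to it and completes.
 */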

/*
 * Entrain the specified callback onto the specified rcu_segcblist at
 * the end of the last non-empty segment. If the entire rcu_segcblist
 * is empty, make no change, but return false.
 *
 * This is intended for use by rcu_barrier()-like primitives, -not-
 * for normal grace-period use. IMPORTANT: The callback you enqueue
 * will wait for all prior callbacks, NOT necessarily for a grace
 * period. You have been warned.
 */
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp)
{
	int i;

	if (rcu_segcblist_n_cbs(rsclp) == 0)
		return false;
	rcu_segcblist_inc_len(rsclp);
	smp_mb(); /* Ensure counts are updated before callback is entrained. */
	rhp->next = NULL;
	for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--)
		if (rsclp->tails[i] != rsclp->tails[i - 1])
			break;
	rcu_segcblist_inc_seglen(rsclp, i);
	WRITE_ONCE(*rsclp->tails[i], rhp);
	for (; i <= RCU_NEXT_TAIL; i++)
		WRITE_ONCE(rsclp->tails[i], &rhp->next);
	return true;
}

/*
 * Extract only those callbacks ready to be invoked from the specified
 * rcu_segcblist structure and place them in the specified rcu_cblist
 * structure.
 */
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp)
{
	int i;

	if (!rcu_segcblist_ready_cbs(rsclp))
		return; /* Nothing to do. */
	rclp->len = rcu_segcblist_get_seglen(rsclp, RCU_DONE_TAIL);
	*rclp->tail = rsclp->head;
	WRITE_ONCE(rsclp->head, *rsclp->tails[RCU_DONE_TAIL]);
	WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
	rclp->tail = rsclp->tails[RCU_DONE_TAIL];
	for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--)
		if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL])
			WRITE_ONCE(rsclp->tails[i], &rsclp->head);
	rcu_segcblist_set_seglen(rsclp, RCU_DONE_TAIL, 0);
}

/*
 * Extract only those callbacks still pending (not yet ready to be
 * invoked) from the specified rcu_segcblist structure and place them in
 * the specified rcu_cblist structure. Note that this loses information
 * about any callbacks that might have been partway done waiting for
 * their grace period. Too bad! They will have to start over.
 */
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp)
{
	int i;

	if (!rcu_segcblist_pend_cbs(rsclp))
		return; /* Nothing to do. */
	rclp->len = 0;
	*rclp->tail = *rsclp->tails[RCU_DONE_TAIL];
	rclp->tail = rsclp->tails[RCU_NEXT_TAIL];
	WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
	for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) {
		rclp->len += rcu_segcblist_get_seglen(rsclp, i);
		WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_DONE_TAIL]);
		rcu_segcblist_set_seglen(rsclp, i, 0);
	}
}

/*
 * Insert counts from the specified rcu_cblist structure in the
 * specified rcu_segcblist structure.
 */
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp)
{
	rcu_segcblist_add_len(rsclp, rclp->len);
}

/*
 * Move callbacks from the specified rcu_cblist to the beginning of the
 * done-callbacks segment of the specified rcu_segcblist.
 */
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp)
{
	int i;

	if (!rclp->head)
		return; /* No callbacks to move. */
	rcu_segcblist_add_seglen(rsclp, RCU_DONE_TAIL, rclp->len);
	*rclp->tail = rsclp->head;
	WRITE_ONCE(rsclp->head, rclp->head);
	for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
		if (&rsclp->head == rsclp->tails[i])
			WRITE_ONCE(rsclp->tails[i], rclp->tail);
		else
			break;
	rclp->head = NULL;
	rclp->tail = &rclp->head;
}

/*
 * Move callbacks from the specified rcu_cblist to the end of the
 * new-callbacks segment of the specified rcu_segcblist.
 */
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp)
{
	if (!rclp->head)
		return; /* Nothing to do. */

	rcu_segcblist_add_seglen(rsclp, RCU_NEXT_TAIL, rclp->len);
	WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rclp->head);
	WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], rclp->tail);
}

/*
 * Advance the callbacks in the specified rcu_segcblist structure based
 * on the current value passed in for the grace-period counter.
 */
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq)
{
	int i, j;

	WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
	if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
		return;

	/*
	 * Find all callbacks whose ->gp_seq numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL segment.
	 */
	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
		if (ULONG_CMP_LT(seq, rsclp->gp_seq[i]))
			break;
		WRITE_ONCE(rsclp->tails[RCU_DONE_TAIL], rsclp->tails[i]);
		rcu_segcblist_move_seglen(rsclp, i, RCU_DONE_TAIL);
	}

	/* If no callbacks moved, nothing more need be done. */
	if (i == RCU_WAIT_TAIL)
		return;

	/* Clean up tail pointers that might have been misordered above. */
	for (j = RCU_WAIT_TAIL; j < i; j++)
		WRITE_ONCE(rsclp->tails[j], rsclp->tails[RCU_DONE_TAIL]);

	/*
	 * Callbacks moved, so there might be an empty RCU_WAIT_TAIL
	 * and a non-empty RCU_NEXT_READY_TAIL. If so, copy the
	 * RCU_NEXT_READY_TAIL segment to fill the RCU_WAIT_TAIL gap
	 * created by the now-ready-to-invoke segments.
	 */
	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
		if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL])
			break; /* No more callbacks. */
		WRITE_ONCE(rsclp->tails[j], rsclp->tails[i]);
		rcu_segcblist_move_seglen(rsclp, i, j);
		rsclp->gp_seq[j] = rsclp->gp_seq[i];
	}
}
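
/*
 * Worked example (illustrative values only): suppose ->gp_seq[RCU_WAIT_TAIL]
 * is 8, ->gp_seq[RCU_NEXT_READY_TAIL] is 12, and rcu_segcblist_advance() is
 * called with seq == 8. The RCU_WAIT_TAIL callbacks are merged into
 * RCU_DONE_TAIL, where they are now ready to invoke, and the
 * RCU_NEXT_READY_TAIL callbacks slide down into RCU_WAIT_TAIL, keeping
 * their grace-period number of 12.
 */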

/*
 * "Accelerate" callbacks based on more-accurate grace-period information.
 * The reason for this is that RCU does not synchronize the beginnings and
 * ends of grace periods, and that callbacks are posted locally. This in
 * turn means that the callbacks must be labelled conservatively early
 * on, as getting exact information would degrade both performance and
 * scalability. When more accurate grace-period information becomes
 * available, previously posted callbacks can be "accelerated", marking
 * them to complete at the end of the earlier grace period.
 *
 * This function operates on an rcu_segcblist structure, and also the
 * grace-period sequence number seq at which new callbacks would become
 * ready to invoke. Returns true if there are callbacks that won't be
 * ready to invoke until seq, false otherwise.
 */
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq)
{
	int i, j;

	WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
	if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
		return false;

	/*
	 * Find the segment preceding the oldest segment of callbacks
	 * whose ->gp_seq[] completion is at or after that passed in via
	 * "seq", skipping any empty segments. This oldest segment, along
	 * with any later segments, can be merged in with any newly arrived
	 * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq"
	 * as their ->gp_seq[] grace-period completion sequence number.
	 */
	for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--)
		if (rsclp->tails[i] != rsclp->tails[i - 1] &&
		    ULONG_CMP_LT(rsclp->gp_seq[i], seq))
			break;

	/*
	 * If all the segments contain callbacks that correspond to
	 * earlier grace-period sequence numbers than "seq", leave.
	 * Assuming that the rcu_segcblist structure has enough
	 * segments in its arrays, this can only happen if some of
	 * the non-done segments contain callbacks that really are
	 * ready to invoke. This situation will get straightened
	 * out by the next call to rcu_segcblist_advance().
	 *
	 * Also advance to the oldest segment of callbacks whose
	 * ->gp_seq[] completion is at or after that passed in via "seq",
	 * skipping any empty segments.
	 *
	 * Note that segment "i" (and any lower-numbered segments
	 * containing older callbacks) will be unaffected, and their
	 * grace-period numbers remain unchanged. For example, if i ==
	 * WAIT_TAIL, then neither WAIT_TAIL nor DONE_TAIL will be touched.
	 * Instead, the CBs in NEXT_TAIL will be merged with those in
	 * NEXT_READY_TAIL and the grace-period number of NEXT_READY_TAIL
	 * would be updated. NEXT_TAIL would then be empty.
	 */
	if (rcu_segcblist_restempty(rsclp, i) || ++i >= RCU_NEXT_TAIL)
		return false;

	/* Accounting: everything below i is about to get merged into i. */
	for (j = i + 1; j <= RCU_NEXT_TAIL; j++)
		rcu_segcblist_move_seglen(rsclp, j, i);

	/*
	 * Merge all later callbacks, including newly arrived callbacks,
	 * into the segment located by the for-loop above. Assign "seq"
	 * as the ->gp_seq[] value in order to correctly handle the case
	 * where there were no pending callbacks in the rcu_segcblist
	 * structure other than in the RCU_NEXT_TAIL segment.
	 */
	for (; i < RCU_NEXT_TAIL; i++) {
		WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_NEXT_TAIL]);
		rsclp->gp_seq[i] = seq;
	}
	return true;
}
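
/*
 * Worked example (illustrative values only): suppose RCU_WAIT_TAIL holds
 * callbacks marked with gp_seq 8, RCU_NEXT_READY_TAIL holds callbacks
 * marked with gp_seq 12, and rcu_segcblist_accelerate() is invoked with
 * seq == 12. Any callbacks sitting in RCU_NEXT_TAIL are then merged into
 * RCU_NEXT_READY_TAIL and marked to become ready once grace period 12
 * completes, instead of waiting for a later grace period of their own.
 */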

/*
 * Merge the source rcu_segcblist structure into the destination
 * rcu_segcblist structure, then initialize the source. Any pending
 * callbacks from the source get to start over. It is best to
 * advance and accelerate both the destination and the source
 * before merging.
 */
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp)
{
	struct rcu_cblist donecbs;
	struct rcu_cblist pendcbs;

	lockdep_assert_cpus_held();

	rcu_cblist_init(&donecbs);
	rcu_cblist_init(&pendcbs);

	rcu_segcblist_extract_done_cbs(src_rsclp, &donecbs);
	rcu_segcblist_extract_pend_cbs(src_rsclp, &pendcbs);

	/*
	 * No need for smp_mb() before setting the length to 0, because
	 * the CPU-hotplug lock excludes rcu_barrier().
	 */
	rcu_segcblist_set_len(src_rsclp, 0);

	rcu_segcblist_insert_count(dst_rsclp, &donecbs);
	rcu_segcblist_insert_count(dst_rsclp, &pendcbs);
	rcu_segcblist_insert_done_cbs(dst_rsclp, &donecbs);
	rcu_segcblist_insert_pend_cbs(dst_rsclp, &pendcbs);

	rcu_segcblist_init(src_rsclp);
}