Diffstat (limited to 'kernel/rcu/srcutree.c')
 kernel/rcu/srcutree.c | 489 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 247 insertions(+), 242 deletions(-)
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index a8846ed7f352..3600d88d8956 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -56,7 +56,7 @@ static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;
static void srcu_invoke_callbacks(struct work_struct *work);
-static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
+static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
@@ -92,7 +92,7 @@ do { \
* srcu_read_unlock() running against them. So if the is_static parameter
* is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
*/
-static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
+static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
{
int cpu;
int i;
@@ -103,13 +103,13 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
struct srcu_node *snp_first;
/* Work out the overall tree geometry. */
- sp->level[0] = &sp->node[0];
+ ssp->level[0] = &ssp->node[0];
for (i = 1; i < rcu_num_lvls; i++)
- sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
+ ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
rcu_init_levelspread(levelspread, num_rcu_lvl);
/* Each pass through this loop initializes one srcu_node structure. */
- srcu_for_each_node_breadth_first(sp, snp) {
+ srcu_for_each_node_breadth_first(ssp, snp) {
spin_lock_init(&ACCESS_PRIVATE(snp, lock));
WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
ARRAY_SIZE(snp->srcu_data_have_cbs));
@@ -120,17 +120,17 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
snp->srcu_gp_seq_needed_exp = 0;
snp->grplo = -1;
snp->grphi = -1;
- if (snp == &sp->node[0]) {
+ if (snp == &ssp->node[0]) {
/* Root node, special case. */
snp->srcu_parent = NULL;
continue;
}
/* Non-root node. */
- if (snp == sp->level[level + 1])
+ if (snp == ssp->level[level + 1])
level++;
- snp->srcu_parent = sp->level[level - 1] +
- (snp - sp->level[level]) /
+ snp->srcu_parent = ssp->level[level - 1] +
+ (snp - ssp->level[level]) /
levelspread[level - 1];
}
@@ -141,14 +141,14 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
ARRAY_SIZE(sdp->srcu_unlock_count));
level = rcu_num_lvls - 1;
- snp_first = sp->level[level];
+ snp_first = ssp->level[level];
for_each_possible_cpu(cpu) {
- sdp = per_cpu_ptr(sp->sda, cpu);
+ sdp = per_cpu_ptr(ssp->sda, cpu);
spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
rcu_segcblist_init(&sdp->srcu_cblist);
sdp->srcu_cblist_invoking = false;
- sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
- sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
+ sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
+ sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
sdp->mynode = &snp_first[cpu / levelspread[level]];
for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
if (snp->grplo < 0)
@@ -157,7 +157,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
}
sdp->cpu = cpu;
INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
- sdp->sp = sp;
+ sdp->ssp = ssp;
sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
if (is_static)
continue;
@@ -176,35 +176,35 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
* parameter is passed through to init_srcu_struct_nodes(), and
* also tells us that ->sda has already been wired up to srcu_data.
*/
-static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
+static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
- mutex_init(&sp->srcu_cb_mutex);
- mutex_init(&sp->srcu_gp_mutex);
- sp->srcu_idx = 0;
- sp->srcu_gp_seq = 0;
- sp->srcu_barrier_seq = 0;
- mutex_init(&sp->srcu_barrier_mutex);
- atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
- INIT_DELAYED_WORK(&sp->work, process_srcu);
+ mutex_init(&ssp->srcu_cb_mutex);
+ mutex_init(&ssp->srcu_gp_mutex);
+ ssp->srcu_idx = 0;
+ ssp->srcu_gp_seq = 0;
+ ssp->srcu_barrier_seq = 0;
+ mutex_init(&ssp->srcu_barrier_mutex);
+ atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
+ INIT_DELAYED_WORK(&ssp->work, process_srcu);
if (!is_static)
- sp->sda = alloc_percpu(struct srcu_data);
- init_srcu_struct_nodes(sp, is_static);
- sp->srcu_gp_seq_needed_exp = 0;
- sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
- smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
- return sp->sda ? 0 : -ENOMEM;
+ ssp->sda = alloc_percpu(struct srcu_data);
+ init_srcu_struct_nodes(ssp, is_static);
+ ssp->srcu_gp_seq_needed_exp = 0;
+ ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+ smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
+ return ssp->sda ? 0 : -ENOMEM;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
struct lock_class_key *key)
{
/* Don't re-initialize a lock while it is held. */
- debug_check_no_locks_freed((void *)sp, sizeof(*sp));
- lockdep_init_map(&sp->dep_map, name, key, 0);
- spin_lock_init(&ACCESS_PRIVATE(sp, lock));
- return init_srcu_struct_fields(sp, false);
+ debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
+ lockdep_init_map(&ssp->dep_map, name, key, 0);
+ spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
+ return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -212,16 +212,16 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
/**
* init_srcu_struct - initialize a sleep-RCU structure
- * @sp: structure to initialize.
+ * @ssp: structure to initialize.
*
* Must invoke this on a given srcu_struct before passing that srcu_struct
* to any other function. Each srcu_struct represents a separate domain
* of SRCU protection.
*/
-int init_srcu_struct(struct srcu_struct *sp)
+int init_srcu_struct(struct srcu_struct *ssp)
{
- spin_lock_init(&ACCESS_PRIVATE(sp, lock));
- return init_srcu_struct_fields(sp, false);
+ spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
+ return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
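
[Editor's aside: a minimal usage sketch of pairing init_srcu_struct() with cleanup_srcu_struct() in a hypothetical module. The names my_srcu, struct foo, my_data, my_init, and my_exit are illustrative and not part of this patch; later sketches below build on these declarations.]

	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/srcu.h>

	struct foo {
		struct rcu_head rh;	/* used by the call_srcu() sketch below */
		int val;
	};

	static struct srcu_struct my_srcu;
	static struct foo __rcu *my_data;

	static int __init my_init(void)
	{
		/* Dynamic init: allocates the per-CPU ->sda array. */
		return init_srcu_struct(&my_srcu);
	}

	static void __exit my_exit(void)
	{
		/* All readers and queued callbacks must be finished here. */
		cleanup_srcu_struct(&my_srcu);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");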
@@ -231,37 +231,37 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
* First-use initialization of statically allocated srcu_struct
* structure. Wiring up the combining tree is more than can be
* done with compile-time initialization, so this check is added
- * to each update-side SRCU primitive. Use sp->lock, which -is-
+ * to each update-side SRCU primitive. Use ssp->lock, which -is-
* compile-time initialized, to resolve races involving multiple
* CPUs trying to garner first-use privileges.
*/
-static void check_init_srcu_struct(struct srcu_struct *sp)
+static void check_init_srcu_struct(struct srcu_struct *ssp)
{
unsigned long flags;
/* The smp_load_acquire() pairs with the smp_store_release(). */
- if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
+ if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
return; /* Already initialized. */
- spin_lock_irqsave_rcu_node(sp, flags);
- if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
- spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_lock_irqsave_rcu_node(ssp, flags);
+ if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
return;
}
- init_srcu_struct_fields(sp, true);
- spin_unlock_irqrestore_rcu_node(sp, flags);
+ init_srcu_struct_fields(ssp, true);
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
}
/*
* Returns approximate total of the readers' ->srcu_lock_count[] values
* for the rank of per-CPU counters specified by idx.
*/
-static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
int cpu;
unsigned long sum = 0;
for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+ struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
}
@@ -272,13 +272,13 @@ static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
* Returns approximate total of the readers' ->srcu_unlock_count[] values
* for the rank of per-CPU counters specified by idx.
*/
-static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
int cpu;
unsigned long sum = 0;
for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+ struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
}
@@ -289,11 +289,11 @@ static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
* Return true if the number of pre-existing readers is determined to
* be zero.
*/
-static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
+static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
unsigned long unlocks;
- unlocks = srcu_readers_unlock_idx(sp, idx);
+ unlocks = srcu_readers_unlock_idx(ssp, idx);
/*
* Make sure that a lock is always counted if the corresponding
@@ -329,25 +329,25 @@ static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
* of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
* especially on 64-bit systems.
*/
- return srcu_readers_lock_idx(sp, idx) == unlocks;
+ return srcu_readers_lock_idx(ssp, idx) == unlocks;
}
/**
 * srcu_readers_active - returns true if there are readers, and false
* otherwise
- * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
+ * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
*
* Note that this is not an atomic primitive, and can therefore suffer
* severe errors when invoked on an active srcu_struct. That said, it
* can be useful as an error check at cleanup time.
*/
-static bool srcu_readers_active(struct srcu_struct *sp)
+static bool srcu_readers_active(struct srcu_struct *ssp)
{
int cpu;
unsigned long sum = 0;
for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+ struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
sum += READ_ONCE(cpuc->srcu_lock_count[0]);
sum += READ_ONCE(cpuc->srcu_lock_count[1]);
@@ -363,44 +363,44 @@ static bool srcu_readers_active(struct srcu_struct *sp)
* Return grace-period delay, zero if there are expedited grace
* periods pending, SRCU_INTERVAL otherwise.
*/
-static unsigned long srcu_get_delay(struct srcu_struct *sp)
+static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
- if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
- READ_ONCE(sp->srcu_gp_seq_needed_exp)))
+ if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
+ READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
return 0;
return SRCU_INTERVAL;
}
/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
+void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
{
int cpu;
- if (WARN_ON(!srcu_get_delay(sp)))
+ if (WARN_ON(!srcu_get_delay(ssp)))
return; /* Just leak it! */
- if (WARN_ON(srcu_readers_active(sp)))
+ if (WARN_ON(srcu_readers_active(ssp)))
return; /* Just leak it! */
if (quiesced) {
- if (WARN_ON(delayed_work_pending(&sp->work)))
+ if (WARN_ON(delayed_work_pending(&ssp->work)))
return; /* Just leak it! */
} else {
- flush_delayed_work(&sp->work);
+ flush_delayed_work(&ssp->work);
}
for_each_possible_cpu(cpu)
if (quiesced) {
- if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work)))
+ if (WARN_ON(delayed_work_pending(&per_cpu_ptr(ssp->sda, cpu)->work)))
return; /* Just leak it! */
} else {
- flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
+ flush_delayed_work(&per_cpu_ptr(ssp->sda, cpu)->work);
}
- if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
- WARN_ON(srcu_readers_active(sp))) {
+ if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
+ WARN_ON(srcu_readers_active(ssp))) {
pr_info("%s: Active srcu_struct %p state: %d\n",
- __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+ __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
return; /* Caller forgot to stop doing call_srcu()? */
}
- free_percpu(sp->sda);
- sp->sda = NULL;
+ free_percpu(ssp->sda);
+ ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
@@ -409,12 +409,12 @@ EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
* srcu_struct.
* Returns an index that must be passed to the matching srcu_read_unlock().
*/
-int __srcu_read_lock(struct srcu_struct *sp)
+int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
- idx = READ_ONCE(sp->srcu_idx) & 0x1;
- this_cpu_inc(sp->sda->srcu_lock_count[idx]);
+ idx = READ_ONCE(ssp->srcu_idx) & 0x1;
+ this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
smp_mb(); /* B */ /* Avoid leaking the critical section. */
return idx;
}
@@ -425,10 +425,10 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
* element of the srcu_struct. Note that this may well be a different
* CPU than that which was incremented by the corresponding srcu_read_lock().
*/
-void __srcu_read_unlock(struct srcu_struct *sp, int idx)
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
smp_mb(); /* C */ /* Avoid leaking the critical section. */
- this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
+ this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
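
[Editor's aside: callers normally reach these __-prefixed primitives through the srcu_read_lock()/srcu_read_unlock() wrappers. A hedged reader-side sketch, reusing the hypothetical my_srcu and my_data declared in the sketch above:]

	static int read_foo_val(void)
	{
		struct foo *p;
		int idx, val = -1;

		idx = srcu_read_lock(&my_srcu);		/* returns the active index */
		p = srcu_dereference(my_data, &my_srcu);
		if (p)
			val = p->val;	/* SRCU read side: sleeping is permitted */
		srcu_read_unlock(&my_srcu, idx);	/* must pass back the same idx */
		return val;
	}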
@@ -444,20 +444,22 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
/*
* Start an SRCU grace period.
*/
-static void srcu_gp_start(struct srcu_struct *sp)
+static void srcu_gp_start(struct srcu_struct *ssp)
{
- struct srcu_data *sdp = this_cpu_ptr(sp->sda);
+ struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
int state;
- lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
- WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
+ lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
+ WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
+ spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&sp->srcu_gp_seq));
+ rcu_seq_current(&ssp->srcu_gp_seq));
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
- rcu_seq_snap(&sp->srcu_gp_seq));
+ rcu_seq_snap(&ssp->srcu_gp_seq));
+ spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
- rcu_seq_start(&sp->srcu_gp_seq);
- state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+ rcu_seq_start(&ssp->srcu_gp_seq);
+ state = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}
@@ -511,7 +513,7 @@ static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
* just-completed grace period, the one corresponding to idx. If possible,
* schedule this invocation on the corresponding CPUs.
*/
-static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
+static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
unsigned long mask, unsigned long delay)
{
int cpu;
@@ -519,7 +521,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
if (!(mask & (1 << (cpu - snp->grplo))))
continue;
- srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
+ srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
}
}
@@ -532,7 +534,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
* are initiating callback invocation. This allows the ->srcu_have_cbs[]
* array to have a finite number of elements.
*/
-static void srcu_gp_end(struct srcu_struct *sp)
+static void srcu_gp_end(struct srcu_struct *ssp)
{
unsigned long cbdelay;
bool cbs;
@@ -546,28 +548,28 @@ static void srcu_gp_end(struct srcu_struct *sp)
struct srcu_node *snp;
/* Prevent more than one additional grace period. */
- mutex_lock(&sp->srcu_cb_mutex);
+ mutex_lock(&ssp->srcu_cb_mutex);
/* End the current grace period. */
- spin_lock_irq_rcu_node(sp);
- idx = rcu_seq_state(sp->srcu_gp_seq);
+ spin_lock_irq_rcu_node(ssp);
+ idx = rcu_seq_state(ssp->srcu_gp_seq);
WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
- cbdelay = srcu_get_delay(sp);
- sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
- rcu_seq_end(&sp->srcu_gp_seq);
- gpseq = rcu_seq_current(&sp->srcu_gp_seq);
- if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
- sp->srcu_gp_seq_needed_exp = gpseq;
- spin_unlock_irq_rcu_node(sp);
- mutex_unlock(&sp->srcu_gp_mutex);
+ cbdelay = srcu_get_delay(ssp);
+ ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+ rcu_seq_end(&ssp->srcu_gp_seq);
+ gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
+ if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
+ ssp->srcu_gp_seq_needed_exp = gpseq;
+ spin_unlock_irq_rcu_node(ssp);
+ mutex_unlock(&ssp->srcu_gp_mutex);
/* A new grace period can start at this point. But only one. */
/* Initiate callback invocation as needed. */
idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
- srcu_for_each_node_breadth_first(sp, snp) {
+ srcu_for_each_node_breadth_first(ssp, snp) {
spin_lock_irq_rcu_node(snp);
cbs = false;
- last_lvl = snp >= sp->level[rcu_num_lvls - 1];
+ last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
if (last_lvl)
cbs = snp->srcu_have_cbs[idx] == gpseq;
snp->srcu_have_cbs[idx] = gpseq;
@@ -578,12 +580,12 @@ static void srcu_gp_end(struct srcu_struct *sp)
snp->srcu_data_have_cbs[idx] = 0;
spin_unlock_irq_rcu_node(snp);
if (cbs)
- srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
+ srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
/* Occasionally prevent srcu_data counter wrap. */
if (!(gpseq & counter_wrap_check) && last_lvl)
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
- sdp = per_cpu_ptr(sp->sda, cpu);
+ sdp = per_cpu_ptr(ssp->sda, cpu);
spin_lock_irqsave_rcu_node(sdp, flags);
if (ULONG_CMP_GE(gpseq,
sdp->srcu_gp_seq_needed + 100))
@@ -596,18 +598,18 @@ static void srcu_gp_end(struct srcu_struct *sp)
}
/* Callback initiation done, allow grace periods after next. */
- mutex_unlock(&sp->srcu_cb_mutex);
+ mutex_unlock(&ssp->srcu_cb_mutex);
/* Start a new grace period if needed. */
- spin_lock_irq_rcu_node(sp);
- gpseq = rcu_seq_current(&sp->srcu_gp_seq);
+ spin_lock_irq_rcu_node(ssp);
+ gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
if (!rcu_seq_state(gpseq) &&
- ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
- srcu_gp_start(sp);
- spin_unlock_irq_rcu_node(sp);
- srcu_reschedule(sp, 0);
+ ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
+ srcu_gp_start(ssp);
+ spin_unlock_irq_rcu_node(ssp);
+ srcu_reschedule(ssp, 0);
} else {
- spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(ssp);
}
}
@@ -618,13 +620,13 @@ static void srcu_gp_end(struct srcu_struct *sp)
* but without expediting. To start a completely new grace period,
* whether expedited or not, use srcu_funnel_gp_start() instead.
*/
-static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
+static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
unsigned long s)
{
unsigned long flags;
for (; snp != NULL; snp = snp->srcu_parent) {
- if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
+ if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
return;
spin_lock_irqsave_rcu_node(snp, flags);
@@ -635,10 +637,10 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
spin_unlock_irqrestore_rcu_node(snp, flags);
}
- spin_lock_irqsave_rcu_node(sp, flags);
- if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
- sp->srcu_gp_seq_needed_exp = s;
- spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_lock_irqsave_rcu_node(ssp, flags);
+ if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
+ ssp->srcu_gp_seq_needed_exp = s;
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
}
/*
@@ -651,7 +653,7 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
* Note that this function also does the work of srcu_funnel_exp_start(),
* in some cases by directly invoking it.
*/
-static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
+static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
unsigned long s, bool do_norm)
{
unsigned long flags;
@@ -661,7 +663,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
/* Each pass through the loop does one level of the srcu_node tree. */
for (; snp != NULL; snp = snp->srcu_parent) {
- if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
+ if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
return; /* GP already done and CBs recorded. */
spin_lock_irqsave_rcu_node(snp, flags);
if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
@@ -676,7 +678,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
return;
}
if (!do_norm)
- srcu_funnel_exp_start(sp, snp, s);
+ srcu_funnel_exp_start(ssp, snp, s);
return;
}
snp->srcu_have_cbs[idx] = s;
@@ -688,29 +690,29 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
}
/* Top of tree, must ensure the grace period will be started. */
- spin_lock_irqsave_rcu_node(sp, flags);
- if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
+ spin_lock_irqsave_rcu_node(ssp, flags);
+ if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
/*
* Record need for grace period s. Pair with load
* acquire setting up for initialization.
*/
- smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
+ smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
}
- if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
- sp->srcu_gp_seq_needed_exp = s;
+ if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
+ ssp->srcu_gp_seq_needed_exp = s;
/* If grace period not already done and none in progress, start it. */
- if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
- rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
- WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
- srcu_gp_start(sp);
+ if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
+ rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
+ WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
+ srcu_gp_start(ssp);
if (likely(srcu_init_done))
- queue_delayed_work(rcu_gp_wq, &sp->work,
- srcu_get_delay(sp));
- else if (list_empty(&sp->work.work.entry))
- list_add(&sp->work.work.entry, &srcu_boot_list);
+ queue_delayed_work(rcu_gp_wq, &ssp->work,
+ srcu_get_delay(ssp));
+ else if (list_empty(&ssp->work.work.entry))
+ list_add(&ssp->work.work.entry, &srcu_boot_list);
}
- spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
}
/*
@@ -718,12 +720,12 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
* loop an additional time if there is an expedited grace period pending.
* The caller must ensure that ->srcu_idx is not changed while checking.
*/
-static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
+static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
for (;;) {
- if (srcu_readers_active_idx_check(sp, idx))
+ if (srcu_readers_active_idx_check(ssp, idx))
return true;
- if (--trycount + !srcu_get_delay(sp) <= 0)
+ if (--trycount + !srcu_get_delay(ssp) <= 0)
return false;
udelay(SRCU_RETRY_CHECK_DELAY);
}
@@ -734,7 +736,7 @@ static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
* use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
* us to wait for pre-existing readers in a starvation-free manner.
*/
-static void srcu_flip(struct srcu_struct *sp)
+static void srcu_flip(struct srcu_struct *ssp)
{
/*
* Ensure that if this updater saw a given reader's increment
@@ -746,7 +748,7 @@ static void srcu_flip(struct srcu_struct *sp)
*/
smp_mb(); /* E */ /* Pairs with B and C. */
- WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);
+ WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
/*
* Ensure that if the updater misses an __srcu_read_unlock()
@@ -779,7 +781,7 @@ static void srcu_flip(struct srcu_struct *sp)
 * negligible when amortized over that time period, and the extra latency
* of a needlessly non-expedited grace period is similarly negligible.
*/
-static bool srcu_might_be_idle(struct srcu_struct *sp)
+static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
unsigned long curseq;
unsigned long flags;
@@ -788,7 +790,7 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
/* If the local srcu_data structure has callbacks, not idle. */
local_irq_save(flags);
- sdp = this_cpu_ptr(sp->sda);
+ sdp = this_cpu_ptr(ssp->sda);
if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
local_irq_restore(flags);
return false; /* Callbacks already present, so not idle. */
@@ -804,17 +806,17 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
/* First, see if enough time has passed since the last GP. */
t = ktime_get_mono_fast_ns();
if (exp_holdoff == 0 ||
- time_in_range_open(t, sp->srcu_last_gp_end,
- sp->srcu_last_gp_end + exp_holdoff))
+ time_in_range_open(t, ssp->srcu_last_gp_end,
+ ssp->srcu_last_gp_end + exp_holdoff))
return false; /* Too soon after last GP. */
/* Next, check for probable idleness. */
- curseq = rcu_seq_current(&sp->srcu_gp_seq);
+ curseq = rcu_seq_current(&ssp->srcu_gp_seq);
smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
- if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
+ if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
return false; /* Grace period in progress, so not idle. */
smp_mb(); /* Order ->srcu_gp_seq with prior access. */
- if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
+ if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
return false; /* GP # changed, so not idle. */
return true; /* With reasonable probability, idle! */
}
@@ -854,16 +856,17 @@ static void srcu_leak_callback(struct rcu_head *rhp)
* srcu_read_lock(), and srcu_read_unlock() that are all passed the same
* srcu_struct structure.
*/
-void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rcu_callback_t func, bool do_norm)
{
unsigned long flags;
+ int idx;
bool needexp = false;
bool needgp = false;
unsigned long s;
struct srcu_data *sdp;
- check_init_srcu_struct(sp);
+ check_init_srcu_struct(ssp);
if (debug_rcu_head_queue(rhp)) {
/* Probable double call_srcu(), so leak the callback. */
WRITE_ONCE(rhp->func, srcu_leak_callback);
@@ -871,13 +874,14 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
return;
}
rhp->func = func;
+ idx = srcu_read_lock(ssp);
local_irq_save(flags);
- sdp = this_cpu_ptr(sp->sda);
+ sdp = this_cpu_ptr(ssp->sda);
spin_lock_rcu_node(sdp);
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&sp->srcu_gp_seq));
- s = rcu_seq_snap(&sp->srcu_gp_seq);
+ rcu_seq_current(&ssp->srcu_gp_seq));
+ s = rcu_seq_snap(&ssp->srcu_gp_seq);
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
sdp->srcu_gp_seq_needed = s;
@@ -889,14 +893,15 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
}
spin_unlock_irqrestore_rcu_node(sdp, flags);
if (needgp)
- srcu_funnel_gp_start(sp, sdp, s, do_norm);
+ srcu_funnel_gp_start(ssp, sdp, s, do_norm);
else if (needexp)
- srcu_funnel_exp_start(sp, sdp->mynode, s);
+ srcu_funnel_exp_start(ssp, sdp->mynode, s);
+ srcu_read_unlock(ssp, idx);
}
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
- * @sp: srcu_struct in queue the callback
+ * @ssp: srcu_struct on which to queue the callback
* @rhp: structure to be used for queueing the SRCU callback.
* @func: function to be invoked after the SRCU grace period
*
@@ -911,21 +916,21 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
* The callback will be invoked from process context, but must nevertheless
* be fast and must not block.
*/
-void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rcu_callback_t func)
{
- __call_srcu(sp, rhp, func, true);
+ __call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
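
[Editor's aside: a typical call_srcu() pattern, with foo_reclaim() as a hypothetical callback that frees the enclosing structure via container_of(), again assuming the my_srcu domain and struct foo from the sketches above:]

	static void foo_reclaim(struct rcu_head *rhp)
	{
		struct foo *fp = container_of(rhp, struct foo, rh);

		kfree(fp);	/* runs from process context after a grace period */
	}

	static void retire_foo(struct foo *fp)
	{
		/* fp must already be unreachable by new readers. */
		call_srcu(&my_srcu, &fp->rh, foo_reclaim);
	}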
/*
* Helper function for synchronize_srcu() and synchronize_srcu_expedited().
*/
-static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
+static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
struct rcu_synchronize rcu;
- RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
+ RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
lock_is_held(&rcu_bh_lock_map) ||
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
@@ -934,10 +939,10 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
return;
might_sleep();
- check_init_srcu_struct(sp);
+ check_init_srcu_struct(ssp);
init_completion(&rcu.completion);
init_rcu_head_on_stack(&rcu.head);
- __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
+ __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
wait_for_completion(&rcu.completion);
destroy_rcu_head_on_stack(&rcu.head);
@@ -953,7 +958,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
/**
* synchronize_srcu_expedited - Brute-force SRCU grace period
- * @sp: srcu_struct with which to synchronize.
+ * @ssp: srcu_struct with which to synchronize.
*
* Wait for an SRCU grace period to elapse, but be more aggressive about
* spinning rather than blocking when waiting.
@@ -961,15 +966,15 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
* Note that synchronize_srcu_expedited() has the same deadlock and
* memory-ordering properties as does synchronize_srcu().
*/
-void synchronize_srcu_expedited(struct srcu_struct *sp)
+void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
- __synchronize_srcu(sp, rcu_gp_is_normal());
+ __synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
/**
* synchronize_srcu - wait for prior SRCU read-side critical-section completion
- * @sp: srcu_struct with which to synchronize.
+ * @ssp: srcu_struct with which to synchronize.
*
 * Wait for the counts of both indexes to drain to zero. To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
@@ -1011,12 +1016,12 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
* SRCU must also provide it. Note that detecting idleness is heuristic
* and subject to both false positives and negatives.
*/
-void synchronize_srcu(struct srcu_struct *sp)
+void synchronize_srcu(struct srcu_struct *ssp)
{
- if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
- synchronize_srcu_expedited(sp);
+ if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
+ synchronize_srcu_expedited(ssp);
else
- __synchronize_srcu(sp, true);
+ __synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
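
[Editor's aside: the classic publish-then-wait update pattern that synchronize_srcu() enables, sketched under the assumption that my_data is only updated under a hypothetical my_lock. Note that the lock is dropped before blocking in synchronize_srcu():]

	static DEFINE_SPINLOCK(my_lock);

	static void update_foo(struct foo *new_fp)
	{
		struct foo *old_fp;

		spin_lock(&my_lock);
		old_fp = rcu_dereference_protected(my_data,
						   lockdep_is_held(&my_lock));
		rcu_assign_pointer(my_data, new_fp);	/* publish replacement */
		spin_unlock(&my_lock);

		synchronize_srcu(&my_srcu);	/* wait out pre-existing readers */
		kfree(old_fp);			/* now safe: no reader can hold it */
	}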
@@ -1026,36 +1031,36 @@ EXPORT_SYMBOL_GPL(synchronize_srcu);
static void srcu_barrier_cb(struct rcu_head *rhp)
{
struct srcu_data *sdp;
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
- sp = sdp->sp;
- if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
- complete(&sp->srcu_barrier_completion);
+ ssp = sdp->ssp;
+ if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
+ complete(&ssp->srcu_barrier_completion);
}
/**
* srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
- * @sp: srcu_struct on which to wait for in-flight callbacks.
+ * @ssp: srcu_struct on which to wait for in-flight callbacks.
*/
-void srcu_barrier(struct srcu_struct *sp)
+void srcu_barrier(struct srcu_struct *ssp)
{
int cpu;
struct srcu_data *sdp;
- unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);
+ unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
- check_init_srcu_struct(sp);
- mutex_lock(&sp->srcu_barrier_mutex);
- if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
+ check_init_srcu_struct(ssp);
+ mutex_lock(&ssp->srcu_barrier_mutex);
+ if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
smp_mb(); /* Force ordering following return. */
- mutex_unlock(&sp->srcu_barrier_mutex);
+ mutex_unlock(&ssp->srcu_barrier_mutex);
return; /* Someone else did our work for us. */
}
- rcu_seq_start(&sp->srcu_barrier_seq);
- init_completion(&sp->srcu_barrier_completion);
+ rcu_seq_start(&ssp->srcu_barrier_seq);
+ init_completion(&ssp->srcu_barrier_completion);
/* Initial count prevents reaching zero until all CBs are posted. */
- atomic_set(&sp->srcu_barrier_cpu_cnt, 1);
+ atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
/*
* Each pass through this loop enqueues a callback, but only
@@ -1066,39 +1071,39 @@ void srcu_barrier(struct srcu_struct *sp)
* grace period as the last callback already in the queue.
*/
for_each_possible_cpu(cpu) {
- sdp = per_cpu_ptr(sp->sda, cpu);
+ sdp = per_cpu_ptr(ssp->sda, cpu);
spin_lock_irq_rcu_node(sdp);
- atomic_inc(&sp->srcu_barrier_cpu_cnt);
+ atomic_inc(&ssp->srcu_barrier_cpu_cnt);
sdp->srcu_barrier_head.func = srcu_barrier_cb;
debug_rcu_head_queue(&sdp->srcu_barrier_head);
if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
&sdp->srcu_barrier_head, 0)) {
debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
- atomic_dec(&sp->srcu_barrier_cpu_cnt);
+ atomic_dec(&ssp->srcu_barrier_cpu_cnt);
}
spin_unlock_irq_rcu_node(sdp);
}
/* Remove the initial count, at which point reaching zero can happen. */
- if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
- complete(&sp->srcu_barrier_completion);
- wait_for_completion(&sp->srcu_barrier_completion);
+ if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
+ complete(&ssp->srcu_barrier_completion);
+ wait_for_completion(&ssp->srcu_barrier_completion);
- rcu_seq_end(&sp->srcu_barrier_seq);
- mutex_unlock(&sp->srcu_barrier_mutex);
+ rcu_seq_end(&ssp->srcu_barrier_seq);
+ mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
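
[Editor's aside: a common teardown ordering that srcu_barrier() supports, refining the earlier my_exit() sketch under the same hypothetical module assumptions:]

	static void __exit my_exit(void)
	{
		/* 1. Ensure no new call_srcu() invocations can occur. */
		/* 2. Wait for all already-queued callbacks to be invoked. */
		srcu_barrier(&my_srcu);
		/* 3. Only then free the srcu_struct's per-CPU resources. */
		cleanup_srcu_struct(&my_srcu);
	}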
/**
* srcu_batches_completed - return batches completed.
- * @sp: srcu_struct on which to report batch completion.
+ * @ssp: srcu_struct on which to report batch completion.
*
* Report the number of batches, correlated with, but not necessarily
* precisely the same as, the number of grace periods that have elapsed.
*/
-unsigned long srcu_batches_completed(struct srcu_struct *sp)
+unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
- return sp->srcu_idx;
+ return ssp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
@@ -1107,11 +1112,11 @@ EXPORT_SYMBOL_GPL(srcu_batches_completed);
* to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
* completed in that state.
*/
-static void srcu_advance_state(struct srcu_struct *sp)
+static void srcu_advance_state(struct srcu_struct *ssp)
{
int idx;
- mutex_lock(&sp->srcu_gp_mutex);
+ mutex_lock(&ssp->srcu_gp_mutex);
/*
* Because readers might be delayed for an extended period after
@@ -1123,47 +1128,47 @@ static void srcu_advance_state(struct srcu_struct *sp)
* The load-acquire ensures that we see the accesses performed
* by the prior grace period.
*/
- idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
+ idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
if (idx == SRCU_STATE_IDLE) {
- spin_lock_irq_rcu_node(sp);
- if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
- WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
- spin_unlock_irq_rcu_node(sp);
- mutex_unlock(&sp->srcu_gp_mutex);
+ spin_lock_irq_rcu_node(ssp);
+ if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
+ WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
+ spin_unlock_irq_rcu_node(ssp);
+ mutex_unlock(&ssp->srcu_gp_mutex);
return;
}
- idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+ idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
if (idx == SRCU_STATE_IDLE)
- srcu_gp_start(sp);
- spin_unlock_irq_rcu_node(sp);
+ srcu_gp_start(ssp);
+ spin_unlock_irq_rcu_node(ssp);
if (idx != SRCU_STATE_IDLE) {
- mutex_unlock(&sp->srcu_gp_mutex);
+ mutex_unlock(&ssp->srcu_gp_mutex);
return; /* Someone else started the grace period. */
}
}
- if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
- idx = 1 ^ (sp->srcu_idx & 1);
- if (!try_check_zero(sp, idx, 1)) {
- mutex_unlock(&sp->srcu_gp_mutex);
+ if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
+ idx = 1 ^ (ssp->srcu_idx & 1);
+ if (!try_check_zero(ssp, idx, 1)) {
+ mutex_unlock(&ssp->srcu_gp_mutex);
return; /* readers present, retry later. */
}
- srcu_flip(sp);
- rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
+ srcu_flip(ssp);
+ rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
}
- if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
+ if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
/*
* SRCU read-side critical sections are normally short,
* so check at least twice in quick succession after a flip.
*/
- idx = 1 ^ (sp->srcu_idx & 1);
- if (!try_check_zero(sp, idx, 2)) {
- mutex_unlock(&sp->srcu_gp_mutex);
+ idx = 1 ^ (ssp->srcu_idx & 1);
+ if (!try_check_zero(ssp, idx, 2)) {
+ mutex_unlock(&ssp->srcu_gp_mutex);
return; /* readers present, retry later. */
}
- srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */
+ srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
}
}
@@ -1179,14 +1184,14 @@ static void srcu_invoke_callbacks(struct work_struct *work)
struct rcu_cblist ready_cbs;
struct rcu_head *rhp;
struct srcu_data *sdp;
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
sdp = container_of(work, struct srcu_data, work.work);
- sp = sdp->sp;
+ ssp = sdp->ssp;
rcu_cblist_init(&ready_cbs);
spin_lock_irq_rcu_node(sdp);
rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&sp->srcu_gp_seq));
+ rcu_seq_current(&ssp->srcu_gp_seq));
if (sdp->srcu_cblist_invoking ||
!rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
spin_unlock_irq_rcu_node(sdp);
@@ -1212,7 +1217,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
spin_lock_irq_rcu_node(sdp);
rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
- rcu_seq_snap(&sp->srcu_gp_seq));
+ rcu_seq_snap(&ssp->srcu_gp_seq));
sdp->srcu_cblist_invoking = false;
more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
spin_unlock_irq_rcu_node(sdp);
@@ -1224,24 +1229,24 @@ static void srcu_invoke_callbacks(struct work_struct *work)
* Finished one round of SRCU grace period. Start another if there are
* more SRCU callbacks queued, otherwise put SRCU into not-running state.
*/
-static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
+static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
bool pushgp = true;
- spin_lock_irq_rcu_node(sp);
- if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
- if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
+ spin_lock_irq_rcu_node(ssp);
+ if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
+ if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
/* All requests fulfilled, time to go idle. */
pushgp = false;
}
- } else if (!rcu_seq_state(sp->srcu_gp_seq)) {
+ } else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
/* Outstanding request and no GP. Start one. */
- srcu_gp_start(sp);
+ srcu_gp_start(ssp);
}
- spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(ssp);
if (pushgp)
- queue_delayed_work(rcu_gp_wq, &sp->work, delay);
+ queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}
/*
@@ -1249,41 +1254,41 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
*/
static void process_srcu(struct work_struct *work)
{
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
- sp = container_of(work, struct srcu_struct, work.work);
+ ssp = container_of(work, struct srcu_struct, work.work);
- srcu_advance_state(sp);
- srcu_reschedule(sp, srcu_get_delay(sp));
+ srcu_advance_state(ssp);
+ srcu_reschedule(ssp, srcu_get_delay(ssp));
}
void srcutorture_get_gp_data(enum rcutorture_type test_type,
- struct srcu_struct *sp, int *flags,
+ struct srcu_struct *ssp, int *flags,
unsigned long *gp_seq)
{
if (test_type != SRCU_FLAVOR)
return;
*flags = 0;
- *gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
+ *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
-void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
+void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
int cpu;
int idx;
unsigned long s0 = 0, s1 = 0;
- idx = sp->srcu_idx & 0x1;
+ idx = ssp->srcu_idx & 0x1;
pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
- tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
+ tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
for_each_possible_cpu(cpu) {
unsigned long l0, l1;
unsigned long u0, u1;
long c0, c1;
struct srcu_data *sdp;
- sdp = per_cpu_ptr(sp->sda, cpu);
+ sdp = per_cpu_ptr(ssp->sda, cpu);
u0 = sdp->srcu_unlock_count[!idx];
u1 = sdp->srcu_unlock_count[idx];
@@ -1318,14 +1323,14 @@ early_initcall(srcu_bootup_announce);
void __init srcu_init(void)
{
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
srcu_init_done = true;
while (!list_empty(&srcu_boot_list)) {
- sp = list_first_entry(&srcu_boot_list, struct srcu_struct,
+ ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
work.work.entry);
- check_init_srcu_struct(sp);
- list_del_init(&sp->work.work.entry);
- queue_work(rcu_gp_wq, &sp->work.work);
+ check_init_srcu_struct(ssp);
+ list_del_init(&ssp->work.work.entry);
+ queue_work(rcu_gp_wq, &ssp->work.work);
}
}