diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index ef8d36f580fc..6b8bfd5f6217 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1726,15 +1726,40 @@ static void rcu_strict_gp_boundary(void *unused)
 	invoke_rcu_core();
 }
 
+/*
+ * Atomically increase rnp->ofl_seq to an odd number, thus 'acquiring'
+ * the sequence update.
+ */
+static inline unsigned long acquire_ofl_seq(struct rcu_node *rnp)
+{
+	while (1) {
+		unsigned long seq = READ_ONCE(rnp->ofl_seq);
+		if (!(seq & 1) && cmpxchg(&rnp->ofl_seq, seq, seq + 1) == seq)
+			return seq + 1;
+
+		cpu_relax();
+	}
+}
+
+/*
+ * Complete the previously acquired sequence update by increasing
+ * rnp->ofl_seq to an even number to release the 'lock'.
+ */
+static inline void release_ofl_seq(struct rcu_node *rnp, unsigned long seq)
+{
+	WARN_ON_ONCE(!(seq & 1));
+	WARN_ON_ONCE(cmpxchg(&rnp->ofl_seq, seq, seq + 1) != seq);
+}
+
 /*
  * Initialize a new grace period. Return false if no grace period required.
  */
 static noinline_for_stack bool rcu_gp_init(void)
 {
-	unsigned long firstseq;
 	unsigned long flags;
 	unsigned long oldmask;
 	unsigned long mask;
+	unsigned long seq;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root();
 
@@ -1778,10 +1803,7 @@ static noinline_for_stack bool rcu_gp_init(void)
 		// Wait for CPU-hotplug operations that might have
 		// started before this grace period did.
 		smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values.
-		firstseq = READ_ONCE(rnp->ofl_seq);
-		if (firstseq & 0x1)
-			while (firstseq == READ_ONCE(rnp->ofl_seq))
-				schedule_timeout_idle(1); // Can't wake unless RCU is watching.
+		seq = acquire_ofl_seq(rnp);
 		smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values.
 		raw_spin_lock(&rcu_state.ofl_lock);
 		raw_spin_lock_irq_rcu_node(rnp);
@@ -1790,6 +1812,7 @@ static noinline_for_stack bool rcu_gp_init(void)
 			/* Nothing to do on this leaf rcu_node structure. */
*/ raw_spin_unlock_irq_rcu_node(rnp); raw_spin_unlock(&rcu_state.ofl_lock); + release_ofl_seq(rnp, seq); continue; } @@ -1826,6 +1849,7 @@ static noinline_for_stack bool rcu_gp_init(void) raw_spin_unlock_irq_rcu_node(rnp); raw_spin_unlock(&rcu_state.ofl_lock); + release_ofl_seq(rnp, seq); } rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */ @@ -4235,10 +4259,12 @@ void rcu_cpu_starting(unsigned int cpu) { unsigned long flags; unsigned long mask; + unsigned long seq; struct rcu_data *rdp; struct rcu_node *rnp; bool newcpu; + rdp = per_cpu_ptr(&rcu_data, cpu); if (rdp->cpu_started) return; @@ -4246,8 +4272,7 @@ void rcu_cpu_starting(unsigned int cpu) rnp = rdp->mynode; mask = rdp->grpmask; - WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); - WARN_ON_ONCE(!(rnp->ofl_seq & 0x1)); + seq = acquire_ofl_seq(rnp); rcu_dynticks_eqs_online(); smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier(). raw_spin_lock_irqsave_rcu_node(rnp, flags); @@ -4270,8 +4295,7 @@ void rcu_cpu_starting(unsigned int cpu) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier(). - WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); - WARN_ON_ONCE(rnp->ofl_seq & 0x1); + release_ofl_seq(rnp, seq); smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ } @@ -4287,6 +4311,7 @@ void rcu_report_dead(unsigned int cpu) { unsigned long flags; unsigned long mask; + unsigned long seq; struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ @@ -4299,8 +4324,7 @@ void rcu_report_dead(unsigned int cpu) /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ mask = rdp->grpmask; - WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); - WARN_ON_ONCE(!(rnp->ofl_seq & 0x1)); + seq = acquire_ofl_seq(rnp); smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier(). 
raw_spin_lock(&rcu_state.ofl_lock); raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ @@ -4315,8 +4339,7 @@ void rcu_report_dead(unsigned int cpu) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); raw_spin_unlock(&rcu_state.ofl_lock); smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier(). - WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); - WARN_ON_ONCE(rnp->ofl_seq & 0x1); + release_ofl_seq(rnp, seq); rdp->cpu_started = false; }