sparc/sysrq: fix inconsistent help message of sysrq key
[cascardo/linux.git] / kernel / rcutree.c
index 0d53295..d853430 100644 (file)
@@ -224,6 +224,8 @@ static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS;
 module_param(jiffies_till_first_fqs, ulong, 0644);
 module_param(jiffies_till_next_fqs, ulong, 0644);
 
+static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
+                                 struct rcu_data *rdp);
 static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *));
 static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(int cpu);
@@ -1074,6 +1076,120 @@ static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
        return rnp->completed + 2;
 }
 
+/*
+ * Trace-event helper function for rcu_start_future_gp() and
+ * rcu_nocb_wait_gp().
+ */
+static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
+                               unsigned long c, char *s)
+{
+       trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
+                                     rnp->completed, c, rnp->level,
+                                     rnp->grplo, rnp->grphi, s);
+}
+
+/*
+ * Start some future grace period, as needed to handle newly arrived
+ * callbacks.  The required future grace periods are recorded in each
+ * rcu_node structure's ->need_future_gp field.
+ *
+ * The caller must hold the specified rcu_node structure's ->lock.
+ */
+static unsigned long __maybe_unused
+rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
+{
+       unsigned long c;
+       int i;
+       struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
+
+       /*
+        * Pick up grace-period number for new callbacks.  If this
+        * grace period is already marked as needed, return to the caller.
+        */
+       c = rcu_cbs_completed(rdp->rsp, rnp);
+       trace_rcu_future_gp(rnp, rdp, c, "Startleaf");
+       if (rnp->need_future_gp[c & 0x1]) {
+               trace_rcu_future_gp(rnp, rdp, c, "Prestartleaf");
+               return c;
+       }
+
+       /*
+        * If either this rcu_node structure or the root rcu_node structure
+        * believes that a grace period is in progress, then we must wait
+        * for the one following, which is in "c".  Because our request
+        * will be noticed at the end of the current grace period, we don't
+        * need to explicitly start one.
+        */
+       if (rnp->gpnum != rnp->completed ||
+           ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) {
+               rnp->need_future_gp[c & 0x1]++;
+               trace_rcu_future_gp(rnp, rdp, c, "Startedleaf");
+               return c;
+       }
+
+       /*
+        * There might be no grace period in progress.  If we don't already
+        * hold it, acquire the root rcu_node structure's lock in order to
+        * start one (if needed).
+        */
+       if (rnp != rnp_root)
+               raw_spin_lock(&rnp_root->lock);
+
+       /*
+        * Get a new grace-period number.  If there really is no grace
+        * period in progress, it will be smaller than the one we obtained
+        * earlier.  Adjust callbacks as needed.  Note that even no-CBs
+        * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
+        */
+       c = rcu_cbs_completed(rdp->rsp, rnp_root);
+       for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
+               if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
+                       rdp->nxtcompleted[i] = c;
+
+       /*
+        * If the need for the required grace period is already
+        * recorded, trace and leave.
+        */
+       if (rnp_root->need_future_gp[c & 0x1]) {
+               trace_rcu_future_gp(rnp, rdp, c, "Prestartedroot");
+               goto unlock_out;
+       }
+
+       /* Record the need for the future grace period. */
+       rnp_root->need_future_gp[c & 0x1]++;
+
+       /* If a grace period is not already in progress, start one. */
+       if (rnp_root->gpnum != rnp_root->completed) {
+               trace_rcu_future_gp(rnp, rdp, c, "Startedleafroot");
+       } else {
+               trace_rcu_future_gp(rnp, rdp, c, "Startedroot");
+               rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
+       }
+unlock_out:
+       if (rnp != rnp_root)
+               raw_spin_unlock(&rnp_root->lock);
+       return c;
+}
+
+/*
+ * Clean up any old requests for the just-ended grace period.  Also return
+ * whether any additional grace periods have been requested, and invoke
+ * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
+ * waiting for this grace period to complete.
+ */
+static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+{
+       int c = rnp->completed;
+       int needmore;
+       struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
+
+       rcu_nocb_gp_cleanup(rsp, rnp);
+       rnp->need_future_gp[c & 0x1] = 0;
+       needmore = rnp->need_future_gp[(c + 1) & 0x1];
+       trace_rcu_future_gp(rnp, rdp, c, needmore ? "CleanupMore" : "Cleanup");
+       return needmore;
+}
+
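The two-slot ->need_future_gp[] bookkeeping above works because rcu_cbs_completed() never returns a grace-period number more than two beyond ->completed, so at most two distinct future grace periods can be outstanding at once and the low bit of the number (c & 0x1) selects the slot. A minimal userspace sketch of the record/cleanup cycle (names and harness are illustrative, not kernel code):

/* Two-slot future-grace-period bookkeeping, boiled down to userspace. */
#include <stdio.h>

static unsigned long completed;      /* last finished grace period */
static int need_future_gp[2];        /* requests, indexed by GP parity */

/* Record a request for grace period c (at most completed + 2). */
static void record_request(unsigned long c)
{
	need_future_gp[c & 0x1]++;
}

/* Grace period c just ended: drop its requests and report whether
 * requests for the following grace period are still pending. */
static int cleanup(unsigned long c)
{
	need_future_gp[c & 0x1] = 0;
	return need_future_gp[(c + 1) & 0x1];
}

int main(void)
{
	record_request(completed + 1);   /* need the next grace period... */
	record_request(completed + 2);   /* ...and the one after it */
	completed++;                     /* grace period 1 completes */
	printf("more needed? %d\n", cleanup(completed));   /* prints 1 */
	return 0;
}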
 /*
  * If there is room, assign a ->completed number to any callbacks on
  * this CPU that have not already been assigned.  Also accelerate any
@@ -1133,6 +1249,8 @@ static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
                rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
                rdp->nxtcompleted[i] = c;
        }
+       /* Record any needed additional grace periods. */
+       rcu_start_future_gp(rnp, rdp);
 
        /* Trace depending on how much we were able to accelerate. */
        if (!*rdp->nxttail[RCU_WAIT_TAIL])
@@ -1312,9 +1430,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
                rdp = this_cpu_ptr(rsp->rda);
                rcu_preempt_check_blocked_tasks(rnp);
                rnp->qsmask = rnp->qsmaskinit;
-               rnp->gpnum = rsp->gpnum;
+               ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
                WARN_ON_ONCE(rnp->completed != rsp->completed);
-               rnp->completed = rsp->completed;
+               ACCESS_ONCE(rnp->completed) = rsp->completed;
                if (rnp == rdp->mynode)
                        rcu_start_gp_per_cpu(rsp, rnp, rdp);
                rcu_preempt_boost_start_gp(rnp);
@@ -1323,7 +1441,8 @@ static int rcu_gp_init(struct rcu_state *rsp)
                                            rnp->grphi, rnp->qsmask);
                raw_spin_unlock_irq(&rnp->lock);
 #ifdef CONFIG_PROVE_RCU_DELAY
-               if ((random32() % (rcu_num_nodes * 8)) == 0)
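+               /* Inject a rare delay, but not during early boot. */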
+               if ((prandom_u32() % (rcu_num_nodes * 8)) == 0 &&
+                   system_state == SYSTEM_RUNNING)
                        schedule_timeout_uninterruptible(2);
 #endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
                cond_resched();
@@ -1395,11 +1514,11 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
         */
        rcu_for_each_node_breadth_first(rsp, rnp) {
                raw_spin_lock_irq(&rnp->lock);
-               rnp->completed = rsp->gpnum;
+               ACCESS_ONCE(rnp->completed) = rsp->gpnum;
                rdp = this_cpu_ptr(rsp->rda);
                if (rnp == rdp->mynode)
                        __rcu_process_gp_end(rsp, rnp, rdp);
-               nocb += rcu_nocb_gp_cleanup(rsp, rnp);
+               nocb += rcu_future_gp_cleanup(rsp, rnp);
                raw_spin_unlock_irq(&rnp->lock);
                cond_resched();
        }
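The stores to ->gpnum and ->completed in this hunk and in rcu_gp_init() above are now made through ACCESS_ONCE() because other CPUs sample those fields without holding the rcu_node structure's ->lock. ACCESS_ONCE() is the kernel's volatile-cast idiom from <linux/compiler.h>; a self-contained sketch of the pattern (the struct and helper names here are illustrative):

/* The kernel's definition: force exactly one, untorn, load or store. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

struct rnp_like {                    /* stand-in for struct rcu_node */
	unsigned long gpnum;         /* most recently started GP */
	unsigned long completed;     /* most recently completed GP */
};

/* Writer (holding ->lock): publish the new value with a single store. */
static void publish_completed(struct rnp_like *rnp, unsigned long val)
{
	ACCESS_ONCE(rnp->completed) = val;
}

/* Lockless reader: one load per field; the compiler may not split,
 * repeat, or fuse these accesses, though ordering against other
 * memory accesses still needs barriers where it matters. */
static int gp_in_progress(struct rnp_like *rnp)
{
	return ACCESS_ONCE(rnp->gpnum) != ACCESS_ONCE(rnp->completed);
}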
@@ -1494,20 +1613,9 @@ static int __noreturn rcu_gp_kthread(void *arg)
  * quiescent state.
  */
 static void
-rcu_start_gp(struct rcu_state *rsp)
+rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
+                     struct rcu_data *rdp)
 {
-       struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
-       struct rcu_node *rnp = rcu_get_root(rsp);
-
-       /*
-        * If there is no grace period in progress right now, any
-        * callbacks we have up to this point will be satisfied by the
-        * next grace period.  Also, advancing the callbacks reduces the
-        * probability of false positives from cpu_needs_another_gp()
-        * resulting in pointless grace periods.  So, advance callbacks!
-        */
-       rcu_advance_cbs(rsp, rnp, rdp);
-
        if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
                /*
                 * Either we have not yet spawned the grace-period
@@ -1519,13 +1627,35 @@ rcu_start_gp(struct rcu_state *rsp)
        }
        rsp->gp_flags = RCU_GP_FLAG_INIT;
 
-       /* Ensure that CPU is aware of completion of last grace period. */
-       __rcu_process_gp_end(rsp, rdp->mynode, rdp);
-
        /* Wake up rcu_gp_kthread() to start the grace period. */
        wake_up(&rsp->gp_wq);
 }
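rcu_start_gp_advanced() hands work to the grace-period kthread by setting RCU_GP_FLAG_INIT and waking ->gp_wq; rcu_gp_kthread() sleeps in wait_event_interruptible() until the flag appears. A userspace pthreads analogue of that flag-plus-waitqueue handshake (illustrative only, not the kernel mechanism itself):

#include <pthread.h>
#include <stdio.h>

#define GP_FLAG_INIT 0x1             /* stands in for RCU_GP_FLAG_INIT */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t gp_wq = PTHREAD_COND_INITIALIZER;
static int gp_flags;                 /* stands in for rsp->gp_flags */

/* Analogue of rcu_gp_kthread(): sleep until asked to start a GP. */
static void *gp_kthread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!(gp_flags & GP_FLAG_INIT))    /* wait_event_interruptible() */
		pthread_cond_wait(&gp_wq, &lock);
	pthread_mutex_unlock(&lock);
	puts("grace period started");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, gp_kthread, NULL);
	pthread_mutex_lock(&lock);
	gp_flags = GP_FLAG_INIT;              /* rsp->gp_flags = RCU_GP_FLAG_INIT; */
	pthread_cond_signal(&gp_wq);          /* wake_up(&rsp->gp_wq); */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}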
 
+/*
+ * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
+ * callbacks.  Note that rcu_start_gp_advanced() cannot do this because it
+ * is invoked indirectly from rcu_advance_cbs(), which would result in
+ * endless recursion -- or would do so if it weren't for the self-deadlock
+ * that is encountered beforehand.
+ */
+static void
+rcu_start_gp(struct rcu_state *rsp)
+{
+       struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
+       struct rcu_node *rnp = rcu_get_root(rsp);
+
+       /*
+        * If there is no grace period in progress right now, any
+        * callbacks we have up to this point will be satisfied by the
+        * next grace period.  Also, advancing the callbacks reduces the
+        * probability of false positives from cpu_needs_another_gp()
+        * resulting in pointless grace periods.  So, advance callbacks
+        * then start the grace period!
+        */
+       rcu_advance_cbs(rsp, rnp, rdp);
+       rcu_start_gp_advanced(rsp, rnp, rdp);
+}
+
 /*
  * Report a full set of quiescent states to the specified rcu_state
  * data structure.  This involves cleaning up after the prior grace
@@ -2175,7 +2305,8 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 
 static void invoke_rcu_core(void)
 {
-       raise_softirq(RCU_SOFTIRQ);
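+       /* An offline CPU has no one to run the softirq, so don't raise it. */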
+       if (cpu_online(smp_processor_id()))
+               raise_softirq(RCU_SOFTIRQ);
 }
 
 /*
@@ -2939,11 +3070,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                break;
        case CPU_DYING:
        case CPU_DYING_FROZEN:
-               /*
-                * The whole machine is "stopped" except this CPU, so we can
-                * touch any data without introducing corruption. We send the
-                * dying CPU's callbacks to an arbitrarily chosen online CPU.
-                */
                for_each_rcu_flavor(rsp)
                        rcu_cleanup_dying_cpu(rsp);
                break;
@@ -3179,7 +3305,7 @@ void __init rcu_init(void)
        rcu_init_one(&rcu_sched_state, &rcu_sched_data);
        rcu_init_one(&rcu_bh_state, &rcu_bh_data);
        __rcu_init_preempt();
-        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+       open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
        /*
         * We don't need protection against CPU-hotplug here because