static void print_other_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	long delta;
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);
	struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
	struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];

	/* Only let one CPU complain about others per time interval. */

	spin_lock_irqsave(&rnp->lock, flags);
	delta = jiffies - rsp->jiffies_stall;
	if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) {
		spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
	spin_unlock_irqrestore(&rnp->lock, flags);

	/* OK, time to rat on our buddy... */

	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
	for (; rnp_cur < rnp_end; rnp_cur++) {
		if (rnp_cur->qsmask == 0)
			continue;
		for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
			if (rnp_cur->qsmask & (1UL << cpu))
				printk(" %d", rnp_cur->grplo + cpu);
	}
	printk(" (detected by %d, t=%ld jiffies)\n",
	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
	force_quiescent_state(rsp, 0);  /* Kick them all. */
}
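/*
 * A minimal standalone sketch (not kernel code) of the bit-decoding idiom
 * shared by the stall-warning variants in this file: each rcu_node covers
 * CPUs grplo..grphi, and bit i of qsmask stands for CPU grplo + i, so the
 * inner loop above prints the IDs of the CPUs that still owe a quiescent
 * state.  The toy_node/main names below are hypothetical.
 */
#include <stdio.h>

struct toy_node {
	unsigned long qsmask;	/* one bit per CPU still owing a quiescent state */
	int grplo;		/* lowest CPU ID covered by this node */
	int grphi;		/* highest CPU ID covered by this node */
};

int main(void)
{
	struct toy_node rnp = { .qsmask = 0x5, .grplo = 4, .grphi = 7 };
	int cpu;

	/* Bits 0 and 2 are set, so this prints " 4 6". */
	for (cpu = 0; cpu <= rnp.grphi - rnp.grplo; cpu++)
		if (rnp.qsmask & (1UL << cpu))
			printf(" %d", rnp.grplo + cpu);
	printf("\n");
	return 0;
}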
static void print_other_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	long delta;
	unsigned long flags;
	int ndetected = 0;
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Only let one CPU complain about others per time interval. */

	raw_spin_lock_irqsave(&rnp->lock, flags);
	delta = jiffies - rsp->jiffies_stall;
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	rsp->jiffies_stall = jiffies + 3 * jiffies_till_stall_check() + 3;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	/* OK, time to rat on our buddy... */

	printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks:",
	       rsp->name);
	print_cpu_stall_info_begin();
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		ndetected += rcu_print_task_stall(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		if (rnp->qsmask == 0)
			continue;
		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
			if (rnp->qsmask & (1UL << cpu)) {
				print_cpu_stall_info(rsp, rnp->grplo + cpu);
				ndetected++;
			}
	}

	/*
	 * Now rat on any tasks that got kicked up to the root rcu_node
	 * due to CPU offlining.
	 */
	rnp = rcu_get_root(rsp);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	ndetected += rcu_print_task_stall(rnp);	/* accumulate; don't overwrite the leaf counts */
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	print_cpu_stall_info_end();
	printk(KERN_CONT "(detected by %d, t=%ld jiffies)\n",
	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
	if (ndetected == 0)
		printk(KERN_ERR "INFO: Stall ended before state dump start\n");
	else if (!trigger_all_cpu_backtrace())
		dump_stack();

	/* Complain about tasks blocking the grace period. */

	rcu_print_detail_task_stall(rsp);

	force_quiescent_state(rsp, 0);  /* Kick them all. */
}
static void print_other_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	long delta;
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Only let one CPU complain about others per time interval. */

	spin_lock_irqsave(&rnp->lock, flags);
	delta = jiffies - rsp->jiffies_stall;
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
		spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;

	/*
	 * Now rat on any tasks that got kicked up to the root rcu_node
	 * due to CPU offlining.
	 */
	rcu_print_task_stall(rnp);
	spin_unlock_irqrestore(&rnp->lock, flags);

	/* OK, time to rat on our buddy... */

	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
	rcu_for_each_leaf_node(rsp, rnp) {
		rcu_print_task_stall(rnp);
		if (rnp->qsmask == 0)
			continue;
		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
			if (rnp->qsmask & (1UL << cpu))
				printk(" %d", rnp->grplo + cpu);
	}
	printk(" (detected by %d, t=%ld jiffies)\n",
	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
	force_quiescent_state(rsp, 0);  /* Kick them all. */
}
static void print_cpu_stall(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);

	printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
	       smp_processor_id(), jiffies - rsp->gp_start);
	dump_stack();

	spin_lock_irqsave(&rnp->lock, flags);
	if ((long)(jiffies - rsp->jiffies_stall) >= 0)
		rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
	spin_unlock_irqrestore(&rnp->lock, flags);

	set_need_resched();  /* kick ourselves to get things going. */
}
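/*
 * A standalone sketch (not kernel code) of the time-comparison idiom in
 * print_cpu_stall() above: casting the unsigned difference to a signed
 * long makes "(long)(now - deadline) >= 0" mean "now is at or past
 * deadline" even after the counter wraps, which a plain "now >= deadline"
 * would get wrong.  The helper name below is hypothetical.
 */
#include <stdio.h>

static int time_at_or_after(unsigned long now, unsigned long deadline)
{
	return (long)(now - deadline) >= 0;
}

int main(void)
{
	unsigned long deadline = (unsigned long)-10;	/* 10 ticks before wraparound */

	printf("%d\n", time_at_or_after(deadline + 5, deadline));	/* 1 */
	printf("%d\n", time_at_or_after(deadline - 5, deadline));	/* 0 */
	/* deadline + 15 has wrapped past zero but still counts as "after". */
	printf("%d\n", time_at_or_after(deadline + 15, deadline));	/* 1 */
	return 0;
}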
static void print_other_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	long delta;
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Only let one CPU complain about others per time interval. */

	raw_spin_lock_irqsave(&rnp->lock, flags);
	delta = jiffies - rsp->jiffies_stall;
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;

	/*
	 * Now rat on any tasks that got kicked up to the root rcu_node
	 * due to CPU offlining.
	 */
	rcu_print_task_stall(rnp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	/* OK, time to rat on our buddy... */

	printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
	       rsp->name);
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rcu_print_task_stall(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		if (rnp->qsmask == 0)
			continue;
		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
			if (rnp->qsmask & (1UL << cpu))
				printk(" %d", rnp->grplo + cpu);
	}
	printk("} (detected by %d, t=%ld jiffies)\n",
	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
	force_quiescent_state(rsp, 0);  /* Kick them all. */
}
static void
rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
	__releases(rcu_get_root(rsp)->lock)
{
	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
	struct rcu_node *rnp = rcu_get_root(rsp);
	struct rcu_node *rnp_cur;
	struct rcu_node *rnp_end;

	if (!cpu_needs_another_gp(rsp, rdp)) {
		spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}

	/* Advance to a new grace period and initialize state. */

	rsp->gpnum++;
	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
	record_gp_stall_check_time(rsp);
	dyntick_record_completed(rsp, rsp->completed - 1);
	note_new_gpnum(rsp, rdp);

	/*
	 * Because we are first, we know that all our callbacks will
	 * be covered by this upcoming grace period, even the ones
	 * that were registered arbitrarily recently.
	 */
	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
	rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

	/* Special-case the common single-level case. */
	if (NUM_RCU_NODES == 1) {
		rnp->qsmask = rnp->qsmaskinit;
		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
		spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}

	spin_unlock(&rnp->lock);  /* leave irqs disabled. */

	/* Exclude any concurrent CPU-hotplug operations. */
	spin_lock(&rsp->onofflock);  /* irqs already disabled. */

	/*
	 * Set the quiescent-state-needed bits in all the non-leaf RCU
	 * nodes for all currently online CPUs.  This operation relies
	 * on the layout of the hierarchy within the rsp->node[] array.
	 * Note that other CPUs will access only the leaves of the
	 * hierarchy, which still indicate that no grace period is in
	 * progress.  In addition, we have excluded CPU-hotplug operations.
	 *
	 * We therefore do not need to hold any locks.  Any required
	 * memory barriers will be supplied by the locks guarding the
	 * leaf rcu_nodes in the hierarchy.
	 */
	rnp_end = rsp->level[NUM_RCU_LVLS - 1];
	for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++)
		rnp_cur->qsmask = rnp_cur->qsmaskinit;

	/*
	 * Now set up the leaf nodes.  Here we must be careful.  First,
	 * we need to hold the lock in order to exclude other CPUs, which
	 * might be contending for the leaf nodes' locks.  Second, as
	 * soon as we initialize a given leaf node, its CPUs might run
	 * up the rest of the hierarchy.  We must therefore acquire locks
	 * for each node that we touch during this stage.  (But we still
	 * are excluding CPU-hotplug operations.)
	 *
	 * Note that the grace period cannot complete until we finish
	 * the initialization process, as there will be at least one
	 * qsmask bit set in the root node until that time, namely the
	 * one corresponding to this CPU.
	 */
	rnp_end = &rsp->node[NUM_RCU_NODES];
	rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
	for (; rnp_cur < rnp_end; rnp_cur++) {
		spin_lock(&rnp_cur->lock);  /* irqs already disabled. */
		rnp_cur->qsmask = rnp_cur->qsmaskinit;
		spin_unlock(&rnp_cur->lock);  /* irqs already disabled. */
	}

	rsp->signaled = RCU_SIGNAL_INIT;  /* force_quiescent_state now OK. */
	spin_unlock_irqrestore(&rsp->onofflock, flags);
}
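/*
 * A standalone sketch (not kernel code) of the breadth-first array layout
 * that rcu_start_gp()'s two initialization loops rely on: the whole
 * rcu_node tree lives in one array, level[i] points at the first node of
 * level i, so the non-leaf nodes are &node[0] up to (but not including)
 * level[last], and the leaves run from level[last] to &node[NUM_NODES].
 * All names below (toy_node, TOY_LVLS, ...) are hypothetical; the kernel
 * additionally takes each leaf's lock during the second loop.
 */
#include <stdio.h>

#define TOY_LVLS  2
#define TOY_NODES 5	/* one root followed by four leaves */

struct toy_node {
	unsigned long qsmask;
	unsigned long qsmaskinit;
};

int main(void)
{
	struct toy_node node[TOY_NODES];
	struct toy_node *level[TOY_LVLS] = { &node[0], &node[1] };
	struct toy_node *np;
	int i;

	for (i = 0; i < TOY_NODES; i++) {
		node[i].qsmask = 0;
		node[i].qsmaskinit = 0xf;	/* pretend four CPUs are online */
	}

	/* Phase 1: non-leaf nodes; nothing else looks at these yet. */
	for (np = &node[0]; np < level[TOY_LVLS - 1]; np++)
		np->qsmask = np->qsmaskinit;

	/* Phase 2: leaf nodes, which the kernel does under each rnp->lock. */
	for (np = level[TOY_LVLS - 1]; np < &node[TOY_NODES]; np++)
		np->qsmask = np->qsmaskinit;

	for (i = 0; i < TOY_NODES; i++)
		printf("node[%d].qsmask = %#lx\n", i, node[i].qsmask);
	return 0;
}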