Example #1
/*
 * Check if the conditions for ending the current batch are true. If
 * so then end it.
 *
 * Must be invoked periodically, and the periodic invocations must be
 * far enough apart in time for the previous batch to become quiescent.
 * This is a few tens of microseconds unless NMIs are involved; an NMI
 * stretches out the requirement by the duration of the NMI.
 *
 * "Quiescent" means the owning cpu is no longer appending callbacks
 * and has completed execution of a trailing write-memory-barrier insn.
 */
static void __rcu_delimit_batches(struct rcu_list *pending)
{
       struct rcu_data *rd;
       struct rcu_list *plist;
       int cpu, eob, prev;

       if (!rcu_scheduler_active)
               return;

       rcu_stats.nlast++;

        /* If an NMI occurred then the previous batch may not yet be
        * quiescent.  Let's wait till it is.
        */
       if (rcu_nmi_seen) {
               rcu_nmi_seen = 0;
               rcu_stats.nmis++;
               return;
       }

       /*
        * Find out if the current batch has ended
        * (end-of-batch).
        */
       eob = 1;
       for_each_online_cpu(cpu) {
               rd = &rcu_data[cpu];
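                /*
                 * Re-sample CPUs still flagged as waiting: a CPU holds
                 * the batch open while its preempt count is above the
                 * baseline idle_cpu() reports for it, i.e. while it may
                 * still be inside a preempt-disabled region.
                 */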
               if (rd->wait) {
                       rd->wait = preempt_count_cpu(cpu) > idle_cpu(cpu);
                       if (rd->wait) {
                               eob = 0;
                               break;
                       }
               }
       }

       /*
        * Exit if batch has not ended.  But first, tickle all non-cooperating
        * CPUs if enough time has passed.
        */
       if (eob == 0) {
               if (rcu_wdog_ctr >= rcu_wdog_lim) {
                       rcu_wdog_ctr = 0;
                       rcu_stats.nforced++;
                       for_each_online_cpu(cpu) {
                               if (rcu_data[cpu].wait)
                                       force_cpu_resched(cpu);
                       }
               }
               rcu_wdog_ctr += rcu_hz_period_us;
               return;
       }
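
The "quiescent" definition in the header comment implies a matching writer-side step: the owning CPU appends its callback and then executes a write memory barrier, and only after that barrier may the delimiter retire the previous batch. Below is a minimal sketch of such an enqueue path; jrcu_call_rcu_sketch(), rcu_list_append() and the rd->curlist field are hypothetical names used for illustration, not identifiers from the code above.

/*
 * Writer-side sketch (hypothetical, not part of the patch): append a
 * callback on the owning CPU, then issue the trailing write barrier
 * that must complete before this CPU may be treated as quiescent.
 */
static void jrcu_call_rcu_sketch(struct rcu_head *head,
				 void (*func)(struct rcu_head *))
{
	struct rcu_data *rd;
	unsigned long flags;

	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	rd = &rcu_data[smp_processor_id()];
	rcu_list_append(rd->curlist, head);	/* assumed per-CPU list helper */
	smp_wmb();	/* callback visible before the CPU can look quiescent */
	local_irq_restore(flags);
}

The barrier is what the quiescence definition hinges on: until it has executed, the delimiter must keep the previous batch open.
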
Example #2
/*
 * Check if the conditions for ending the current batch are true. If
 * so then end it.
 *
 * Must be invoked periodically, and the periodic invocations must be
 * far enough apart in time for the previous batch to become quiescent.
 * This is a few tens of microseconds unless NMIs are involved; an NMI
 * stretches out the requirement by the duration of the NMI.
 *
 * "Quiescent" means the owning cpu is no longer appending callbacks
 * and has completed execution of a trailing write-memory-barrier insn.
 */
static void __rcu_delimit_batches(struct rcu_list *pending)
{
	struct rcu_data *rd;
	int cpu, eob;
	u64 rcu_now;

	/* If an NMI occurred then the previous batch may not yet be
	 * quiescent.  Let's wait till it is.
	 */
	if (rcu_nmi_seen) {
		rcu_nmi_seen = 0;
		return;
	}

	if (!rcu_scheduler_active)
		return;

	/*
	 * Find out if the current batch has ended
	 * (end-of-batch).
	 */
	eob = 1;
	for_each_online_cpu(cpu) {
		rd = &rcu_data[cpu];
		if (rd->wait) {
			eob = 0;
			break;
		}
	}

	/*
	 * Force end-of-batch if too much time (n seconds) has
	 * gone by.  The forcing method is slightly questionable,
	 * hence the WARN_ON.
	 */
	rcu_now = sched_clock();
	if (!eob && rcu_timestamp
	&& ((rcu_now - rcu_timestamp) > 3LL * NSEC_PER_SEC)) {
		rcu_stats.nforced++;
		WARN_ON_ONCE(1);
		eob = 1;
	}

	/*
	 * Just return if the current batch has not yet
	 * ended.  Also, keep track of just how long it
	 * has been since we've actually seen end-of-batch.
	 */

	if (!eob)
		return;

	rcu_timestamp = rcu_now;

	/*
	 * End the current RCU batch and start a new one.
	 */
	for_each_present_cpu(cpu) {
		rd = &rcu_data[cpu];
		rcu_end_batch(rd, pending);
		if (cpu_online(cpu)) /* wins race with offlining every time */
			rd->wait = preempt_count_cpu(cpu) > idle_cpu(cpu);
		else
			rd->wait = 0;
	}
	rcu_stats.nbatches++;
}
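
As the header comment notes, the delimiter must be driven periodically, with calls spaced far enough apart for the previous batch to quiesce. One way to wire that up is a self-rearming delayed work item, sketched below; RCU_DELIM_PERIOD_US, rcu_pending and rcu_delim_work are made-up names, and the patch itself may use a different mechanism (Example #1 hints at a period of rcu_hz_period_us microseconds).

/*
 * Hypothetical periodic driver for __rcu_delimit_batches(): re-arm a
 * delayed work item so the delimiter runs at a fixed period that is
 * long enough for the previous batch to become quiescent.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define RCU_DELIM_PERIOD_US	(20 * 1000)	/* assumed 20 ms period */

static struct rcu_list rcu_pending;		/* callbacks awaiting invocation */

static void rcu_delim_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(rcu_delim_work, rcu_delim_fn);

static void rcu_delim_fn(struct work_struct *work)
{
	__rcu_delimit_batches(&rcu_pending);
	/* armed once at init, e.g. schedule_delayed_work(&rcu_delim_work, 1) */
	schedule_delayed_work(&rcu_delim_work,
			      usecs_to_jiffies(RCU_DELIM_PERIOD_US));
}

A dedicated kthread or hrtimer would serve equally well; the only requirement stated by the header comment is that consecutive invocations be spaced by more than the quiescence latency.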