Example #1
/*
 * Core SRCU state machine.  Advance callbacks from ->batch_check0 to
 * ->batch_check1 and then to ->batch_done as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
{
	int idx = 1 ^ (sp->completed & 1);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->completed for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 */

	if (rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_check1))
		return; /* no callbacks need to be advanced */

	if (!try_check_zero(sp, idx, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 already finished their first
	 * zero check and the flip, back when they were enqueued on
	 * ->batch_check0 in a previous invocation of
	 * srcu_advance_batches().  (Presumably try_check_zero() returned
	 * false during that invocation, leaving the callbacks stranded
	 * on ->batch_check1.)  The zero check that just succeeded above
	 * covered the remaining index, so they are now ready to invoke;
	 * move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);

	if (rcu_batch_empty(&sp->batch_check0))
		return; /* no callbacks need to be advanced */
	srcu_flip(sp);

	/*
	 * The callbacks in ->batch_check0 just finished their
	 * first check zero and flip, so move them to ->batch_check1
	 * for future checking on the other idx.
	 */
	rcu_batch_move(&sp->batch_check1, &sp->batch_check0);

	/*
	 * SRCU read-side critical sections are normally short, so check
	 * at least twice in quick succession after a flip.
	 */
	trycount = trycount < 2 ? 2 : trycount;
	if (!try_check_zero(sp, idx^1, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have now waited for all
	 * pre-existing readers using both idx values.  They are therefore
	 * ready to invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);
}
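
For context, srcu_advance_batches() leans on two helpers that are not shown
in these examples: try_check_zero(), which repeatedly samples the reader
counts for one index, and srcu_flip(), which switches new readers over to
the other index.  The sketch below shows roughly what they look like in the
classic SRCU implementation; srcu_readers_active_idx_check() and
SRCU_RETRY_CHECK_DELAY are assumed to be defined as in kernel/rcu/srcu.c.

/*
 * Sketch: return true once no readers remain on the given idx, giving
 * up after trycount failed samples.  (Assumes the helpers named above.)
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Sketch: flip the active index so that new srcu_read_lock() calls use
 * the other rank of per-CPU counters.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	sp->completed++;
}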
Example #2
/*
 * Move all callbacks from the rcu_batch structure specified by "from" to
 * the structure specified by "to".
 */
static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
{
	if (!rcu_batch_empty(from)) {
		*to->tail = from->head;
		to->tail = from->tail;
		rcu_batch_init(from);
	}
}
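
The O(1) splice in rcu_batch_move() relies on rcu_batch being a singly
linked list that also tracks a pointer to its final ->next field.  A sketch
of the companion definitions it assumes, modeled on the classic
implementation:

/* Singly linked list of callbacks with a tail pointer for O(1) append. */
struct rcu_batch {
	struct rcu_head *head, **tail;
};

static inline void rcu_batch_init(struct rcu_batch *b)
{
	b->head = NULL;
	b->tail = &b->head;	/* empty: tail points back at head */
}

static inline bool rcu_batch_empty(struct rcu_batch *b)
{
	return b->tail == &b->head;
}

static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
{
	*b->tail = head;	/* link after the current last element */
	b->tail = &head->next;	/* callers pre-set head->next to NULL */
}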
Example #3
/*
 * Move any new SRCU callbacks to the first stage of the SRCU grace
 * period pipeline.
 */
static void srcu_collect_new(struct srcu_struct *sp)
{
	if (!rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
		spin_unlock_irq(&sp->queue_lock);
	}
}
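
The ->batch_queue that srcu_collect_new() drains is fed by call_srcu().
A sketch of that producer side, modeled on the classic implementation
(rcu_callback_t and the ->work and ->running fields are assumed to match
kernel/rcu/srcu.c):

/*
 * Sketch: enqueue a callback on ->batch_queue and kick the workqueue
 * if grace-period processing is not already running.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
	       rcu_callback_t func)
{
	unsigned long flags;

	head->next = NULL;
	head->func = func;
	spin_lock_irqsave(&sp->queue_lock, flags);
	smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
	rcu_batch_queue(&sp->batch_queue, head);
	if (!sp->running) {
		sp->running = true;
		queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
	}
	spin_unlock_irqrestore(&sp->queue_lock, flags);
}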
Example #4
/*
 * Remove the callback at the head of the specified rcu_batch structure
 * and return a pointer to it, or return NULL if the structure is empty.
 */
static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
{
	struct rcu_head *head;

	if (rcu_batch_empty(b))
		return NULL;

	head = b->head;
	b->head = head->next;
	if (b->tail == &head->next)
		rcu_batch_init(b);

	return head;
}
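
As a usage example, this is roughly how the done batch is consumed in the
classic implementation; SRCU_CALLBACK_BATCH (the per-pass invocation limit)
is an assumption taken from kernel/rcu/srcu.c:

/*
 * Sketch: invoke up to SRCU_CALLBACK_BATCH callbacks that have waited
 * out a full grace period, dequeuing each from ->batch_done.
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
	int i;
	struct rcu_head *head;

	for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
		head = rcu_batch_dequeue(&sp->batch_done);
		if (!head)
			break;
		local_bh_disable();
		head->func(head);
		local_bh_enable();
	}
}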
Example #5
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
{
	struct rcu_synchronize rcu;
	struct rcu_head *head = &rcu.head;
	bool done = false;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	might_sleep();
	init_completion(&rcu.completion);

	head->next = NULL;
	head->func = wakeme_after_rcu;
	spin_lock_irq(&sp->queue_lock);
	smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
	if (!sp->running) {
		/* steal the processing owner */
		sp->running = true;
		rcu_batch_queue(&sp->batch_check0, head);
		spin_unlock_irq(&sp->queue_lock);

		srcu_advance_batches(sp, trycount);
		if (!rcu_batch_empty(&sp->batch_done)) {
			BUG_ON(sp->batch_done.head != head);
			rcu_batch_dequeue(&sp->batch_done);
			done = true;
		}
		/* give the processing owner to work_struct */
		srcu_reschedule(sp);
	} else {
		rcu_batch_queue(&sp->batch_queue, head);
		spin_unlock_irq(&sp->queue_lock);
	}

	if (!done) {
		wait_for_completion(&rcu.completion);
		smp_mb(); /* Caller's later accesses after GP. */
	}
}
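
The public entry points differ only in how hard they spin before giving up
and sleeping.  A sketch of the wrappers, with the trycount constants
(SYNCHRONIZE_SRCU_TRYCOUNT and SYNCHRONIZE_SRCU_EXP_TRYCOUNT) assumed to
be defined as in the classic implementation:

/* Sketch: normal grace-period wait, a small number of quick checks. */
void synchronize_srcu(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_TRYCOUNT);
}

/* Sketch: expedited variant, spins much longer before blocking. */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
}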
Example #6
/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp)
{
	bool pending = true;

	if (rcu_batch_empty(&sp->batch_done) &&
	    rcu_batch_empty(&sp->batch_check1) &&
	    rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		if (rcu_batch_empty(&sp->batch_done) &&
		    rcu_batch_empty(&sp->batch_check1) &&
		    rcu_batch_empty(&sp->batch_check0) &&
		    rcu_batch_empty(&sp->batch_queue)) {
			sp->running = false;
			pending = false;
		}
		spin_unlock_irq(&sp->queue_lock);
	}

	if (pending)
		queue_delayed_work(system_power_efficient_wq,
				   &sp->work, SRCU_INTERVAL);
}
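
Tying the pieces together, the delayed work queued above runs a handler
that drives one pass of the pipeline.  A sketch of that handler, modeled
on process_srcu() in the classic implementation (srcu_invoke_callbacks()
is sketched under Example #4):

/*
 * Sketch: one workqueue pass -- pull in new callbacks, advance the
 * pipeline, invoke whatever finished, then reschedule or go idle.
 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_collect_new(sp);
	srcu_advance_batches(sp, 1);
	srcu_invoke_callbacks(sp);
	srcu_reschedule(sp);
}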