Example #1
void sched_put_rd(struct root_domain *rd)
{
	if (!atomic_dec_and_test(&rd->refcount))
		return;

	/* Last reference dropped: free the root_domain after an RCU-sched grace period. */
	call_rcu_sched(&rd->rcu, free_rootdomain);
}
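Example #1 drops the last reference to a root_domain but does not free it right away: call_rcu_sched() defers free_rootdomain() until an RCU-sched grace period has elapsed, so any CPU still inside a preempt-disabled section may keep using the old pointer safely. A minimal sketch of the same pattern, using hypothetical names (my_obj, my_obj_put, my_obj_free_rcu are illustrative, not taken from the examples above):

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {				/* hypothetical refcounted object */
	atomic_t refcount;
	struct rcu_head rcu;
	/* ... payload ... */
};

static void my_obj_free_rcu(struct rcu_head *head)
{
	/* Runs only after every pre-existing RCU-sched reader has finished. */
	kfree(container_of(head, struct my_obj, rcu));
}

static void my_obj_put(struct my_obj *obj)
{
	if (!atomic_dec_and_test(&obj->refcount))
		return;

	/* Last reference gone: defer the actual free past a grace period. */
	call_rcu_sched(&obj->rcu, my_obj_free_rcu);
}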
Example #2
void rcu_barrier_sched(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu_sched(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
}
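rcu_barrier_sched() is the teardown-side companion of call_rcu_sched(): callers typically invoke it before destroying infrastructure that still-pending callbacks may touch, for example on module unload. A minimal sketch under that assumption (my_cache and my_module_exit are hypothetical names, not part of the examples above):

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct kmem_cache *my_cache;	/* hypothetical: objects freed via call_rcu_sched() */

static void __exit my_module_exit(void)
{
	/* Stop queueing new call_rcu_sched() callbacks first, then... */

	/* ...wait until every already-queued callback has been invoked... */
	rcu_barrier_sched();

	/* ...so none of them can touch my_cache after this point. */
	kmem_cache_destroy(my_cache);
}
module_exit(my_module_exit);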
Example #3
/**
 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-sched
 * grace period has elapsed, in other words after all currently executing
 * rcu-sched read-side critical sections have completed.   These read-side
 * critical sections are delimited by rcu_read_lock_sched() and
 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
 * local_irq_disable(), and so on may be used in place of
 * rcu_read_lock_sched().
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
void synchronize_sched(void)
{
	struct rcu_synchronize rcu;

	if (rcu_blocking_is_gp())
		return;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu_sched(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
}
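The kernel-doc above states that the read-side critical sections synchronize_sched() waits for are delimited by rcu_read_lock_sched()/rcu_read_unlock_sched(), or by any preempt-disabled region. A minimal sketch of that reader/updater pairing, with hypothetical names (struct cfg, global_cfg, read_value, replace_cfg are illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cfg {
	int value;
};

static struct cfg __rcu *global_cfg;	/* hypothetical shared pointer */

static int read_value(void)
{
	struct cfg *c;
	int v = 0;

	rcu_read_lock_sched();		/* enters a preempt-disabled region */
	c = rcu_dereference_sched(global_cfg);
	if (c)
		v = c->value;
	rcu_read_unlock_sched();
	return v;
}

static void replace_cfg(struct cfg *newcfg)
{
	struct cfg *old = rcu_dereference_protected(global_cfg, 1);

	rcu_assign_pointer(global_cfg, newcfg);
	/* Wait until every pre-existing preempt-disabled reader has finished. */
	synchronize_sched();
	kfree(old);			/* safe: no reader can still see the old pointer */
}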
Example #4
/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *type)
{
	int cpu = smp_processor_id();
	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);

	atomic_inc(&rcu_barrier_cpu_count);
	switch ((enum rcu_barrier)type) {
	case RCU_BARRIER_STD:
		call_rcu(head, rcu_barrier_callback);
		break;
	case RCU_BARRIER_BH:
		call_rcu_bh(head, rcu_barrier_callback);
		break;
	case RCU_BARRIER_SCHED:
		call_rcu_sched(head, rcu_barrier_callback);
		break;
	}
}
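Example #4 is only the per-CPU half of the old rcu_barrier machinery: each CPU posts one callback of the requested flavor and bumps rcu_barrier_cpu_count. For context, here is a reconstructed sketch of the companion pieces; the bodies below are a plausible reconstruction based on the identifiers visible above (rcu_barrier_completion is an assumed name), not a verbatim quote:

/* Reconstructed sketch, not quoted from the examples above. */
static void rcu_barrier_callback(struct rcu_head *notused)
{
	/* The last callback to run releases the waiter in _rcu_barrier(). */
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}

static void _rcu_barrier(enum rcu_barrier type)
{
	init_completion(&rcu_barrier_completion);
	/*
	 * Start the count at 1 so it cannot reach zero until every CPU
	 * has queued its callback and we drop our own reference below.
	 */
	atomic_set(&rcu_barrier_cpu_count, 1);
	on_each_cpu(rcu_barrier_func, (void *)type, 1);
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
	wait_for_completion(&rcu_barrier_completion);
}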
Example #5
void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}
Example #6
static
void discard_event(struct latency_tracker *tracker,
                   struct latency_tracker_event *s)
{
#if defined(BASEHT) && !defined(LLFREELIST)
    __wrapper_freelist_put_event(tracker, s);
#else
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0))
    /*
     * Our own call_rcu because the mainline one causes sched_wakeups
     * that we might want to instrument, causing deadlocks.
     */
    int was_empty;

    was_empty = llist_add(&s->release_llist, &tracker->to_release);
    if (was_empty)
        queue_delayed_work(tracker->tracker_call_rcu_q,
                           &tracker->tracker_call_rcu_w, 100);
#else
    call_rcu_sched(&s->urcuhead,
                   deferred_latency_tracker_put_event);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) */
#endif
}
Example #7
static
void discard_event(struct latency_tracker *tracker,
		struct latency_tracker_event *s)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0))
	/*
	 * Our own call_rcu because the mainline one causes sched_wakeups
	 * that we might want to instrument, causing deadlocks.
	 */
	int was_empty;

	/*
	 * We can reuse llist node because it is not used anymore
	 * by the parent list.
	 */
	was_empty = llist_add(&s->llist, &tracker->to_release);
	if (was_empty)
		queue_delayed_work(tracker->tracker_call_rcu_q,
				&tracker->tracker_call_rcu_w, 100);
#else
	call_rcu_sched(&s->u.urcuhead,
			deferred_latency_tracker_put_event);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) */
}
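In the pre-3.17 branch of Examples #6 and #7 the tracker builds its own deferred-release path: events are pushed onto a lock-free llist and a delayed work item drains them later, so no scheduler wakeup happens on the instrumented code path itself. The drain worker is not shown above; the sketch below is one plausible shape for it (tracker_call_rcu_workqueue and put_event are illustrative names, while to_release, tracker_call_rcu_w, and the llist member follow Example #7):

#include <linux/llist.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

/* Hypothetical drain worker for tracker->tracker_call_rcu_w. */
static void tracker_call_rcu_workqueue(struct work_struct *work)
{
	struct latency_tracker *tracker =
		container_of(to_delayed_work(work), struct latency_tracker,
			     tracker_call_rcu_w);
	struct llist_node *list;
	struct latency_tracker_event *e, *n;

	/* Grab everything queued so far in one lock-free operation. */
	list = llist_del_all(&tracker->to_release);

	/* Let any still-running preempt-disabled readers finish first. */
	synchronize_sched();

	llist_for_each_entry_safe(e, n, list, llist)
		put_event(e);		/* hypothetical release helper */
}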