Code example #1
void __ipipe_post_work_root(struct ipipe_work_header *work)
{
	unsigned long flags;
	void *tail;
	int cpu;

	/*
	 * Subtle: we want to use the head stall/unstall operators,
	 * not the hard_* routines to protect against races. This way,
	 * we ensure that a root-based caller will trigger the virq
	 * handling immediately when unstalling the head stage, as a
	 * result of calling __ipipe_sync_pipeline() under the hood.
	 */
	flags = ipipe_test_and_stall_head();
	cpu = ipipe_processor_id();
	tail = per_cpu(work_tail, cpu);

	if (WARN_ON_ONCE((unsigned char *)tail + work->size >=
			 per_cpu(work_buf, cpu) + WORKBUF_SIZE))
		goto out;

	/* Work handling is deferred, so data has to be copied. */
	memcpy(tail, work, work->size);
	per_cpu(work_tail, cpu) = tail + work->size;
	ipipe_post_irq_root(__ipipe_work_virq);
out:
	ipipe_restore_head(flags);
}
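For context, here is a minimal usage sketch (not part of the original source): by I-pipe convention, callers embed a struct ipipe_work_header as the first member of their work descriptor and post it through the ipipe_post_work_root() helper, which routes into the function above. Since the descriptor is copied into the per-CPU buffer, it may safely live on the caller's stack. The my_work/my_handler names are invented for illustration, and the header layout (size + handler) is assumed from the stock I-pipe patch.

struct my_work {
	struct ipipe_work_header work;	/* must be the first member */
	int payload;
};

static void my_handler(struct ipipe_work_header *work)
{
	struct my_work *p = container_of(work, struct my_work, work);

	/* Deferred: runs over the root domain from the work virq. */
	printk(KERN_INFO "deferred work, payload=%d\n", p->payload);
}

static void post_it(void)
{
	struct my_work w = {
		.work = {
			.size = sizeof(w),
			.handler = my_handler,
		},
		.payload = 42,
	};

	/* Copies w into the per-CPU work buffer shown above. */
	ipipe_post_work_root(&w, work);
}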
Code example #2
/* Always called with hw interrupts off. */
void __ipipe_do_critical_sync(unsigned int irq, void *cookie)
{
	int cpu = ipipe_processor_id();

	cpu_set(cpu, __ipipe_cpu_sync_map);

	/*
	 * Now we are in sync with the lock requestor running on
	 * another CPU. Spin until it releases the global lock.
	 */
	spin_lock(&__ipipe_cpu_barrier);

	/* Got it. Now get out. */

	/* Call the sync routine if any. */
	if (__ipipe_cpu_sync)
		__ipipe_cpu_sync();

	cpu_set(cpu, __ipipe_cpu_pass_map);

	spin_unlock(&__ipipe_cpu_barrier);

	cpu_clear(cpu, __ipipe_cpu_sync_map);
}
Code example #3
int __ipipe_send_ipi(unsigned ipi, cpumask_t cpumask)
{
	unsigned long flags;
	int self;

	local_irq_save_hw(flags);

	self = cpu_isset(ipipe_processor_id(), cpumask);
	cpu_clear(ipipe_processor_id(), cpumask);

	if (!cpus_empty(cpumask))
		apic->send_IPI_mask(&cpumask, ipipe_apic_irq_vector(ipi));

	if (self)
		ipipe_trigger_irq(ipi);

	local_irq_restore_hw(flags);

	return 0;
}
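A hedged usage sketch: the Xenomai core relies on this primitive to kick remote CPUs, e.g. to propagate a rescheduling request. IPIPE_RESCHEDULE_IPI below is the vector the x86 I-pipe patch reserves for that purpose (an assumption here), and the cpumask helpers match the legacy API used throughout these examples.

static void kick_others(void)
{
	cpumask_t others;

	/* All online CPUs but the current one. */
	cpus_andnot(others, cpu_online_map,
		    cpumask_of_cpu(ipipe_processor_id()));

	/* Assumed vector: IPIPE_RESCHEDULE_IPI. */
	__ipipe_send_ipi(IPIPE_RESCHEDULE_IPI, others);
}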
Code example #4
void twd_hrtimer_debug(unsigned int irq) /* hw interrupts off */
{
	int cpu = ipipe_processor_id();

	if ((++per_cpu(irqs, cpu) % HZ) == 0) {
#if 0
		__ipipe_serial_debug("%c", 'A' + cpu);
#else
		do { } while (0);
#endif
	}
}
Code example #5
File: timer.c Project: ChunHungLiu/xenomai
void __xntimer_init(struct xntimer *timer,
		    struct xnclock *clock,
		    void (*handler)(struct xntimer *timer),
		    struct xnsched *sched,
		    int flags)
{
	spl_t s __maybe_unused;
	int cpu;

#ifdef CONFIG_XENO_OPT_EXTCLOCK
	timer->clock = clock;
#endif
	xntimerh_init(&timer->aplink);
	xntimerh_date(&timer->aplink) = XN_INFINITE;
	xntimer_set_priority(timer, XNTIMER_STDPRIO);
	timer->status = (XNTIMER_DEQUEUED|(flags & XNTIMER_INIT_MASK));
	timer->handler = handler;
	timer->interval_ns = 0;
	/*
	 * Timers are affine to a scheduler slot, which is in turn
	 * bound to a real-time CPU. If no scheduler affinity was
	 * given, assign the timer to the scheduler slot of the
	 * current CPU if real-time, otherwise default to the
	 * scheduler slot of the first real-time CPU.
	 */
	if (sched)
		timer->sched = sched;
	else {
		cpu = ipipe_processor_id();
		if (!xnsched_supported_cpu(cpu))
			cpu = first_cpu(xnsched_realtime_cpus);

		timer->sched = xnsched_struct(cpu);
	}

#ifdef CONFIG_XENO_OPT_STATS
#ifdef CONFIG_XENO_OPT_EXTCLOCK
	timer->tracker = clock;
#endif
	ksformat(timer->name, XNOBJECT_NAME_LEN, "%d/%s",
		 current->pid, current->comm);
	xntimer_reset_stats(timer);
	xnlock_get_irqsave(&nklock, s);
	list_add_tail(&timer->next_stat, &clock->timerq);
	clock->nrtimers++;
	xnvfile_touch(&clock->timer_vfile);
	xnlock_put_irqrestore(&nklock, s);
#endif /* CONFIG_XENO_OPT_STATS */
}
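A minimal sketch of arming such a timer, assuming the Xenomai 3 core clock nkclock, the XNTIMER_IGRAVITY flag and the xntimer_start() interface; my_timeout and my_timer are names invented here.

static struct xntimer my_timer;

static void my_timeout(struct xntimer *timer)
{
	/* Invoked from the clock interrupt handler, in primary mode. */
}

static void arm_it(void)
{
	/*
	 * Passing sched == NULL exercises the fallback path above:
	 * the timer binds to the current CPU if it is a real-time
	 * one, otherwise to the first real-time CPU.
	 */
	__xntimer_init(&my_timer, &nkclock, my_timeout, NULL,
		       XNTIMER_IGRAVITY);

	/* First shot in 1 ms, then every 1 ms (nanoseconds). */
	xntimer_start(&my_timer, 1000000, 1000000, XN_RELATIVE);
}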
Code example #6
void ipipe_critical_exit(unsigned long flags)
{
	if (num_online_cpus() == 1) {
		hard_local_irq_restore(flags);
		return;
	}

#ifdef CONFIG_SMP
	if (atomic_dec_and_test(&__ipipe_critical_count)) {
		spin_unlock(&__ipipe_cpu_barrier);
		while (!cpus_empty(__ipipe_cpu_sync_map))
			cpu_relax();
		cpu_clear(ipipe_processor_id(), __ipipe_cpu_lock_map);
		clear_bit(0, &__ipipe_critical_lock);
		smp_mb__after_clear_bit();
	}
#endif /* CONFIG_SMP */

	hard_local_irq_restore(flags);
}
Code example #7
unsigned long ipipe_critical_enter(void (*syncfn)(void))
{
	int cpu __maybe_unused, n __maybe_unused;
	unsigned long flags, loops __maybe_unused;
	cpumask_t allbutself __maybe_unused;

	flags = hard_local_irq_save();

	if (num_online_cpus() == 1)
		return flags;

#ifdef CONFIG_SMP

	cpu = ipipe_processor_id();
	if (!cpu_test_and_set(cpu, __ipipe_cpu_lock_map)) {
		while (test_and_set_bit(0, &__ipipe_critical_lock)) {
			n = 0;
			hard_local_irq_enable();

			do
				cpu_relax();
			while (++n < cpu);

			hard_local_irq_disable();
		}
restart:
		spin_lock(&__ipipe_cpu_barrier);

		__ipipe_cpu_sync = syncfn;

		cpus_clear(__ipipe_cpu_pass_map);
		cpu_set(cpu, __ipipe_cpu_pass_map);

		/*
		 * Send the sync IPI to all processors but the current
		 * one.
		 */
		cpus_andnot(allbutself, cpu_online_map, __ipipe_cpu_pass_map);
		ipipe_send_ipi(IPIPE_CRITICAL_IPI, allbutself);
		loops = IPIPE_CRITICAL_TIMEOUT;

		while (!cpus_equal(__ipipe_cpu_sync_map, allbutself)) {
			if (--loops > 0) {
				cpu_relax();
				continue;
			}
			/*
			 * We ran into a deadlock due to a contended
			 * rwlock. Cancel this round and retry.
			 */
			__ipipe_cpu_sync = NULL;

			spin_unlock(&__ipipe_cpu_barrier);
			/*
			 * Ensure all CPUs consumed the IPI to avoid
			 * running __ipipe_cpu_sync prematurely. This
			 * usually resolves the deadlock reason too.
			 */
			while (!cpus_equal(cpu_online_map, __ipipe_cpu_pass_map))
				cpu_relax();

			goto restart;
		}
	}

	atomic_inc(&__ipipe_critical_count);

#endif	/* CONFIG_SMP */

	return flags;
}
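Putting examples #2, #6 and #7 together, the intended pattern is a system-wide critical section: one CPU calls ipipe_critical_enter(), every other online CPU parks in __ipipe_do_critical_sync(), and ipipe_critical_exit() releases them. A hedged sketch, with patch_global_state() standing in for whatever change requires all CPUs quiesced:

static void local_sync(void)
{
	/* Optional handler each remote CPU runs while parked in
	 * __ipipe_do_critical_sync() (e.g. flush local caches). */
}

static void patch_global_state(void)
{
	unsigned long flags;

	flags = ipipe_critical_enter(local_sync);
	/*
	 * All other online CPUs now spin with hw interrupts off;
	 * it is safe to update global state here.
	 */
	ipipe_critical_exit(flags);
}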