Example #1
void __ipipe_restore_head(unsigned long x) /* hw interrupt off */
{
	struct ipipe_percpu_domain_data *p = ipipe_this_cpu_head_context();

	if (x) {
#ifdef CONFIG_DEBUG_KERNEL
		static int warned;
		if (!warned &&
		    __test_and_set_bit(IPIPE_STALL_FLAG, &p->status)) {
			/*
			 * Already stalled, although ipipe_restore_head()
			 * should have detected it? Warn once.
			 */
			hard_local_irq_enable();
			warned = 1;
			printk(KERN_WARNING
				   "I-pipe: ipipe_restore_head() optimization failed.\n");
			dump_stack();
			hard_local_irq_disable();
		}
#else /* !CONFIG_DEBUG_KERNEL */
		__set_bit(IPIPE_STALL_FLAG, &p->status);
#endif /* CONFIG_DEBUG_KERNEL */
	} else {
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
		if (unlikely(__ipipe_ipending_p(p)))
			__ipipe_sync_pipeline(ipipe_head_domain);
		hard_local_irq_enable();
	}
}
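For context, __ipipe_restore_head() is the out-of-line slow path behind the head-stage flags API. Below is a minimal usage sketch, assuming the ipipe_test_and_stall_head()/ipipe_restore_head() pair exposed to co-kernels such as Xenomai; it is illustrative only, not code from the patch.

/*
 * Sketch (illustrative): a head-domain critical section. The value
 * returned by ipipe_test_and_stall_head() records the previous stall
 * state; ipipe_restore_head() puts it back, reaching the slow path
 * above only when the state actually changes.
 */
static void head_critical_section_sketch(void)
{
	unsigned long flags;

	flags = ipipe_test_and_stall_head();	/* stall the head stage */
	/* ... touch data shared with head-stage IRQ handlers ... */
	ipipe_restore_head(flags);		/* may replay pending IRQs */
}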
Example #2
asmlinkage void __sched __ipipe_preempt_schedule_irq(void)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;

	BUG_ON(!hard_irqs_disabled());
	local_irq_save(flags);
	hard_local_irq_enable();
	preempt_schedule_irq(); /* Ok, may reschedule now. */
	hard_local_irq_disable();

	/*
	 * Flush any pending interrupt that may have been logged in
	 * the window between the point where preempt_schedule_irq()
	 * stalled the root stage before returning to us, and now.
	 */
	p = ipipe_this_cpu_root_context();
	if (unlikely(__ipipe_ipending_p(p))) {
		add_preempt_count(PREEMPT_ACTIVE);
		trace_hardirqs_on();
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
		__ipipe_sync_stage();
		sub_preempt_count(PREEMPT_ACTIVE);
	}

	__ipipe_restore_root_nosync(flags);
}
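The function above juggles two distinct interrupt states: local_irq_save() only records the root stage's virtual stall bit, while the hard_local_irq_* helpers drive the real CPU interrupt mask. A short sketch of that distinction, assuming the usual I-pipe virtualization of the root domain's local_irq_* calls:

/*
 * Sketch (illustrative): under the I-pipe, the root domain's
 * local_irq_* helpers are virtualized -- they only flip the
 * IPIPE_STALL_FLAG in the per-CPU root context -- whereas the
 * hard_local_irq_* helpers act on the real CPU interrupt mask.
 */
static void virtual_vs_hard_irq_sketch(void)
{
	unsigned long vflags;

	local_irq_save(vflags);		/* virtual: stalls the root stage only */
	hard_local_irq_disable();	/* real: masks CPU interrupts */

	/* ... */

	hard_local_irq_enable();	/* CPU may take IRQs; they get logged */
	local_irq_restore(vflags);	/* restore root state; pending IRQs may replay */
}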
Example #3
/*
 * __ipipe_do_sync_stage() -- Flush the pending IRQs for the current
 * domain (and processor). This routine flushes the interrupt log (see
 * "Optimistic interrupt protection" from D. Stodolsky et al. for more
 * on the deferred interrupt scheme). Every interrupt that occurred
 * while the pipeline was stalled gets played.
 *
 * WARNING: CPU migration may occur over this routine.
 */
void __ipipe_do_sync_stage(void)
{
	struct ipipe_percpu_domain_data *p;
	struct ipipe_domain *ipd;
	int irq;

	p = __ipipe_current_context;
	ipd = p->domain;

	__set_bit(IPIPE_STALL_FLAG, &p->status);
	smp_wmb();

	if (ipd == ipipe_root_domain)
		trace_hardirqs_off();

	for (;;) {
		irq = __ipipe_next_irq(p);
		if (irq < 0)
			break;
		/*
		 * Make sure the compiler does not reorder these
		 * accesses, so that all updates to the pending-IRQ
		 * maps are done before the handler gets called.
		 */
		barrier();

		if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))
			continue;

		if (ipd != ipipe_head_domain)
			hard_local_irq_enable();

		if (likely(ipd != ipipe_root_domain)) {
			ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
			__ipipe_run_irqtail(irq);
			hard_local_irq_disable();
		} else if (ipipe_virtual_irq_p(irq)) {
			irq_enter();
			ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
			irq_exit();
			root_stall_after_handler();
			hard_local_irq_disable();
			while (__ipipe_check_root_resched())
				__ipipe_preempt_schedule_irq();
		} else {
			ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
			root_stall_after_handler();
			hard_local_irq_disable();
		}

		p = __ipipe_current_context;
	}

	if (ipd == ipipe_root_domain)
		trace_hardirqs_on();

	__clear_bit(IPIPE_STALL_FLAG, &p->status);
}
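The loop above relies on __ipipe_next_irq() to fetch the next logged interrupt for the domain. A rough sketch of such a scan over a two-level pending bitmap is given below; the field names (irqpend_himap, irqpend_lomap) and the exact layout are assumptions for illustration, not the actual helper.

/*
 * Sketch (assumption, not the actual __ipipe_next_irq()): scan a
 * two-level pending bitmap. The high map selects a word of the low
 * map, the low map selects the IRQ; the bits are consumed as the
 * IRQ is returned.
 */
static int next_irq_sketch(struct ipipe_percpu_domain_data *p)
{
	unsigned long l0b, l1b;
	int irq;

	if (p->irqpend_himap == 0)
		return -1;

	l0b = __ffs(p->irqpend_himap);		/* which low-map word */
	l1b = __ffs(p->irqpend_lomap[l0b]);	/* which bit in that word */
	irq = l0b * BITS_PER_LONG + l1b;

	__clear_bit(l1b, &p->irqpend_lomap[l0b]);
	if (p->irqpend_lomap[l0b] == 0)
		__clear_bit(l0b, &p->irqpend_himap);

	return irq;
}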
Example #4
/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
static void default_idle(void)
{
#ifdef CONFIG_IPIPE
	ipipe_suspend_domain();
#endif
	hard_local_irq_disable();
	if (!need_resched())
		idle_with_irq_disabled();

	hard_local_irq_enable();
}
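default_idle() is normally driven from the architecture's idle loop. The loop below is a hedged sketch of such a caller, not the actual Blackfin cpu_idle() code:

/*
 * Sketch (illustrative): an arch idle loop keeps entering the idle
 * handler until a reschedule is requested, then runs the scheduler.
 */
static void idle_loop_sketch(void)
{
	for (;;) {
		while (!need_resched())
			default_idle();
		schedule();
	}
}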
Example #5
File: ipipe.c  Project: CSCLOG/beaglebone
asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
{
    struct ipipe_percpu_domain_data *p;
    void (*hook)(void);
    int ret;

    WARN_ON_ONCE(irqs_disabled_hw());

    /*
     * We need to run the IRQ tail hook each time we intercept a
     * syscall, because we know that important operations might be
     * pending there (e.g. Xenomai deferred rescheduling).
     */
    hook = (__typeof__(hook))__ipipe_irq_tail_hook;
    hook();

    /*
     * This routine returns:
     * 0 -- if the syscall is to be passed to Linux;
     * >0 -- if the syscall should not be passed to Linux, and no
     * tail work should be performed;
     * <0 -- if the syscall should not be passed to Linux but the
     * tail work has to be performed (e.g. for handling signals).
     */

    if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
            !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
        return 0;

    ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);

    hard_local_irq_disable();

    /*
     * This is the end of the syscall path, so we may
     * safely assume a valid Linux task stack here.
     */
    if (current->ipipe_flags & PF_EVTRET) {
        current->ipipe_flags &= ~PF_EVTRET;
        __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
    }

    if (!__ipipe_root_domain_p)
        ret = -1;
    else {
        p = ipipe_root_cpudom_ptr();
        if (__ipipe_ipending_p(p))
            __ipipe_sync_pipeline();
    }

    hard_local_irq_enable();

    return -ret;
}
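The comment inside the function documents a three-way convention for the value handed back to the architecture's (assembly) syscall entry path. A purely illustrative C-level sketch of how a caller might act on it:

/*
 * Sketch (illustrative): interpreting __ipipe_syscall_root()'s return
 * value according to the convention documented above. The real caller
 * is the architecture's assembly syscall entry code.
 */
static void syscall_entry_sketch(struct pt_regs *regs)
{
	int r = __ipipe_syscall_root(regs);

	if (r == 0) {
		/* propagate the syscall to the regular Linux handler */
	} else if (r > 0) {
		/* handled elsewhere (e.g. by a co-kernel): skip Linux
		   and skip the tail work */
	} else {
		/* skip the Linux handler, but still run the tail work
		   (signal delivery, etc.) */
	}
}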
Example #6
void ipipe_unstall_head(void)
{
	struct ipipe_percpu_domain_data *p = ipipe_this_cpu_head_context();

	hard_local_irq_disable();

	__clear_bit(IPIPE_STALL_FLAG, &p->status);

	if (unlikely(__ipipe_ipending_p(p)))
		__ipipe_sync_pipeline(ipipe_head_domain);

	hard_local_irq_enable();
}
Example #7
void ipipe_unstall_root(void)
{
	struct ipipe_percpu_domain_data *p;

	hard_local_irq_disable();

	/* This helps catch bad usage from assembly call sites. */
	ipipe_root_only();

	p = ipipe_this_cpu_root_context();

	__clear_bit(IPIPE_STALL_FLAG, &p->status);

	if (unlikely(__ipipe_ipending_p(p)))
		__ipipe_sync_stage();

	hard_local_irq_enable();
}
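Examples #6 and #7 show the unstall side for the head and root stages; the matching stall side only sets the flag and never synchronizes. A mirror-image sketch, given as an assumption rather than the patch's actual ipipe_stall_root():

/*
 * Sketch (assumption): a plausible stall-side counterpart. It only
 * sets the per-CPU stall bit under hard IRQ protection; IRQs logged
 * meanwhile are replayed later, when the stage is unstalled again.
 */
static void stall_root_sketch(void)
{
	unsigned long flags;

	flags = hard_local_irq_save();
	__set_bit(IPIPE_STALL_FLAG, &ipipe_this_cpu_root_context()->status);
	hard_local_irq_restore(flags);
}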
Example #8
int __ipipe_switch_tail(void)
{
	int x;

#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
	hard_local_irq_disable();
#endif
	x = __ipipe_root_p;

#ifdef CONFIG_IPIPE_LEGACY
	current->state &= ~TASK_HARDENING;
#else
	complete_domain_migration();
#endif	/* !CONFIG_IPIPE_LEGACY */

#ifndef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
	if (x)
#endif
		hard_local_irq_enable();

	return !x;
}
Example #9
void __ipipe_spin_unlock_irq(ipipe_spinlock_t *lock)
{
	arch_spin_unlock(&lock->arch_lock);
	__clear_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
	hard_local_irq_enable();
}
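For symmetry, the lock side performs the converse steps in reverse order. A mirror-image sketch, given as an assumption rather than the patch's actual __ipipe_spin_lock_irq():

/*
 * Sketch (assumption): the matching lock side -- hard-disable IRQs,
 * take the raw lock, then stall the current stage; the exact reverse
 * of __ipipe_spin_unlock_irq() above.
 */
static void spin_lock_irq_sketch(ipipe_spinlock_t *lock)
{
	hard_local_irq_disable();
	arch_spin_lock(&lock->arch_lock);
	__set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
}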
Example #10
unsigned long ipipe_critical_enter(void (*syncfn)(void))
{
	int cpu __maybe_unused, n __maybe_unused;
	unsigned long flags, loops __maybe_unused;
	cpumask_t allbutself __maybe_unused;

	flags = hard_local_irq_save();

	if (num_online_cpus() == 1)
		return flags;

#ifdef CONFIG_SMP

	cpu = ipipe_processor_id();
	if (!cpu_test_and_set(cpu, __ipipe_cpu_lock_map)) {
		while (test_and_set_bit(0, &__ipipe_critical_lock)) {
			n = 0;
			hard_local_irq_enable();

			do
				cpu_relax();
			while (++n < cpu);

			hard_local_irq_disable();
		}
restart:
		spin_lock(&__ipipe_cpu_barrier);

		__ipipe_cpu_sync = syncfn;

		cpus_clear(__ipipe_cpu_pass_map);
		cpu_set(cpu, __ipipe_cpu_pass_map);

		/*
		 * Send the sync IPI to all processors but the current
		 * one.
		 */
		cpus_andnot(allbutself, cpu_online_map, __ipipe_cpu_pass_map);
		ipipe_send_ipi(IPIPE_CRITICAL_IPI, allbutself);
		loops = IPIPE_CRITICAL_TIMEOUT;

		while (!cpus_equal(__ipipe_cpu_sync_map, allbutself)) {
			if (--loops > 0) {
				cpu_relax();
				continue;
			}
			/*
			 * We ran into a deadlock due to a contended
			 * rwlock. Cancel this round and retry.
			 */
			__ipipe_cpu_sync = NULL;

			spin_unlock(&__ipipe_cpu_barrier);
			/*
			 * Ensure all CPUs consumed the IPI to avoid
			 * running __ipipe_cpu_sync prematurely. This
			 * usually resolves the cause of the deadlock
			 * as well.
			 */
			while (!cpus_equal(cpu_online_map, __ipipe_cpu_pass_map))
				cpu_relax();

			goto restart;
		}
	}

	atomic_inc(&__ipipe_critical_count);

#endif	/* CONFIG_SMP */

	return flags;
}
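ipipe_critical_enter() hands back the saved hard IRQ flags, which the matching exit path must restore after dropping the global lock. The sketch below is an assumption about that counterpart, not the patch's actual ipipe_critical_exit():

/*
 * Sketch (assumption): the matching exit path. The last CPU leaving
 * the critical section releases the barrier and the global lock; every
 * caller then restores its saved hard IRQ state.
 */
static void critical_exit_sketch(unsigned long flags)
{
#ifdef CONFIG_SMP
	if (num_online_cpus() > 1 &&
	    atomic_dec_and_test(&__ipipe_critical_count)) {
		spin_unlock(&__ipipe_cpu_barrier);
		cpu_clear(ipipe_processor_id(), __ipipe_cpu_lock_map);
		clear_bit(0, &__ipipe_critical_lock);
	}
#endif	/* CONFIG_SMP */
	hard_local_irq_restore(flags);
}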