Example #1
int __ipipe_notify_trap(int exception, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p;
	struct ipipe_trap_data data;
	unsigned long flags;
	int ret = 0;

	flags = hard_local_irq_save();

	/*
	 * Trap notifications are only sent for traps raised over a
	 * registered head domain.
	 */
	if (__ipipe_root_p)
		goto out;

	p = ipipe_this_cpu_head_context();
	if (likely(p->coflags & __IPIPE_TRAP_E)) {
		p->coflags |= __IPIPE_TRAP_R;
		hard_local_irq_restore(flags);
		data.exception = exception;
		data.regs = regs;
		ret = ipipe_trap_hook(&data);
		flags = hard_local_irq_save();
		p->coflags &= ~__IPIPE_TRAP_R;
	}
out:
	hard_local_irq_restore(flags);

	return ret;
}
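
For context, a hedged sketch of how an architecture fault handler might call this notifier; the entry point, trap code and fallback below are hypothetical, only __ipipe_notify_trap() and its return convention come from the example above.

/*
 * Hypothetical arch fault entry. A non-zero return from the
 * notifier means the head domain's trap hook absorbed the fault,
 * so the regular root handling must be skipped.
 */
static int arch_do_page_fault(struct pt_regs *regs)
{
	if (__ipipe_notify_trap(IPIPE_TRAP_ACCESS, regs))	/* trap code assumed */
		return 0;

	return handle_root_fault(regs);	/* hypothetical fallback */
}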
Example #2
static void __ipipe_do_work(unsigned int virq, void *cookie)
{
	struct ipipe_work_header *work;
	unsigned long flags;
	void *curr, *tail;
	int cpu;

	/*
	 * Work is dispatched in enqueuing order. This interrupt
	 * context can't migrate to another CPU.
	 */
	cpu = smp_processor_id();
	curr = per_cpu(work_buf, cpu);

	for (;;) {
		flags = hard_local_irq_save();
		tail = per_cpu(work_tail, cpu);
		if (curr == tail) {
			per_cpu(work_tail, cpu) = per_cpu(work_buf, cpu);
			hard_local_irq_restore(flags);
			return;
		}
		work = curr;
		curr += work->size;
		hard_local_irq_restore(flags);
		work->handler(work);
	}
}
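
As a usage sketch: work items embed a struct ipipe_work_header as their first member and are posted from head-domain context for deferred execution over the root domain; ipipe_post_work_root() and the item layout follow the I-pipe convention but are assumptions here, not shown above.

struct wakeup_work {
	struct ipipe_work_header header;	/* must come first */
	struct task_struct *task;		/* illustrative payload */
};

static void wakeup_handler(struct ipipe_work_header *work)
{
	struct wakeup_work *w = container_of(work, struct wakeup_work, header);

	wake_up_process(w->task);	/* runs in root-domain context */
}

static void post_wakeup(struct task_struct *task)
{
	struct wakeup_work w = {
		.header = {
			.size = sizeof(w),
			.handler = wakeup_handler,
		},
		.task = task,
	};

	/* Copies the item into the per-CPU ring drained above. */
	ipipe_post_work_root(&w, header);
}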
Example #3
File: ipipe.c Project: CSCLOG/beaglebone
void __ipipe_unlock_root(void)
{
	unsigned long *p, flags;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	/* Allow interrupt log syncing for the root domain again. */
	__clear_bit(IPIPE_SYNCDEFER_FLAG, p);
	hard_local_irq_restore(flags);
}
Example #4
void __ipipe_complete_domain_migration(void)
{
	unsigned long flags;

	ipipe_root_only();
	flags = hard_local_irq_save();
	complete_domain_migration();
	hard_local_irq_restore(flags);
}
Example #5
File: ipipe.c Project: CSCLOG/beaglebone
/*
 * We could use standard atomic bitops in the following root status
 * manipulation routines, but let's prepare for SMP support in the
 * same move, preventing CPU migration as required.
 */
void __ipipe_stall_root(void)
{
	unsigned long *p, flags;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	__set_bit(IPIPE_STALL_FLAG, p);
	hard_local_irq_restore(flags);
}
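
A minimal usage sketch of the stall/unstall pairing; __ipipe_unstall_root() is the assumed counterpart from the same patch:

static void root_protected_section(void)
{
	__ipipe_stall_root();	/* defer root interrupt delivery */

	/* ... touch state shared with root-domain IRQ handlers ... */

	/* Counterpart assumed from the same patch; it also replays
	 * any interrupts logged while the root domain was stalled. */
	__ipipe_unstall_root();
}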
Example #6
unsigned long __ipipe_spin_lock_irqsave(ipipe_spinlock_t *lock)
{
	unsigned long flags;
	int s;

	flags = hard_local_irq_save();
	arch_spin_lock(&lock->arch_lock);
	s = __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);

	return arch_mangle_irq_bits(s, flags);
}
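
The returned value is meant to be handed back to the matching unlock; a hedged sketch, assuming the IPIPE_DEFINE_SPINLOCK() initializer and the __ipipe_spin_unlock_irqrestore() counterpart from the same patch:

static IPIPE_DEFINE_SPINLOCK(demo_lock);	/* initializer macro assumed */
static LIST_HEAD(demo_list);

static void demo_add(struct list_head *item)
{
	unsigned long flags;

	flags = __ipipe_spin_lock_irqsave(&demo_lock);
	list_add_tail(item, &demo_list);
	/* Restores both the stall bit and the hard interrupt state
	 * encoded by arch_mangle_irq_bits() above. */
	__ipipe_spin_unlock_irqrestore(&demo_lock, flags);
}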
Example #7
File: ipipe.c Project: CSCLOG/beaglebone
unsigned long __ipipe_test_and_stall_root(void)
{
	unsigned long *p, flags;
	int x;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	x = __test_and_set_bit(IPIPE_STALL_FLAG, p);
	hard_local_irq_restore(flags);

	return x;
}
Example #8
int __ipipe_spin_trylock_irq(ipipe_spinlock_t *lock)
{
	unsigned long flags;

	flags = hard_local_irq_save();
	if (!arch_spin_trylock(&lock->arch_lock)) {
		hard_local_irq_restore(flags);
		return 0;
	}
	__set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);

	return 1;
}
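
A usage sketch for the trylock path; __ipipe_spin_unlock_irq() is the assumed counterpart that clears the stall bit and releases the lock:

static int demo_try_update(ipipe_spinlock_t *lock, int *counter)
{
	if (!__ipipe_spin_trylock_irq(lock))
		return -EBUSY;	/* contended, caller may retry */

	(*counter)++;
	/* Assumed counterpart: unstalls the context and unlocks. */
	__ipipe_spin_unlock_irq(lock);

	return 0;
}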
Example #9
int __ipipe_notify_kevent(int kevent, void *data)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;
	int ret = 0;

	ipipe_root_only();

	flags = hard_local_irq_save();

	p = ipipe_this_cpu_root_context();
	if (likely(p->coflags & __IPIPE_KEVENT_E)) {
		p->coflags |= __IPIPE_KEVENT_R;
		hard_local_irq_restore(flags);
		ret = ipipe_kevent_hook(kevent, data);
		flags = hard_local_irq_save();
		p->coflags &= ~__IPIPE_KEVENT_R;
	}

	hard_local_irq_restore(flags);

	return ret;
}
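
A hedged call-site sketch; the event code IPIPE_KEVENT_EXIT follows the I-pipe naming convention but is an assumption, as is the surrounding exit-path hook:

/* Hypothetical hook in a task-exit path (root domain only). */
static void notify_task_exit(struct task_struct *tsk)
{
	__ipipe_notify_kevent(IPIPE_KEVENT_EXIT, tsk);	/* event code assumed */
}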
Example #10
int __ipipe_notify_syscall(struct pt_regs *regs)
{
	struct ipipe_domain *caller_domain, *this_domain, *ipd;
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;
	int ret = 0;

	flags = hard_local_irq_save();
	caller_domain = this_domain = __ipipe_current_domain;
	ipd = ipipe_head_domain;
next:
	p = ipipe_this_cpu_context(ipd);
	if (likely(p->coflags & __IPIPE_SYSCALL_E)) {
		__ipipe_set_current_context(p);
		p->coflags |= __IPIPE_SYSCALL_R;
		hard_local_irq_restore(flags);
		ret = ipipe_syscall_hook(caller_domain, regs);
		flags = hard_local_irq_save();
		p->coflags &= ~__IPIPE_SYSCALL_R;
		if (__ipipe_current_domain != ipd)
			/* Account for domain migration. */
			this_domain = __ipipe_current_domain;
		else
			__ipipe_set_current_domain(this_domain);
	}

	if (this_domain == ipipe_root_domain &&
	    ipd != ipipe_root_domain && ret == 0) {
		ipd = ipipe_root_domain;
		goto next;
	}

	hard_local_irq_restore(flags);

	return ret;
}
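
A hedged sketch of an architecture syscall entry using the notifier; the entry point and fallback are hypothetical, while the convention that a non-zero return means a hook consumed the syscall follows the code above:

/* Hypothetical arch syscall entry (name is illustrative). */
asmlinkage long arch_syscall_entry(struct pt_regs *regs)
{
	/*
	 * Give the head domain (then possibly the root domain) a
	 * chance to handle the syscall; a non-zero result means a
	 * hook consumed it and the regular path must be skipped.
	 */
	if (__ipipe_notify_syscall(regs))
		return 0;

	return do_regular_syscall(regs);	/* hypothetical fallback */
}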
Example #11
int __ipipe_spin_trylock_irqsave(ipipe_spinlock_t *lock,
				 unsigned long *x)
{
	unsigned long flags;
	int s;

	flags = hard_local_irq_save();
	if (!arch_spin_trylock(&lock->arch_lock)) {
		hard_local_irq_restore(flags);
		return 0;
	}
	s = __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
	*x = arch_mangle_irq_bits(s, flags);

	return 1;
}
Example #12
File: ipipe.c Project: CSCLOG/beaglebone
/*
 * ipipe_trigger_irq() -- Push the interrupt to the front of the
 * pipeline, just as if it had actually been received from a hw
 * source. Also works for virtual interrupts.
 */
int ipipe_trigger_irq(unsigned irq)
{
	unsigned long flags;

#ifdef CONFIG_IPIPE_DEBUG
	if (irq >= IPIPE_NR_IRQS ||
	    (ipipe_virtual_irq_p(irq) &&
	     !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
		return -EINVAL;
#endif

	flags = hard_local_irq_save();
	__ipipe_handle_irq(irq, NULL);
	hard_local_irq_restore(flags);

	return 1;
}
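
A minimal usage sketch with a virtual interrupt; allocation and binding of the virq are assumed to have happened elsewhere (e.g. via ipipe_alloc_virq()), only the trigger call is taken from the example:

static unsigned int demo_virq;	/* assumed allocated and bound elsewhere */

static void kick_demo_virq(void)
{
	/* -EINVAL is only returned with CONFIG_IPIPE_DEBUG enabled. */
	if (ipipe_trigger_irq(demo_virq) < 0)
		printk(KERN_WARNING "demo_virq %u not mapped\n", demo_virq);
}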
Example #13
File: ipipe.c Project: CSCLOG/beaglebone
asmlinkage void __ipipe_sync_root(void)
{
	void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;

	BUG_ON(irqs_disabled());

	flags = hard_local_irq_save();

	if (irq_tail_hook)
		irq_tail_hook();

	clear_thread_flag(TIF_IRQ_SYNC);

	p = ipipe_root_cpudom_ptr();
	if (__ipipe_ipending_p(p))
		__ipipe_sync_pipeline();

	hard_local_irq_restore(flags);
}
Example #14
unsigned long ipipe_critical_enter(void (*syncfn)(void))
{
	int cpu __maybe_unused, n __maybe_unused;
	unsigned long flags, loops __maybe_unused;
	cpumask_t allbutself __maybe_unused;

	flags = hard_local_irq_save();

	if (num_online_cpus() == 1)
		return flags;

#ifdef CONFIG_SMP

	cpu = ipipe_processor_id();
	if (!cpu_test_and_set(cpu, __ipipe_cpu_lock_map)) {
		while (test_and_set_bit(0, &__ipipe_critical_lock)) {
			n = 0;
			hard_local_irq_enable();

			do
				cpu_relax();
			while (++n < cpu);

			hard_local_irq_disable();
		}
restart:
		spin_lock(&__ipipe_cpu_barrier);

		__ipipe_cpu_sync = syncfn;

		cpus_clear(__ipipe_cpu_pass_map);
		cpu_set(cpu, __ipipe_cpu_pass_map);

		/*
		 * Send the sync IPI to all processors but the current
		 * one.
		 */
		cpus_andnot(allbutself, cpu_online_map, __ipipe_cpu_pass_map);
		ipipe_send_ipi(IPIPE_CRITICAL_IPI, allbutself);
		loops = IPIPE_CRITICAL_TIMEOUT;

		while (!cpus_equal(__ipipe_cpu_sync_map, allbutself)) {
			if (--loops > 0) {
				cpu_relax();
				continue;
			}
			/*
			 * We ran into a deadlock due to a contended
			 * rwlock. Cancel this round and retry.
			 */
			__ipipe_cpu_sync = NULL;

			spin_unlock(&__ipipe_cpu_barrier);
			/*
			 * Ensure all CPUs consumed the IPI to avoid
			 * running __ipipe_cpu_sync prematurely. This
			 * usually resolves the deadlock reason too.
			 */
			while (!cpus_equal(cpu_online_map, __ipipe_cpu_pass_map))
				cpu_relax();

			goto restart;
		}
	}

	atomic_inc(&__ipipe_critical_count);

#endif	/* CONFIG_SMP */

	return flags;
}
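
A usage sketch of the global critical section; ipipe_critical_exit() is the assumed counterpart from the same patch, and the sync callback is hypothetical:

/* Hypothetical per-CPU sync callback run on the other CPUs. */
static void demo_sync(void)
{
	/* e.g. flush per-CPU state while everyone is spinning */
}

static void demo_global_update(void)
{
	unsigned long flags;

	flags = ipipe_critical_enter(demo_sync);
	/* All other online CPUs now spin in the IPI handler. */
	/* ... perform the system-wide update ... */
	ipipe_critical_exit(flags);	/* counterpart assumed */
}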