Example no. 1
0
int fastcall __ipipe_divert_exception(struct pt_regs *regs, int vector)
{
	/* Give the pipeline's event dispatcher first shot at the trap;
	 * a non-zero return means the trap was consumed and must not
	 * be propagated to the regular Linux handling. */
	if (__ipipe_event_monitored_p(vector) &&
	    __ipipe_dispatch_event(vector, regs) != 0)
		return 1;

	/* Realign the saved interrupt flag with the root domain state
	 * before letting Linux process the trap. */
	__fixup_if(regs);

	return 0;
}
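The __fixup_if() helper called above is not defined in any of these listings; the comments in Examples no. 3 and 4 below only say that it realigns the saved interrupt flag with the root domain state, so that __ipipe_unstall_iret_root() restores the correct stall bit on exit. A minimal sketch of what such a one-argument helper might do, assuming it mirrors the root domain's virtual stall bit into the saved EFLAGS.IF bit, is given here; the status field, IPIPE_STALL_FLAG and ipipe_processor_id() are assumptions, not taken from the listings.

/* Hypothetical sketch of the one-argument __fixup_if(); the real helper
 * ships with the I-pipe patch and may differ.  The assumed intent is to
 * make the saved EFLAGS.IF bit reflect the root domain's virtual
 * interrupt state on the current CPU. */
static inline void __fixup_if(struct pt_regs *regs)
{
	if (test_bit(IPIPE_STALL_FLAG,
		     &ipipe_root_domain->cpudata[ipipe_processor_id()].status))
		regs->eflags &= ~X86_EFLAGS_IF;	/* root stalled: interrupts virtually off */
	else
		regs->eflags |= X86_EFLAGS_IF;	/* root unstalled: interrupts virtually on */
}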
Example no. 2
0
asmlinkage int __ipipe_handle_exception(int vector, struct pt_regs *regs, long error_code)
{
	/* Run the saved Linux exception handler only when no pipeline
	 * domain consumed the trap through the event dispatcher. */
	if (!__ipipe_event_monitored_p(vector) ||
	    __ipipe_dispatch_event(vector, regs) == 0) {
		__ipipe_exptr handler = __ipipe_std_extable[vector];
		handler(regs, error_code);
		__fixup_if(regs);
		return 0;
	}

	return 1;
}
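Example no. 2 uses __ipipe_exptr and __ipipe_std_extable without showing their declarations. Inferred purely from the call handler(regs, error_code) above, they presumably look roughly like the following sketch; the exact return type and the array dimension are assumptions.

/* Assumed shape of the trap redirection table used in Example no. 2:
 * one pointer to the original Linux exception handler per trap vector. */
typedef void (*__ipipe_exptr)(struct pt_regs *regs, long error_code);

extern __ipipe_exptr __ipipe_std_extable[];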
Example no. 3
0
asmlinkage int __ipipe_syscall_root(struct pt_regs regs)
{
	ipipe_declare_cpuid;
	unsigned long flags;

	__fixup_if(&regs);

	/* This routine returns:
	 *   0 -- if the syscall is to be passed to Linux;
	 *  >0 -- if the syscall should not be passed to Linux, and no
	 *        tail work should be performed;
	 *  <0 -- if the syscall should not be passed to Linux but the
	 *        tail work has to be performed (for handling signals etc). */

	if (__ipipe_syscall_watched_p(current, regs.orig_eax) &&
	    __ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
	    __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, &regs) > 0) {
		/* We might enter here over a non-root domain and exit
		 * over the root one as a result of the syscall
		 * (i.e. by recycling the register set of the current
		 * context across the migration), so we need to fixup
		 * the interrupt flag upon return too, so that
		 * __ipipe_unstall_iret_root() resets the correct
		 * stall bit on exit. */
		__fixup_if(&regs);

		if (ipipe_current_domain == ipipe_root_domain) {
			/* Sync pending VIRQs before _TIF_NEED_RESCHED
			 * is tested. */
			ipipe_lock_cpu(flags);
			if ((ipipe_root_domain->cpudata[cpuid].irq_pending_hi & IPIPE_IRQMASK_VIRT) != 0)
				__ipipe_sync_stage(IPIPE_IRQMASK_VIRT);
			ipipe_unlock_cpu(flags);
			return -1;
		}
		return 1;
	}

	return 0;
}
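The comment inside __ipipe_syscall_root() defines a three-way return protocol (0, >0, <0). A hypothetical caller, shown only to make that protocol concrete, might dispatch on it as follows; the real caller is the low-level syscall entry code, and the helper names below are made-up placeholders that do not appear in the listings.

/* Hypothetical dispatch on the 0 / >0 / <0 return protocol of
 * __ipipe_syscall_root(). */
extern void linux_syscall_path(struct pt_regs *regs);		/* placeholder */
extern void linux_syscall_tail_work(struct pt_regs *regs);	/* placeholder */

static void syscall_entry_sketch(struct pt_regs regs)
{
	int ret = __ipipe_syscall_root(regs);

	if (ret == 0)
		linux_syscall_path(&regs);	/* pass the syscall to Linux */
	else if (ret < 0)
		linux_syscall_tail_work(&regs);	/* consumed, but run tail work (signals, resched) */
	/* ret > 0: consumed by a non-root domain, nothing else to do */
}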
Example no. 4
0
int __ipipe_divert_exception(struct pt_regs *regs, int vector)
{
	bool root_entry = false;
	unsigned long flags = 0;

	if (ipipe_root_domain_p) {
		root_entry = true;

		local_save_flags(flags);

		if (irqs_disabled_hw()) {
			/*
			 * Same root state handling as in
			 * __ipipe_handle_exception.
			 */
			local_irq_disable();
		}
	}
#ifdef CONFIG_KGDB
	/* catch int1 and int3 over non-root domains */
	else {
#ifdef CONFIG_X86_32
		if (vector != ex_do_device_not_available)
#endif
		{
			unsigned int condition = 0;

			if (vector == 1)
				get_debugreg(condition, 6);
			if (!kgdb_handle_exception(vector, SIGTRAP, condition, regs))
				return 1;
		}
	}
#endif /* CONFIG_KGDB */

	if (unlikely(ipipe_trap_notify(vector, regs))) {
		if (root_entry)
			local_irq_restore_nosync(flags);
		return 1;
	}

	/* see __ipipe_handle_exception */
	if (likely(ipipe_root_domain_p))
		__fixup_if(root_entry ? raw_irqs_disabled_flags(flags) :
					raw_irqs_disabled(), regs);
	/*
	 * No need to restore root state in the 64-bit case, the Linux handler
	 * and the return code will take care of it.
	 */

	return 0;
}
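Examples no. 4 and 5 call a two-argument variant of __fixup_if(), passing the sampled interrupts-disabled state explicitly. Its definition is not part of these listings; assuming it simply propagates that state into the saved EFLAGS.IF bit of regs->flags, a minimal sketch could be the following.

/* Hypothetical two-argument __fixup_if(): make the IF bit of regs->flags
 * match the interrupt state that was sampled on entry (or is current, if
 * we migrated to root), so the Linux fault handler and the low-level
 * return code see consistent flags. */
static inline void __fixup_if(int disabled, struct pt_regs *regs)
{
	if (disabled)
		regs->flags &= ~X86_EFLAGS_IF;
	else
		regs->flags |= X86_EFLAGS_IF;
}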
Example no. 5
0
int __ipipe_handle_exception(struct pt_regs *regs, long error_code, int vector)
{
	bool root_entry = false;
	unsigned long flags = 0;
	unsigned long cr2 = 0;

	if (ipipe_root_domain_p) {
		root_entry = true;

		local_save_flags(flags);
		/*
		 * Replicate hw interrupt state into the virtual mask
		 * before calling the I-pipe event handler over the
		 * root domain. Also required later when calling the
		 * Linux exception handler.
		 */
		if (irqs_disabled_hw())
			local_irq_disable();
	}
#ifdef CONFIG_KGDB
	/* catch exceptions KGDB is interested in over non-root domains */
	else if (__ipipe_xlate_signo[vector] >= 0 &&
		 !kgdb_handle_exception(vector, __ipipe_xlate_signo[vector],
					error_code, regs))
		return 1;
#endif /* CONFIG_KGDB */

	if (vector == ex_do_page_fault)
		cr2 = native_read_cr2();

	if (unlikely(ipipe_trap_notify(vector, regs))) {
		if (root_entry)
			local_irq_restore_nosync(flags);
		return 1;
	}

	if (likely(ipipe_root_domain_p)) {
		/*
		 * If root is not the topmost domain or in case we faulted in
		 * the iret path of x86-32, regs.flags does not match the root
		 * domain state. The fault handler or the low-level return
		 * code may evaluate it. So fix this up, either by the root
		 * state sampled on entry or, if we migrated to root, with the
		 * current state.
		 */
		__fixup_if(root_entry ? raw_irqs_disabled_flags(flags) :
					raw_irqs_disabled(), regs);
	} else {
		/* Detect unhandled faults over non-root domains. */
		struct ipipe_domain *ipd = ipipe_current_domain;

		/* Switch to root so that Linux can handle the fault cleanly. */
		__ipipe_current_domain = ipipe_root_domain;

		ipipe_trace_panic_freeze();

		/* Always warn about user land and unfixable faults. */
		if (user_mode_vm(regs) ||
		    !search_exception_tables(instruction_pointer(regs))) {
			printk(KERN_ERR "BUG: Unhandled exception over domain"
			       " %s at 0x%lx - switching to ROOT\n",
			       ipd->name, instruction_pointer(regs));
			dump_stack();
			ipipe_trace_panic_dump();
#ifdef CONFIG_IPIPE_DEBUG
		/* Also report fixable ones when debugging is enabled. */
		} else {
			printk(KERN_WARNING "WARNING: Fixable exception over "
			       "domain %s at 0x%lx - switching to ROOT\n",
			       ipd->name, instruction_pointer(regs));
			dump_stack();
			ipipe_trace_panic_dump();
#endif /* CONFIG_IPIPE_DEBUG */
		}
	}

	if (vector == ex_do_page_fault)
		write_cr2(cr2);

	__ipipe_std_extable[vector](regs, error_code);

	/*
	 * Relevant for 64-bit: Restore root domain state as the low-level
	 * return code will not align it to regs.flags.
	 */
	if (root_entry)
		local_irq_restore_nosync(flags);

	return 0;
}