Example #1
void smp_message_recv(int msg, struct pt_regs *regs)
{
	atomic_inc(&ipi_recv);
	
	switch( msg ) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE: 
		current->need_resched = 1;
		break;
#ifdef CONFIG_XMON
	case PPC_MSG_XMON_BREAK:
		xmon(regs);
		break;
#endif /* CONFIG_XMON */
#ifdef CONFIG_KDB
	case PPC_MSG_XMON_BREAK:
	        /* This isn't finished yet, obviously -TAI */
		kdb(KDB_REASON_KEYBOARD,0, (kdb_eframe_t) regs);
		break;
#endif
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}
Example #2
KEYMAN::KEYMAN()
{
	for (int i = 0; i < 1024; i++)
	{
		keys[i] = false;
	}
	
	queuedepth = 0;
	
	error_log.open((settings.GetSettingsDir() + "/logs/keyman.log").c_str());
	
	ifstream kdb((settings.GetDataDir() + "/lists/keys").c_str());
	
	if (!kdb)
		error_log << "Couldn't find key database." << endl;
	
	num_keyrecs = utility.iGetParam(kdb);
	//error_log << num_keyrecs << endl;
	
	for (int i = 0; i < num_keyrecs; i++)
	{
		keyrec[i].name = sGetParamNW(kdb);
		//error_log << keyrec[i].name << endl;
		string nk = sGetParamNW(kdb);
		if (nk.length() == 1)
			keyrec[i].key = nk[0];
		else
			keyrec[i].key = atoi(nk.c_str());
		//error_log << keyrec[i].key << endl;
	}
	
	kdb.close();
	
	freecam = true;
}
Example #3
static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
#ifdef CONFIG_KDB
	(void)kdb(KDB_REASON_NMI, reason, regs);
#endif /* CONFIG_KDB */

	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
			NOTIFY_STOP)
		return;
#ifdef CONFIG_MCA
	/*
	 * Might actually be able to figure out what the guilty party
	 * is:
	 */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	printk(KERN_EMERG
		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
			reason, smp_processor_id());

	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}
Example #4
/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KDB
	if (kdb(KDB_REASON_BREAK, error_code, regs))
		return;
#endif
#ifdef CONFIG_KPROBES
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#else
	if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#endif

	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
Example #5
    occa::kernelDatabase loadKernelDatabase(const std::string &kernelName){
      kernelDatabase kdb(kernelName);

      kernelMutex.lock();

      kernelMapIterator it = kernelMap.find(kernelName);

      if(it != kernelMap.end()){
        std::vector<int> &ids = it->second;

        const int idCount = ids.size();

        for(int i = 0; i < idCount; ++i)
          kdb.modelKernelIsAvailable(ids[i]);
      }

      kernelMutex.unlock();

      return kdb;
    }
Example #6
static void receive_chars(struct tty_struct *tty, struct pt_regs *regs)
{
    unsigned char ch;
    static unsigned char seen_esc = 0;

    while ( (ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR)) ) {
        if ( ch == 27 && seen_esc == 0 ) {
            seen_esc = 1;
            continue;
        } else {
            if ( seen_esc==1 && ch == 'O' ) {
                seen_esc = 2;
                continue;
            } else if ( seen_esc == 2 ) {
                if ( ch == 'P' ) show_state();		/* F1 key */
#ifdef CONFIG_KDB
                if ( ch == 'S' )
                    kdb(KDB_REASON_KEYBOARD, 0, (kdb_eframe_t) regs);
#endif

                seen_esc = 0;
                continue;
            }
        }
        seen_esc = 0;
        if (tty->flip.count >= TTY_FLIPBUF_SIZE) break;

        *tty->flip.char_buf_ptr = ch;
        *tty->flip.flag_buf_ptr = 0;

        tty->flip.flag_buf_ptr++;
        tty->flip.char_buf_ptr++;
        tty->flip.count++;
    }
    tty_flip_buffer_push(tty);
}
Example #7
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
{
	struct call_data_struct data;
	int ret = -1, cpus = smp_num_cpus-1;
	int timeout;

	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION, 0, 0);

	/* Wait for response */
	timeout = 8000000;
	while (atomic_read(&data.started) != cpus) {
		HMT_low();
		if (--timeout == 0) {
			printk("smp_call_function on cpu %d: other cpus not responding (%d)\n",
			       smp_processor_id(), atomic_read(&data.started));
#ifdef CONFIG_XMON
                        xmon(0);
#endif
#ifdef CONFIG_KDB
			kdb(KDB_REASON_CALL,0, (kdb_eframe_t) 0);
#endif

#ifdef CONFIG_PPC_ISERIES
			HvCall_terminateMachineSrc();
#endif
			goto out;
		}
		barrier();
		udelay(1);
	}

	if (wait) {
		timeout = 1000000;
		while (atomic_read(&data.finished) != cpus) {
			HMT_low();
			if (--timeout == 0) {
				printk("smp_call_function on cpu %d: other cpus not finishing (%d/%d)\n",
				       smp_processor_id(), atomic_read(&data.finished), atomic_read(&data.started));
#ifdef CONFIG_PPC_ISERIES
				HvCall_terminateMachineSrc();
#endif
				goto out;
			}
			barrier();
			udelay(1);
		}
	}
	ret = 0;

 out:
	call_data = NULL;
	HMT_medium();
	spin_unlock_bh(&call_lock);
	return ret;
}
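The block comment above documents the smp_call_function() calling convention: pass a fast, non-blocking function plus an opaque info pointer, and set wait to block until every other CPU has run it. As a minimal sketch of that convention only (say_hello() and greet_other_cpus() are illustrative helpers, not part of the source), a caller in process context with interrupts enabled might look like this:

/*
 * Illustrative sketch only: exercises the calling convention documented
 * above. say_hello() and greet_other_cpus() are hypothetical helpers,
 * not taken from the source tree.
 */
static void say_hello(void *info)
{
	printk("CPU %d: %s\n", smp_processor_id(), (char *) info);
}

static void greet_other_cpus(void)
{
	/* wait == 1: block until say_hello() has finished on every other CPU */
	if (smp_call_function(say_hello, "hello", 0, 1) != 0)
		printk("smp_call_function: other CPUs did not respond\n");
}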
Example #8
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	unsigned long condition;
	int si_code;

	get_debugreg(condition, 6);

	/* Catch kmemcheck conditions first of all! */
	if (condition & DR_STEP && kmemcheck_trap(regs))
		return;

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
	tsk->thread.debugctlmsr = 0;

#ifdef  CONFIG_KDB
	if (kdb(KDB_REASON_DEBUG, error_code, regs))
		return;
#endif  /* CONFIG_KDB */

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK)
		goto debug_vm86;
#endif

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg6 = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	si_code = get_si_code(condition);
	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code, si_code);

	/*
	 * Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	set_debugreg(0, 7);
	preempt_conditional_cli(regs);
	return;

#ifdef CONFIG_X86_32
debug_vm86:
	/* reenable preemption: handle_vm86_trap() might sleep */
	dec_preempt_count();
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	conditional_cli(regs);
	return;
#endif

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	preempt_conditional_cli(regs);
	return;
}