/*
 * Handler for CHUD cross-CPU signals.  Captures the interrupted thread's
 * register state and forwards it, along with the signal request, to the
 * registered cpusig callback.
 */
static kern_return_t
chudxnu_private_cpu_signal_handler(int request)
{
	chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;
	
	if (fn) {
		x86_thread_state_t	state;
		mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(current_thread(),
					     x86_THREAD_STATE,
					     (thread_state_t) &state, &count,
					     FALSE) == KERN_SUCCESS) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_CHUD,
					CHUD_CPUSIG_CALLBACK) | DBG_FUNC_NONE,
				(uint32_t)fn, request, 0, 0, 0);
			return (fn)(
					request, x86_THREAD_STATE,
					(thread_state_t) &state, count);
		} else {
			return KERN_FAILURE;
		}
	}
	return KERN_SUCCESS;	/* no callback registered; return value is ignored */
}
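/*
 * Interrupt-level callback, presumably wired to the performance-monitor
 * interrupt path: with interrupts disabled, it captures the interrupted
 * thread's state and hands it to the registered CHUD interrupt callback
 * as an X86_INTERRUPT_PERFMON event.
 */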
static void
chudxnu_private_interrupt_callback(void *foo)
{
#pragma unused (foo)
	chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

	if(fn) {
		boolean_t			oldlevel;
		x86_thread_state_t		state;
		mach_msg_type_number_t		count;

		oldlevel = ml_set_interrupts_enabled(FALSE);

		count = x86_THREAD_STATE_COUNT;
		if(chudxnu_thread_get_state(current_thread(),
					    x86_THREAD_STATE,
					    (thread_state_t)&state,
					    &count,
					    FALSE) == KERN_SUCCESS) {
			(fn)(
				X86_INTERRUPT_PERFMON,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
		ml_set_interrupts_enabled(oldlevel);
	}
}
static kern_return_t
chudxnu_private_trap_callback(
	int			trapno,
	void			*regs,
	int			unused1,
	int			unused2)
{
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
	kern_return_t retval = KERN_FAILURE;
	chudxnu_trap_callback_func_t fn = trap_callback_fn;

	if(fn) {
		boolean_t oldlevel;
		x86_thread_state_t state;
		mach_msg_type_number_t count;
		thread_t thread = current_thread();
		
		oldlevel = ml_set_interrupts_enabled(FALSE);
		
		/* prevent reentry into CHUD when dtracing */
		if(thread->t_chud & T_IN_CHUD) {
			/* restore interrupts */
			ml_set_interrupts_enabled(oldlevel);

			return KERN_FAILURE;	// not handled - pass off to dtrace
		}

		/* update the chud state bits */
		thread->t_chud |= T_IN_CHUD;

		count = x86_THREAD_STATE_COUNT;
		
		if(chudxnu_thread_get_state(thread,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				&count,
				FALSE) == KERN_SUCCESS) {
			retval = (fn)(
				trapno,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}

		/* no longer in CHUD */
		thread->t_chud &= ~(T_IN_CHUD);

		ml_set_interrupts_enabled(oldlevel);
	}

	return retval;
}
static kern_return_t
chudxnu_private_chud_ast_callback(
	int			trapno,
	void			*regs,
	int			unused1,
	int			unused2)
{
#pragma unused (trapno)
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
	boolean_t	oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t		*myast = ast_pending();
	kern_return_t	retval = KERN_FAILURE;
	chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;
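	/*
	 * Consume any pending CHUD AST bits; an urgent CHUD AST also
	 * clears AST_URGENT unless a preemption AST remains pending.
	 */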
    
	if (*myast & AST_CHUD_URGENT) {
		*myast &= ~(AST_CHUD_URGENT | AST_CHUD);
		if ((*myast & AST_PREEMPTION) != AST_PREEMPTION)
			*myast &= ~(AST_URGENT);
		retval = KERN_SUCCESS;
	} else if (*myast & AST_CHUD) {
		*myast &= ~(AST_CHUD);
		retval = KERN_SUCCESS;
	}

	if (fn) {
		x86_thread_state_t state;
		mach_msg_type_number_t count;
		count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(
			current_thread(),
			x86_THREAD_STATE,
			(thread_state_t) &state, &count,
			TRUE) == KERN_SUCCESS) {	/* TRUE: user-mode state only */

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_CHUD,
				    CHUD_AST_CALLBACK) | DBG_FUNC_NONE,
				(uint32_t) fn, 0, 0, 0, 0);

			(fn)(
				x86_THREAD_STATE,
				(thread_state_t) &state,
				count);
		}
	}
    
	ml_set_interrupts_enabled(oldlevel);
	return retval;
}
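/*
 * Per-CPU timer callback.  Fires from the timer_call armed for this CPU,
 * looks up the CHUD state hanging off the current cpu_data, and hands the
 * current thread's register state to that CPU's timer callback, if any.
 */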
static void
chudxnu_private_cpu_timer_callback(
	timer_call_param_t param0,
	timer_call_param_t param1)
{
#pragma unused (param0)
#pragma unused (param1)
	chudcpu_data_t			*chud_proc_info;
	boolean_t			oldlevel;
	x86_thread_state_t 		state;
	mach_msg_type_number_t		count;
	chudxnu_cpu_timer_callback_func_t fn;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	count = x86_THREAD_STATE_COUNT;
	if (chudxnu_thread_get_state(current_thread(),
				     x86_THREAD_STATE,
				     (thread_state_t)&state,
				     &count,
				     FALSE) == KERN_SUCCESS) {
		fn = chud_proc_info->cpu_timer_callback_fn;
		if (fn) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_CHUD,
					CHUD_TIMER_CALLBACK) | DBG_FUNC_NONE,
				(uint32_t)fn, 0, 0, 0, 0);
			(fn)(
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
	}
	
	ml_set_interrupts_enabled(oldlevel);
}