Example #1
__private_extern__ kern_return_t
chudxnu_task_write(
				   task_t		task,
				   uint64_t	useraddr,
				   void		*kernaddr,
				   vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;
	
	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // can't poke into tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);
	
	if(current_task()==task) {    
		
		if(copyout(kernaddr, useraddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_write_user(map, kernaddr, useraddr, size);
	}		
	
	ml_set_interrupts_enabled(old_level);

	return ret;
}
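The example above, like every other snippet in this listing, follows the same save/restore idiom: ml_set_interrupts_enabled(FALSE) masks interrupts and returns the previous state, and the caller later passes that saved value back instead of forcing TRUE. A minimal sketch of the pattern, assuming only the XNU machine-routines interface; the do_short_critical_section helper is hypothetical and added purely for illustration:

/*
 * Hypothetical helper (not from the source tree) showing the idiom the
 * examples share: save the caller's interrupt state, run a short
 * critical section with interrupts masked, then restore whatever state
 * the caller had on entry.
 */
static void
do_short_critical_section(void)
{
	boolean_t oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);	/* returns previous state */

	/* ... short, non-blocking work with interrupts masked ... */

	(void) ml_set_interrupts_enabled(oldlevel);	/* restore saved state, never force TRUE */
}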
static kern_return_t chudxnu_private_trap_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    int cpu = cpu_number();

    kern_return_t retval = KERN_FAILURE;
    uint32_t trapentry = TRAP_ENTRY_POINT(trapno);

    // ASTs from ihandler go through thandler and are made to look like traps
    if(perfmon_ast_callback_fn && (need_ast[cpu] & AST_PPC_CHUD)) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        (perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        need_ast[cpu] &= ~(AST_PPC_CHUD);
    }

    if(trapentry!=0x0) {
        if(trap_callback_fn) {
            struct ppc_thread_state64 state;
            mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
            chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
            retval = (trap_callback_fn)(trapentry, PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);

    return retval;
}
static void
chudxnu_private_interrupt_callback(void *foo)
{
#pragma unused (foo)
	chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

	if(fn) {
		boolean_t			oldlevel;
		x86_thread_state_t		state;
		mach_msg_type_number_t		count;

		oldlevel = ml_set_interrupts_enabled(FALSE);

		count = x86_THREAD_STATE_COUNT;
		if(chudxnu_thread_get_state(current_thread(),
					    x86_THREAD_STATE,
					    (thread_state_t)&state,
					    &count,
					    FALSE) == KERN_SUCCESS) {
			(fn)(
				X86_INTERRUPT_PERFMON,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
		ml_set_interrupts_enabled(oldlevel);
	}
}
Example #4
void
cpu_exit_wait(
	int cpu)
{
	cpu_data_t	*cdp = cpu_datap(cpu);
	boolean_t	intrs_enabled;
	uint64_t	tsc_timeout;

	/*
	 * Wait until the CPU indicates that it has stopped.
	 * Disable interrupts while the topo lock is held -- arguably
	 * this should always be done but in this instance it can lead to
	 * a timeout if a long-running interrupt were to occur here.
	 */
	intrs_enabled = ml_set_interrupts_enabled(FALSE);
	simple_lock(&x86_topo_lock);
	/* Set a generous timeout of several seconds (in TSC ticks) */
	tsc_timeout = rdtsc64() + (10ULL * 1000 * 1000 * 1000);
	while ((cdp->lcpu.state != LCPU_HALT)
	       && (cdp->lcpu.state != LCPU_OFF)
	       && !cdp->lcpu.stopped) {
	    simple_unlock(&x86_topo_lock);
	    ml_set_interrupts_enabled(intrs_enabled);
	    cpu_pause();
	    if (rdtsc64() > tsc_timeout)
		panic("cpu_exit_wait(%d) timeout", cpu);
	    ml_set_interrupts_enabled(FALSE);
	    simple_lock(&x86_topo_lock);
	}
	simple_unlock(&x86_topo_lock);
	ml_set_interrupts_enabled(intrs_enabled);
}
Example #5
/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled=0, offset=0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu)
		*curcpu = current_processor()->cpu_id;

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
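A caller of kpc_get_curcpu_counters() is expected to supply a buffer large enough for every class it requests; kpc_get_counter_count(), used above for the fixed class, gives the required size. The sketch below is a hypothetical caller, not code from the source tree, and its buffer check and EINVAL error value are assumptions for illustration:

/*
 * Hypothetical caller sketch: snapshot the fixed counters of whichever
 * CPU we are currently running on.  The caller-supplied buffer must
 * hold at least kpc_get_counter_count(KPC_CLASS_FIXED_MASK) entries.
 */
static int
snapshot_fixed_counters(uint64_t *buf, uint32_t buf_entries)
{
	int cpuid = -1;
	uint32_t needed = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);

	if (buf_entries < needed)
		return EINVAL;		/* error value chosen for illustration */

	/* returns the number of 64-bit counter values written into buf */
	(void) kpc_get_curcpu_counters(KPC_CLASS_FIXED_MASK, &cpuid, buf);

	return 0;
}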
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
	chudxnu_cpu_timer_callback_func_t	func,
	uint32_t				time,
	uint32_t				units)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	// cancel any existing callback for this cpu
	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	chud_proc_info->cpu_timer_callback_fn = func;

	clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
	timer_call_setup(&(chud_proc_info->cpu_timer_call),
			 chudxnu_private_cpu_timer_callback, NULL);
	timer_call_enter(&(chud_proc_info->cpu_timer_call),
			 chud_proc_info->t_deadline);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_CHUD,
			     CHUD_TIMER_CALLBACK_ENTER) | DBG_FUNC_NONE,
		(uint32_t) func, time, units, 0, 0);

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}
Example #7
/* get counter values for a thread */
int
kpc_get_curthread_counters(uint32_t *inoutcount, uint64_t *buf)
{
	thread_t thread = current_thread();
	boolean_t enabled;

	/* buffer too small :( */
	if( *inoutcount < kpc_thread_classes_count )
		return EINVAL;

	/* copy data and actual size */
	if( !thread->kpc_buf )
		return EINVAL;

	enabled = ml_set_interrupts_enabled(FALSE);

	/* snap latest version of counters for this thread */
	kpc_update_thread_counters( current_thread() );
	
	/* copy out */
	memcpy( buf, thread->kpc_buf, 
	        kpc_thread_classes_count * sizeof(*buf) );
	*inoutcount = kpc_thread_classes_count;

	ml_set_interrupts_enabled(enabled);

	return 0;
}
Example #8
void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}
__private_extern__ kern_return_t
chudxnu_get_cpu_interrupt_counters(int cpu, rupt_counters_t *rupts)
{
    if(cpu < 0 || (unsigned int)cpu >= real_ncpus) { // sanity check
        return KERN_FAILURE;
    }

    if(rupts) {
        boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
        cpu_data_t	*per_proc;

        per_proc = cpu_data_ptr[cpu];
		// For now, we'll call an NMI a 'reset' interrupt
        rupts->hwResets = per_proc->cpu_hwIntCnt[T_NMI];
        rupts->hwMachineChecks = per_proc->cpu_hwIntCnt[T_MACHINE_CHECK];
        rupts->hwDSIs = 0;
        rupts->hwISIs = 0;
		// we could accumulate 0x20-0x7f, but that'd likely overflow...
        rupts->hwExternals = 0;
		// This appears to be wrong.
        rupts->hwAlignments = 0; //per_proc->cpu_hwIntCnt[0x11];
        rupts->hwPrograms = 0;
        rupts->hwFloatPointUnavailable = per_proc->cpu_hwIntCnt[T_NO_FPU];
		// osfmk/i386/mp.h
        rupts->hwDecrementers = per_proc->cpu_hwIntCnt[LAPIC_VECTOR(TIMER)];
		// LAPIC_ERROR == IO ERROR??
        rupts->hwIOErrors = per_proc->cpu_hwIntCnt[LAPIC_VECTOR(ERROR)];

		// accumulate all system call types
		// osfmk/mach/i386/syscall_sw.h
        rupts->hwSystemCalls = per_proc->cpu_hwIntCnt[UNIX_INT]  +
			per_proc->cpu_hwIntCnt[MACH_INT] +
			per_proc->cpu_hwIntCnt[MACHDEP_INT] +
			per_proc->cpu_hwIntCnt[DIAG_INT];

        rupts->hwTraces = per_proc->cpu_hwIntCnt[T_DEBUG]; // single steps == traces??
        rupts->hwFloatingPointAssists = 0;
		// osfmk/i386/mp.h
        rupts->hwPerformanceMonitors =
			per_proc->cpu_hwIntCnt[LAPIC_VECTOR(PERFCNT)];
        rupts->hwAltivecs = 0;
        rupts->hwInstBreakpoints = per_proc->cpu_hwIntCnt[T_INT3];
        rupts->hwSystemManagements = 0;
        rupts->hwAltivecAssists = 0;
        rupts->hwThermal = per_proc->cpu_hwIntCnt[LAPIC_VECTOR(THERMAL)];
        rupts->hwSoftPatches = 0;
        rupts->hwMaintenances = 0;
		// Watchpoint == instrumentation
		rupts->hwInstrumentations = per_proc->cpu_hwIntCnt[T_WATCHPOINT]; 

        ml_set_interrupts_enabled(oldlevel);
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}
static kern_return_t
chudxnu_private_trap_callback(
	int trapno,
	void			*regs,
	int			unused1,
	int			unused2)
{
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
	kern_return_t retval = KERN_FAILURE;
	chudxnu_trap_callback_func_t fn = trap_callback_fn;

	if(fn) {
		boolean_t oldlevel;
		x86_thread_state_t state;
		mach_msg_type_number_t count;
		thread_t thread = current_thread();
		
		oldlevel = ml_set_interrupts_enabled(FALSE);
		
		/* prevent reentry into CHUD when dtracing */
		if(thread->t_chud & T_IN_CHUD) {
			/* restore interrupts */
			ml_set_interrupts_enabled(oldlevel);

			return KERN_FAILURE;	// not handled - pass off to dtrace
		}

		/* update the chud state bits */
		thread->t_chud |= T_IN_CHUD;

		count = x86_THREAD_STATE_COUNT;
		
		if(chudxnu_thread_get_state(thread,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				&count,
				FALSE) == KERN_SUCCESS) {
			retval = (fn)(
				trapno,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}

		/* no longer in CHUD */
		thread->t_chud &= ~(T_IN_CHUD);

		ml_set_interrupts_enabled(oldlevel);
	}

	return retval;
}
static kern_return_t
chudxnu_private_chud_ast_callback(
	int			trapno,
	void			*regs,
	int			unused1,
	int			unused2)
{
#pragma unused (trapno)
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
	boolean_t	oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t		*myast = ast_pending();
	kern_return_t	retval = KERN_FAILURE;
	chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;
    
	if (*myast & AST_CHUD_URGENT) {
		*myast &= ~(AST_CHUD_URGENT | AST_CHUD);
		if ((*myast & AST_PREEMPTION) != AST_PREEMPTION)
			*myast &= ~(AST_URGENT);
		retval = KERN_SUCCESS;
	} else if (*myast & AST_CHUD) {
		*myast &= ~(AST_CHUD);
		retval = KERN_SUCCESS;
	}

	if (fn) {
		x86_thread_state_t state;
		mach_msg_type_number_t count;
		count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(
			current_thread(),
			x86_THREAD_STATE,
			(thread_state_t) &state, &count,
			TRUE) == KERN_SUCCESS) {

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_CHUD,
				    CHUD_AST_CALLBACK) | DBG_FUNC_NONE,
				(uint32_t) fn, 0, 0, 0, 0);

			(fn)(
				x86_THREAD_STATE,
				(thread_state_t) &state,
				count);
		}
	}
    
	ml_set_interrupts_enabled(oldlevel);
	return retval;
}
Example #12
void protoss_stop() {
    if(trace_going || watch_going) {
        begin_debug(); // interrupts disabled
        read_debug(197);
        uint32_t dbgdscr = read_debug(34);
        dbgdscr |= 0x8000; // turn on debug
        write_debug(34, dbgdscr);
        for(int i = 0; i < 16; i++) {
            // bcr and wcr
            write_debug(80 + i, 0);
            write_debug(112 + i, 0);
        }

        dbgdscr = read_debug(34);
        dbgdscr &= ~0x8000;
        write_debug(34, dbgdscr);
        end_debug();
    }

    if(trace_going) {
        trace_going = false;
    }
    
    watch_going = false;

#ifdef WATCHPOINTS
    if(ter_patched) {
        memset(debug_stuff, 0, sizeof(debug_stuff));
        old_ie = ml_set_interrupts_enabled(0);

        for(int i = 0; i < 4; i++) ter_patch_loc[i] = ter_orig[i];
    
        flush_cache(ter_patch_loc, sizeof(ter_orig));

        ter_patched = false;

        ml_set_interrupts_enabled(old_ie);
    }
#endif

    if(prefetch_saved) {
        vector_base()[3+8] = prefetch_saved;
        prefetch_saved = NULL;
    }

    if(data_saved) {
        vector_base()[4+8] = data_saved;
        data_saved = NULL;
    }
}
__private_extern__
kern_return_t chudxnu_perfmon_ast_send(void)
{
    int cpu;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    cpu = cpu_number();

    need_ast[cpu] |= (AST_PPC_CHUD | AST_URGENT);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
Example #14
int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
                        int *curcpu, uint64_t *buf)
{
	int curcpu_id = current_processor()->cpu_id;
	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	curcpu_id = current_processor()->cpu_id;
	if (curcpu)
		*curcpu = curcpu_id;

	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
		/* filter if the caller did not request all cpus */
		if (!all_cpus && (cpu != curcpu_id))
			continue;

		if (classes & KPC_CLASS_FIXED_MASK) {
			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
			offset += count;
		}

		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}

		if (classes & KPC_CLASS_POWER_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
__private_extern__
kern_return_t chudxnu_cpu_timer_callback_cancel(void)
{
    int cpu;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    cpu = cpu_number();

    timer_call_cancel(&(cpu_timer_call[cpu]));
    t_deadline[cpu] = t_deadline[cpu] | ~(t_deadline[cpu]); // set to max value
    cpu_timer_callback_fn[cpu] = NULL;

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
__private_extern__
kern_return_t chudxnu_cpusig_send(int otherCPU, uint32_t request)
{
    int thisCPU;
    kern_return_t retval = KERN_FAILURE;
    int retries = 0;
    boolean_t oldlevel;
    uint32_t temp[2];

    oldlevel = ml_set_interrupts_enabled(FALSE);
    thisCPU = cpu_number();

    if(thisCPU!=otherCPU) {
        temp[0] = 0xFFFFFFFF;		/* set sync flag */
        temp[1] = request;			/* set request */
        __asm__ volatile("eieio");	/* force order */
        __asm__ volatile("sync");	/* force to memory */

        do {
            retval=cpu_signal(otherCPU, SIGPcpureq, CPRQchud, (uint32_t)&temp);
        } while(retval!=KERN_SUCCESS && (retries++)<16);

        if(retries>=16) {
            retval = KERN_FAILURE;
        } else {
            retval = hw_cpu_sync(temp, LockTimeOut); /* wait for the other processor */
            if(!retval) {
                retval = KERN_FAILURE;
            } else {
                retval = KERN_SUCCESS;
            }
        }
    } else {
        retval = KERN_FAILURE;	/* assumed: signalling the current CPU is not supported */
    }

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}
Example #17
/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
	register thread_act_t	act)
{
	kern_return_t	result;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (	act->task == kernel_task	&&
			act != current_act()			)
		return (KERN_FAILURE);

	result = thread_terminate_internal(act);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if (act->task == kernel_task) {
		ml_set_interrupts_enabled(FALSE);
		assert(act == current_act());
		ast_taken(AST_APC, TRUE);
		panic("thread_terminate");
	}

	return (result);
}
Example #18
void RealView_timebase_init(void)
{
    assert(gRealviewTimerBase);
    
    timer_configure();
    
    /* disable timer */
    RealView_timer_enabled(FALSE);
    
    /* set timer values and initialize decrementer */
    HARDWARE_REGISTER(gRealviewTimerBase + TIMER_CONTROL) |= TIMER_MODE_FREE_RUNNING;
    HARDWARE_REGISTER(gRealviewTimerBase + TIMER_CONTROL) |= TIMER_SIZE_32_BIT;
    HARDWARE_REGISTER(gRealviewTimerBase + TIMER_CONTROL) |= TIMER_ENABLE;
    HARDWARE_REGISTER(gRealviewTimerBase) = clock_decrementer;

    /* enable IRQs so we can catch the timer interrupt when it decrements */
    ml_set_interrupts_enabled(TRUE);
    
    /* re-enable timer */
    RealView_timer_enabled(TRUE);
    
    clock_initialized = TRUE;
    
    while(!clock_had_irq)
        barrier();

    kprintf(KPRINTF_PREFIX "RealView Timer initialized, Timer value %llu\n", RealView_timer_value());

    return;
}
Example #19
static spl_t
panic_prologue(const char *str)
{
	spl_t	s;

	if (kdebug_enable) {
		ml_set_interrupts_enabled(TRUE);
		kdbg_dump_trace_to_file("/var/tmp/panic.trace");
	}

	s = splhigh();
	disable_preemption();

#if	defined(__i386__) || defined(__x86_64__)
	/* Attempt to display the unparsed panic string */
	const char *tstr = str;

	kprintf("Panic initiated, string: ");
	while (tstr && *tstr)
		kprintf("%c", *tstr++);
	kprintf("\n");
#endif

	panic_safe();

#ifndef __arm__ 	/* xxx show all panic output for now. */
	if( logPanicDataToScreen )
#endif		
		disable_debug_output = FALSE;
	debug_mode = TRUE;

restart:
	PANIC_LOCK();

	if (panicstr) {
		if (cpu_number() != paniccpu) {
			PANIC_UNLOCK();
			/*
			 * Wait until message has been printed to identify correct
			 * cpu that made the first panic.
			 */
			while (panicwait)
				continue;
			goto restart;
		} else {
			nestedpanic +=1;
			PANIC_UNLOCK();
			Debugger("double panic");
			printf("double panic:  We are hanging here...\n");
			panic_stop();
			/* NOTREACHED */
		}
	}
	panicstr = str;
	paniccpu = cpu_number();
	panicwait = 1;

	PANIC_UNLOCK();
	return(s);
}
Example #20
void kprintf(const char *fmt, ...)
{
	va_list   listp;
	boolean_t state;

	if (!disable_serial_output) {
		boolean_t early = FALSE;
		if (rdmsr64(MSR_IA32_GS_BASE) == 0) {
			early = TRUE;
		}
		/* If PE_kputc has not yet been initialized, don't
		 * take any locks, just dump to serial */
		if (!PE_kputc || early) {
			va_start(listp, fmt);
			_doprnt(fmt, &listp, pal_serial_putc, 16);
			va_end(listp);
			return;
		}

		/*
		 * Spin to get kprintf lock but re-enable interrupts while
		 * failing.
		 * This allows interrupts to be handled while waiting but
		 * interrupts are disabled once we have the lock.
		 */
		state = ml_set_interrupts_enabled(FALSE);

		pal_preemption_assert();

		while (!simple_lock_try(&kprintf_lock)) {
			ml_set_interrupts_enabled(state);
			ml_set_interrupts_enabled(FALSE);
		}

		if (cpu_number() != cpu_last_locked) {
			MP_DEBUG_KPRINTF("[cpu%d...]\n", cpu_number());
			cpu_last_locked = cpu_number();
		}

		va_start(listp, fmt);
		_doprnt(fmt, &listp, PE_kputc, 16);
		va_end(listp);

		simple_unlock(&kprintf_lock);
		ml_set_interrupts_enabled(state);
	}
}
Example #21
boolean_t
thread_funnel_set(
        funnel_t *	fnl,
	boolean_t	funneled)
{
	thread_t	cur_thread;
	boolean_t	funnel_state_prev;
	boolean_t	intr;
        
	cur_thread = current_thread();
	funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);

	if (funnel_state_prev != funneled) {
		intr = ml_set_interrupts_enabled(FALSE);

		if (funneled == TRUE) {
			if (cur_thread->funnel_lock)
				panic("Funnel lock called when holding one %p", cur_thread->funnel_lock);
			KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
											fnl, 1, 0, 0, 0);
			funnel_lock(fnl);
			KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
											fnl, 1, 0, 0, 0);
			cur_thread->funnel_state |= TH_FN_OWNED;
			cur_thread->funnel_lock = fnl;
		} else {
			if(cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
				panic("Funnel unlock  when not holding funnel");
			cur_thread->funnel_state &= ~TH_FN_OWNED;
			KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
								fnl, 1, 0, 0, 0);

			cur_thread->funnel_lock = THR_FUNNEL_NULL;
			funnel_unlock(fnl);
		}
		(void)ml_set_interrupts_enabled(intr);
	} else {
		/* if we are trying to acquire funnel recursively
		 * check for funnel to be held already
		 */
		if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
				panic("thread_funnel_set: already holding a different funnel");
		}
	}
	return(funnel_state_prev);
}
// PE_Determine_Clock_Speeds is called by the via driver in IOKit
// It uses the numbers generated by pe_do_clock_test and reports
// the cleaned up values to the rest of the OS.
void PE_Determine_Clock_Speeds(unsigned int via_addr, int num_speeds,
			       unsigned long *speed_list)
{
  boolean_t          oldLevel;
  unsigned long      tmp_bus_speed, tmp_cpu_speed;
  unsigned long long tmp;
  
  oldLevel = ml_set_interrupts_enabled(FALSE);
  pe_do_clock_test(via_addr, num_speeds, speed_list);
  ml_set_interrupts_enabled(oldLevel);
  
  tmp_bus_speed = bus_freq_num / bus_freq_den;
  tmp = ((unsigned long long)bus_freq_num * cpu_pll) / (bus_freq_den * 2);
  tmp_cpu_speed = (unsigned long)tmp;
  
  // Report the bus clock rate as is.
  gPEClockFrequencyInfo.bus_clock_rate_num = bus_freq_num;
  gPEClockFrequencyInfo.bus_clock_rate_den = bus_freq_den;
  
  // pll multipliers are in halves, so set the denominator to 2.
  gPEClockFrequencyInfo.bus_to_cpu_rate_num = cpu_pll;
  gPEClockFrequencyInfo.bus_to_cpu_rate_den = 2;
  
  // The decrementer rate is one fourth the bus rate.
  gPEClockFrequencyInfo.bus_to_dec_rate_num = 1;
  gPEClockFrequencyInfo.bus_to_dec_rate_den = 4;
  
  // Assume that the timebase frequency is derived from the bus clock.
  gPEClockFrequencyInfo.timebase_frequency_num = bus_freq_num;
  gPEClockFrequencyInfo.timebase_frequency_den = bus_freq_den * 4;
  
  // Set the truncated numbers in gPEClockFrequencyInfo.
  gPEClockFrequencyInfo.bus_clock_rate_hz = tmp_bus_speed;
  gPEClockFrequencyInfo.cpu_clock_rate_hz = tmp_cpu_speed;
  gPEClockFrequencyInfo.dec_clock_rate_hz = tmp_bus_speed / 4;
  gPEClockFrequencyInfo.timebase_frequency_hz = tmp_bus_speed / 4;
  
  gPEClockFrequencyInfo.bus_frequency_hz = tmp_bus_speed;
  gPEClockFrequencyInfo.bus_frequency_min_hz = tmp_bus_speed;
  gPEClockFrequencyInfo.bus_frequency_max_hz = tmp_bus_speed;
  gPEClockFrequencyInfo.cpu_frequency_hz = tmp_cpu_speed;
  gPEClockFrequencyInfo.cpu_frequency_min_hz = tmp_cpu_speed;
  gPEClockFrequencyInfo.cpu_frequency_max_hz = tmp_cpu_speed;
  
  PE_call_timebase_callback();
}
Example #23
void kprintf(const char *fmt, ...)
{
        va_list   listp;
	boolean_t state;
	
	state = ml_set_interrupts_enabled(FALSE);
	simple_lock(&kprintf_lock);
	
	if (!disable_serial_output) {	
        	va_start(listp, fmt);
        	_doprnt(fmt, &listp, PE_kputc, 16);
        	va_end(listp);
	}
	
	simple_unlock(&kprintf_lock);
	ml_set_interrupts_enabled(state);
}
Example #24
/*
 * Debugging interface to the CPU power management code.
 *
 * Note:	Does not need locks because it disables interrupts over
 *		the call.
 */
static int
pmsSysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
	  __unused int arg2, struct sysctl_req *req)
{
	pmsctl_t	ctl;
	int		error;
	boolean_t	intr;

	if ((error = SYSCTL_IN(req, &ctl, sizeof(ctl))))
		return(error);

	intr = ml_set_interrupts_enabled(FALSE);		/* No interruptions in here */
	error = pmsControl(ctl.request, (user_addr_t)(uintptr_t)ctl.reqaddr, ctl.reqsize);
	(void)ml_set_interrupts_enabled(intr);			/* Restore interruptions */

	return(error);
}
/*
 * We need to disable interrupts while holding the mutex interlock
 * to prevent an IPI intervening.
 * Hence, local helper functions lck_interlock_lock()/lck_interlock_unlock().
 */
static boolean_t
lck_interlock_lock(lck_rw_t *lck)
{
	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);	
	hw_lock_byte_lock(&lck->lck_rw_interlock);

	return istate;
}
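The matching unlock helper is not part of this excerpt; presumably it drops the interlock byte first and only then restores the interrupt state that lck_interlock_lock() saved. A sketch under that assumption:

/*
 * Assumed counterpart to lck_interlock_lock() above (not shown in this
 * excerpt): release the interlock, then restore the caller's saved
 * interrupt state.
 */
static void
lck_interlock_unlock(lck_rw_t *lck, boolean_t istate)
{
	hw_lock_byte_unlock(&lck->lck_rw_interlock);
	ml_set_interrupts_enabled(istate);
}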
__private_extern__ kern_return_t
chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t *myast = ast_pending();

    if(urgent) {
        *myast |= (AST_CHUD_URGENT | AST_URGENT);
    } else {
        *myast |= (AST_CHUD);
    }

    KERNEL_DEBUG_CONSTANT(
	MACHDBG_CODE(DBG_MACH_CHUD, CHUD_AST_SEND) | DBG_FUNC_NONE,
	urgent, 0, 0, 0, 0);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0, timer_call_param_t param1)
{
    int cpu;
    boolean_t oldlevel;
    struct ppc_thread_state64 state;
    mach_msg_type_number_t count;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    cpu = cpu_number();

    count = PPC_THREAD_STATE64_COUNT;
    if(chudxnu_thread_get_state(current_act(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) {
        if(cpu_timer_callback_fn[cpu]) {
            (cpu_timer_callback_fn[cpu])(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
}
__private_extern__
kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units)
{
    int cpu;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    cpu = cpu_number();

    timer_call_cancel(&(cpu_timer_call[cpu])); // cancel any existing callback for this cpu

    cpu_timer_callback_fn[cpu] = func;

    clock_interval_to_deadline(time, units, &(t_deadline[cpu]));
    timer_call_setup(&(cpu_timer_call[cpu]), chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(cpu_timer_call[cpu]), t_deadline[cpu]);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
void IOPlatformExpert::sleepKernel(void)
{
#if 0
  long cnt;
  boolean_t intState;
  
  intState = ml_set_interrupts_enabled(false);
  
  for (cnt = 0; cnt < 10000; cnt++) {
    IODelay(1000);
  }
  
  ml_set_interrupts_enabled(intState);
#else
//  PE_initialize_console(0, kPEDisableScreen);
  
  IOCPUSleepKernel();
  
//  PE_initialize_console(0, kPEEnableScreen);
#endif
}
static void
chudxnu_private_cpu_timer_callback(
	timer_call_param_t param0,
	timer_call_param_t param1)
{
#pragma unused (param0)
#pragma unused (param1)
	chudcpu_data_t			*chud_proc_info;
	boolean_t			oldlevel;
	x86_thread_state_t 		state;
	mach_msg_type_number_t		count;
	chudxnu_cpu_timer_callback_func_t fn;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	count = x86_THREAD_STATE_COUNT;
	if (chudxnu_thread_get_state(current_thread(),
				     x86_THREAD_STATE,
				     (thread_state_t)&state,
				     &count,
				     FALSE) == KERN_SUCCESS) {
		fn = chud_proc_info->cpu_timer_callback_fn;
		if (fn) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_CHUD,
					CHUD_TIMER_CALLBACK) | DBG_FUNC_NONE,
				(uint32_t)fn, 0, 0, 0, 0);
				//state.eip, state.cs, 0, 0);
			(fn)(
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
	}
	
	ml_set_interrupts_enabled(oldlevel);
}