Example #1
File: debug.c Project: UIKit0/xnu
static spl_t
panic_prologue(const char *str)
{
	spl_t	s;

	if (kdebug_enable) {
		ml_set_interrupts_enabled(TRUE);
		kdbg_dump_trace_to_file("/var/tmp/panic.trace");
	}

	s = splhigh();
	disable_preemption();

#if	defined(__i386__) || defined(__x86_64__)
	/* Attempt to display the unparsed panic string */
	const char *tstr = str;

	kprintf("Panic initiated, string: ");
	while (tstr && *tstr)
		kprintf("%c", *tstr++);
	kprintf("\n");
#endif

	panic_safe();

#ifndef __arm__ 	/* xxx show all panic output for now. */
	if( logPanicDataToScreen )
#endif		
		disable_debug_output = FALSE;
	debug_mode = TRUE;

restart:
	PANIC_LOCK();

	if (panicstr) {
		if (cpu_number() != paniccpu) {
			PANIC_UNLOCK();
			/*
			 * Wait until message has been printed to identify correct
			 * cpu that made the first panic.
			 */
			while (panicwait)
				continue;
			goto restart;
		} else {
			nestedpanic += 1;
			PANIC_UNLOCK();
			Debugger("double panic");
			printf("double panic:  We are hanging here...\n");
			panic_stop();
			/* NOTREACHED */
		}
	}
	panicstr = str;
	paniccpu = cpu_number();
	panicwait = 1;

	PANIC_UNLOCK();
	return(s);
}
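For context, panic_prologue() is one half of a bracketing pair; the sketch below shows how a panic routine might drive it. panic_epilogue(), kdb_printf(), and the panic_sketch() name are assumptions, not part of this excerpt.

static void
panic_sketch(const char *str)
{
	spl_t	s;

	s = panic_prologue(str);	/* serialize against any other panicking CPU */
	kdb_printf("panic(cpu %d): %s\n", paniccpu, str);
	panicwait = 0;			/* release CPUs spinning in panic_prologue() */
	panic_epilogue(s);		/* assumed counterpart: restore spl, then stop or debug */
}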
Example #2
/*
 * Flush pte on all active processors.
 */
void
smp_tlb_flush_pte(vaddr_t va, struct pmap * pm)
{
	sparc64_cpuset_t cpuset;
	struct cpu_info *ci;
	int ctx;
	bool kpm = (pm == pmap_kernel());
	/* Flush our own TLB */
	ctx = pm->pm_ctx[cpu_number()];
	KASSERT(ctx >= 0);
	if (kpm || ctx > 0)
		sp_tlb_flush_pte(va, ctx);

	CPUSET_ASSIGN(cpuset, cpus_active);
	CPUSET_DEL(cpuset, cpu_number());
	if (CPUSET_EMPTY(cpuset))
		return;

	/* Flush others */
	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		if (CPUSET_HAS(cpuset, ci->ci_index)) {
			CPUSET_DEL(cpuset, ci->ci_index);
			ctx = pm->pm_ctx[ci->ci_index];
			KASSERT(ctx >= 0);
			if (!kpm && ctx == 0)
				continue;
			sparc64_send_ipi(ci->ci_cpuid, smp_tlb_flush_pte_func, va, ctx);
		}
	}
}
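The IPI target smp_tlb_flush_pte_func is referenced above but not shown (in the tree it is an assembly entry point). A hedged C-level sketch of its effect on the remote CPU, assuming the (va, ctx) pair arrives unchanged:

static void
smp_tlb_flush_pte_func_sketch(uint64_t va, uint64_t ctx)
{
	/* Remote CPU: drop the translation for va in the given context. */
	sp_tlb_flush_pte((vaddr_t)va, (int)ctx);
}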
Example #3
/*
 *	Determine whether the lock in question is owned
 *	by the current thread.
 */
void
usld_lock_held(
	usimple_lock_t	l)
{
	char		*caller = "usimple_lock_held";

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!(l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x hasn't been taken",
		      caller, (integer_t) l);
	if (l->debug.lock_thread != (void *) current_thread())
		panic("%s:  lock 0x%x is owned by thread 0x%x", caller,
		      (integer_t) l, (integer_t) l->debug.lock_thread);

#if	MACH_RT
	/*
	 *	The usimple_lock is active, so preemption
	 *	is disabled and the current cpu should
	 *	match the one recorded at lock acquisition time.
	 */
	if (l->debug.lock_cpu != cpu_number())
		panic("%s:  current cpu 0x%x isn't acquiring cpu 0x%x",
		      caller, cpu_number(), (integer_t) l->debug.lock_cpu);
#endif	/* MACH_RT */
}
Example #4
File: pmCPU.c Project: CptFrazz/xnu
/*
 * Called when the CPU is idle.  It calls into the power management kext
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
    cpu_data_t		*my_cpu		= current_cpu_datap();

    if (my_cpu == NULL)
	goto out;

    my_cpu->lcpu.state = LCPU_IDLE;
    DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
    MARK_CPU_IDLE(cpu_number());

    if (pmInitDone) {
	/*
	 * Handle case where ml_set_maxbusdelay() or ml_set_maxintdelay()
	 * were called prior to the CPU PM kext being registered.  We do
	 * this here because at this point we know the values are about to be
	 * used for the first time: idle is where decisions based on them are made.
	 */
	if (earlyMaxBusDelay != DELAY_UNSET)
	    ml_set_maxbusdelay((uint32_t)(earlyMaxBusDelay & 0xFFFFFFFF));

	if (earlyMaxIntDelay != DELAY_UNSET)
	    ml_set_maxintdelay(earlyMaxIntDelay);
    }

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->MachineIdle != NULL)
	(*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
    else {
	/*
	 * If no power management, re-enable interrupts and halt.
	 * This will keep the CPU from spinning through the scheduler
	 * and will allow at least some minimal power savings (but it may
	 * cause problems in some MP configurations w.r.t. the APIC
	 * stopping during a GV3 transition).
	 */
	pal_hlt();

	/* Once woken, re-disable interrupts. */
	pal_cli();
    }

    /*
     * Mark the CPU as running again.
     */
    MARK_CPU_ACTIVE(cpu_number());
    DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
    my_cpu->lcpu.state = LCPU_RUN;

    /*
     * Re-enable interrupts.
     */
  out:
    pal_sti();
}
Example #5
/*
 * This is called once by every CPU on a wake from sleep/hibernate
 * and is meant to re-apply a microcode update that got lost
 * by sleeping.
 */
void
ucode_update_wake()
{
	if (global_update) {
		kprintf("ucode: Re-applying update after wake (CPU #%d)\n", cpu_number());
		update_microcode();
#ifdef DEBUG
	} else {
		kprintf("ucode: No update to apply (CPU #%d)\n", cpu_number());
#endif
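		/* With DEBUG undefined the whole else arm vanishes and the
		 * closing brace below simply ends the if. */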
	}
}
Example #6
void
cpu_startup_common(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];	/* "99999 MB" */

	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);

#ifdef MULTIPROCESSOR
	kcpuset_create(&cpus_halted, true);
	KASSERT(cpus_halted != NULL);
	kcpuset_create(&cpus_hatched, true);
	KASSERT(cpus_hatched != NULL);
	kcpuset_create(&cpus_paused, true);
	KASSERT(cpus_paused != NULL);
	kcpuset_create(&cpus_resumed, true);
	KASSERT(cpus_resumed != NULL);
	kcpuset_create(&cpus_running, true);
	KASSERT(cpus_running != NULL);
	kcpuset_set(cpus_hatched, cpu_number());
	kcpuset_set(cpus_running, cpu_number());
#endif

	cpu_hwrena_setup();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	printf("%s\n", cpu_getmodel());
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * (No need to allocate an mbuf cluster submap.  Mbuf clusters
	 * are allocated via the pool allocator, and we use KSEG/XKPHYS to
	 * map those pages.)
	 */

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

#if defined(__mips_n32)
	module_machine = "mips-n32";
#endif
}
Example #7
__dead void
error_fatal(struct trapframe *frame)
{
	if (frame->tf_vector == 0)
		printf("\nCPU %d Reset Exception\n", cpu_number());
	else
		printf("\nCPU %d Error Exception\n", cpu_number());

#ifdef DDB
	regdump((struct trapframe*)frame);
#endif
	panic("unrecoverable exception %d", frame->tf_vector);
}
Example #8
static void get_cpu_tick_diff_callback(void *data)
{
    tsc_t *values = (tsc_t *) data;
    int cpu = cpu_number();
    values[cpu].raw = rdtsc();
    values[cpu].corrected = mach_absolute_time();
}
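A minimal sketch of how the callback above might be driven so that every CPU fills in its own slot. mp_rendezvous_no_intrs() and the get_all_cpu_tick_diffs() name are assumptions about the surrounding xnu-side code; the caller must size the array to the number of CPUs.

static void get_all_cpu_tick_diffs(tsc_t *values)
{
    /* Run the callback once on every CPU with interrupts disabled;
     * each CPU indexes the shared array with its own cpu_number(). */
    mp_rendezvous_no_intrs(get_cpu_tick_diff_callback, values);
}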
Example #9
void
smp_rendezvous_cpus(unsigned long map,
	void (* action_func)(void *),
	void *arg)
{
	unsigned int cpumask = 1 << cpu_number();

	if (ncpus == 1) {
		if (action_func != NULL)
			action_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	mtx_enter(&smp_ipi_mtx);

	/* set static function pointers */
	smp_rv_map = map;
	smp_rv_action_func = action_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[0] = 0;
	smp_rv_waiters[1] = 0;

	/* signal other processors, which will enter the IPI with interrupts off */
	mips64_multicast_ipi(map & ~cpumask, MIPS64_IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (map & cpumask)
		smp_rendezvous_action();

	while (smp_rv_waiters[1] != smp_rv_map)
		;
	/* release lock */
	mtx_leave(&smp_ipi_mtx);
}
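The receiving side is not part of this excerpt. A hedged sketch of what smp_rendezvous_action() would have to do, assuming smp_rv_waiters[] holds unsigned int CPU bitmasks and atomic_setbits_int() is the check-in primitive:

void
smp_rendezvous_action_sketch(void)
{
	unsigned int cpumask = 1 << cpu_number();

	/* check in so the initiator can tell we have arrived */
	atomic_setbits_int(&smp_rv_waiters[0], cpumask);

	if (smp_rv_action_func != NULL)
		smp_rv_action_func(smp_rv_func_arg);

	/* check out; the initiator spins until all mapped CPUs reach this point */
	atomic_setbits_int(&smp_rv_waiters[1], cpumask);
}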
Example #10
void
mips64_ipi_nop(void)
{
#ifdef DEBUG
	printf("mips64_ipi_nop on cpu%d\n", cpu_number());
#endif
}
Example #11
void
panic_double_fault64(x86_saved_state_t *sp)
{
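	/*
	 * Record the first CPU to double-fault: the compare-and-swap succeeds
	 * only while panic_double_fault_cpu still holds -1, so later CPUs
	 * leave the recorded value untouched.
	 */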
	(void)OSCompareAndSwap((UInt32) -1, (UInt32) cpu_number(), (volatile UInt32 *)&panic_double_fault_cpu);
	panic_64(sp, PANIC_DOUBLE_FAULT, "Double fault", FALSE);

}
Example #12
/*
 * Broadcast an IPI to all but ourselves.
 */
void
sparc64_broadcast_ipi(ipifunc_t func, uint64_t arg1, uint64_t arg2)
{

	sparc64_multicast_ipi(CPUSET_EXCEPT(cpus_active, cpu_number()), func,
		arg1, arg2);
}
Example #13
/**
 * Wrapper between the native darwin per-cpu callback and PFNRTWORKER
 * for the RTMpOnAll API.
 *
 * @param   pvArg   Pointer to the RTMPARGS package.
 */
static void rtmpOnAllDarwinWrapper(void *pvArg)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
    IPRT_DARWIN_SAVE_EFL_AC();
    pArgs->pfnWorker(cpu_number(), pArgs->pvUser1, pArgs->pvUser2);
    IPRT_DARWIN_RESTORE_EFL_AC();
}
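A minimal sketch of the caller that would feed this wrapper. The RTMPARGS field names, NIL_RTCPUID, and the use of xnu's mp_rendezvous_no_intrs() are assumptions rather than part of this excerpt.

static int rtMpOnAllSketch(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RTMPARGS Args;

    Args.pfnWorker = pfnWorker;     /* invoked by the wrapper on each CPU */
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = NIL_RTCPUID;   /* no CPU is singled out for RTMpOnAll */
    Args.cHits     = 0;

    /* Rendezvous every CPU with interrupts off and run the wrapper there. */
    mp_rendezvous_no_intrs(rtmpOnAllDarwinWrapper, &Args);
    return VINF_SUCCESS;
}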
Example #14
int
__mp_lock_held(struct __mp_lock *mpl)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];

	return (cpu->mplc_ticket == mpl->mpl_ticket && cpu->mplc_depth > 0);
}
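A hedged usage sketch: the natural consumer of __mp_lock_held() is a diagnostic assertion. The macro name below is hypothetical.

#ifdef DIAGNOSTIC
#define MP_LOCK_ASSERT_HELD(mpl)	KASSERT(__mp_lock_held(mpl))
#else
#define MP_LOCK_ASSERT_HELD(mpl)	/* nothing */
#endif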
Example #15
/*
 *	Debug checks on a usimple_lock just after
 *	successfully attempting to acquire it.
 *
 *	Preemption has been disabled by the
 *	lock acquisition attempt, so it's safe
 *	to use cpu_number.
 */
void
usld_lock_try_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	caller[] = "successful usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s:  lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *) current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	usl_trace(l, mycpu, pc, caller);
}
Example #16
/* ARGSUSED */
void
usld_lock_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	caller[] = "usimple_lock";


	if (!usld_lock_common_checks(l, caller))
		return;

/*
 *	Note that we have a weird case where we are getting a lock when we are
 *	in the process of putting the system to sleep. We are running with no
 *	current threads, therefore we can't tell if we are trying to retake a lock
 *	we have or someone on the other processor has it.  Therefore we just
 *	ignore this test if the locking thread is 0.
 */

	if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
	    l->debug.lock_thread == (void *) current_thread()) {
		printf("%s:  lock %p already locked (at %p) by",
		      caller, l, l->debug.lock_pc);
		printf(" current thread %p (new attempt at pc %p)\n",
		       l->debug.lock_thread, pc);
		panic("%s", caller);
	}
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
Example #17
void
cpu_hatch(void)
{
	char *v = (char*)CPUINFO_VA;
	int i;

	for (i = 0; i < 4*PAGE_SIZE; i += sizeof(long))
		flush(v + i);

	cpu_pmap_init(curcpu());
	CPUSET_ADD(cpus_active, cpu_number());
	cpu_reset_fpustate();
	curlwp = curcpu()->ci_data.cpu_idlelwp;
	membar_Sync();

	/* wait for the boot CPU to flip the switch */
	while (sync_tick == 0) {
		/* we do nothing here */
	}
	settick(0);
	if (curcpu()->ci_system_clockrate[0] != 0) {
		setstick(0);
		stickintr_establish(PIL_CLOCK, stickintr);
	} else {
		tickintr_establish(PIL_CLOCK, tickintr);
	}
	spl0();
}
Example #18
File: cpu.c Project: Bitesher/xnu
kern_return_t
cpu_start(
	int cpu)
{
	kern_return_t		ret;

	if (cpu == cpu_number()) {
		cpu_machine_init();
		return KERN_SUCCESS;
	}

	/*
	 * Try to bring the CPU back online without a reset.
	 * If the fast restart doesn't succeed, fall back to
	 * the slow way.
	 */
	ret = intel_startCPU_fast(cpu);
	if (ret != KERN_SUCCESS) {
		/*
		 * Should call out through PE.
		 * But take the shortcut here.
		 */
		ret = intel_startCPU(cpu);
	}

	if (ret != KERN_SUCCESS)
		kprintf("cpu: cpu_start(%d) returning failure!\n", cpu);

	return(ret);
}
Example #19
/*
 * As long as db_cpu is not -1 or cpu_number(), we know that debugger
 * is active on another cpu.
 */
void
lock_db(void)
{
	int	my_cpu = cpu_number();

	for (;;) {
#if	CONSOLE_ON_MASTER
	    if (my_cpu == master_cpu) {
		db_console();
	    }
#endif /* CONSOLE_ON_MASTER */
	    if (db_cpu != -1 && db_cpu != my_cpu)
		continue;

#if	CONSOLE_ON_MASTER
	    if (my_cpu == master_cpu) {
		if (!simple_lock_try(&db_lock))
		    continue;
	    }
	    else {
		simple_lock(&db_lock);
	    }
#else /* CONSOLE_ON_MASTER */
	    simple_lock(&db_lock);
#endif /* CONSOLE_ON_MASTER */
	    if (db_cpu == -1 || db_cpu == my_cpu)
		break;
	    simple_unlock(&db_lock);
	}
}
Example #20
void
db_output_prompt(void)
{
	db_printf("db%s", (db_default_act) ? "t": "");
	db_printf("{%d}", cpu_number());
	db_printf("> ");
}
Example #21
void
db_cpuinfo_cmd(db_expr_t addr, int have_addr, db_expr_t count, char *modif)
{
	int i;

	for (i = 0; i < MAXCPUS; i++) {
		if (cpu_info[i] != NULL) {
			db_printf("%c%4d: ", (i == cpu_number()) ? '*' : ' ',
			    CPU_INFO_UNIT(cpu_info[i]));
			switch(cpu_info[i]->ci_ddb_paused) {
			case CI_DDB_RUNNING:
				db_printf("running\n");
				break;
			case CI_DDB_SHOULDSTOP:
				db_printf("stopping\n");
				break;
			case CI_DDB_STOPPED:
				db_printf("stopped\n");
				break;
			case CI_DDB_ENTERDDB:
				db_printf("entering ddb\n");
				break;
			case CI_DDB_INDDB:
				db_printf("ddb\n");
				break;
			default:
				db_printf("? (%d)\n",
				    cpu_info[i]->ci_ddb_paused);
				break;
			}
		}
	}
}
Example #22
File: cpu.c Project: argp/xnu
kern_return_t
cpu_start(int cpu)
{
	kprintf("cpu_start() cpu: %d\n", cpu);
	if (cpu == cpu_number()) {
		cpu_machine_init();
		return KERN_SUCCESS;
	} else {
#if     __ARM_SMP__
		cpu_data_t	*cpu_data_ptr;
		thread_t	first_thread;

		cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

		cpu_data_ptr->cpu_pmap_cpu_data.cpu_user_pmap = NULL;

		if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL)
			first_thread = cpu_data_ptr->cpu_processor->next_thread;
		else
			first_thread = cpu_data_ptr->cpu_processor->idle_thread;
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
		return KERN_SUCCESS;
#else
		return KERN_FAILURE;
#endif
	}
}
Example #23
/**
 * Wrapper between the native darwin per-cpu callback and PFNRTWORKER
 * for the RTMpOnOthers API.
 *
 * @param   pvArg   Pointer to the RTMPARGS package.
 */
static void rtmpOnOthersDarwinWrapper(void *pvArg)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
    RTCPUID idCpu = cpu_number();
    if (pArgs->idCpu != idCpu)
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
}
Example #24
/*
 * Stray interrupt handler.  Clear it if possible.
 * If not, and if we get 10 interrupts in 10 seconds, panic.
 * XXXSMP: We are holding the kernel lock at entry & exit.
 */
void
strayintr(struct clockframe *fp)
{
	static int straytime, nstray;
	char bits[64];
	int timesince;

#if defined(MULTIPROCESSOR)
	/*
	 * XXX
	 *
	 * Don't whine about zs interrupts on MP.  We sometimes get
	 * stray interrupts when polled kernel output on cpu>0 eats
	 * the interrupt and cpu0 sees it.
	 */
#define ZS_INTR_IPL	12
	if (fp->ipl == ZS_INTR_IPL)
		return;
#endif

	snprintb(bits, sizeof(bits), PSR_BITS, fp->psr);
	printf("stray interrupt cpu%d ipl 0x%x pc=0x%x npc=0x%x psr=%s\n",
	    cpu_number(), fp->ipl, fp->pc, fp->npc, bits);

	timesince = time_uptime - straytime;
	if (timesince <= 10) {
		if (++nstray > 10)
			panic("crazy interrupts");
	} else {
		straytime = time_uptime;
		nstray = 1;
	}
}
Example #25
/*
 * Leave debugger.
 */
void
db_leave()
{
	int	mycpu = cpu_number();

	/*
	 * If continuing, give up debugger
	 */
	if (db_run_mode == STEP_CONTINUE)
	    db_cpu = -1;

	/*
	 * If I am a slave, drop my slave count.
	 */
	if (db_slave[mycpu])
	    db_slave[mycpu]--;
	if (db_enter_debug)
	    db_printf("db_leave: cpu %d[%d], db_cpu %d, run_mode %d\n",
		      mycpu, db_slave[mycpu], db_cpu, db_run_mode);
	/*
	 * Unlock debugger.
	 */
	unlock_db();

	/*
	 * Drop recursive entry count.
	 */
	db_active[mycpu]--;
}
Example #26
/*
 *	Acquire a usimple_lock.
 *
 *	MACH_RT:  Returns with preemption disabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_lock(
	usimple_lock_t	l)
{
	int i;
	unsigned int	timeouttb;				/* Used to convert time to timebase ticks */
	pc_t		pc;
#if	ETAP_LOCK_TRACE
	etap_time_t	start_wait_time;
	int		no_miss_info = 0;
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
	int		count = 0;
#endif 	/* USLOCK_DEBUG */

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
#if	ETAP_LOCK_TRACE
	ETAP_TIME_CLEAR(start_wait_time);
#endif	/* ETAP_LOCK_TRACE */

	while (!hw_lock_try(&l->interlock)) {
		ETAPCALL(if (no_miss_info++ == 0)
			start_wait_time = etap_simplelock_miss(l));
		while (hw_lock_held(&l->interlock)) {
			/*
			 *	Spin watching the lock value in cache,
			 *	without consuming external bus cycles.
			 *	On most SMP architectures, the atomic
			 *	instruction(s) used by hw_lock_try
			 *	cost much, much more than an ordinary
			 *	memory read.
			 */
#if	USLOCK_DEBUG
			if (count++ > max_lock_loops
#if	MACH_KDB && NCPUS > 1
			    && l != &kdb_lock
#endif	/* MACH_KDB && NCPUS > 1 */
			    ) {
				if (l == &printf_lock) {
					return;
				}
				mp_disable_preemption();
#if MACH_KDB
				db_printf("cpu %d looping on simple_lock(%x)"
					  "(=%x)",
					  cpu_number(), l,
					  *hw_lock_addr(l->interlock));
				db_printf(" called by %x\n", pc);
#endif
				Debugger("simple lock deadlock detection");
				count = 0;
				mp_enable_preemption();
			}
#endif 	/* USLOCK_DEBUG */
		}
	}
	ETAPCALL(etap_simplelock_hold(l, pc, start_wait_time));
	USLDBG(usld_lock_post(l, pc));
}
Example #27
/*
 *	Debug checks on a usimple_lock just before
 *	releasing it.  Note that the caller has not
 *	yet released the hardware lock.
 *
 *	Preemption is still disabled, so there's
 *	no problem using cpu_number.
 */
void
usld_unlock(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	caller[] = "usimple_unlock";


	if (!usld_lock_common_checks(l, caller))
		return;

	mycpu = cpu_number();

	if (!(l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x hasn't been taken",
		      caller, (integer_t) l);
	if (l->debug.lock_thread != (void *) current_thread())
		panic("%s:  unlocking lock 0x%x, owned by thread %p",
		      caller, (integer_t) l, l->debug.lock_thread);
	if (l->debug.lock_cpu != mycpu) {
		printf("%s:  unlocking lock 0x%x on cpu 0x%x",
		       caller, (integer_t) l, mycpu);
		printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
		panic("%s", caller);
	}
	usl_trace(l, mycpu, pc, caller);

	l->debug.unlock_thread = l->debug.lock_thread;
	l->debug.lock_thread = INVALID_PC;
	l->debug.state &= ~USLOCK_TAKEN;
	l->debug.unlock_pc = pc;
	l->debug.unlock_cpu = mycpu;
}
Example #28
void
db_show_all_slocks(void)
{
	unsigned int	i, index;
	int		mycpu = cpu_number();
	usimple_lock_t	l;

	if (uslock_stack_enabled == FALSE)
		return;

#if	0
	if (!mach_slocks_init)
		iprintf("WARNING: simple locks stack may not be accurate\n");
#endif
	assert(uslock_stack_index[mycpu] >= 0);
	assert(uslock_stack_index[mycpu] <= USLOCK_STACK_DEPTH);
	index = uslock_stack_index[mycpu];
	for (i = 0; i < index; ++i) {
		l = uslock_stack[mycpu][i];
		iprintf("%d: ", i);
		db_printsym((vm_offset_t)l, DB_STGY_ANY);
		printf(" <ETAP type 0x%x> ", l->debug.etap_type);
		if (l->debug.lock_pc != INVALID_PC) {
			printf(" locked by ");
			db_printsym((int)l->debug.lock_pc, DB_STGY_PROC);
		}
		printf("\n");
	}
}
Example #29
int perfmon_disable(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;
  
	if(!(thread->machine.specFlags & perfMonitor)) {
		return KERN_NO_ACCESS; /* not enabled */
	} else {
		simple_lock(&hw_perfmon_lock);
		hw_perfmon_thread_count--;
		simple_unlock(&hw_perfmon_lock);
		perfmon_release_facility(kernel_task); /* will release if hw_perfmon_thread_count is 0 */
	}
  
	thread->machine.specFlags &= ~perfMonitor; /* disable perf monitor facility for this thread */
	if(thread==current_thread()) {
		PerProcTable[cpu_number()].ppe_vaddr->spcFlags &= ~perfMonitor; /* update per_proc */
	}
	sv->save_mmcr0 = 0;
	sv->save_mmcr1 = 0;
	sv->save_mmcr2 = 0;
  
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = 0;
		thread->machine.pmcovfl[curPMC] = 0;
		thread->machine.perfmonFlags = 0;
	}
  
#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_disable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif  

	return KERN_SUCCESS;
}
Example #30
__private_extern__
kern_return_t chudxnu_cpusig_send(int otherCPU, uint32_t request)
{
    int thisCPU;
    kern_return_t retval = KERN_FAILURE;
    int retries = 0;
    boolean_t oldlevel;
    uint32_t temp[2];

    oldlevel = ml_set_interrupts_enabled(FALSE);
    thisCPU = cpu_number();

    if(thisCPU!=otherCPU) {
        temp[0] = 0xFFFFFFFF;		/* set sync flag */
        temp[1] = request;			/* set request */
        __asm__ volatile("eieio");	/* force order */
        __asm__ volatile("sync");	/* force to memory */

        do {
            retval=cpu_signal(otherCPU, SIGPcpureq, CPRQchud, (uint32_t)&temp);
        } while(retval!=KERN_SUCCESS && (retries++)<16);

        if(retries>=16) {
            retval = KERN_FAILURE;
        } else {
            retval = hw_cpu_sync(temp, LockTimeOut); /* wait for the other processor */
            if(!retval) {
                retval = KERN_FAILURE;
            } else {
                retval = KERN_SUCCESS;
            }
        }
    } else {