Example #1
0
/*
 *	Routine:	cpu_init
 *	Function:	Restore the timebase, reset the decrementer pop, and
 *			record this processor's type, subtype, and running state.
 */
void
cpu_init(
	void)
{
	struct per_proc_info *proc_info;

	proc_info = getPerProc();

	/*
	 * Restore the TBR.
	 */
	if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
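		/* Write TBL as 0 first so no carry into TBU can occur between the two writes */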
		mttb(0);
		mttbu(proc_info->save_tbu);
		mttb(proc_info->save_tbl);
	}

	proc_info->rtcPop = EndOfAllTime;			/* forget any existing decrementer setting */
	etimer_resync_deadlines();				/* Now that the time base is sort of correct, request the next timer pop */

	proc_info->cpu_type = CPU_TYPE_POWERPC;
	proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
	proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
	proc_info->running = TRUE;

}
Example #2
0
/*
 * 	Event timer interrupt.
 *
 * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
 *     that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and iterate over them
 */
void
etimer_intr(
__unused int inuser,
__unused uint64_t iaddr)
{
	uint64_t		abstime;
	rtclock_timer_t		*mytimer;
	struct per_proc_info	*pp;

	pp = getPerProc();

	mytimer = &pp->rtclock_timer;				/* Point to the event timer */

	abstime = mach_absolute_time();				/* Get the time now */

	/* is it time for power management state change? */	
	if (pp->pms.pmsPop <= abstime) {
	        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 3) | DBG_FUNC_START, 0, 0, 0, 0, 0);
		pmsStep(1);					/* Yes, advance step */
	        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 3) | DBG_FUNC_END, 0, 0, 0, 0, 0);

		abstime = mach_absolute_time();			/* Get the time again since we ran a bit */
	}

	/* has a pending clock timer expired? */
	if (mytimer->deadline <= abstime) {			/* Have we expired the deadline? */
		mytimer->has_expired = TRUE;			/* Remember that we popped */
		mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
		mytimer->has_expired = FALSE;
	}

	/* schedule our next deadline */
	pp->rtcPop = EndOfAllTime;				/* any real deadline will be earlier */
	etimer_resync_deadlines();
}
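
/*
 * Illustrative sketch (not xnu code): the XXX note above suggests driving this
 * handler from a table of generic callouts instead of the fixed pms/rtclock
 * chain.  The names etimer_callout_t, etimer_callouts and etimer_run_callouts
 * are hypothetical; uint64_t comes from the surrounding kernel headers.
 */
typedef struct etimer_callout {
	uint64_t	deadline;			/* 0 means not armed */
	void		(*handler)(uint64_t now);	/* run once the deadline passes */
} etimer_callout_t;

#define ETIMER_CALLOUT_MAX	4
static etimer_callout_t		etimer_callouts[ETIMER_CALLOUT_MAX];

static void
etimer_run_callouts(uint64_t now)
{
	int i;

	for (i = 0; i < ETIMER_CALLOUT_MAX; i++) {
		etimer_callout_t *co = &etimer_callouts[i];

		if (co->deadline != 0 && co->deadline <= now) {
			co->deadline = 0;		/* disarm before calling */
			co->handler(now);		/* the handler may re-arm itself */
		}
	}
}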
Example #3
0
/*
 *	Routine:	cpu_machine_init
 *	Function:	Complete per-processor startup: platform initialization,
 *			hibernate wake handling, timebase sync for secondary
 *			processors, and interrupt setup.
 */
void
cpu_machine_init(
	void)
{
	struct per_proc_info			*proc_info;
	volatile struct per_proc_info	*mproc_info;


	proc_info = getPerProc();
	mproc_info = PerProcTable[master_cpu].ppe_vaddr;

	if (proc_info != mproc_info) {
		simple_lock(&rht_lock);
		if (rht_state & RHT_WAIT)
			thread_wakeup(&rht_state);
		rht_state &= ~(RHT_BUSY|RHT_WAIT);
		simple_unlock(&rht_lock);
	}

	PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

	if (proc_info->hibernate) {
		uint32_t	tbu, tbl;

		do {
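			/* Retry until TBU is stable, so TBU and TBL form a consistent 64-bit snapshot */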
			tbu = mftbu();
			tbl = mftb();
		} while (mftbu() != tbu);

		proc_info->hibernate = 0;
		hibernate_machine_init();

		// hibernate_machine_init() could take minutes and we don't want timeouts
		// to fire as soon as scheduling starts. Reset timebase so it appears
		// no time has elapsed, as it would for regular sleep.
		mttb(0);
		mttbu(tbu);
		mttb(tbl);
	}

	if (proc_info != mproc_info) {
		while (!((mproc_info->cpu_flags) & SignalReady))
			continue;
		cpu_sync_timebase();
	}

	ml_init_interrupt();
	if (proc_info != mproc_info)
		simple_lock(&SignalReadyLock);
	proc_info->cpu_flags |= BootDone|SignalReady;
	if (proc_info != mproc_info) {
		if (proc_info->ppXFlags & SignalReadyWait) {
			(void)hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
			thread_wakeup(&proc_info->cpu_flags);
		}
		simple_unlock(&SignalReadyLock);
		pmsPark();						/* Timers should be cool now, park the power management stepper */
	}
}
Example #4
0
void
hibernate_vm_unlock(void)
{
    if (getPerProc()->hibernate)
    {
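        /* Drop the locks in the reverse of the order hibernate_vm_lock() takes them */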
        lck_mtx_unlock(&vm_page_queue_free_lock);
        vm_page_unlock_queues();
    }
}
Example #5
0
void
hibernate_vm_lock(void)
{
    if (getPerProc()->hibernate)
    {
        vm_page_lock_queues();
        lck_mtx_lock(&vm_page_queue_free_lock);
    }
}
Example #6
0
void
timer_queue_cancel(
	queue_t			queue,
	uint64_t		deadline,
	uint64_t		new_deadline)
{
	if (queue == &getPerProc()->rtclock_timer.queue) {
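		/* Move the pop out to new_deadline only if the cancelled deadline was earlier */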
		if (deadline < new_deadline)
			etimer_set_deadline(new_deadline);
	}
}
Example #7
0
/*
 * Set the clock deadline.
 */
void etimer_set_deadline(uint64_t deadline)
{
	rtclock_timer_t		*mytimer;
	spl_t			s;
	struct per_proc_info	*pp;

	s = splclock();					/* no interruptions */
	pp = getPerProc();

	mytimer = &pp->rtclock_timer;			/* Point to the timer itself */
	mytimer->deadline = deadline;			/* Set the new expiration time */

	etimer_resync_deadlines();

	splx(s);
}
Example #8
0
void ml_ppc_sleep(void)
{
    struct per_proc_info *proc_info;
    boolean_t dohalt;

    proc_info = getPerProc();
    if (!proc_info->hibernate)
    {
	ml_ppc_do_sleep();
	return;
    }

    {
        uint64_t start, end, nsec;

	HIBLOG("mapping_hibernate_flush start\n");
	clock_get_uptime(&start);

	mapping_hibernate_flush();

	clock_get_uptime(&end);
	absolutetime_to_nanoseconds(end - start, &nsec);
	HIBLOG("mapping_hibernate_flush time: %qd ms\n", nsec / 1000000ULL);
    }

    dohalt = hibernate_write_image();

    if (dohalt)
    {
	// off
	HIBLOG("power off\n");
	if (PE_halt_restart) 
	    (*PE_halt_restart)(kPEHaltCPU);
    }
    else
    {
	// sleep
	HIBLOG("sleep\n");

	// In case we come back via regular wake, set the state in memory.
	PerProcTable[0].ppe_vaddr->hibernate = 0;

	PE_cpu_machine_quiesce(proc_info->cpu_id);
	return;
    }
}
Example #9
0
queue_t
timer_queue_assign(
	uint64_t		deadline)
{
	struct per_proc_info	*pp = getPerProc();
	rtclock_timer_t			*timer;

	if (pp->running) {
		timer = &pp->rtclock_timer;

		if (deadline < timer->deadline)
			etimer_set_deadline(deadline);
	}
	else
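		/* This processor is not running; queue on the master processor's timer instead */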
		timer = &PerProcTable[master_cpu].ppe_vaddr->rtclock_timer;

	return (&timer->queue);
}
Example #10
0
/*
 * Re-evaluate the outstanding deadlines and select the most proximate.
 *
 * Should be called at splclock.
 */
void
etimer_resync_deadlines(void)
{
	uint64_t		deadline;
	rtclock_timer_t		*mytimer;
	spl_t			s = splclock();		/* No interruptions please */
	struct per_proc_info	*pp;

	pp = getPerProc();

	deadline = ~0ULL;

	/* if we have a clock timer set sooner, pop on that */
	mytimer = &pp->rtclock_timer;			/* Point to the timer itself */
	if (!mytimer->has_expired && mytimer->deadline > 0)
		deadline = mytimer->deadline;

	/* if we have a power management event coming up, how about that? */
	if (pp->pms.pmsPop > 0 && pp->pms.pmsPop < deadline)
		deadline = pp->pms.pmsPop;
	

	if (deadline > 0 && deadline <= pp->rtcPop) {
		int     decr;
		uint64_t now;

		now = mach_absolute_time();
		decr = setPop(deadline);

		if (deadline < now)
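			/* Deadline already passed: record the time the pop will actually occur (now plus the programmed decrementer) */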
		        pp->rtcPop = now + decr;
		else
		        pp->rtcPop = deadline;

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
	}
	splx(s);
}
Example #11
0
/*
 *	Routine:	cpu_sleep
 *	Function:	Prepare this processor for sleep: save any live FPU and
 *			vector state, mark the processor not running, and set up
 *			the reset handler on the master processor.
 */
void
cpu_sleep(
	void)
{
	struct per_proc_info	*proc_info;
	unsigned int			i;
	unsigned int			wait_ncpus_sleep, ncpus_sleep;
	facility_context		*fowner;

	proc_info = getPerProc();

	proc_info->running = FALSE;

	fowner = proc_info->FPU_owner;					/* Cache this */
	if(fowner) /* If anyone owns FPU, save it */
		fpu_save(fowner);
	proc_info->FPU_owner = NULL;						/* Set no fpu owner now */

	fowner = proc_info->VMX_owner;					/* Cache this */
	if(fowner) vec_save(fowner);					/* If anyone owns vectors, save it */
	proc_info->VMX_owner = NULL;						/* Set no vector owner now */

	if (proc_info->cpu_number == master_cpu)  {
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			ml_phys_write((vm_offset_t)&ResetHandler + 0,
					  RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
					  (vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
					  (vm_offset_t)&PerProcTable[master_cpu]);

			__asm__ volatile("sync");
			__asm__ volatile("isync");
		}
Example #12
0
static int
perfmon_enable(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;
	int curPMC;
  
	if(thread->machine.specFlags & perfMonitor) {
		return KERN_SUCCESS; /* already enabled */
	} else if(perfmon_acquire_facility(kernel_task)!=KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE; /* facility is in use */
	} else { /* kernel_task owns the facility and this thread has not yet been counted */
		simple_lock(&hw_perfmon_lock);
		hw_perfmon_thread_count++;
		simple_unlock(&hw_perfmon_lock);
	}

	sv->save_mmcr1 = 0;
	sv->save_mmcr2 = 0;
	
	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
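		/* The MMCR0 layout differs between the 32-bit processors and the 970, so build it per family */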
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
		
				mmcr0_reg.value = 0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
		
				mmcr0_reg.value = 0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}
  
	if(retval==KERN_SUCCESS) {
		for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
			sv->save_pmc[curPMC] = 0;
			thread->machine.pmcovfl[curPMC] = 0;
		}
		thread->machine.perfmonFlags = 0;
		thread->machine.specFlags |= perfMonitor; /* enable perf monitor facility for this thread */
		if(thread==current_thread()) {
			getPerProc()->spcFlags |= perfMonitor; /* update per_proc */
		}
	}

#ifdef HWPERFMON_DEBUG  
	kprintf("perfmon_enable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif  

	return retval;
}
Example #13
0
struct savearea * interrupt(
        int type,
        struct savearea *ssp,
	unsigned int dsisr,
	unsigned int dar)
{
	int	current_cpu;
	struct per_proc_info	*proc_info;
	uint64_t		now;
	thread_t		thread;

	disable_preemption();

	perfCallback fn = perfIntHook;
	if(fn) {							/* Is there a hook? */
		if(fn(type, ssp, dsisr, dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}
	
#if CONFIG_DTRACE
	if(tempDTraceIntHook) {							/* Is there a hook? */
		if(tempDTraceIntHook(type, ssp, dsisr, dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}
#endif

#if 0
	{
		extern void fctx_test(void);
		fctx_test();
	}
#endif


	current_cpu = cpu_number();
	proc_info = getPerProc();

	switch (type) {

		case T_DECREMENTER:
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
				  isync_mfdec(), (unsigned int)ssp->save_srr0, 0, 0, 0);
	
			now = mach_absolute_time();				/* Find out what time it is */
			
			if(now >= proc_info->pms.pmsPop) {		/* Is it time for power management state change? */
				pmsStep(1);							/* Yes, advance step */
				now = mach_absolute_time();			/* Get the time again since we ran a bit */
			}

			thread = current_thread();					/* Find ourselves */
			if(thread->machine.qactTimer != 0) {	/* Is the timer set? */
				if (thread->machine.qactTimer <= now) {	/* It is set, has it popped? */
					thread->machine.qactTimer = 0;		/* Clear single shot timer */
					if((unsigned int)thread->machine.vmmControl & 0xFFFFFFFE) {	/* Are there any virtual machines? */
						vmm_timer_pop(thread);			/* Yes, check out them out... */
					}
				}
			}

			etimer_intr(USER_MODE(ssp->save_srr1), ssp->save_srr0);	/* Handle event timer */
			break;
	
		case T_INTERRUPT:
			/* Call the platform interrupt routine */
			counter(c_incoming_interrupts++);
	
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
			   current_cpu, (unsigned int)ssp->save_srr0, 0, 0, 0);
	
#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
			DTRACE_INT5(interrupt__start, void *, proc_info->interrupt_nub, int, proc_info->interrupt_source, 
						void *, proc_info->interrupt_target, IOInterruptHandler, proc_info->interrupt_handler,
						void *, proc_info->interrupt_refCon);
#endif

			proc_info->interrupt_handler(
				proc_info->interrupt_target, 
				proc_info->interrupt_refCon,
				proc_info->interrupt_nub, 
				proc_info->interrupt_source);
	
#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
			DTRACE_INT5(interrupt__complete, void *, proc_info->interrupt_nub, int, proc_info->interrupt_source, 
						void *, proc_info->interrupt_target, IOInterruptHandler, proc_info->interrupt_handler,
						void *, proc_info->interrupt_refCon);
#endif

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
			   0, 0, 0, 0, 0);
	
			break;
	
		case T_SIGP:
			/* Did the other processor signal us? */ 
			cpu_signal_handler();
			break;
	
		case T_SHUTDOWN:
			cpu_doshutdown();
			panic("returning from cpu_doshutdown()\n");
			break;
	
				
		default:
			if (!Call_Debugger(type, ssp))
				unresolved_kernel_trap(type, ssp, dsisr, dar, NULL);
			break;
	}

	enable_preemption();
	return ssp;
}
Example #14
0
vm_offset_t dtrace_get_cpu_int_stack_top(void)
{
	return getPerProc()->intstack_top_ss;
}