Example No. 1
/*
 *	Routine:	cpu_machine_init
 *	Function:	Per-processor machine initialization: platform init,
 *			hibernate wake handling, timebase sync with the master
 *			processor, and interrupt initialization.
 */
void
cpu_machine_init(
	void)
{
	struct per_proc_info			*proc_info;
	volatile struct per_proc_info	*mproc_info;


	proc_info = getPerProc();
	mproc_info = PerProcTable[master_cpu].ppe_vaddr;

	if (proc_info != mproc_info) {
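		/* Non-master processor: clear the rht_state busy/wait bits and wake any thread sleeping on them */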
		simple_lock(&rht_lock);
		if (rht_state & RHT_WAIT)
			thread_wakeup(&rht_state);
		rht_state &= ~(RHT_BUSY|RHT_WAIT);
		simple_unlock(&rht_lock);
	}

	/* Platform-dependent per-CPU init; the second argument is TRUE only on first boot */
	PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

	if (proc_info->hibernate) {
		uint32_t	tbu, tbl;

		/* Snapshot the 64-bit timebase consistently: retry if the upper half changed while the lower half was read */
		do {
			tbu = mftbu();
			tbl = mftb();
		} while (mftbu() != tbu);

		proc_info->hibernate = 0;
		hibernate_machine_init();

		// hibernate_machine_init() could take minutes and we don't want timeouts
		// to fire as soon as scheduling starts. Reset timebase so it appears
		// no time has elapsed, as it would for regular sleep.
		mttb(0);			/* zero TBL first so the write cannot carry into TBU mid-update */
		mttbu(tbu);
		mttb(tbl);
	}

	if (proc_info != mproc_info) {
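		/* Wait until the master processor reports SignalReady, then sync our timebase to it */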
		while (!((mproc_info->cpu_flags) & SignalReady))
			continue;
		cpu_sync_timebase();
	}

	ml_init_interrupt();
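	/* Mark this processor booted and signal-ready; wake anyone waiting on that transition */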
	if (proc_info != mproc_info)
		simple_lock(&SignalReadyLock);
	proc_info->cpu_flags |= BootDone|SignalReady;
	if (proc_info != mproc_info) {
		if (proc_info->ppXFlags & SignalReadyWait) {
			(void)hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
			thread_wakeup(&proc_info->cpu_flags);
		}
		simple_unlock(&SignalReadyLock);
		pmsPark();						/* Timers should be cool now, park the power management stepper */
	}
}
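For reference, the hibernate path above depends on snapshotting and restoring the 64-bit PowerPC timebase without tearing. The helpers below are a minimal sketch of that pattern, not part of the original file; they assume the same mftbu/mftb/mttbu/mttb accessors used in cpu_machine_init, and the helper names are hypothetical.

/*
 * Hypothetical helpers illustrating the timebase pattern used above.
 * timebase_snapshot() re-reads the upper half until it is stable across
 * the lower-half read; timebase_restore() zeroes TBL first so the write
 * cannot carry into TBU mid-update.
 */
static void
timebase_snapshot(uint32_t *tbu, uint32_t *tbl)
{
	do {
		*tbu = mftbu();			/* upper 32 bits */
		*tbl = mftb();			/* lower 32 bits */
	} while (mftbu() != *tbu);		/* retry if TBU rolled over */
}

static void
timebase_restore(uint32_t tbu, uint32_t tbl)
{
	mttb(0);				/* clear TBL: no carry into TBU */
	mttbu(tbu);				/* restore upper half */
	mttb(tbl);				/* restore lower half */
}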
Example No. 2
File: clock.c  Project: Prajna/xnu
/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:	This function operates by separately tracking calendar time
 *		updates using a two element structure to copy the calendar
 *		state, which may be asynchronously modified.  It utilizes
 *		barrier instructions in the tracking process and in the local
 *		stable snapshot process in order to ensure that a consistent
 *		snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t			*secs,
	clock_nsec_t		*nanosecs)
{
	int i = 0;
	uint64_t		now;
	struct unlocked_clock_calend stable;

	for (;;) {
		stable = flipflop[i];		/* take snapshot */

		/*
		 * Use a barrier instruction to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one, if it _was_ in progress, it will be off by two,
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	if (stable.calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > stable.calend.adjstart) {
			t32 = (uint32_t)(now - stable.calend.adjstart);

			if (t32 > stable.calend.adjoffset)
				now -= stable.calend.adjoffset;
			else
				now = stable.calend.adjstart;
		}
	}

	now += stable.calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;		/* scale microseconds up to nanoseconds */

	*secs += (clock_sec_t)stable.calend.epoch;
}
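The example shows only the reader side of the flipflop. As a rough illustration of the protocol described in the notes, the sketch below is an assumed updater, not the actual one in clock.c: each element's generation count is set odd while its copy is being refreshed and bumped back to even afterwards, so the reader loop above can detect an in-progress or completed update by comparing generation counts.

/*
 * Assumed updater for the two-element flipflop (hypothetical; the kernel's
 * real updater is not shown in this example).  hw_atomic_or() sets the low
 * "in progress" bit of the generation count, the calendar copy is refreshed,
 * and hw_atomic_add() clears the bit by bumping the count to the next even
 * value.
 */
static void
flipflop_track_calend(struct clock_calend *cur)
{
	int	i;

	for (i = 0; i < 2; i++) {
		(void)hw_atomic_or(&flipflop[i].gen, 1);	/* mark update in progress */
		flipflop[i].calend = *cur;			/* publish the new calendar state */
		(void)hw_atomic_add(&flipflop[i].gen, 1);	/* complete: next even generation */
	}
}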
Example No. 3
/*
 * Routine:	lck_attr_cleardebug
 * Function:	Atomically clear the LCK_ATTR_DEBUG bit in a lock attribute.
 */
void
lck_attr_cleardebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_and(&attr->lck_attr_val, ~LCK_ATTR_DEBUG);
}
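A brief usage sketch, assuming the lck_grp_*/lck_attr_* allocation calls from the same locks API; the group name and variable names here are made up for illustration.

	lck_grp_t	*grp  = lck_grp_alloc_init("example.group", LCK_GRP_ATTR_NULL);
	lck_attr_t	*attr = lck_attr_alloc_init();

	lck_attr_cleardebug(attr);				/* atomically drops LCK_ATTR_DEBUG */
	lck_mtx_t	*mtx  = lck_mtx_alloc_init(grp, attr);	/* mutex created without lock debugging */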