Example #1
/*
 *	Routine:	cpu_start
 *	Function:
 */
kern_return_t
cpu_start(
	int cpu)
{
	struct per_proc_info	*proc_info;
	kern_return_t			ret;
	mapping_t				*mp;

	proc_info = PerProcTable[cpu].ppe_vaddr;

	if (cpu == cpu_number()) {
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
		ml_init_interrupt();
		proc_info->cpu_flags |= BootDone|SignalReady;

		return KERN_SUCCESS;
	} else {
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;
		proc_info->istackptr = proc_info->intstack_top_ss;
		proc_info->rtcPop = EndOfAllTime;
		proc_info->FPU_owner = NULL;
		proc_info->VMX_owner = NULL;
		proc_info->pms.pmsStamp = 0;									/* Dummy transition time */
		proc_info->pms.pmsPop = EndOfAllTime;							/* Set the pop way into the future */
		proc_info->pms.pmsState = pmsParked;							/* Park the stepper */
		proc_info->pms.pmsCSetCmd = pmsCInit;							/* Set dummy initial hardware state */
		mp = (mapping_t *)(&proc_info->ppUMWmp);
		mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
		mp->mpSpace = invalSpace;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			simple_lock(&rht_lock);
			while (rht_state & RHT_BUSY) {
				rht_state |= RHT_WAIT;
				thread_sleep_usimple_lock((event_t)&rht_state,
						    &rht_lock, THREAD_UNINT);
			}
			rht_state |= RHT_BUSY;
			simple_unlock(&rht_lock);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
					  RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
					  (vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
					  (vm_offset_t)&PerProcTable[cpu]);
		}
/*
 *		Note: we pass the current time to the other processor here. He will load it
 *		as early as possible so that there is a chance that it is close to accurate.
 *		After the machine is up a while, we will officially resync the clocks so
 *		that all processors are the same.  This is just to get close.
 */

		ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);
		
		__asm__ volatile("sync");				/* Commit to storage */
		__asm__ volatile("isync");				/* Wait a second */
		ret = PE_cpu_start(proc_info->cpu_id,
						   proc_info->start_paddr, (vm_offset_t)proc_info);

		if (ret != KERN_SUCCESS) {
			if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
				simple_lock(&rht_lock);
				if (rht_state & RHT_WAIT)
					thread_wakeup(&rht_state);
				rht_state &= ~(RHT_BUSY|RHT_WAIT);
				simple_unlock(&rht_lock);
			}
		} else {
			simple_lock(&SignalReadyLock);
			if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
				(void)hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
				thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
				                          &SignalReadyLock, THREAD_UNINT);
			}
			simple_unlock(&SignalReadyLock);

		}
		return(ret);
	}
}
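
All four examples share the same wait pattern: a condition is checked while a simple lock is held, and thread_sleep_simple_lock() / thread_sleep_usimple_lock() atomically drops that lock before blocking, so a wakeup posted between the check and the sleep cannot be lost. Below is a minimal sketch (assumed, not taken from the file above) of the waking side of the SignalReady handshake that cpu_start() sleeps on; the routine name cpu_signal_ready() is hypothetical, and hw_atomic_and() is assumed to mirror the hw_atomic_or() used above.

/*
 * Hypothetical waker for the SignalReady handshake in cpu_start():
 * the newly started processor marks itself ready and, if the starter
 * recorded SignalReadyWait before sleeping, wakes it on the same
 * event, (event_t)&proc_info->cpu_flags.
 */
void
cpu_signal_ready(struct per_proc_info *proc_info)
{
	simple_lock(&SignalReadyLock);
	proc_info->cpu_flags |= SignalReady;
	if (proc_info->ppXFlags & SignalReadyWait) {
		(void)hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
		thread_wakeup((event_t)&proc_info->cpu_flags);
	}
	simple_unlock(&SignalReadyLock);
}
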
Example #2
File: himem.c  Project: rohsaini/mkunity
vm_offset_t
himem_convert(
	vm_offset_t	phys_addr,
	vm_size_t	length,
	int		io_op,
	hil_t		*hil)
{
	hil_t		h;
	spl_t		ipl;
	vm_offset_t	offset = phys_addr & (I386_PGBYTES - 1);

	assert (offset + length <= I386_PGBYTES);

	ipl = splhi();
	simple_lock(&hil_lock);
	while (!(h = hil_head)) {
		printf("WARNING: out of HIMEM pages\n");
		thread_sleep_simple_lock((event_t)&hil_head,
					 simple_lock_addr(hil_lock), FALSE);
		simple_lock(&hil_lock);
	}
	hil_head = hil_head->next;
	simple_unlock(&hil_lock);
	splx(ipl);
	
	h->high_addr = phys_addr;

	if (io_op == D_WRITE) {
		bcopy((char *)phystokv(phys_addr), (char *)phystokv(h->low_page + offset),
		      length);
		h->length = 0;
	} else {
		h->length = length;
	}
	h->offset = offset;

	assert(!*hil || (*hil)->high_addr);

	h->next = *hil;
	*hil = h;
	return(h->low_page + offset);
}
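
himem_convert() sleeps on &hil_head whenever the HIMEM page pool is empty. Note that in this older Mach tree the caller re-takes hil_lock itself after the sleep, while the XNU code in example #1 does not, which implies the two implementations differ in whether the sleep routine re-acquires the lock before returning. A pool like this needs a matching release path that wakes sleepers when a page goes back on the list; the sketch below is illustrative, the routine name himem_page_release() is hypothetical, and only the lock, list and field names from the example above are reused.

/*
 * Hypothetical release side of the HIMEM pool: push the translation
 * entry back onto hil_head and, if the list was empty (so a converter
 * may be sleeping in himem_convert()), wake the sleepers on the same
 * event, (event_t)&hil_head.
 */
static void
himem_page_release(hil_t h)
{
	spl_t		ipl;
	boolean_t	do_wakeup;

	ipl = splhi();
	simple_lock(&hil_lock);
	do_wakeup = (hil_head == (hil_t)0);	/* list was empty: sleepers possible */
	h->next = hil_head;
	hil_head = h;
	simple_unlock(&hil_lock);
	splx(ipl);

	if (do_wakeup)
		thread_wakeup((event_t)&hil_head);
}
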
Example #3
File: lock.c  Project: rohsaini/mkunity
void
lock_write(
	register lock_t	* l)
{
	register int	   i;
	start_data_node_t  entry     = {0};
	boolean_t          lock_miss = FALSE;
	unsigned short	   dynamic   = 0;
	unsigned short     trace     = 0;
	etap_time_t	   total_time;
	etap_time_t	   stop_wait_time;
	pc_t		   pc;
#if	MACH_LDEBUG
	int		   decrementer;
#endif	/* MACH_LDEBUG */


	ETAP_STAMP(lock_event_table(l), trace, dynamic);
	ETAP_CREATE_ENTRY(entry, trace);
	MON_ASSIGN_PC(entry->start_pc, pc, trace);

	simple_lock(&l->interlock);

	/*
	 *  Link the new start_list entry
	 */
	ETAP_LINK_ENTRY(l, entry, trace);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */

	/*
	 *	Try to acquire the want_write bit.
	 */
	while (l->want_write) {
		if (!lock_miss) {
			ETAP_CONTENTION_TIMESTAMP(entry, trace);
			lock_miss = TRUE;
		}

		i = lock_wait_time[l->can_sleep ? 1 : 0];
		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - want_write");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && l->want_write)
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && l->want_write) {
			l->waiting = TRUE;
			ETAP_SET_REASON(current_thread(),
					BLOCKED_ON_COMPLEX_LOCK);
			thread_sleep_simple_lock((event_t) l,
					simple_lock_addr(l->interlock), FALSE);
			simple_lock(&l->interlock);
		}
	}
	l->want_write = TRUE;

	/* Wait for readers (and upgrades) to finish */

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while ((l->read_count != 0) || l->want_upgrade) {
		if (!lock_miss) {
			ETAP_CONTENTION_TIMESTAMP(entry, trace);
			lock_miss = TRUE;
		}

		i = lock_wait_time[l->can_sleep ? 1 : 0];
		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - wait for readers");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && (l->read_count != 0 ||
					    l->want_upgrade))
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
			l->waiting = TRUE;
			ETAP_SET_REASON(current_thread(),
					BLOCKED_ON_COMPLEX_LOCK);
			thread_sleep_simple_lock((event_t) l,
				simple_lock_addr(l->interlock), FALSE);
			simple_lock(&l->interlock);
		}
	}

	/*
	 *  do not collect wait data if either the lock
	 *  was free or no wait traces are enabled.
	 */

	if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
		ETAP_TIMESTAMP(stop_wait_time);
		ETAP_TOTAL_TIME(total_time,
				stop_wait_time,
				entry->start_wait_time);
		CUM_WAIT_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
		MON_DATA_COLLECT(l,
				 entry,
				 total_time,
				 WRITE_LOCK,
				 MON_CONTENTION,
				 trace);
	}

	simple_unlock(&l->interlock);

	/*
	 *  Set start hold time if some type of hold tracing is enabled.
	 *
	 *  Note: if the stop_wait_time was already stamped, use
	 *      it as the start_hold_time instead of doing an
	 *      expensive bus access.
	 *
	 */

	if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
		ETAP_COPY_START_HOLD_TIME(entry, stop_wait_time, trace);
	else
		ETAP_DURATION_TIMESTAMP(entry, trace);

}
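
lock_write() spins for lock_wait_time[] iterations with the interlock dropped and then, if the lock is sleepable, sets l->waiting and sleeps on the lock address itself. These sleep loops only work if every release path follows the same convention: clear the held bit under the interlock and issue one thread_wakeup() on (event_t) l when l->waiting was set. The sketch below is illustrative, not taken from lock.c (the routine name is made up and ETAP bookkeeping is omitted); it reuses only the lock fields shown above.

/*
 * Illustrative release path matching the sleep convention used by
 * lock_write() above: drop want_write under the interlock and wake
 * the sleepers, which block on the lock address (event_t) l.
 */
void
lock_write_release_sketch(register lock_t *l)
{
	boolean_t do_wakeup = FALSE;

	simple_lock(&l->interlock);
	l->want_write = FALSE;
	if (l->waiting) {			/* a reader or writer slept on (event_t) l */
		l->waiting = FALSE;
		do_wakeup = TRUE;
	}
	simple_unlock(&l->interlock);

	if (do_wakeup)
		thread_wakeup((event_t) l);
}
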
Example #4
File: lock.c  Project: rohsaini/mkunity
boolean_t
lock_read_to_write(
	register lock_t	* l)
{
	register int	    i;
	boolean_t	    do_wakeup = FALSE;
	start_data_node_t   entry     = {0};
	boolean_t           lock_miss = FALSE;
	unsigned short      dynamic   = 0;
	unsigned short      trace     = 0;
	etap_time_t	    total_time;
	etap_time_t	    stop_time;
	pc_t		    pc;
#if	MACH_LDEBUG
	int		   decrementer;
#endif	/* MACH_LDEBUG */


	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	simple_lock(&l->interlock);

	l->read_count--;	

	/*
	 *  Since the read lock is lost whether the write lock
	 *  is acquired or not, read hold data is collected here.
	 *  This, of course, is assuming some type of hold
	 *  tracing is enabled.
	 *
	 *  Note: trace is set to zero if the entry does not exist.
	 */

	ETAP_FIND_ENTRY(l, entry, trace);

	if (ETAP_DURATION_ENABLED(trace)) {
		ETAP_TIMESTAMP(stop_time);
		ETAP_TOTAL_TIME(total_time, stop_time, entry->start_hold_time);
		CUM_HOLD_ACCUMULATE(l->cbuff_read, total_time, dynamic, trace);
		MON_ASSIGN_PC(entry->end_pc, pc, trace);
		MON_DATA_COLLECT(l,
				 entry,
				 total_time,
				 READ_LOCK,
				 MON_DURATION,
				 trace);
	}

	if (l->want_upgrade) {
		/*
		 *	Someone else has requested upgrade.
		 *	Since we've released a read lock, wake
		 *	him up.
		 */
		if (l->waiting && (l->read_count == 0)) {
			l->waiting = FALSE;
			do_wakeup = TRUE;
		}

		ETAP_UNLINK_ENTRY(l, entry);
		simple_unlock(&l->interlock);
		ETAP_DESTROY_ENTRY(entry);

		if (do_wakeup)
			thread_wakeup((event_t) l);
		return (TRUE);
	}

	l->want_upgrade = TRUE;

	MON_ASSIGN_PC(entry->start_pc, pc, trace);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while (l->read_count != 0) {
		if (!lock_miss) {
			ETAP_CONTENTION_TIMESTAMP(entry, trace);
			lock_miss = TRUE;
		}

		i = lock_wait_time[l->can_sleep ? 1 : 0];

		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - read_count");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && l->read_count != 0)
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && l->read_count != 0) {
			l->waiting = TRUE;
			thread_sleep_simple_lock((event_t) l,
					simple_lock_addr(l->interlock), FALSE);
			simple_lock(&l->interlock);
		}
	}

	/*
	 *  do not collect wait data if the lock was free
	 *  or if no wait traces are enabled.
	 */

	if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
		ETAP_TIMESTAMP(stop_time);
		ETAP_TOTAL_TIME(total_time, stop_time, entry->start_wait_time);
		CUM_WAIT_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
		MON_DATA_COLLECT(l,
				 entry,
				 total_time,
				 WRITE_LOCK,
				 MON_CONTENTION,
				 trace);
	}

	simple_unlock(&l->interlock);

	/*
	 *  Set start hold time if some type of hold tracing is enabled
	 *
	 *  Note: if the stop_time was already stamped, use
	 *        it as the new start_hold_time instead of doing
	 *        an expensive VME access.
	 *
	 */

	if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
		ETAP_COPY_START_HOLD_TIME(entry, stop_time, trace);
	else
		ETAP_DURATION_TIMESTAMP(entry, trace);

	return (FALSE);
}
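
The return value encodes whether the upgrade was granted: TRUE means another thread already held want_upgrade, the read hold has been given up, and the caller no longer owns the lock in any mode; FALSE means the caller now holds the lock for writing. A typical caller therefore re-acquires and revalidates on TRUE, as in the illustrative sketch below; the cache_promote_entry() wrapper and its comments are hypothetical, and lock_read(), lock_write() and lock_done() are assumed to be the usual acquire/release counterparts in this lock package.

/*
 * Hypothetical caller of lock_read_to_write(): on TRUE the read lock
 * has already been lost, so take the write lock from scratch and
 * re-check whatever was observed under the read lock.
 */
void
cache_promote_entry(lock_t *cache_lock)
{
	lock_read(cache_lock);
	/* ... examine the entry under the read lock ... */

	if (lock_read_to_write(cache_lock)) {
		/* Upgrade refused: we hold nothing at this point. */
		lock_write(cache_lock);
		/* ... revalidate, the entry may have changed ... */
	}

	/* ... modify the entry while holding the write lock ... */
	lock_done(cache_lock);
}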