/*
 *	Routine:	cpu_start
 *	Function:	Start up the specified processor; if it is the
 *			current one, just finish its initialization.
 */
kern_return_t
cpu_start(
	int cpu)
{
	struct per_proc_info	*proc_info;
	kern_return_t		ret;
	mapping_t		*mp;

	proc_info = PerProcTable[cpu].ppe_vaddr;

	if (cpu == cpu_number()) {
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
		ml_init_interrupt();
		proc_info->cpu_flags |= BootDone|SignalReady;

		return KERN_SUCCESS;
	} else {
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;
		proc_info->istackptr = proc_info->intstack_top_ss;
		proc_info->rtcPop = EndOfAllTime;
		proc_info->FPU_owner = NULL;
		proc_info->VMX_owner = NULL;
		proc_info->pms.pmsStamp = 0;			/* Dummy transition time */
		proc_info->pms.pmsPop = EndOfAllTime;		/* Set the pop way into the future */
		proc_info->pms.pmsState = pmsParked;		/* Park the stepper */
		proc_info->pms.pmsCSetCmd = pmsCInit;		/* Set dummy initial hardware state */
		mp = (mapping_t *)(&proc_info->ppUMWmp);
		mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
		mp->mpSpace = invalSpace;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			simple_lock(&rht_lock);
			while (rht_state & RHT_BUSY) {
				rht_state |= RHT_WAIT;
				thread_sleep_usimple_lock((event_t)&rht_state,
							  &rht_lock, THREAD_UNINT);
			}
			rht_state |= RHT_BUSY;
			simple_unlock(&rht_lock);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
				      RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
				      (vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
				      (vm_offset_t)&PerProcTable[cpu]);
		}

		/*
		 * Note: we pass the current time to the other processor here.
		 * It will load it as early as possible so that there is a
		 * chance that it is close to accurate.  After the machine is
		 * up a while, we will officially resync the clocks so that
		 * all processors are the same.  This is just to get close.
		 */
		ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);

		__asm__ volatile("sync");			/* Commit to storage */
		__asm__ volatile("isync");			/* Wait a second */
		ret = PE_cpu_start(proc_info->cpu_id,
				   proc_info->start_paddr, (vm_offset_t)proc_info);

		if (ret != KERN_SUCCESS) {
			if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
				simple_lock(&rht_lock);
				if (rht_state & RHT_WAIT)
					thread_wakeup(&rht_state);
				rht_state &= ~(RHT_BUSY|RHT_WAIT);
				simple_unlock(&rht_lock);
			}
		} else {
			simple_lock(&SignalReadyLock);
			if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
				(void)hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
				thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
							 &SignalReadyLock, THREAD_UNINT);
			}
			simple_unlock(&SignalReadyLock);
		}
		return ret;
	}
}
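/*
 * For illustration: the other half of the SignalReady handshake that
 * cpu_start sleeps on above.  This is only a sketch of what the started
 * processor is assumed to do once it is running; the routine name here is
 * hypothetical, and in this kernel the equivalent work happens in the new
 * processor's own initialization path.
 */
static void
cpu_signal_ready_sketch(struct per_proc_info *proc_info)
{
	simple_lock(&SignalReadyLock);
	proc_info->cpu_flags |= BootDone|SignalReady;	/* Mark ourselves fully up */
	if (proc_info->ppXFlags & SignalReadyWait) {	/* Is cpu_start sleeping on us? */
		(void)hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
		thread_wakeup((event_t)&proc_info->cpu_flags);	/* Wake the starter */
	}
	simple_unlock(&SignalReadyLock);
}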
static void
clock_track_calend_nowait(void)
{
	int	i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count again, clearing the low bit
		 * to signal completion.  If a caller compares the generation
		 * count after taking a copy while an update is in progress,
		 * the count will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}
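/*
 * For illustration: a sketch of the async reader the comments above are
 * guarding against.  It retries until it observes the same even generation
 * count before and after the copy, proving no update overlapped the read.
 * The routine name is hypothetical, and real code would additionally need
 * volatile accesses or read barriers to keep the compiler and CPU from
 * reordering the two samples of gen around the copy.
 */
static void
clock_read_calend_nowait_sketch(int i, struct clock_calend *result)
{
	uint32_t	gen;

	do {
		gen = flipflop[i].gen;		/* Sample the generation first */
		*result = flipflop[i].calend;	/* Copy the snapshot */
	} while ((gen & 1) ||			/* Odd: update in progress */
		 gen != flipflop[i].gen);	/* Changed: update completed under us */
}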
/*
 * Routine:	lck_attr_rw_shared_priority
 */
void
lck_attr_rw_shared_priority(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY);
}
/*
 * Routine:	lck_attr_setdebug
 */
void
lck_attr_setdebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_DEBUG);
}
/*
 * Routine:	lck_grp_attr_setstat
 */
void
lck_grp_attr_setstat(
	lck_grp_attr_t	*attr)
{
	(void)hw_atomic_or(&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
}
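/*
 * For illustration: how a client is typically expected to combine these
 * attribute setters when creating a lock.  A minimal sketch assuming the
 * standard lck_grp_attr_alloc_init / lck_grp_alloc_init /
 * lck_attr_alloc_init / lck_mtx_alloc_init interfaces; the routine and
 * group names are made up.
 */
static lck_mtx_t *
example_mutex_alloc(void)
{
	lck_grp_attr_t	*grp_attr = lck_grp_attr_alloc_init();
	lck_attr_t	*attr = lck_attr_alloc_init();
	lck_grp_t	*grp;

	lck_grp_attr_setstat(grp_attr);		/* Gather statistics for this group */
	grp = lck_grp_alloc_init("example", grp_attr);
	lck_attr_setdebug(attr);		/* Enable debug checking on the lock */

	return lck_mtx_alloc_init(grp, attr);
}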