/*
 *	Routine:	cpu_start
 *	Function:	Start up the processor identified by cpu: finish initializing
 *			the caller itself, or reset and boot a secondary processor
 *			and wait for it to signal ready.
 */
kern_return_t
cpu_start(
	int cpu)
{
	struct per_proc_info	*proc_info;
	kern_return_t		ret;
	mapping_t		*mp;

	proc_info = PerProcTable[cpu].ppe_vaddr;

	if (cpu == cpu_number()) {
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
		ml_init_interrupt();
		proc_info->cpu_flags |= BootDone | SignalReady;

		return KERN_SUCCESS;
	} else {
		proc_info->cpu_flags &= BootDone;		/* Clear everything except BootDone */
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;
		proc_info->istackptr = proc_info->intstack_top_ss;
		proc_info->rtcPop = EndOfAllTime;
		proc_info->FPU_owner = NULL;
		proc_info->VMX_owner = NULL;
		proc_info->pms.pmsStamp = 0;			/* Dummy transition time */
		proc_info->pms.pmsPop = EndOfAllTime;		/* Set the pop way into the future */
		proc_info->pms.pmsState = pmsParked;		/* Park the stepper */
		proc_info->pms.pmsCSetCmd = pmsCInit;		/* Set dummy initial hardware state */
		mp = (mapping_t *)(&proc_info->ppUMWmp);
		mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
		mp->mpSpace = invalSpace;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			simple_lock(&rht_lock);
			while (rht_state & RHT_BUSY) {
				rht_state |= RHT_WAIT;
				thread_sleep_usimple_lock((event_t)&rht_state,
							  &rht_lock, THREAD_UNINT);
			}
			rht_state |= RHT_BUSY;
			simple_unlock(&rht_lock);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
				      RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
				      (vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
				      (vm_offset_t)&PerProcTable[cpu]);
		}

		/*
		 * Note: we pass the current time to the other processor here. He will load it
		 * as early as possible so that there is a chance that it is close to accurate.
		 * After the machine is up a while, we will officially resync the clocks so
		 * that all processors are the same. This is just to get close.
		 */
		ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);

		__asm__ volatile("sync");			/* Commit to storage */
		__asm__ volatile("isync");			/* Wait a second */

		ret = PE_cpu_start(proc_info->cpu_id,
				   proc_info->start_paddr, (vm_offset_t)proc_info);

		if (ret != KERN_SUCCESS) {
			if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
				simple_lock(&rht_lock);
				if (rht_state & RHT_WAIT)
					thread_wakeup(&rht_state);
				rht_state &= ~(RHT_BUSY | RHT_WAIT);
				simple_unlock(&rht_lock);
			}
		} else {
			simple_lock(&SignalReadyLock);
			if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
				(void)hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
				thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
							 &SignalReadyLock, THREAD_UNINT);
			}
			simple_unlock(&SignalReadyLock);
		}
		return (ret);
	}
}
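/*
 * For orientation, a minimal portable sketch of the SignalReady handshake
 * cpu_start performs at the end: the master checks the flag under a lock,
 * records that it is waiting, and sleeps; the newly started CPU sets the
 * flag and wakes it. This is an analogy rendered with pthreads, not XNU
 * code -- ready_lock, ready_cv, signal_ready, and both function names are
 * hypothetical stand-ins for SignalReadyLock, SignalReadyWait, and the
 * SignalReady bit in cpu_flags.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t	ready_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	ready_cv   = PTHREAD_COND_INITIALIZER;
static bool		signal_ready;	/* stands in for SignalReady in cpu_flags */

/* Master side: mirrors the thread_sleep_simple_lock() wait in cpu_start. */
static void
wait_for_signal_ready(void)
{
	pthread_mutex_lock(&ready_lock);
	while (!signal_ready)
		pthread_cond_wait(&ready_cv, &ready_lock);	/* like setting SignalReadyWait and sleeping */
	pthread_mutex_unlock(&ready_lock);
}

/* Slave side: what the newly started CPU does once it is up. */
static void
post_signal_ready(void)
{
	pthread_mutex_lock(&ready_lock);
	signal_ready = true;
	pthread_cond_broadcast(&ready_cv);	/* like thread_wakeup(&proc_info->cpu_flags) */
	pthread_mutex_unlock(&ready_lock);
}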
static unsigned int NOINLINE
hw_lock_lock_contended(hw_lock_t lock, uintptr_t data, uint64_t timeout, boolean_t do_panic)
{
	uint64_t	end = 0;
	uintptr_t	holder = lock->lock_data;
	int		i;

	if (timeout == 0)
		timeout = LOCK_PANIC_TIMEOUT;

#if CONFIG_DTRACE
	uint64_t	begin;
	boolean_t	dtrace_enabled = lockstat_probemap[LS_LCK_SPIN_LOCK_SPIN] != 0;
	if (__improbable(dtrace_enabled))
		begin = mach_absolute_time();
#endif
	for ( ; ; ) {
		for (i = 0; i < LOCK_SNOOP_SPINS; i++) {
			cpu_pause();
#if (!__ARM_ENABLE_WFE_) || (LOCK_PRETEST)
			holder = ordered_load_hw(lock);
			if (holder != 0)
				continue;
#endif
			if (atomic_compare_exchange(&lock->lock_data, 0, data,
			    memory_order_acquire_smp, TRUE)) {
#if CONFIG_DTRACE
				if (__improbable(dtrace_enabled)) {
					uint64_t spintime = mach_absolute_time() - begin;
					if (spintime > dtrace_spin_threshold)
						LOCKSTAT_RECORD2(LS_LCK_SPIN_LOCK_SPIN, lock, spintime, dtrace_spin_threshold);
				}
#endif
				return 1;
			}
		}
		if (end == 0) {
			end = ml_get_timebase() + timeout;
		} else if (ml_get_timebase() >= end) {
			break;
		}
	}
	if (do_panic) {
		// Capture the actual time spent blocked, which may be higher than the timeout
		// if a misbehaving interrupt stole this thread's CPU time.
		panic("Spinlock timeout after %llu ticks, %p = %lx",
		      (ml_get_timebase() - end + timeout), lock, holder);
	}
	return 0;
}
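/*
 * A minimal userspace sketch of the same spin-then-timeout pattern, using
 * C11 <stdatomic.h>: snoop the line with plain loads until it looks free,
 * only then attempt the exclusive compare-and-swap, and arm the deadline
 * lazily after the first round of spins. Everything here (sketch_lock_contended,
 * SNOOP_SPINS, now_ticks) is a hypothetical stand-in for illustration, not
 * an XNU API.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define SNOOP_SPINS 64	/* read-only snoops per CAS attempt, like LOCK_SNOOP_SPINS */

static uint64_t
now_ticks(void)		/* stand-in for ml_get_timebase(); returns nanoseconds */
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Returns true if the lock was taken, false on timeout (caller may panic). */
static bool
sketch_lock_contended(_Atomic uintptr_t *lock, uintptr_t self, uint64_t timeout)
{
	uint64_t end = 0;

	for (;;) {
		for (int i = 0; i < SNOOP_SPINS; i++) {
			/* Snoop with a relaxed load so the cache line stays shared
			 * until the lock looks free; only then go exclusive. */
			if (atomic_load_explicit(lock, memory_order_relaxed) != 0)
				continue;
			uintptr_t expected = 0;
			if (atomic_compare_exchange_strong_explicit(lock, &expected, self,
			    memory_order_acquire, memory_order_relaxed))
				return true;
		}
		/* Arm the deadline lazily, as the kernel code does, so the
		 * clock is never read before the first full spin round. */
		if (end == 0)
			end = now_ticks() + timeout;
		else if (now_ticks() >= end)
			return false;
	}
}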
void
ml_arm_sleep(void)
{
	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t	*target_cdp;
		unsigned int	cpu;

		for (cpu = 0; cpu < MAX_CPUS; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
			if (target_cdp == (cpu_data_t *)NULL)
				break;
			if (target_cdp == cpu_data_ptr)
				continue;

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH)
				;
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime fixup we'll use when we resume.
		 */
		rtclock_base_abstime = ml_get_timebase();
		wake_abstime = rtclock_base_abstime;
	} else {
		platform_cache_disable();
		CleanPoU_Dcache();
	}
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
#if __ARM_SMP__ && defined(ARMA7)
	cpu_data_ptr->cpu_CLWFlush_req = 0;
	cpu_data_ptr->cpu_CLWClean_req = 0;
	__builtin_arm_dmb(DMB_ISH);
	cpu_data_ptr->cpu_CLW_active = 0;
#endif
	if (cpu_data_ptr == &BootCpuData) {
		platform_cache_disable();
		platform_cache_shutdown();
		bcopy((const void *)suspend_signature, (void *)(IOS_STATE), IOS_STATE_SIZE);
	} else {
		CleanPoC_DcacheRegion((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
	}

	__builtin_arm_dsb(DSB_SY);
	while (TRUE) {
#if __ARM_ENABLE_WFE_
		__builtin_arm_wfe();
#endif
	}	/* Spin */
}
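/*
 * A portable sketch of the rendezvous ml_arm_sleep performs on the boot
 * CPU: each secondary publishes a sleep token on its way down, and the
 * boot core spins until every other core has checked in before capturing
 * the wake timestamp. The names here (core_token, NCORES,
 * TOKEN_ON_SLEEP_PATH, both functions) are hypothetical; XNU uses the
 * cpu_sleep_token field in each cpu_data_t and ARM_CPU_ON_SLEEP_PATH.
 */
#include <stdatomic.h>
#include <stdint.h>

#define NCORES			4
#define TOKEN_ON_SLEEP_PATH	0x5afe5afeu	/* hypothetical sentinel */

static _Atomic uint32_t core_token[NCORES];

/* Secondary cores call this on their way into the sleep path. */
static void
secondary_enter_sleep(unsigned cpu)
{
	/* Release ordering so everything the core did before sleeping is
	 * visible by the time the boot core observes the token. */
	atomic_store_explicit(&core_token[cpu], TOKEN_ON_SLEEP_PATH,
			      memory_order_release);
}

/* The boot core spins until all others have checked in. */
static void
boot_wait_for_secondaries(unsigned boot_cpu)
{
	for (unsigned cpu = 0; cpu < NCORES; cpu++) {
		if (cpu == boot_cpu)
			continue;
		while (atomic_load_explicit(&core_token[cpu],
					    memory_order_acquire) != TOKEN_ON_SLEEP_PATH)
			;	/* spin, as the kernel loop does */
	}
}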