/*
 *  Routine:    cpu_start
 *  Function:   Start up the specified processor.
 */
kern_return_t
cpu_start(
    int cpu)
{
    struct per_proc_info    *proc_info;
    kern_return_t           ret;
    mapping_t               *mp;

    proc_info = PerProcTable[cpu].ppe_vaddr;

    if (cpu == cpu_number()) {
        PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
        ml_init_interrupt();
        proc_info->cpu_flags |= BootDone | SignalReady;

        return KERN_SUCCESS;
    } else {
        proc_info->cpu_flags &= BootDone;       /* Clear everything but the BootDone flag */
        proc_info->interrupts_enabled = 0;
        proc_info->pending_ast = AST_NONE;
        proc_info->istackptr = proc_info->intstack_top_ss;
        proc_info->rtcPop = EndOfAllTime;
        proc_info->FPU_owner = NULL;
        proc_info->VMX_owner = NULL;
        proc_info->pms.pmsStamp = 0;            /* Dummy transition time */
        proc_info->pms.pmsPop = EndOfAllTime;   /* Set the pop way into the future */
        proc_info->pms.pmsState = pmsParked;    /* Park the stepper */
        proc_info->pms.pmsCSetCmd = pmsCInit;   /* Set dummy initial hardware state */

        /* Initialize the per-processor user memory window mapping */
        mp = (mapping_t *)(&proc_info->ppUMWmp);
        mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;  /* Permanent linkage mapping; busy count = 1 */
        mp->mpSpace = invalSpace;

        if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

            /* Serialize use of the shared reset handler area */
            simple_lock(&rht_lock);
            while (rht_state & RHT_BUSY) {
                rht_state |= RHT_WAIT;
                thread_sleep_usimple_lock((event_t)&rht_state,
                                          &rht_lock, THREAD_UNINT);
            }
            rht_state |= RHT_BUSY;
            simple_unlock(&rht_lock);

            ml_phys_write((vm_offset_t)&ResetHandler + 0,
                          RESET_HANDLER_START);
            ml_phys_write((vm_offset_t)&ResetHandler + 4,
                          (vm_offset_t)_start_cpu);
            ml_phys_write((vm_offset_t)&ResetHandler + 8,
                          (vm_offset_t)&PerProcTable[cpu]);
        }

        /*
         * Note: we pass the current time to the other processor here. It will load it
         * as early as possible so that there is a chance that it is close to accurate.
         * After the machine is up a while, we will officially resync the clocks so
         * that all processors are the same. This is just to get close.
         */
        ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);

        __asm__ volatile("sync");               /* Commit to storage */
        __asm__ volatile("isync");              /* Wait a second */

        ret = PE_cpu_start(proc_info->cpu_id,
                           proc_info->start_paddr, (vm_offset_t)proc_info);

        if (ret != KERN_SUCCESS) {
            if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
                /* Release the reset handler area and wake any waiters */
                simple_lock(&rht_lock);
                if (rht_state & RHT_WAIT)
                    thread_wakeup(&rht_state);
                rht_state &= ~(RHT_BUSY | RHT_WAIT);
                simple_unlock(&rht_lock);
            }
        } else {
            /* Block until the target processor reports SignalReady */
            simple_lock(&SignalReadyLock);
            if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
                (void)hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
                thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
                                         &SignalReadyLock, THREAD_UNINT);
            }
            simple_unlock(&SignalReadyLock);
        }
        return ret;
    }
}
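The three ml_phys_write() calls above fill in a small descriptor at the physical reset vector: a start command, the entry point, and a pointer to the target CPU's per-processor data. The struct below is a minimal sketch of the layout those writes imply; the struct and field names are illustrative assumptions, while the offsets (0, 4, 8) and contents come straight from the code.

/*
 * Illustrative sketch only, not xnu code: the layout implied by the
 * three ml_phys_write() calls in cpu_start() above.
 */
struct reset_handler_desc {
    unsigned int    rh_type;    /* +0: RESET_HANDLER_START command */
    vm_offset_t     rh_entry;   /* +4: entry point (_start_cpu) */
    vm_offset_t     rh_arg;     /* +8: argument (&PerProcTable[cpu]) */
};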
kern_return_t
cpu_start(int cpu)
{
    kprintf("cpu_start() cpu: %d\n", cpu);

    if (cpu == cpu_number()) {
        cpu_machine_init();
        return KERN_SUCCESS;
    } else {
#if __ARM_SMP__
        cpu_data_t  *cpu_data_ptr;
        thread_t    first_thread;

        cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
        cpu_data_ptr->cpu_reset_handler = (vm_offset_t)start_cpu_paddr;
        cpu_data_ptr->cpu_pmap_cpu_data.cpu_user_pmap = NULL;

        /* Pick the thread the processor will run first: a thread already
         * queued for it if one exists, otherwise its idle thread. */
        if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL)
            first_thread = cpu_data_ptr->cpu_processor->next_thread;
        else
            first_thread = cpu_data_ptr->cpu_processor->idle_thread;
        cpu_data_ptr->cpu_active_thread = first_thread;
        first_thread->machine.CpuDatap = cpu_data_ptr;

        /* Flush the per-CPU data to memory so the target processor sees
         * it before its own caches come up */
        flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
        flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);

        (void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);

        return KERN_SUCCESS;
#else
        return KERN_FAILURE;
#endif
    }
}
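In both variants, cpu_start() is a per-CPU primitive invoked from the machine-independent processor-startup path. The loop below is a hypothetical caller sketch, not xnu code: start_secondary_cpus() and ncpus are invented names used purely to show how the cpu_number() check above separates the boot CPU from the processors that still need to be started.

/*
 * Hypothetical usage sketch, not xnu code: bring up every processor
 * other than the boot processor.
 */
static void
start_secondary_cpus(int ncpus)
{
    int cpu;

    for (cpu = 0; cpu < ncpus; cpu++) {
        if (cpu == cpu_number())
            continue;   /* the boot CPU is already running */
        if (cpu_start(cpu) != KERN_SUCCESS)
            kprintf("cpu_start() failed for cpu %d\n", cpu);
    }
}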