void
init_secondary(int cpu)
{
	struct pcpu *pc;
	uint32_t loop_counter;
#ifndef INTRNG
	int start = 0, end = 0;
#endif
	uint32_t actlr_mask, actlr_set;

	pmap_set_tex();
	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
	reinit_mmu(pmap_kern_ttb, actlr_mask, actlr_set);
	cpu_setup();

	/* Provide stack pointers for other processor modes. */
	set_stackptrs(cpu);

	enable_interrupts(PSR_A);
	pc = &__pcpu[cpu];

	/*
	 * pcpu_init() updates queue, so it should not be executed in parallel
	 * on several cores
	 */
	while (mp_naps < (cpu - 1))
		;

	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu[cpu - 1], cpu);
#if __ARM_ARCH >= 6 && defined(DDB)
	dbg_monitor_init_secondary();
#endif
	/* Signal our startup to BSP */
	atomic_add_rel_32(&mp_naps, 1);

	/* Spin until the BSP releases the APs */
	while (!atomic_load_acq_int(&aps_ready)) {
#if __ARM_ARCH >= 7
		__asm __volatile("wfe");
#endif
	}

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pc->pc_curthread = pc->pc_idlethread;
	pc->pc_curpcb = pc->pc_idlethread->td_pcb;
	set_curthread(pc->pc_idlethread);
#ifdef VFP
	vfp_init();
#endif

	/* Configure the interrupt controller */
	intr_pic_init_secondary();

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

#ifndef INTRNG
	/* Enable ipi */
#ifdef IPI_IRQ_START
	start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
	end = IPI_IRQ_END;
#else
	end = IPI_IRQ_START;
#endif
#endif

	for (int i = start; i <= end; i++)
		arm_unmask_irq(i);
#endif /* INTRNG */
	enable_interrupts(PSR_I);

	loop_counter = 0;
	while (smp_started == 0) {
		DELAY(100);
		loop_counter++;
		if (loop_counter == 1000)
			CTR0(KTR_SMP, "AP still wait for smp_started");
	}

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	CTR0(KTR_SMP, "go into scheduler");

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
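/*
 * For context: the wfe loop above parks each AP until the BSP publishes
 * aps_ready and sends an event. A minimal sketch of that BSP-side release,
 * modeled on FreeBSD's release_aps(); the body below is an illustrative
 * assumption, not the verbatim source.
 */
static void
release_aps_sketch(void)
{
	/* Publish the release flag with release semantics. */
	atomic_store_rel_int(&aps_ready, 1);
	/* Ensure the store is visible, then wake cores sleeping in wfe. */
	dsb();
	sev();
}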
/*
 * AP CPU's call this to initialize themselves.
 */
void
init_secondary(void)
{
	vm_offset_t addr;
	u_int cpuid;
	int gsel_tss;

	/* bootAP is set in start_ap() to our ID. */
	PCPU_SET(currentldt, _default_ldt);
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
#if 0
	gdt[bootAP * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
#endif
	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
#if 0
	PCPU_SET(tss_gdt, &gdt[bootAP * NGDT + GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
#endif
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */

	/*
	 * signal our startup to the BSP.
	 */
	mp_naps++;

	/* Spin until the BSP releases the AP's. */
	while (!aps_ready)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();
	for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
		invlpg(addr);

	/* set up FPU state on the AP */
	npxinit();
#if 0
	/* set up SSE registers */
	enable_sse();
#endif
#if 0 && defined(PAE)
	/* Enable the PTE no-execute bit. */
	if ((amd_feature & AMDID_NX) != 0) {
		uint64_t msr;

		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
	}
#endif
#if 0
	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}
#endif
	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);
#if 0
	/* Init local apic for irq's */
	lapic_setup(1);
#endif
	smp_cpus++;

	cpuid = PCPU_GET(cpuid);
	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
	printf("SMP: AP CPU #%d Launched!\n", cpuid);

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		CPU_SET(cpuid, &logical_cpus_mask);

	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		CPU_SET(cpuid, &hyperthreading_cpus_mask);
#if 0
	if (bootverbose)
		lapic_dump("AP");
#endif
	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1; /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the AP's are up */
	while (smp_started == 0)
		ia32_pause();

	PCPU_SET(curthread, PCPU_GET(idlethread));

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
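/*
 * For reference: ia32_pause(), used by the spin loops above, is the x86
 * PAUSE spin-wait hint from <machine/cpufunc.h>. A minimal equivalent
 * (a sketch under a hypothetical name, not the verbatim header) is:
 */
static __inline void
ia32_pause_sketch(void)
{
	/* Hint to the CPU that this is a busy-wait loop. */
	__asm __volatile("pause");
}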
void
init_secondary(int cpu)
{
	struct pcpu *pc;
	uint32_t loop_counter;
	int start = 0, end = 0;

	cpu_setup(NULL);
	setttb(pmap_pa);
	cpu_tlb_flushID();

	pc = &__pcpu[cpu];

	/*
	 * pcpu_init() updates queue, so it should not be executed in parallel
	 * on several cores
	 */
	while (mp_naps < (cpu - 1))
		;

	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu[cpu - 1], cpu);

	/* Provide stack pointers for other processor modes. */
	set_stackptrs(cpu);

	/* Signal our startup to BSP */
	atomic_add_rel_32(&mp_naps, 1);

	/* Spin until the BSP releases the APs */
	while (!aps_ready)
		;

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pc->pc_curthread = pc->pc_idlethread;
	pc->pc_curpcb = pc->pc_idlethread->td_pcb;
	set_curthread(pc->pc_idlethread);
#ifdef VFP
	pc->pc_cpu = cpu;

	vfp_init();
#endif

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Enable ipi */
#ifdef IPI_IRQ_START
	start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
	end = IPI_IRQ_END;
#else
	end = IPI_IRQ_START;
#endif
#endif

	for (int i = start; i <= end; i++)
		arm_unmask_irq(i);
	enable_interrupts(PSR_I);

	loop_counter = 0;
	while (smp_started == 0) {
		DELAY(100);
		loop_counter++;
		if (loop_counter == 1000)
			CTR0(KTR_SMP, "AP still wait for smp_started");
	}

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	CTR0(KTR_SMP, "go into scheduler");
	platform_mp_init_secondary();

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}