/*
 * Further secondary CPU initialization.
 *
 * We are now running on our startup stack, with proper page tables.
 * There is nothing to do but display some details about the CPU and its CMMUs.
 */
void
secondary_main()
{
	struct cpu_info *ci = curcpu();
	int s;

	cpu_configuration_print(0);
	ncpus++;

	sched_init_cpu(ci);
	nanouptime(&ci->ci_schedstate.spc_runtime);
	ci->ci_curproc = NULL;
	ci->ci_randseed = (arc4random() & 0x7fffffff) + 1;

	/*
	 * Release cpu_hatch_mutex to let other secondary processors
	 * have a chance to run.
	 */
	hatch_pending_count--;
	__cpu_simple_unlock(&cpu_hatch_mutex);

	/* wait for cpu_boot_secondary_processors() */
	__cpu_simple_lock(&cpu_boot_mutex);
	__cpu_simple_unlock(&cpu_boot_mutex);

	spl0();
	SCHED_LOCK(s);
	set_psr(get_psr() & ~PSR_IND);

	SET(ci->ci_flags, CIF_ALIVE);

	cpu_switchto(NULL, sched_chooseproc());
}
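/*
 * Illustrative sketch only, not part of the kernel source above: the
 * hatch/boot handshake that secondary_main() takes part in, rebuilt with
 * C11 threads and atomic_flag spinlocks so it can be compiled and run in
 * userland.  The names hatch_one(), boot side logic in main(), and the
 * thread count are made up for the example; it also simplifies the kernel
 * flow, where the secondary arrives already holding cpu_hatch_mutex and
 * only releases it, by acquiring and then releasing the lock itself.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <threads.h>

static atomic_flag hatch_mutex = ATOMIC_FLAG_INIT;	/* serializes hatching */
static atomic_flag boot_mutex = ATOMIC_FLAG_INIT;	/* held until boot time */
static atomic_int hatch_pending = 3;

static void
spin_lock(atomic_flag *f)
{
	while (atomic_flag_test_and_set_explicit(f, memory_order_acquire))
		;
}

static void
spin_unlock(atomic_flag *f)
{
	atomic_flag_clear_explicit(f, memory_order_release);
}

static int
hatch_one(void *arg)
{
	int id = (int)(intptr_t)arg;

	spin_lock(&hatch_mutex);		/* hatch one CPU at a time */
	printf("cpu%d: hatched\n", id);
	atomic_fetch_sub(&hatch_pending, 1);
	spin_unlock(&hatch_mutex);

	spin_lock(&boot_mutex);			/* wait for the boot CPU */
	spin_unlock(&boot_mutex);
	printf("cpu%d: entering scheduler\n", id);
	return 0;
}

int
main(void)
{
	thrd_t t[3];
	int i;

	spin_lock(&boot_mutex);			/* boot CPU holds this early */
	for (i = 0; i < 3; i++)
		thrd_create(&t[i], hatch_one, (void *)(intptr_t)i);
	while (atomic_load(&hatch_pending) > 0)
		;				/* wait for all secondaries */
	spin_unlock(&boot_mutex);		/* release them all at once */
	for (i = 0; i < 3; i++)
		thrd_join(t[i], NULL);
	return 0;
}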
void
cpu_boot_secondary_processors(void)
{
	struct cpu_info *ci;
	u_long i;

	for (i = 0; i < MAXCPUS; i++) {
		ci = cpu_info[i];
		if (ci == NULL)
			continue;
		if (ci->ci_idle_pcb == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY))
			continue;
		sched_init_cpu(ci);
		ci->ci_randseed = (arc4random() & 0x7fffffff) + 1;
		cpu_boot_secondary(ci);
	}
}
void
cpu_attach(struct device *parent, struct device *self, void *aux)
{
	struct cpu_softc *sc = (void *) self;
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
#if defined(MULTIPROCESSOR)
	int cpunum = sc->sc_dev.dv_unit;
	vaddr_t kstack;
	struct pcb *pcb;
#endif

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		ci = malloc(sizeof(*ci), M_DEVBUF, M_WAITOK|M_ZERO);
#if defined(MULTIPROCESSOR)
		if (cpu_info[cpunum] != NULL)
			panic("cpu at apic id %d already attached?", cpunum);
		cpu_info[cpunum] = ci;
#endif
#ifdef TRAPLOG
		ci->ci_tlog_base = malloc(sizeof(struct tlog), M_DEVBUF, M_WAITOK);
#endif
	} else {
		ci = &cpu_info_primary;
#if defined(MULTIPROCESSOR)
		if (caa->cpu_number != lapic_cpu_number()) {
			panic("%s: running cpu is at apic %d"
			    " instead of at expected %d",
			    sc->sc_dev.dv_xname, lapic_cpu_number(),
			    caa->cpu_number);
		}
#endif
	}

	ci->ci_self = ci;
	sc->sc_info = ci;
	ci->ci_dev = self;
	ci->ci_apicid = caa->cpu_number;
#ifdef MULTIPROCESSOR
	ci->ci_cpuid = cpunum;
#else
	ci->ci_cpuid = 0;	/* False for APs, but they're not used anyway */
#endif
	ci->ci_func = caa->cpu_func;

	simple_lock_init(&ci->ci_slock);

#if defined(MULTIPROCESSOR)
	/*
	 * Allocate USPACE pages for the idle PCB and stack.
	 * XXX should we just sleep here?
	 */
	kstack = (vaddr_t)km_alloc(USPACE, &kv_any, &kp_zero, &kd_nowait);
	if (kstack == 0) {
		if (caa->cpu_role != CPU_ROLE_AP) {
			panic("cpu_attach: unable to allocate idle stack for"
			    " primary");
		}
		printf("%s: unable to allocate idle stack\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	pcb = ci->ci_idle_pcb = (struct pcb *)kstack;
	pcb->pcb_kstack = kstack + USPACE - 16;
	pcb->pcb_rbp = pcb->pcb_rsp = kstack + USPACE - 16;
	pcb->pcb_pmap = pmap_kernel();
	pcb->pcb_cr0 = rcr0();
	pcb->pcb_cr3 = pcb->pcb_pmap->pm_pdirpa;
#endif

	/* further PCB init done later. */

	printf(": ");

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		printf("(uniprocessor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);
		break;

	case CPU_ROLE_BP:
		printf("apid %d (boot processor)\n", caa->cpu_number);
		ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);

#if NLAPIC > 0
		/*
		 * Enable local apic
		 */
		lapic_enable();
		lapic_calibrate_timer(ci);
#endif
#if NIOAPIC > 0
		ioapic_bsp_id = caa->cpu_number;
#endif
		break;

	case CPU_ROLE_AP:
		/*
		 * report on an AP
		 */
		printf("apid %d (application processor)\n", caa->cpu_number);

#if defined(MULTIPROCESSOR)
		cpu_intr_init(ci);
		gdt_alloc_cpu(ci);
		sched_init_cpu(ci);
		cpu_start_secondary(ci);
		ncpus++;
		if (ci->ci_flags & CPUF_PRESENT) {
			ci->ci_next = cpu_info_list->ci_next;
			cpu_info_list->ci_next = ci;
		}
#else
		printf("%s: not started\n", sc->sc_dev.dv_xname);
#endif
		break;

	default:
		panic("unknown processor type??");
	}
	cpu_vm_init(ci);

#if defined(MULTIPROCESSOR)
	if (mp_verbose) {
		printf("%s: kstack at 0x%lx for %d bytes\n",
		    sc->sc_dev.dv_xname, kstack, USPACE);
		printf("%s: idle pcb at %p, idle sp at 0x%lx\n",
		    sc->sc_dev.dv_xname, pcb, pcb->pcb_rsp);
	}
#endif
}
void
cpu_attach(struct device *parent, struct device *self, void *aux)
{
	struct cpu_info *ci = (struct cpu_info *)self;
	struct cpu_attach_args *caa = (struct cpu_attach_args *)aux;
#ifdef MULTIPROCESSOR
	int cpunum = ci->ci_dev.dv_unit;
	vaddr_t kstack;
	struct pcb *pcb;
#endif

	if (caa->cpu_role == CPU_ROLE_AP) {
#ifdef MULTIPROCESSOR
		if (cpu_info[cpunum] != NULL)
			panic("cpu at apic id %d already attached?", cpunum);
		cpu_info[cpunum] = ci;
#endif
	} else {
		ci = &cpu_info_primary;
#ifdef MULTIPROCESSOR
		if (caa->cpu_number != lapic_cpu_number()) {
			panic("%s: running cpu is at apic %d"
			    " instead of at expected %d",
			    self->dv_xname, lapic_cpu_number(),
			    caa->cpu_number);
		}
#endif
		bcopy(self, &ci->ci_dev, sizeof *self);
	}

	ci->ci_self = ci;
	ci->ci_apicid = caa->cpu_number;
#ifdef MULTIPROCESSOR
	ci->ci_cpuid = cpunum;
#else
	ci->ci_cpuid = 0;	/* False for APs, so what, they're not used */
#endif
	ci->ci_signature = caa->cpu_signature;
	ci->ci_feature_flags = caa->feature_flags;
	ci->ci_func = caa->cpu_func;

#ifdef MULTIPROCESSOR
	/*
	 * Allocate UPAGES contiguous pages for the idle PCB and stack.
	 */
	kstack = uvm_km_alloc(kernel_map, USPACE);
	if (kstack == 0) {
		if (cpunum == 0) { /* XXX */
			panic("cpu_attach: unable to allocate idle stack for"
			    " primary");
		}
		printf("%s: unable to allocate idle stack\n",
		    ci->ci_dev.dv_xname);
		return;
	}
	pcb = ci->ci_idle_pcb = (struct pcb *)kstack;
	memset(pcb, 0, USPACE);

	pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	pcb->pcb_tss.tss_esp0 = kstack + USPACE - 16 - sizeof (struct trapframe);
	pcb->pcb_tss.tss_esp = kstack + USPACE - 16 - sizeof (struct trapframe);
	pcb->pcb_pmap = pmap_kernel();
	pcb->pcb_cr3 = pcb->pcb_pmap->pm_pdirpa;
#endif
	ci->ci_curpmap = pmap_kernel();

	/* further PCB init done later. */

	printf(": ");

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		printf("(uniprocessor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
		identifycpu(ci);
#ifdef MTRR
		mem_range_attach();
#endif
		cpu_init(ci);
		cpu_init_mwait(&ci->ci_dev);
		break;

	case CPU_ROLE_BP:
		printf("apid %d (boot processor)\n", caa->cpu_number);
		ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
		identifycpu(ci);
#ifdef MTRR
		mem_range_attach();
#endif
		cpu_init(ci);

#if NLAPIC > 0
		/*
		 * Enable local apic
		 */
		lapic_enable();
		lapic_calibrate_timer(ci);
#endif
#if NIOAPIC > 0
		ioapic_bsp_id = caa->cpu_number;
#endif
		cpu_init_mwait(&ci->ci_dev);
		break;

	case CPU_ROLE_AP:
		/*
		 * report on an AP
		 */
		printf("apid %d (application processor)\n", caa->cpu_number);

#ifdef MULTIPROCESSOR
		gdt_alloc_cpu(ci);
		ci->ci_flags |= CPUF_PRESENT | CPUF_AP;
		identifycpu(ci);
		sched_init_cpu(ci);
		ci->ci_next = cpu_info_list->ci_next;
		cpu_info_list->ci_next = ci;
		ncpus++;
#endif
		break;

	default:
		panic("unknown processor type??");
	}

#ifdef MULTIPROCESSOR
	if (mp_verbose) {
		printf("%s: kstack at 0x%lx for %d bytes\n",
		    ci->ci_dev.dv_xname, kstack, USPACE);
		printf("%s: idle pcb at %p, idle sp at 0x%x\n",
		    ci->ci_dev.dv_xname, pcb, pcb->pcb_esp);
	}
#endif
}
struct cpu_info *
alloc_cpuinfo(struct mainbus_attach_args *ma)
{
	paddr_t pa0, pa;
	vaddr_t va, va0;
	vsize_t sz = 8 * PAGE_SIZE;
	int portid;
	struct cpu_info *cpi, *ci;
	extern paddr_t cpu0paddr;

	portid = getpropint(ma->ma_node, "upa-portid", -1);
	if (portid == -1)
		portid = getpropint(ma->ma_node, "portid", -1);
	if (portid == -1)
		portid = getpropint(ma->ma_node, "cpuid", -1);
	if (portid == -1 && ma->ma_nreg > 0)
		portid = (ma->ma_reg[0].ur_paddr >> 32) & 0x0fffffff;
	if (portid == -1)
		panic("alloc_cpuinfo: portid");

	for (cpi = cpus; cpi != NULL; cpi = cpi->ci_next)
		if (cpi->ci_upaid == portid)
			return cpi;

	va = uvm_km_valloc_align(kernel_map, sz, 8 * PAGE_SIZE, 0);
	if (va == 0)
		panic("alloc_cpuinfo: no virtual space");
	va0 = va;

	pa0 = cpu0paddr;
	cpu0paddr += sz;

	for (pa = pa0; pa < cpu0paddr; pa += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);

	pmap_update(pmap_kernel());

	cpi = (struct cpu_info *)(va0 + CPUINFO_VA - INTSTACK);

	memset((void *)va0, 0, sz);

	/*
	 * Initialize cpuinfo structure.
	 *
	 * Arrange pcb, idle stack and interrupt stack in the same
	 * way as is done for the boot CPU in pmap.c.
	 */
	cpi->ci_next = NULL;
	cpi->ci_curproc = NULL;
	cpi->ci_number = ncpus++;
	cpi->ci_upaid = portid;
	cpi->ci_fpproc = NULL;
#ifdef MULTIPROCESSOR
	cpi->ci_spinup = cpu_hatch;	/* XXX */
#else
	cpi->ci_spinup = NULL;
#endif
	cpi->ci_initstack = cpi;
	cpi->ci_paddr = pa0;
#ifdef SUN4V
	cpi->ci_mmfsa = pa0;
#endif
	cpi->ci_self = cpi;
	cpi->ci_node = ma->ma_node;

	sched_init_cpu(cpi);

	/*
	 * Finally, add itself to the list of active cpus.
	 */
	for (ci = cpus; ci->ci_next != NULL; ci = ci->ci_next)
		;
	ci->ci_next = cpi;

	return (cpi);
}
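/*
 * Note on the cpu_info placement in alloc_cpuinfo() above (an inference
 * from this code, not a statement taken from the surrounding sources):
 * the structure is placed CPUINFO_VA - INTSTACK bytes into the freshly
 * mapped 8-page block, i.e. at the same relative offset at which the
 * boot CPU's cpu_info sits above its interrupt stack in the pmap.c
 * bootstrap mapping, so the per-CPU stack and cpu_info layout stays
 * identical on every CPU.
 */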