/*
 * Map the local APIC and set up the necessary interrupt vectors.
 */
void
lapic_init(vm_paddr_t addr)
{
	u_int regs[4];
	int i, arat;

	/* Map the local APIC and set up the spurious interrupt handler. */
	KASSERT(trunc_page(addr) == addr,
	    ("local APIC not aligned on a page boundary"));
	lapic_paddr = addr;
	lapic = pmap_mapdev(addr, sizeof(lapic_t));
	setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
	    GSEL_APIC);

	/* Perform basic initialization of the BSP's local APIC. */
	lapic_enable();

	/* Set BSP's per-CPU local APIC ID. */
	PCPU_SET(apic_id, lapic_id());

	/* Local APIC timer interrupt. */
	setidt(APIC_TIMER_INT, IDTVEC(timerint), SDT_APIC, SEL_KPL, GSEL_APIC);

	/* Local APIC error interrupt. */
	setidt(APIC_ERROR_INT, IDTVEC(errorint), SDT_APIC, SEL_KPL, GSEL_APIC);

	/* XXX: Thermal interrupt */

	/* Local APIC CMCI. */
	setidt(APIC_CMC_INT, IDTVEC(cmcint), SDT_APICT, SEL_KPL, GSEL_APIC);

	if (resource_int_value("apic", 0, "clock", &i) != 0 || i != 0) {
		arat = 0;
		/* Intel CPUID 0x06 EAX[2] set if APIC timer runs in C3. */
		if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_high >= 6) {
			do_cpuid(0x06, regs);
			if ((regs[0] & CPUTPM1_ARAT) != 0)
				arat = 1;
		}
		bzero(&lapic_et, sizeof(lapic_et));
		lapic_et.et_name = "LAPIC";
		lapic_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
		    ET_FLAGS_PERCPU;
		lapic_et.et_quality = 600;
		if (!arat) {
			lapic_et.et_flags |= ET_FLAGS_C3STOP;
			lapic_et.et_quality -= 200;
		}
		lapic_et.et_frequency = 0;
		/* We don't know the frequency yet, so these bounds are a guess. */
		lapic_et.et_min_period.sec = 0;
		lapic_et.et_min_period.frac = 0x00001000LL << 32;
		lapic_et.et_max_period.sec = 1;
		lapic_et.et_max_period.frac = 0;
		lapic_et.et_start = lapic_et_start;
		lapic_et.et_stop = lapic_et_stop;
		lapic_et.et_priv = NULL;
		et_register(&lapic_et);
	}
}
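/*
 * The ARAT probe above (CPUID leaf 0x06, EAX bit 2, "Always Running APIC
 * Timer") can be reproduced from user space.  A minimal sketch follows,
 * using the compiler's <cpuid.h> helper rather than the kernel's
 * do_cpuid(); the CPUTPM1_ARAT constant is redefined locally.
 * Build with: cc -o arat arat.c (x86, GCC or Clang).
 */
#include <cpuid.h>
#include <stdio.h>

#define CPUTPM1_ARAT	(1u << 2)	/* CPUID.06H:EAX[2] */

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x06 is the Thermal and Power Management leaf. */
	if (__get_cpuid(0x06, &eax, &ebx, &ecx, &edx) == 0) {
		printf("CPUID leaf 0x06 not supported\n");
		return (1);
	}
	printf("ARAT: %s\n", (eax & CPUTPM1_ARAT) ?
	    "timer keeps running in deep C-states" :
	    "timer may stop in C3 (ET_FLAGS_C3STOP needed)");
	return (0);
}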
/*
 * The CPU ends up here when it's ready to run; for now it jumps into an
 * infinite loop.
 * XXX should share some of this with init386 in machdep.c
 */
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	int s;

	cpu_init_idt();
	lapic_enable();
	lapic_startclock();
	lapic_set_lvt();
	gdt_init_cpu(ci);

	lldt(0);

	npxinit(ci);

	cpu_init(ci);

	/* Re-initialize memory range handling on the AP. */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->initAP(&mem_range_softc);

	s = splhigh();		/* XXX prevent softints from running here.. */
	lapic_tpr = 0;
	enable_intr();
	if (mp_verbose)
		printf("%s: CPU at apid %ld running\n",
		    ci->ci_dev.dv_xname, ci->ci_cpuid);
	nanouptime(&ci->ci_schedstate.spc_runtime);
	splx(s);
	SCHED_LOCK(s);
	cpu_switchto(NULL, sched_chooseproc());
}
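/*
 * Why "lapic_tpr = 0" above opens the CPU to all interrupts: the Task
 * Priority Register's upper nibble defines a priority class, and a fixed
 * interrupt is delivered only if its vector's class is higher.  The sketch
 * below models just that comparison; it deliberately ignores the
 * in-service register's contribution to the effective processor priority,
 * so it is an illustration, not the full LAPIC arbitration rule.
 */
#include <stdio.h>

/* Priority class is the upper nibble of an 8-bit vector number. */
static unsigned int
prio_class(unsigned int vector)
{
	return (vector >> 4);
}

/* Deliver a fixed interrupt only if its class exceeds the TPR class. */
static int
is_delivered(unsigned int vector, unsigned int tpr)
{
	return (prio_class(vector) > prio_class(tpr));
}

int
main(void)
{
	/* With TPR = 0, every valid vector (0x20..0xff) is delivered. */
	printf("vector 0x20, TPR 0x00: %d\n", is_delivered(0x20, 0x00));
	/* Raising TPR to 0x70 masks priority classes 0 through 7. */
	printf("vector 0x65, TPR 0x70: %d\n", is_delivered(0x65, 0x70));
	printf("vector 0x85, TPR 0x70: %d\n", is_delivered(0x85, 0x70));
	return (0);
}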
void
lapic_setup(int boot)
{
	struct lapic *la;
	u_int32_t maxlvt;
	register_t saveintr;
	char buf[MAXCOMLEN + 1];

	la = &lapics[lapic_id()];
	KASSERT(la->la_present, ("missing APIC structure"));
	saveintr = intr_disable();
	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;

	/* Initialize the TPR to allow all interrupts. */
	lapic_set_tpr(0);

	/* Set up the spurious vector and enable the local APIC. */
	lapic_enable();

	/* Program LINT[01] LVT entries. */
	lapic->lvt_lint0 = lvt_mode(la, LVT_LINT0, lapic->lvt_lint0);
	lapic->lvt_lint1 = lvt_mode(la, LVT_LINT1, lapic->lvt_lint1);

	/* Program the PMC LVT entry if present. */
	if (maxlvt >= LVT_PMC)
		lapic->lvt_pcint = lvt_mode(la, LVT_PMC, lapic->lvt_pcint);

	/* Program the timer LVT and set up its handler. */
	la->lvt_timer_cache = lapic->lvt_timer =
	    lvt_mode(la, LVT_TIMER, lapic->lvt_timer);
	if (boot) {
		snprintf(buf, sizeof(buf), "cpu%d:timer", PCPU_GET(cpuid));
		intrcnt_add(buf, &la->la_timer_count);
	}

	/* Set up the timer if it is configured. */
	if (la->la_timer_mode != 0) {
		KASSERT(la->la_timer_period != 0, ("lapic%u: zero divisor",
		    lapic_id()));
		lapic_timer_set_divisor(lapic_timer_divisor);
		if (la->la_timer_mode == 1)
			lapic_timer_periodic(la, la->la_timer_period, 1);
		else
			lapic_timer_oneshot(la, la->la_timer_period, 1);
	}

	/* Program the error LVT and clear any existing errors. */
	lapic->lvt_error = lvt_mode(la, LVT_ERROR, lapic->lvt_error);
	lapic->esr = 0;

	/* XXX: Thermal LVT */

	/* Program the CMCI LVT entry if present. */
	if (maxlvt >= LVT_CMCI)
		lapic->lvt_cmci = lvt_mode(la, LVT_CMCI, lapic->lvt_cmci);

	intr_restore(saveintr);
}
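/*
 * lapic_timer_set_divisor() above programs the timer's Divide Configuration
 * Register.  The DCR encodes the power-of-two divisor in bits 3, 1 and 0
 * (bit 2 is always zero); see the Intel SDM vol. 3, "Divide Configuration
 * Register".  A self-contained sketch of that mapping:
 */
#include <stdint.h>
#include <stdio.h>

/* Map a power-of-two divisor onto the DCR bit pattern. */
static int
dcr_encode(unsigned int divisor, uint32_t *out)
{
	switch (divisor) {
	case 1:   *out = 0xb; break;	/* 1011 */
	case 2:   *out = 0x0; break;	/* 0000 */
	case 4:   *out = 0x1; break;	/* 0001 */
	case 8:   *out = 0x2; break;	/* 0010 */
	case 16:  *out = 0x3; break;	/* 0011 */
	case 32:  *out = 0x8; break;	/* 1000 */
	case 64:  *out = 0x9; break;	/* 1001 */
	case 128: *out = 0xa; break;	/* 1010 */
	default:  return (-1);		/* not encodable */
	}
	return (0);
}

int
main(void)
{
	uint32_t dcr;
	unsigned int d;

	for (d = 1; d <= 128; d *= 2) {
		if (dcr_encode(d, &dcr) == 0)
			printf("divide by %3u -> DCR 0x%x\n", d, dcr);
	}
	return (0);
}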
/*
 * The CPU ends up here when it's ready to run.
 * This is called from code in mptramp.s; at this point, we are running
 * in the idle pcb/idle stack of the new CPU.  When this function returns,
 * this processor will enter the idle loop and start looking for work.
 *
 * XXX should share some of this with init386 in machdep.c
 */
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	int s;

	cpu_init_msrs(ci);
	cpu_probe_features(ci);
	cpu_feature &= ci->ci_feature_flags;

#ifdef DEBUG
	if (ci->ci_flags & CPUF_PRESENT)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	ci->ci_flags |= CPUF_PRESENT;

	lapic_enable();
	lapic_initclocks();

	while ((ci->ci_flags & CPUF_GO) == 0)
		delay(10);
#ifdef DEBUG
	if (ci->ci_flags & CPUF_RUNNING)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	lcr0(ci->ci_idle_pcb->pcb_cr0);
	cpu_init_idt();
	lapic_set_lvt();
	gdt_init_cpu(ci);
	fpuinit(ci);

	lldt(GSYSSEL(GLDT_SEL, SEL_KPL));

	cpu_init(ci);

	s = splhigh();
	lcr8(0);
	enable_intr();

	microuptime(&ci->ci_schedstate.spc_runtime);
	splx(s);

	SCHED_LOCK(s);
	cpu_switchto(NULL, sched_chooseproc());
}
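/*
 * The CPUF_PRESENT/CPUF_GO spin above is one half of a two-party handshake:
 * the AP announces itself and busy-waits until the BSP releases it.  The
 * sketch below reproduces the same protocol in user space, with a pthread
 * standing in for the AP and C11 atomics in place of the kernel's flag
 * word; the flag names mirror the kernel's but the values are arbitrary.
 * Build with: cc -pthread -o hatch hatch.c
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define CPUF_PRESENT	0x01	/* AP has reached its hatch code */
#define CPUF_GO		0x02	/* BSP allows the AP to proceed */

static atomic_uint ci_flags;

static void *
ap_hatch(void *arg)
{
	(void)arg;
	/* AP announces itself, then spins until the BSP releases it. */
	atomic_fetch_or(&ci_flags, CPUF_PRESENT);
	while ((atomic_load(&ci_flags) & CPUF_GO) == 0)
		sched_yield();	/* stands in for delay(10) */
	printf("AP: released, entering idle loop\n");
	return (NULL);
}

int
main(void)
{
	pthread_t ap;

	pthread_create(&ap, NULL, ap_hatch, NULL);
	/* BSP waits for the AP to show up, then sets CPUF_GO. */
	while ((atomic_load(&ci_flags) & CPUF_PRESENT) == 0)
		sched_yield();
	printf("BSP: AP present, setting CPUF_GO\n");
	atomic_fetch_or(&ci_flags, CPUF_GO);
	pthread_join(ap, NULL);
	return (0);
}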
/*
 * The CPU ends up here when it's ready to run.
 * This is called from code in mptramp.s; at this point, we are running
 * in the idle pcb/idle stack of the new CPU.  When this function returns,
 * this processor will enter the idle loop and start looking for work.
 *
 * XXX should share some of this with init386 in machdep.c
 */
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	int s;

	cpu_init_msrs(ci);

#ifdef DEBUG
	if (ci->ci_flags & CPUF_PRESENT)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	ci->ci_flags |= CPUF_PRESENT;

	lapic_enable();
	lapic_startclock();

	if ((ci->ci_flags & CPUF_IDENTIFIED) == 0) {
		/*
		 * We need to wait until we can identify, otherwise dmesg
		 * output will be messy.
		 */
		while ((ci->ci_flags & CPUF_IDENTIFY) == 0)
			delay(10);

		identifycpu(ci);

		/* Signal we're done */
		atomic_clearbits_int(&ci->ci_flags, CPUF_IDENTIFY);
		/* Prevent identifycpu() from running again */
		atomic_setbits_int(&ci->ci_flags, CPUF_IDENTIFIED);
	}

	while ((ci->ci_flags & CPUF_GO) == 0)
		delay(10);
#ifdef DEBUG
	if (ci->ci_flags & CPUF_RUNNING)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	lcr0(ci->ci_idle_pcb->pcb_cr0);
	cpu_init_idt();
	lapic_set_lvt();
	gdt_init_cpu(ci);
	fpuinit(ci);
	lldt(0);
	cpu_init(ci);

	s = splhigh();
	lcr8(0);
	enable_intr();

	microuptime(&ci->ci_schedstate.spc_runtime);
	splx(s);

	SCHED_LOCK(s);
	cpu_switchto(NULL, sched_chooseproc());
}
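/*
 * The CPUF_IDENTIFY/CPUF_IDENTIFIED pair above serializes identifycpu()
 * calls so dmesg output doesn't interleave: the BSP raises CPUF_IDENTIFY
 * for one AP at a time, and the AP clears it when done.  A minimal
 * user-space sketch of that two-flag protocol, modeling OpenBSD's
 * atomic_setbits_int()/atomic_clearbits_int() with C11 fetch-or/fetch-and:
 */
#include <stdatomic.h>
#include <stdio.h>

#define CPUF_IDENTIFY	0x04	/* BSP asks the AP to identify itself */
#define CPUF_IDENTIFIED	0x08	/* AP has finished identification */

static void
atomic_setbits_int(atomic_uint *p, unsigned int bits)
{
	atomic_fetch_or(p, bits);
}

static void
atomic_clearbits_int(atomic_uint *p, unsigned int bits)
{
	atomic_fetch_and(p, ~bits);
}

int
main(void)
{
	atomic_uint ci_flags = 0;

	/* BSP side: request identification. */
	atomic_setbits_int(&ci_flags, CPUF_IDENTIFY);

	/* AP side: acknowledge and mark done (normally in cpu_hatch()). */
	if (atomic_load(&ci_flags) & CPUF_IDENTIFY) {
		printf("identifycpu() runs exactly once here\n");
		atomic_clearbits_int(&ci_flags, CPUF_IDENTIFY);
		atomic_setbits_int(&ci_flags, CPUF_IDENTIFIED);
	}
	printf("flags: 0x%x\n", atomic_load(&ci_flags));
	return (0);
}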
void
cpu_attach(struct device *parent, struct device *self, void *aux)
{
	struct cpu_softc *sc = (void *)self;
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
#if defined(MULTIPROCESSOR)
	int cpunum = sc->sc_dev.dv_unit;
	vaddr_t kstack;
	struct pcb *pcb;
#endif

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		ci = malloc(sizeof(*ci), M_DEVBUF, M_WAITOK|M_ZERO);
#if defined(MULTIPROCESSOR)
		if (cpu_info[cpunum] != NULL)
			panic("cpu at apic id %d already attached?", cpunum);
		cpu_info[cpunum] = ci;
#endif
#ifdef TRAPLOG
		ci->ci_tlog_base = malloc(sizeof(struct tlog),
		    M_DEVBUF, M_WAITOK);
#endif
	} else {
		ci = &cpu_info_primary;
#if defined(MULTIPROCESSOR)
		if (caa->cpu_number != lapic_cpu_number()) {
			panic("%s: running cpu is at apic %d"
			    " instead of at expected %d",
			    sc->sc_dev.dv_xname, lapic_cpu_number(),
			    caa->cpu_number);
		}
#endif
	}

	ci->ci_self = ci;
	sc->sc_info = ci;
	ci->ci_dev = self;
	ci->ci_apicid = caa->cpu_number;
#ifdef MULTIPROCESSOR
	ci->ci_cpuid = cpunum;
#else
	ci->ci_cpuid = 0;	/* False for APs, but they're not used anyway */
#endif
	ci->ci_func = caa->cpu_func;

	simple_lock_init(&ci->ci_slock);

#if defined(MULTIPROCESSOR)
	/*
	 * Allocate USPACE pages for the idle PCB and stack.
	 * XXX should we just sleep here?
	 */
	kstack = (vaddr_t)km_alloc(USPACE, &kv_any, &kp_zero, &kd_nowait);
	if (kstack == 0) {
		if (caa->cpu_role != CPU_ROLE_AP) {
			panic("cpu_attach: unable to allocate idle stack for"
			    " primary");
		}
		printf("%s: unable to allocate idle stack\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	pcb = ci->ci_idle_pcb = (struct pcb *)kstack;
	pcb->pcb_kstack = kstack + USPACE - 16;
	pcb->pcb_rbp = pcb->pcb_rsp = kstack + USPACE - 16;
	pcb->pcb_pmap = pmap_kernel();
	pcb->pcb_cr0 = rcr0();
	pcb->pcb_cr3 = pcb->pcb_pmap->pm_pdirpa;
#endif

	/* further PCB init done later. */

	printf(": ");

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		printf("(uniprocessor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);
		break;

	case CPU_ROLE_BP:
		printf("apid %d (boot processor)\n", caa->cpu_number);
		ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);

#if NLAPIC > 0
		/* Enable the local APIC. */
		lapic_enable();
		lapic_calibrate_timer(ci);
#endif
#if NIOAPIC > 0
		ioapic_bsp_id = caa->cpu_number;
#endif
		break;

	case CPU_ROLE_AP:
		/* Report on an AP. */
		printf("apid %d (application processor)\n", caa->cpu_number);

#if defined(MULTIPROCESSOR)
		cpu_intr_init(ci);
		gdt_alloc_cpu(ci);
		sched_init_cpu(ci);
		cpu_start_secondary(ci);
		ncpus++;
		if (ci->ci_flags & CPUF_PRESENT) {
			ci->ci_next = cpu_info_list->ci_next;
			cpu_info_list->ci_next = ci;
		}
#else
		printf("%s: not started\n", sc->sc_dev.dv_xname);
#endif
		break;

	default:
		panic("unknown processor type??");
	}
	cpu_vm_init(ci);

#if defined(MULTIPROCESSOR)
	if (mp_verbose) {
		printf("%s: kstack at 0x%lx for %d bytes\n",
		    sc->sc_dev.dv_xname, kstack, USPACE);
		printf("%s: idle pcb at %p, idle sp at 0x%lx\n",
		    sc->sc_dev.dv_xname, pcb, pcb->pcb_rsp);
	}
#endif
}
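/*
 * The CPU_ROLE_AP case above splices each new cpu_info right after the
 * list head, so the primary CPU (cpu_info_list) always stays first.  A
 * self-contained sketch of that splice, with the struct trimmed down to
 * the fields the list walk needs:
 */
#include <stdio.h>
#include <stdlib.h>

struct cpu_info {
	int cpu_num;
	struct cpu_info *ci_next;
};

int
main(void)
{
	struct cpu_info primary = { 0, NULL };
	struct cpu_info *cpu_info_list = &primary;
	struct cpu_info *ci, *p;
	int n;

	/* Attach three APs the way cpu_attach() does. */
	for (n = 1; n <= 3; n++) {
		ci = calloc(1, sizeof(*ci));
		ci->cpu_num = n;
		ci->ci_next = cpu_info_list->ci_next;
		cpu_info_list->ci_next = ci;
	}
	/* Primary first, then APs in reverse attach order: 0 3 2 1. */
	for (p = cpu_info_list; p != NULL; p = p->ci_next)
		printf("cpu%d\n", p->cpu_num);
	return (0);
}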
/*
 * Map the local APIC and set up the necessary interrupt vectors.
 */
static void
native_lapic_init(vm_paddr_t addr)
{
	uint32_t ver;
	u_int regs[4];
	int i, arat;

	/*
	 * Enable x2APIC mode if possible.  Map the local APIC
	 * registers page.
	 *
	 * Keep the LAPIC registers page mapped uncached for x2APIC
	 * mode too, to have direct map page attribute set to
	 * uncached.  This is needed to work around CPU errata present
	 * on all Intel processors.
	 */
	KASSERT(trunc_page(addr) == addr,
	    ("local APIC not aligned on a page boundary"));
	lapic_paddr = addr;
	lapic_map = pmap_mapdev(addr, PAGE_SIZE);
	if (x2apic_mode) {
		native_lapic_enable_x2apic();
		lapic_map = NULL;
	}

	/* Set up the spurious interrupt handler. */
	setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
	    GSEL_APIC);

	/* Perform basic initialization of the BSP's local APIC. */
	lapic_enable();

	/* Set BSP's per-CPU local APIC ID. */
	PCPU_SET(apic_id, lapic_id());

	/* Local APIC timer interrupt. */
	setidt(APIC_TIMER_INT, IDTVEC(timerint), SDT_APIC, SEL_KPL, GSEL_APIC);

	/* Local APIC error interrupt. */
	setidt(APIC_ERROR_INT, IDTVEC(errorint), SDT_APIC, SEL_KPL, GSEL_APIC);

	/* XXX: Thermal interrupt */

	/* Local APIC CMCI. */
	setidt(APIC_CMC_INT, IDTVEC(cmcint), SDT_APICT, SEL_KPL, GSEL_APIC);

	if (resource_int_value("apic", 0, "clock", &i) != 0 || i != 0) {
		arat = 0;
		/* Intel CPUID 0x06 EAX[2] set if APIC timer runs in C3. */
		if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_high >= 6) {
			do_cpuid(0x06, regs);
			if ((regs[0] & CPUTPM1_ARAT) != 0)
				arat = 1;
		}
		bzero(&lapic_et, sizeof(lapic_et));
		lapic_et.et_name = "LAPIC";
		lapic_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
		    ET_FLAGS_PERCPU;
		lapic_et.et_quality = 600;
		if (!arat) {
			lapic_et.et_flags |= ET_FLAGS_C3STOP;
			lapic_et.et_quality -= 200;
		}
		lapic_et.et_frequency = 0;
		/* We don't know the frequency yet, so these bounds are a guess. */
		lapic_et.et_min_period = 0x00001000LL;
		lapic_et.et_max_period = SBT_1S;
		lapic_et.et_start = lapic_et_start;
		lapic_et.et_stop = lapic_et_stop;
		lapic_et.et_priv = NULL;
		et_register(&lapic_et);
	}

	/*
	 * Set lapic_eoi_suppression after lapic_enable(), to not
	 * enable suppression in the hardware prematurely.  Note that
	 * we by default enable suppression even when the system has
	 * only one IO-APIC, since EOI is otherwise broadcast to all
	 * APIC agents, including CPUs.
	 */
	ver = lapic_read32(LAPIC_VERSION);
	if ((ver & APIC_VER_EOI_SUPPRESSION) != 0) {
		lapic_eoi_suppression = 1;
		TUNABLE_INT_FETCH("hw.lapic_eoi_suppression",
		    &lapic_eoi_suppression);
	}
}
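/*
 * When x2apic_mode is set above, the MMIO mapping is dropped because
 * x2APIC registers are reached through MSRs instead: the MSR index is
 * 0x800 plus the xAPIC MMIO offset divided by 16 (Intel SDM vol. 3,
 * x2APIC register address space).  The kernel's lapic_read32() dispatches
 * on x2apic_mode along these lines; the sketch below shows only the
 * offset-to-MSR mapping, since real rdmsr access is privileged.
 */
#include <stdint.h>
#include <stdio.h>

#define MSR_APIC_000	0x800	/* base of the x2APIC MSR range */
#define LAPIC_ID	0x020	/* xAPIC MMIO offsets */
#define LAPIC_VERSION	0x030
#define LAPIC_TPR	0x080

static uint32_t
x2apic_msr(uint32_t mmio_off)
{
	return (MSR_APIC_000 + (mmio_off >> 4));
}

int
main(void)
{
	printf("ID:      MMIO 0x%03x -> MSR 0x%03x\n", LAPIC_ID,
	    x2apic_msr(LAPIC_ID));
	printf("VERSION: MMIO 0x%03x -> MSR 0x%03x\n", LAPIC_VERSION,
	    x2apic_msr(LAPIC_VERSION));
	printf("TPR:     MMIO 0x%03x -> MSR 0x%03x\n", LAPIC_TPR,
	    x2apic_msr(LAPIC_TPR));
	return (0);
}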
void
lapic_setup(void)
{
	/* The low 12 bits of the APIC base MSR hold flags, not address. */
	lapic_base = msr_read(MSR_APIC_BASE) & ~0xfff;
	lapic_enable();
}
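/*
 * A user-space sketch of the IA32_APIC_BASE MSR (0x1B) layout that the
 * "& ~0xfff" mask above relies on: bit 8 flags the bootstrap processor,
 * bit 10 enables x2APIC mode, bit 11 globally enables the APIC, and the
 * bits from 12 up hold the page-aligned base address.  The sample value
 * is made up, since reading the real MSR requires ring 0.
 */
#include <stdint.h>
#include <stdio.h>

#define APIC_BASE_BSP	(1ULL << 8)	/* this CPU is the bootstrap CPU */
#define APIC_BASE_EXTD	(1ULL << 10)	/* x2APIC mode enabled */
#define APIC_BASE_EN	(1ULL << 11)	/* xAPIC globally enabled */
#define APIC_BASE_ADDR	(~0xfffULL)	/* base address, 4 KiB aligned */

int
main(void)
{
	/* Typical value on a BSP with the APIC at the default 0xfee00000. */
	uint64_t msr = 0xfee00000ULL | APIC_BASE_EN | APIC_BASE_BSP;

	printf("base:   0x%llx\n",
	    (unsigned long long)(msr & APIC_BASE_ADDR));
	printf("bsp:    %d\n", (msr & APIC_BASE_BSP) != 0);
	printf("x2apic: %d\n", (msr & APIC_BASE_EXTD) != 0);
	printf("enable: %d\n", (msr & APIC_BASE_EN) != 0);
	return (0);
}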
void
cpu_attach(struct device *parent, struct device *self, void *aux)
{
	struct cpu_info *ci = (struct cpu_info *)self;
	struct cpu_attach_args *caa = (struct cpu_attach_args *)aux;
#ifdef MULTIPROCESSOR
	int cpunum = ci->ci_dev.dv_unit;
	vaddr_t kstack;
	struct pcb *pcb;
#endif

	if (caa->cpu_role == CPU_ROLE_AP) {
#ifdef MULTIPROCESSOR
		if (cpu_info[cpunum] != NULL)
			panic("cpu at apic id %d already attached?", cpunum);
		cpu_info[cpunum] = ci;
#endif
	} else {
		ci = &cpu_info_primary;
#ifdef MULTIPROCESSOR
		if (caa->cpu_number != lapic_cpu_number()) {
			panic("%s: running cpu is at apic %d"
			    " instead of at expected %d",
			    self->dv_xname, lapic_cpu_number(),
			    caa->cpu_number);
		}
#endif
		bcopy(self, &ci->ci_dev, sizeof *self);
	}

	ci->ci_self = ci;
	ci->ci_apicid = caa->cpu_number;
#ifdef MULTIPROCESSOR
	ci->ci_cpuid = cpunum;
#else
	ci->ci_cpuid = 0;	/* False for APs, but they're not used anyway */
#endif
	ci->ci_signature = caa->cpu_signature;
	ci->ci_feature_flags = caa->feature_flags;
	ci->ci_func = caa->cpu_func;

#ifdef MULTIPROCESSOR
	/*
	 * Allocate UPAGES contiguous pages for the idle PCB and stack.
	 */
	kstack = uvm_km_alloc(kernel_map, USPACE);
	if (kstack == 0) {
		if (cpunum == 0) { /* XXX */
			panic("cpu_attach: unable to allocate idle stack for"
			    " primary");
		}
		printf("%s: unable to allocate idle stack\n",
		    ci->ci_dev.dv_xname);
		return;
	}
	pcb = ci->ci_idle_pcb = (struct pcb *)kstack;
	memset(pcb, 0, USPACE);

	pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	pcb->pcb_tss.tss_esp0 = kstack + USPACE - 16 -
	    sizeof(struct trapframe);
	pcb->pcb_tss.tss_esp = kstack + USPACE - 16 -
	    sizeof(struct trapframe);
	pcb->pcb_pmap = pmap_kernel();
	pcb->pcb_cr3 = pcb->pcb_pmap->pm_pdirpa;
#endif
	ci->ci_curpmap = pmap_kernel();

	/* further PCB init done later. */

	printf(": ");

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		printf("(uniprocessor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
		identifycpu(ci);
#ifdef MTRR
		mem_range_attach();
#endif
		cpu_init(ci);
		cpu_init_mwait(&ci->ci_dev);
		break;

	case CPU_ROLE_BP:
		printf("apid %d (boot processor)\n", caa->cpu_number);
		ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
		identifycpu(ci);
#ifdef MTRR
		mem_range_attach();
#endif
		cpu_init(ci);

#if NLAPIC > 0
		/* Enable the local APIC. */
		lapic_enable();
		lapic_calibrate_timer(ci);
#endif
#if NIOAPIC > 0
		ioapic_bsp_id = caa->cpu_number;
#endif
		cpu_init_mwait(&ci->ci_dev);
		break;

	case CPU_ROLE_AP:
		/* Report on an AP. */
		printf("apid %d (application processor)\n", caa->cpu_number);

#ifdef MULTIPROCESSOR
		gdt_alloc_cpu(ci);
		ci->ci_flags |= CPUF_PRESENT | CPUF_AP;
		identifycpu(ci);
		sched_init_cpu(ci);
		ci->ci_next = cpu_info_list->ci_next;
		cpu_info_list->ci_next = ci;
		ncpus++;
#endif
		break;

	default:
		panic("unknown processor type??");
	}

#ifdef MULTIPROCESSOR
	if (mp_verbose) {
		printf("%s: kstack at 0x%lx for %d bytes\n",
		    ci->ci_dev.dv_xname, kstack, USPACE);
		printf("%s: idle pcb at %p, idle sp at 0x%x\n",
		    ci->ci_dev.dv_xname, pcb, pcb->pcb_esp);
	}
#endif
}