static inline void
pmap_tlbstat_count(struct pmap *pm, vaddr_t va, tlbwhy_t why)
{
#ifdef TLBSTATS
	const cpuid_t cid = cpu_index(curcpu());
	bool local = false, remote = false;

	if (va != (vaddr_t)-1LL) {
		atomic_inc_64(&tlbstat_single_req.ev_count);
	}
	if (pm == pmap_kernel()) {
		atomic_inc_64(&tlbstat_kernel[why].ev_count);
		return;
	}

	if (va >= VM_MAXUSER_ADDRESS) {
		remote = kcpuset_isotherset(pm->pm_kernel_cpus, cid);
		local = kcpuset_isset(pm->pm_kernel_cpus, cid);
	}
	remote |= kcpuset_isotherset(pm->pm_cpus, cid);
	local |= kcpuset_isset(pm->pm_cpus, cid);

	if (local) {
		atomic_inc_64(&tlbstat_local[why].ev_count);
	}
	if (remote) {
		atomic_inc_64(&tlbstat_remote[why].ev_count);
	}
#endif
}
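/*
 * For reference, a minimal sketch of the local/remote classification
 * pattern used above, isolated from the statistics bookkeeping.  The
 * helper name (shootdown_scope) and the enum are hypothetical; the
 * kcpuset_isset()/kcpuset_isotherset() calls are the real kcpuset(9)
 * API: "is this CPU in the set" vs. "is any *other* CPU in the set".
 */
enum tlb_scope { SCOPE_NONE, SCOPE_LOCAL, SCOPE_REMOTE, SCOPE_BOTH };

static inline enum tlb_scope
shootdown_scope(const kcpuset_t *cpus, cpuid_t cid)
{
	const bool local = kcpuset_isset(cpus, cid);
	const bool remote = kcpuset_isotherset(cpus, cid);

	if (local && remote)
		return SCOPE_BOTH;
	if (local)
		return SCOPE_LOCAL;
	if (remote)
		return SCOPE_REMOTE;
	return SCOPE_NONE;
}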
/*
 * Pause this cpu
 */
void
cpu_pause(struct reg *regsp)
{
	int s;
	cpuid_t cii;

	/* Check cold first so the early return cannot leak a raised IPL. */
	if (__predict_false(cold))
		return;

	s = splhigh();
	cii = cpu_index(curcpu());

	do {
		kcpuset_atomic_set(cpus_paused, cii);
		/* Spin until another CPU clears our bit in cpus_paused. */
		do {
			;
		} while (kcpuset_isset(cpus_paused, cii));
		kcpuset_atomic_set(cpus_resumed, cii);
#if defined(DDB)
		if (ddb_running_on_this_cpu_p())
			cpu_Debugger();
		if (ddb_running_on_any_cpu_p())
			continue;
#endif
	} while (false);

	splx(s);
}
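/*
 * A minimal sketch of the resume side of this handshake, assuming the
 * same cpus_paused/cpus_resumed kcpusets; the function name and exact
 * ordering are illustrative, not the canonical implementation.  The
 * paused CPU spins in cpu_pause() until its bit in cpus_paused is
 * cleared, then acknowledges by setting its bit in cpus_resumed.
 */
static void
cpu_resume_sketch(cpuid_t cii)
{
	if (__predict_false(cold))
		return;

	/* Forget any stale acknowledgement, then release the CPU. */
	kcpuset_atomic_clear(cpus_resumed, cii);
	kcpuset_atomic_clear(cpus_paused, cii);

	/* Wait for the released CPU to acknowledge. */
	while (!kcpuset_isset(cpus_resumed, cii))
		;
}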
void
cpu_debug_dump(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	char running, hatched, paused, resumed, halted;

	db_printf("CPU CPUID STATE CPUINFO CPL INT MTX IPIS\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		hatched = (kcpuset_isset(cpus_hatched, cpu_index(ci)) ? 'H' : '-');
		running = (kcpuset_isset(cpus_running, cpu_index(ci)) ? 'R' : '-');
		paused  = (kcpuset_isset(cpus_paused,  cpu_index(ci)) ? 'P' : '-');
		resumed = (kcpuset_isset(cpus_resumed, cpu_index(ci)) ? 'r' : '-');
		halted  = (kcpuset_isset(cpus_halted,  cpu_index(ci)) ? 'h' : '-');
		db_printf("%3d 0x%03lx %c%c%c%c%c %p "
		    "%3d %3d %3d "
		    "0x%02" PRIx64 "/0x%02" PRIx64 "\n",
		    cpu_index(ci), ci->ci_cpuid,
		    running, hatched, paused, resumed, halted, ci,
		    ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count,
		    ci->ci_active_ipis, ci->ci_request_ipis);
	}
}
static int
ingenic_send_ipi(struct cpu_info *ci, int tag)
{
	uint32_t msg;

	msg = 1 << tag;

	mutex_enter(&ingenic_ipi_lock);
	if (kcpuset_isset(cpus_running, cpu_index(ci))) {
		/* Each core has its own mailbox register. */
		if (cpu_index(ci) == 0) {
			MTC0(msg, CP0_CORE_MBOX, 0);
		} else {
			MTC0(msg, CP0_CORE_MBOX, 1);
		}
	}
	mutex_exit(&ingenic_ipi_lock);
	return 0;
}
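/*
 * For illustration, a hedged sketch of how a caller might broadcast a
 * message to every other core through ingenic_send_ipi().  The wrapper
 * name is hypothetical; CPU_INFO_FOREACH and curcpu() are as used above,
 * and ingenic_send_ipi() itself skips CPUs that are not yet running.
 */
static void
ingenic_broadcast_ipi_sketch(int tag)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct cpu_info * const self = curcpu();

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci == self)
			continue;
		(void)ingenic_send_ipi(ci, tag);
	}
}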
struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intr_source *is;
	struct intrids_handler *ii_handler;
	intrid_t *ids;
	int i, irq, count;

	if (kcpuset_iszero(cpuset))
		return NULL;
	if (!kcpuset_isset(cpuset, 0)) /* XXX */
		return NULL;

	count = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand != NULL)
			count++;
	}

	ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
	    KM_SLEEP);
	if (ii_handler == NULL)
		return NULL;
	ii_handler->iih_nids = count;
	if (count == 0)
		return ii_handler;

	ids = ii_handler->iih_intrids;
	i = 0;
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		/* Ignore devices attached after counting "count". */
		if (i >= count)
			break;
		if (is->is_hand == NULL)
			continue;
		strncpy(ids[i], is->is_source, sizeof(intrid_t));
		i++;
	}

	return ii_handler;
}
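/*
 * A usage sketch: build the interrupt ID list for the primary CPU and
 * print each entry.  kcpuset_create()/kcpuset_destroy() are the usual
 * kcpuset(9) allocation calls; the assumption here is that the handler
 * is released with interrupt_destruct_intrids() -- if this port lacks
 * that counterpart, kmem_free() with the same size computed above would
 * be needed instead.
 */
static void
print_intrids_sketch(void)
{
	kcpuset_t *cpuset;
	struct intrids_handler *iih;
	int i;

	kcpuset_create(&cpuset, true);
	kcpuset_set(cpuset, 0);		/* primary CPU only */

	iih = interrupt_construct_intrids(cpuset);
	if (iih != NULL) {
		for (i = 0; i < iih->iih_nids; i++)
			printf("intr source: %s\n", iih->iih_intrids[i]);
		interrupt_destruct_intrids(iih);
	}
	kcpuset_destroy(cpuset);
}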
static inline void
pmap_tlb_processpacket(pmap_tlb_packet_t *tp, kcpuset_t *target)
{
	int err = 0;

	if (!kcpuset_match(target, kcpuset_attached)) {
		const struct cpu_info * const self = curcpu();
		CPU_INFO_ITERATOR cii;
		struct cpu_info *lci;

		for (CPU_INFO_FOREACH(cii, lci)) {
			const cpuid_t lcid = cpu_index(lci);

			/* Skip ourselves and CPUs outside the target set. */
			if (__predict_false(lci == self) ||
			    !kcpuset_isset(target, lcid)) {
				continue;
			}
			err |= x86_ipi(LAPIC_TLB_VECTOR, lci->ci_cpuid,
			    LAPIC_DLMODE_FIXED);
		}
	} else {
		/*
		 * Assumed completion -- the original snippet was truncated
		 * here.  When the target set matches every attached CPU,
		 * a single broadcast IPI (all except self) covers them all.
		 */
		err = x86_ipi(LAPIC_TLB_VECTOR, LAPIC_DEST_ALLEXCL,
		    LAPIC_DLMODE_FIXED);
	}
	KASSERT(err == 0);
}
void
cpu_hatch(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = ci->ci_tlb_info;

	/*
	 * Invalidate all the TLB entries (even wired ones) and then reserve
	 * space for the wired TLB entries.
	 */
	mips3_cp0_wired_write(0);
	tlb_invalidate_all();
	mips3_cp0_wired_write(ti->ti_wired);

	/*
	 * Setup HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
	 */
	cpu_hwrena_setup();

	/*
	 * If we are using register zero relative addressing to access cpu_info
	 * in the exception vectors, enter that mapping into TLB now.
	 */
	if (ci->ci_tlb_slot >= 0) {
		const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
		    | mips3_paddr_to_tlbpfn((vaddr_t)ci);
		const struct tlbmask tlbmask = {
			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
#if (PGSHIFT & 1)
			.tlb_lo0 = tlb_lo,
			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
#else
			.tlb_lo0 = 0,
			.tlb_lo1 = tlb_lo,
#endif
			.tlb_mask = -1,
		};

		tlb_invalidate_addr(tlbmask.tlb_hi, KERNEL_PID);
		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
	}

	/*
	 * Flush the icache just to be sure.
	 */
	mips_icache_sync_all();

	/*
	 * Let this CPU do its own initialization (for things that have to be
	 * done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_init)(ci);

	// Show this CPU as present.
	atomic_or_ulong(&ci->ci_flags, CPUF_PRESENT);

	/*
	 * Announce we are hatched
	 */
	kcpuset_atomic_set(cpus_hatched, cpu_index(ci));

	/*
	 * Now wait to be set free!
	 */
	while (!kcpuset_isset(cpus_running, cpu_index(ci))) {
		/* spin, spin, spin */
	}

	/*
	 * Initialize the MIPS count/compare clock.
	 */
	mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
	KASSERT(ci->ci_cycles_per_hz != 0);
	ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
	ci->ci_data.cpu_cc_skew = 0;

	/*
	 * Let this CPU do its own post-running initialization
	 * (for things that have to be done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_run)(ci);

	/*
	 * Now turn on interrupts (and verify they are on).
	 */
	spl0();
	KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
	KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
	kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));

	/*
	 * And do a tail call to idle_loop.
	 */
	idle_loop(NULL);
}

void
cpu_boot_secondary_processors(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (CPU_IS_PRIMARY(ci))
			continue;
		KASSERT(ci->ci_data.cpu_idlelwp);

		/*
		 * Skip this CPU if it didn't successfully hatch.
		 */
		if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
			continue;

		ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
		kcpuset_set(cpus_running, cpu_index(ci));

		// Spin until the CPU calls idle_loop.
		for (u_int i = 0; i < 100; i++) {
			if (kcpuset_isset(cpus_running, cpu_index(ci)))
				break;
			delay(1000);
		}
	}
}
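/*
 * The hatch/boot pairing above is a simple two-kcpuset rendezvous: the
 * secondary announces itself in cpus_hatched and spins on cpus_running,
 * while the boot CPU releases it by setting its bit in cpus_running.
 * A hedged, self-contained sketch of the same pattern (function names
 * are illustrative):
 */
static void
secondary_side(kcpuset_t *hatched, kcpuset_t *running, cpuid_t cid)
{
	kcpuset_atomic_set(hatched, cid);	/* "I exist" */
	while (!kcpuset_isset(running, cid))
		;				/* wait to be set free */
}

static void
boot_side(kcpuset_t *hatched, kcpuset_t *running, cpuid_t cid)
{
	if (!kcpuset_isset(hatched, cid))
		return;				/* CPU never hatched */
	kcpuset_set(running, cid);		/* release it */
}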
bool
cpu_is_paused(cpuid_t cii)
{

	return !cold && kcpuset_isset(cpus_paused, cii);
}
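/*
 * Usage sketch: ask every other CPU to pause, then wait (with a bounded
 * retry loop) until each one reports paused via cpu_is_paused().  The
 * function name, the cpu_send_ipi() call, and the IPI_SUSPEND tag are
 * assumptions for illustration, not the canonical implementation.
 */
static void
pause_others_sketch(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct cpu_info * const self = curcpu();

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci == self)
			continue;
		/* Hypothetical IPI asking the CPU to enter cpu_pause(). */
		cpu_send_ipi(ci, IPI_SUSPEND);
	}

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci == self)
			continue;
		for (u_int i = 0; i < 100 && !cpu_is_paused(cpu_index(ci)); i++)
			delay(1000);
	}
}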