static int
mpc7xxx_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS,
	    ("[powerpc,%d] illegal row index %d", __LINE__, ri));

	pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
	KASSERT(pm,
	    ("[powerpc,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	tmp = mpc7xxx_pmcn_read(ri);
	PMCDBG(MDP,REA,2,"ppc-read id=%d -> %jd", ri, tmp);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
	else
		*v = tmp;

	return 0;
}
static int
ucp_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	PMCDBG(MDP,WRI,1, "ucp-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
	    UCP_PMC0 + ri, v);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucp_reload_count_to_perfctr_value(v);

	/*
	 * Write the new value to the counter.  The counter will be in
	 * a stopped state when the pcd_write() entry point is called.
	 */
	wrmsr(UCP_PMC0 + ri, v);

	return (0);
}
static int
p4_release_pmc(int cpu, int ri, struct pmc *pm)
{
	enum pmc_p4escr escr;
	struct p4_cpu *pc;

	KASSERT(ri >= 0 && ri < P4_NPMCS,
	    ("[p4,%d] illegal row-index %d", __LINE__, ri));

	escr = pm->pm_md.pm_p4.pm_p4_escr;

	PMCDBG(MDP,REL,1, "p4-release cpu=%d ri=%d escr=%d", cpu, ri, escr);

	if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
		pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];

		KASSERT(pc->pc_p4pmcs[ri].phw_pmc == NULL,
		    ("[p4,%d] releasing configured PMC ri=%d", __LINE__, ri));

		P4_ESCR_UNMARK_ROW_STANDALONE(escr);
		KASSERT(pc->pc_escrs[escr] == ri,
		    ("[p4,%d] escr[%d] not allocated to ri %d", __LINE__,
		    escr, ri));
		pc->pc_escrs[escr] = P4_INVALID_PMC_INDEX; /* mark as free */
	} else
		P4_ESCR_UNMARK_ROW_THREAD(escr);

	return (0);
}
static int
ucf_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct uncore_cpu *cc;
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucf_reload_count_to_perfctr_value(v);

	wrmsr(UCF_CTRL, 0);	/* Turn off fixed counters */
	wrmsr(UCF_CTR0 + ri, v);
	wrmsr(UCF_CTRL, cc->pc_ucfctrl);

	PMCDBG(MDP,WRI,1, "ucf-write cpu=%d ri=%d v=%jx ucfctrl=%jx", cpu,
	    ri, v, (uintmax_t) rdmsr(UCF_CTRL));

	return (0);
}
static int
ucp_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	tmp = rdmsr(UCP_PMC0 + ri);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ucp_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp;

	PMCDBG(MDP,REA,1, "ucp-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
	    UCP_PMC0 + ri, *v);

	return (0);
}
static int
p4_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;
	struct p4_cpu *pc;
	enum pmc_mode mode;
	struct p4pmc_descr *pd;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < P4_NPMCS,
	    ("[p4,%d] illegal row-index %d", __LINE__, ri));

	pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
	pm = pc->pc_p4pmcs[ri].phw_pmc;
	pd = &p4_pmcdesc[ri];

	KASSERT(pm != NULL,
	    ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__, cpu, ri));

	KASSERT(pd->pm_descr.pd_class == PMC_TO_CLASS(pm),
	    ("[p4,%d] class mismatch pd %d != id class %d", __LINE__,
	    pd->pm_descr.pd_class, PMC_TO_CLASS(pm)));

	mode = PMC_TO_MODE(pm);

	PMCDBG(MDP,REA,1, "p4-read cpu=%d ri=%d mode=%d", cpu, ri, mode);

	KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
	    ("[p4,%d] unknown PMC class %d", __LINE__, pd->pm_descr.pd_class));

	tmp = rdmsr(p4_pmcdesc[ri].pm_pmc_msr);

	if (PMC_IS_VIRTUAL_MODE(mode)) {
		if (tmp < P4_PCPU_HW_VALUE(pc,ri,cpu)) /* 40 bit overflow */
			tmp += (P4_PERFCTR_MASK + 1) -
			    P4_PCPU_HW_VALUE(pc,ri,cpu);
		else
			tmp -= P4_PCPU_HW_VALUE(pc,ri,cpu);
		tmp += P4_PCPU_PMC_VALUE(pc,ri,cpu);
	}

	if (PMC_IS_SAMPLING_MODE(mode)) /* undo transformation */
		*v = P4_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
	else
		*v = tmp;

	PMCDBG(MDP,REA,2, "p4-read -> %jx", *v);

	return (0);
}
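/*
 * Illustrative note (editor's sketch, not part of the original source):
 * in process-virtual mode p4_read_pmc() above reports the accumulated
 * count as
 *
 *	saved software value + (current reading - reading at start),
 *
 * with the subtraction done modulo 2^40, assuming P4_PERFCTR_MASK is the
 * 40-bit mask 0xFFFFFFFFFF as the "40 bit overflow" comment implies.
 * For example, if the counter read 0xFFFFFFFFF0 when the PMC was started
 * and now reads 0x10, the delta is
 * (P4_PERFCTR_MASK + 1) - 0xFFFFFFFFF0 + 0x10 = 0x20 events.
 */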
int
pmc_soft_intr(struct pmckern_soft *ks)
{
	struct pmc *pm;
	struct soft_cpu *pc;
	int ri, processed, error, user_mode;

	KASSERT(ks->pm_cpu >= 0 && ks->pm_cpu < pmc_cpu_max(),
	    ("[soft,%d] CPU %d out of range", __LINE__, ks->pm_cpu));

	processed = 0;
	pc = soft_pcpu[ks->pm_cpu];

	for (ri = 0; ri < SOFT_NPMCS; ri++) {

		pm = pc->soft_hw[ri].phw_pmc;
		if (pm == NULL ||
		    pm->pm_state != PMC_STATE_RUNNING ||
		    pm->pm_event != ks->pm_ev) {
			continue;
		}

		processed = 1;
		pc->soft_values[ri]++;
		if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
			user_mode = TRAPF_USERMODE(ks->pm_tf);
			error = pmc_process_interrupt(ks->pm_cpu, PMC_SR, pm,
			    ks->pm_tf, user_mode);
			if (error) {
				soft_stop_pmc(ks->pm_cpu, ri);
				continue;
			}

			if (user_mode) {
				/*
				 * If in user mode, set up an AST to process
				 * the callchain out of interrupt context.
				 */
				curthread->td_flags |= TDF_ASTPENDING;
			}
		}
	}

	atomic_add_int(processed ? &pmc_stats.pm_intr_processed :
	    &pmc_stats.pm_intr_ignored, 1);

	return (processed);
}
static int
arm64_intr(struct trapframe *tf)
{
	struct arm64_cpu *pc;
	int retval, ri;
	struct pmc *pm;
	int error;
	int reg, cpu;

	cpu = curcpu;
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[arm64,%d] CPU %d out of range", __LINE__, cpu));

	retval = 0;
	pc = arm64_pcpu[cpu];

	for (ri = 0; ri < arm64_npmcs; ri++) {
		pm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;
		if (pm == NULL)
			continue;
		if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		/* Check if the counter has overflowed. */
		reg = (1 << ri);
		if ((READ_SPECIALREG(PMOVSCLR_EL0) & reg) == 0)
			continue;

		/* Clear the overflow flag. */
		WRITE_SPECIALREG(PMOVSCLR_EL0, reg);
		isb();

		retval = 1; /* Found an interrupting PMC. */

		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		error = pmc_process_interrupt(PMC_HR, pm, tf);
		if (error)
			arm64_stop_pmc(cpu, ri);

		/* Reload sampling count */
		arm64_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
	}

	return (retval);
}
static int
p4_write_pmc(int cpu, int ri, pmc_value_t v)
{
	enum pmc_mode mode;
	struct pmc *pm;
	struct p4_cpu *pc;
	const struct pmc_hw *phw;
	const struct p4pmc_descr *pd;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < P4_NPMCS,
	    ("[p4,%d] illegal row-index %d", __LINE__, ri));

	pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
	phw = &pc->pc_p4pmcs[ri];
	pm = phw->phw_pmc;
	pd = &p4_pmcdesc[ri];

	KASSERT(pm != NULL,
	    ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__, cpu, ri));

	mode = PMC_TO_MODE(pm);

	PMCDBG(MDP,WRI,1, "p4-write cpu=%d ri=%d mode=%d v=%jx", cpu, ri,
	    mode, v);

	/*
	 * Write the PMC value to the register/saved value: for
	 * sampling mode PMCs, the value to be programmed into the PMC
	 * counter is -(C+1) where 'C' is the requested sample rate.
	 */
	if (PMC_IS_SAMPLING_MODE(mode))
		v = P4_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

	if (PMC_IS_SYSTEM_MODE(mode))
		wrmsr(pd->pm_pmc_msr, v);
	else
		P4_PCPU_PMC_VALUE(pc,ri,cpu) = v;

	return (0);
}
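/*
 * Illustrative note (editor's sketch): per the comment in p4_write_pmc()
 * above, P4_RELOAD_COUNT_TO_PERFCTR_VALUE(C) is assumed to program the
 * counter with -(C+1) truncated to its 40-bit width, i.e. 2^40 - (C + 1).
 * For a reload count of 1000 that is 0xFFFFFFFC17, so the counter
 * overflows and raises a PMI after roughly 1000 further events;
 * p4_read_pmc() applies the inverse macro so callers always see the
 * reload count rather than the raw hardware value.
 */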
static int
mips_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[mips,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < mips_npmcs,
	    ("[mips,%d] illegal row-index %d", __LINE__, ri));

	pm = mips_pcpu[cpu]->pc_mipspmcs[ri].phw_pmc;

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = (1UL << (mips_pmc_spec.ps_counter_width - 1)) - v;

	PMCDBG(MDP,WRI,1,"mips-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	mips_pmcn_write(ri, v);

	return 0;
}
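/*
 * Illustrative note (editor's sketch): mips_write_pmc() above treats bit
 * (ps_counter_width - 1) as the overflow threshold, so a sampling PMC is
 * preloaded with 2^(width-1) - reload and reaches that threshold after
 * 'reload' events.  With a 32-bit counter and a reload count of 1000 the
 * value written is 0x80000000 - 1000 = 0x7FFFFC18.  mips_read_pmc() and
 * the overflow test in mips_pmc_intr() use the same 2^(width-1)
 * threshold.
 */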
static int
mips24k_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[mips,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < mips24k_npmcs,
	    ("[mips,%d] illegal row-index %d", __LINE__, ri));

	pm = mips24k_pcpu[cpu]->pc_mipspmcs[ri].phw_pmc;

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = MIPS24K_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

	PMCDBG(MDP,WRI,1,"mips-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	mips24k_pmcn_write(ri, v);

	return 0;
}
static int
arm64_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < arm64_npmcs,
	    ("[arm64,%d] illegal row-index %d", __LINE__, ri));

	pm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ARMV8_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

	PMCDBG3(MDP, WRI, 1, "arm64-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	arm64_pmcn_write(ri, v);

	return 0;
}
static int
mpc7xxx_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS,
	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));

	pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

	PMCDBG(MDP,WRI,1,"powerpc-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	mpc7xxx_pmcn_write(ri, v);

	return 0;
}
static int
mips24k_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[mips,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < mips24k_npmcs,
	    ("[mips,%d] illegal row index %d", __LINE__, ri));

	pm = mips24k_pcpu[cpu]->pc_mipspmcs[ri].phw_pmc;
	tmp = mips24k_pmcn_read(ri);

	PMCDBG(MDP,REA,2,"mips-read id=%d -> %jd", ri, tmp);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = MIPS24K_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
	else
		*v = tmp;

	return 0;
}
static int
mips_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[mips,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < mips_npmcs,
	    ("[mips,%d] illegal row index %d", __LINE__, ri));

	pm = mips_pcpu[cpu]->pc_mipspmcs[ri].phw_pmc;
	tmp = mips_pmcn_read(ri);

	PMCDBG(MDP,REA,2,"mips-read id=%d -> %jd", ri, tmp);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = tmp - (1UL << (mips_pmc_spec.ps_counter_width - 1));
	else
		*v = tmp;

	return 0;
}
static int
arm64_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	pmc_value_t tmp;
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < arm64_npmcs,
	    ("[arm64,%d] illegal row index %d", __LINE__, ri));

	pm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;
	tmp = arm64_pmcn_read(ri);

	PMCDBG2(MDP, REA, 2, "arm64-read id=%d -> %jd", ri, tmp);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ARMV8_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
	else
		*v = tmp;

	return 0;
}
static int
p4_start_pmc(int cpu, int ri)
{
	int rc;
	struct pmc *pm;
	struct p4_cpu *pc;
	struct p4pmc_descr *pd;
	uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < P4_NPMCS,
	    ("[p4,%d] illegal row-index %d", __LINE__, ri));

	pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
	pm = pc->pc_p4pmcs[ri].phw_pmc;
	pd = &p4_pmcdesc[ri];

	KASSERT(pm != NULL,
	    ("[p4,%d] starting cpu%d,pmc%d with null pmc", __LINE__, cpu, ri));

	PMCDBG(MDP,STA,1, "p4-start cpu=%d ri=%d", cpu, ri);

	KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
	    ("[p4,%d] wrong PMC class %d", __LINE__,
	    pd->pm_descr.pd_class));

	/* retrieve the desired CCCR/ESCR values from the PMC */
	cccrvalue = pm->pm_md.pm_p4.pm_p4_cccrvalue;
	escrvalue = pm->pm_md.pm_p4.pm_p4_escrvalue;
	escrmsr = pm->pm_md.pm_p4.pm_p4_escrmsr;

	/* extract and zero the logical processor selection bits */
	cccrtbits = cccrvalue & P4_CCCR_OVF_PMI_T0;
	escrtbits = escrvalue & (P4_ESCR_T0_OS|P4_ESCR_T0_USR);
	cccrvalue &= ~P4_CCCR_OVF_PMI_T0;
	escrvalue &= ~(P4_ESCR_T0_OS|P4_ESCR_T0_USR);

	if (P4_CPU_IS_HTT_SECONDARY(cpu)) { /* shift T0 bits to T1 position */
		cccrtbits <<= 1;
		escrtbits >>= 2;
	}

	/* start system mode PMCs directly */
	if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
		wrmsr(escrmsr, escrvalue | escrtbits);
		wrmsr(pd->pm_cccr_msr, cccrvalue | cccrtbits | P4_CCCR_ENABLE);
		return 0;
	}

	/*
	 * Thread mode PMCs
	 *
	 * On HTT machines, the same PMC could be scheduled on the
	 * same physical CPU twice (once for each logical CPU), for
	 * example, if two threads of a multi-threaded process get
	 * scheduled on the same CPU.
	 */

	mtx_lock_spin(&pc->pc_mtx);

	rc = P4_PCPU_GET_RUNCOUNT(pc,ri);
	KASSERT(rc == 0 || rc == 1,
	    ("[p4,%d] illegal runcount cpu=%d ri=%d rc=%d", __LINE__, cpu, ri,
	    rc));

	if (rc == 0) {		/* 1st CPU and the non-HTT case */

		KASSERT(P4_PMC_IS_STOPPED(pd->pm_cccr_msr),
		    ("[p4,%d] cpu=%d ri=%d cccr=0x%x not stopped", __LINE__,
		    cpu, ri, pd->pm_cccr_msr));

		/* write out the low 40 bits of the saved value to hardware */
		wrmsr(pd->pm_pmc_msr,
		    P4_PCPU_PMC_VALUE(pc,ri,cpu) & P4_PERFCTR_MASK);

	} else if (rc == 1) {		/* 2nd CPU */

		/*
		 * Stop the PMC and retrieve the CCCR and ESCR values
		 * from their MSRs, and turn on the additional T[0/1]
		 * bits for the 2nd CPU.
		 */
		cccrvalue = rdmsr(pd->pm_cccr_msr);
		wrmsr(pd->pm_cccr_msr, cccrvalue & ~P4_CCCR_ENABLE);

		/* check that the configuration bits read back match the PMC */
		KASSERT((cccrvalue & P4_CCCR_Tx_MASK) ==
		    (pm->pm_md.pm_p4.pm_p4_cccrvalue & P4_CCCR_Tx_MASK),
		    ("[p4,%d] Extra CCCR bits cpu=%d rc=%d ri=%d "
		    "cccr=0x%x PMC=0x%x", __LINE__, cpu, rc, ri,
		    cccrvalue & P4_CCCR_Tx_MASK,
		    pm->pm_md.pm_p4.pm_p4_cccrvalue & P4_CCCR_Tx_MASK));
		KASSERT(cccrvalue & P4_CCCR_ENABLE,
		    ("[p4,%d] 2nd cpu rc=%d cpu=%d ri=%d not running",
		    __LINE__, rc, cpu, ri));
		KASSERT((cccrvalue & cccrtbits) == 0,
		    ("[p4,%d] CCCR T0/T1 mismatch rc=%d cpu=%d ri=%d "
		    "cccrvalue=0x%x tbits=0x%x", __LINE__, rc, cpu, ri,
		    cccrvalue, cccrtbits));

		escrvalue = rdmsr(escrmsr);

		KASSERT((escrvalue & P4_ESCR_Tx_MASK) ==
		    (pm->pm_md.pm_p4.pm_p4_escrvalue & P4_ESCR_Tx_MASK),
		    ("[p4,%d] Extra ESCR bits cpu=%d rc=%d ri=%d "
		    "escr=0x%x pm=0x%x", __LINE__, cpu, rc, ri,
		    escrvalue & P4_ESCR_Tx_MASK,
		    pm->pm_md.pm_p4.pm_p4_escrvalue & P4_ESCR_Tx_MASK));
		KASSERT((escrvalue & escrtbits) == 0,
		    ("[p4,%d] ESCR T0/T1 mismatch rc=%d cpu=%d ri=%d "
		    "escrmsr=0x%x escrvalue=0x%x tbits=0x%x", __LINE__, rc,
		    cpu, ri, escrmsr, escrvalue, escrtbits));
	}

	/* Enable the correct bits for this CPU. */
	escrvalue |= escrtbits;
	cccrvalue |= cccrtbits | P4_CCCR_ENABLE;

	/* Save HW value at the time of starting hardware */
	P4_PCPU_HW_VALUE(pc,ri,cpu) = rdmsr(pd->pm_pmc_msr);

	/* Program the ESCR and CCCR and start the PMC */
	wrmsr(escrmsr, escrvalue);
	wrmsr(pd->pm_cccr_msr, cccrvalue);

	++rc;
	P4_PCPU_SET_RUNCOUNT(pc,ri,rc);

	mtx_unlock_spin(&pc->pc_mtx);

	PMCDBG(MDP,STA,2,"p4-start cpu=%d rc=%d ri=%d escr=%d "
	    "escrmsr=0x%x escrvalue=0x%x cccr_config=0x%x v=%jx", cpu, rc,
	    ri, pm->pm_md.pm_p4.pm_p4_escr, escrmsr, escrvalue, cccrvalue,
	    P4_PCPU_HW_VALUE(pc,ri,cpu));

	return (0);
}
static int
mpc7xxx_intr(int cpu, struct trapframe *tf)
{
	int i, error, retval;
	uint32_t config;
	struct pmc *pm;
	struct powerpc_cpu *pac;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] out of range CPU %d", __LINE__, cpu));

	PMCDBG(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
	    TRAPF_USERMODE(tf));

	retval = 0;
	pac = powerpc_pcpu[cpu];

	config = mfspr(SPR_MMCR0) & ~SPR_MMCR0_FC;

	/*
	 * Look for all PMCs that have interrupted:
	 * - look for a running, sampling PMC which has overflowed
	 *   and which has a valid 'struct pmc' association.
	 *
	 * If found, we call a helper to process the interrupt.
	 */
	for (i = 0; i < MPC7XXX_MAX_PMCS; i++) {
		if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
			continue;
		}

		if (!MPC7XXX_PMC_HAS_OVERFLOWED(i))
			continue;

		retval = 1;	/* Found an interrupting PMC. */

		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		/* Stop the counter if logging fails. */
		error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
		    TRAPF_USERMODE(tf));
		if (error != 0)
			mpc7xxx_stop_pmc(cpu, i);

		/* Reload sampling count. */
		mpc7xxx_write_pmc(cpu, i, pm->pm_sc.pm_reloadcount);
	}

	atomic_add_int(retval ? &pmc_stats.pm_intr_processed :
	    &pmc_stats.pm_intr_ignored, 1);

	/* Re-enable PERF exceptions. */
	if (retval)
		mtspr(SPR_MMCR0, config | SPR_MMCR0_PMXE);

	return (retval);
}
static int
p4_stop_pmc(int cpu, int ri)
{
	int rc;
	uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits;
	struct pmc *pm;
	struct p4_cpu *pc;
	struct p4pmc_descr *pd;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < P4_NPMCS,
	    ("[p4,%d] illegal row index %d", __LINE__, ri));

	pd = &p4_pmcdesc[ri];
	pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
	pm = pc->pc_p4pmcs[ri].phw_pmc;

	KASSERT(pm != NULL,
	    ("[p4,%d] null pmc for cpu%d, ri%d", __LINE__, cpu, ri));

	PMCDBG(MDP,STO,1, "p4-stop cpu=%d ri=%d", cpu, ri);

	if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
		wrmsr(pd->pm_cccr_msr,
		    pm->pm_md.pm_p4.pm_p4_cccrvalue & ~P4_CCCR_ENABLE);
		return (0);
	}

	/*
	 * Thread mode PMCs.
	 *
	 * On HTT machines, this PMC may be in use by two threads
	 * running on two logical CPUs.  Thus we look at the
	 * 'runcount' field and only turn off the appropriate T0/T1
	 * bits (and keep the PMC running) if two logical CPUs were
	 * using the PMC.
	 */

	/* bits to mask */
	cccrtbits = P4_CCCR_OVF_PMI_T0;
	escrtbits = P4_ESCR_T0_OS | P4_ESCR_T0_USR;
	if (P4_CPU_IS_HTT_SECONDARY(cpu)) {
		cccrtbits <<= 1;
		escrtbits >>= 2;
	}

	mtx_lock_spin(&pc->pc_mtx);

	rc = P4_PCPU_GET_RUNCOUNT(pc,ri);

	KASSERT(rc == 2 || rc == 1,
	    ("[p4,%d] illegal runcount cpu=%d ri=%d rc=%d", __LINE__, cpu, ri,
	    rc));

	--rc;
	P4_PCPU_SET_RUNCOUNT(pc,ri,rc);

	/* Stop this PMC */
	cccrvalue = rdmsr(pd->pm_cccr_msr);
	wrmsr(pd->pm_cccr_msr, cccrvalue & ~P4_CCCR_ENABLE);

	escrmsr = pm->pm_md.pm_p4.pm_p4_escrmsr;
	escrvalue = rdmsr(escrmsr);

	/* The current CPU should be running on this PMC */
	KASSERT(escrvalue & escrtbits,
	    ("[p4,%d] ESCR T0/T1 mismatch cpu=%d rc=%d ri=%d escrmsr=0x%x "
	    "escrvalue=0x%x tbits=0x%x", __LINE__, cpu, rc, ri, escrmsr,
	    escrvalue, escrtbits));
	KASSERT(PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)) ||
	    (cccrvalue & cccrtbits),
	    ("[p4,%d] CCCR T0/T1 mismatch cpu=%d ri=%d cccrvalue=0x%x "
	    "tbits=0x%x", __LINE__, cpu, ri, cccrvalue, cccrtbits));

	/* get the current hardware reading */
	tmp = rdmsr(pd->pm_pmc_msr);

	if (rc == 1) {		/* need to keep the PMC running */
		escrvalue &= ~escrtbits;
		cccrvalue &= ~cccrtbits;
		wrmsr(escrmsr, escrvalue);
		wrmsr(pd->pm_cccr_msr, cccrvalue);
	}

	mtx_unlock_spin(&pc->pc_mtx);

	PMCDBG(MDP,STO,2, "p4-stop cpu=%d rc=%d ri=%d escrmsr=0x%x "
	    "escrval=0x%x cccrval=0x%x v=%jx", cpu, rc, ri, escrmsr,
	    escrvalue, cccrvalue, tmp);

	if (tmp < P4_PCPU_HW_VALUE(pc,ri,cpu)) /* 40 bit counter overflow */
		tmp += (P4_PERFCTR_MASK + 1) - P4_PCPU_HW_VALUE(pc,ri,cpu);
	else
		tmp -= P4_PCPU_HW_VALUE(pc,ri,cpu);

	P4_PCPU_PMC_VALUE(pc,ri,cpu) += tmp;

	return 0;
}
static int
mips_pmc_intr(int cpu, struct trapframe *tf)
{
	int error;
	int retval, ri;
	struct pmc *pm;
	struct mips_cpu *pc;
	uint32_t r0, r2;
	pmc_value_t r;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[mips,%d] CPU %d out of range", __LINE__, cpu));

	retval = 0;
	pc = mips_pcpu[cpu];

	/* Stop PMCs without clearing the counter */
	r0 = mips_rd_perfcnt0();
	mips_wr_perfcnt0(r0 & ~(0x1f));
	r2 = mips_rd_perfcnt2();
	mips_wr_perfcnt2(r2 & ~(0x1f));

	for (ri = 0; ri < mips_npmcs; ri++) {
		pm = mips_pcpu[cpu]->pc_mipspmcs[ri].phw_pmc;
		if (pm == NULL)
			continue;
		if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		r = mips_pmcn_read(ri);

		/* If the top counter bit is set, the counter has overflowed. */
		if ((r & (1UL << (mips_pmc_spec.ps_counter_width - 1))) == 0)
			continue;

		retval = 1;
		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
		    TRAPF_USERMODE(tf));
		if (error) {
			/* Clear/disable the relevant counter */
			if (ri == 0)
				r0 = 0;
			else if (ri == 1)
				r2 = 0;
			mips_stop_pmc(cpu, ri);
		}

		/* Reload sampling count */
		mips_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
	}

	/*
	 * Re-enable the PMC counters where they left off.
	 *
	 * Any counter which overflowed will have its sample count
	 * reloaded in the loop above.
	 */
	mips_wr_perfcnt0(r0);
	mips_wr_perfcnt2(r2);

	return retval;
}
static int
p4_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	int found, n, m;
	uint32_t caps, cccrvalue, escrvalue, tflags;
	enum pmc_p4escr escr;
	struct p4_cpu *pc;
	struct p4_event_descr *pevent;
	const struct p4pmc_descr *pd;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[p4,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < P4_NPMCS,
	    ("[p4,%d] illegal row-index value %d", __LINE__, ri));

	pd = &p4_pmcdesc[ri];

	PMCDBG(MDP,ALL,1, "p4-allocate ri=%d class=%d pmccaps=0x%x "
	    "reqcaps=0x%x", ri, pd->pm_descr.pd_class, pd->pm_descr.pd_caps,
	    pm->pm_caps);

	/* check class */
	if (pd->pm_descr.pd_class != a->pm_class)
		return (EINVAL);

	/* check requested capabilities */
	caps = a->pm_caps;
	if ((pd->pm_descr.pd_caps & caps) != caps)
		return (EPERM);

	/*
	 * If the system has HTT enabled, and the desired allocation
	 * mode is process-private, and the PMC row disposition is not
	 * FREE (0), decline the allocation.
	 */
	if (p4_system_has_htt &&
	    PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) &&
	    pmc_getrowdisp(ri) != 0)
		return (EBUSY);

	KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
	    ("[p4,%d] unknown PMC class %d", __LINE__,
	    pd->pm_descr.pd_class));

	if (pm->pm_event < PMC_EV_P4_FIRST ||
	    pm->pm_event > PMC_EV_P4_LAST)
		return (EINVAL);

	if ((pevent = p4_find_event(pm->pm_event)) == NULL)
		return (ESRCH);

	PMCDBG(MDP,ALL,2, "pevent={ev=%d,escrsel=0x%x,cccrsel=0x%x,isti=%d}",
	    pevent->pm_event, pevent->pm_escr_eventselect,
	    pevent->pm_cccr_select, pevent->pm_is_ti_event);

	/*
	 * Some PMC events are 'thread independent' and therefore
	 * cannot be used for process-private modes if HTT is being
	 * used.
	 */
	if (P4_EVENT_IS_TI(pevent) &&
	    PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) &&
	    p4_system_has_htt)
		return (EINVAL);

	pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];

	found = 0;

	/* look for a suitable ESCR for this event */
	for (n = 0; n < P4_MAX_ESCR_PER_EVENT && !found; n++) {
		if ((escr = pevent->pm_escrs[n]) == P4_ESCR_NONE)
			break;	/* out of ESCRs */
		/*
		 * Check ESCR row disposition.
		 *
		 * If the request is for a system-mode PMC, then the
		 * ESCR row should not be in process-virtual mode, and
		 * should also be free on the current CPU.
		 */
		if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
			if (P4_ESCR_ROW_DISP_IS_THREAD(escr) ||
			    pc->pc_escrs[escr] != P4_INVALID_PMC_INDEX)
				continue;
		}

		/*
		 * If the request is for a process-virtual PMC, and if
		 * HTT is not enabled, we can use an ESCR row that is
		 * either FREE or already in process mode.
		 *
		 * If HTT is enabled, then we need to ensure that a
		 * given ESCR is never allocated to two PMCs that
		 * could run simultaneously on the two logical CPUs of
		 * a CPU package.  We ensure this by only allocating
		 * ESCRs from rows marked as 'FREE'.
		 */
		if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
			if (p4_system_has_htt) {
				if (!P4_ESCR_ROW_DISP_IS_FREE(escr))
					continue;
			} else
				if (P4_ESCR_ROW_DISP_IS_STANDALONE(escr))
					continue;
		}

		/*
		 * We found a suitable ESCR for this event.  Now check
		 * if this ESCR can work with the PMC at row-index 'ri'.
		 */
		for (m = 0; m < P4_MAX_PMC_PER_ESCR; m++)
			if (p4_escrs[escr].pm_pmcs[m] == pd->pm_pmcnum) {
				found = 1;
				break;
			}
	}

	if (found == 0)
		return (ESRCH);

	KASSERT((int) escr >= 0 && escr < P4_NESCR,
	    ("[p4,%d] illegal ESCR value %d", __LINE__, escr));

	/* mark ESCR row mode */
	if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
		pc->pc_escrs[escr] = ri; /* mark ESCR as in use on this cpu */
		P4_ESCR_MARK_ROW_STANDALONE(escr);
	} else {
		KASSERT(pc->pc_escrs[escr] == P4_INVALID_PMC_INDEX,
		    ("[p4,%d] escr[%d] already in use", __LINE__, escr));
		P4_ESCR_MARK_ROW_THREAD(escr);
	}

	pm->pm_md.pm_p4.pm_p4_escrmsr = p4_escrs[escr].pm_escr_msr;
	pm->pm_md.pm_p4.pm_p4_escr = escr;

	cccrvalue = P4_CCCR_TO_ESCR_SELECT(pevent->pm_cccr_select);
	escrvalue = P4_ESCR_TO_EVENT_SELECT(pevent->pm_escr_eventselect);

	/* CCCR fields */
	if (caps & PMC_CAP_THRESHOLD)
		cccrvalue |= (a->pm_md.pm_p4.pm_p4_cccrconfig &
		    P4_CCCR_THRESHOLD_MASK) | P4_CCCR_COMPARE;

	if (caps & PMC_CAP_EDGE)
		cccrvalue |= P4_CCCR_EDGE;

	if (caps & PMC_CAP_INVERT)
		cccrvalue |= P4_CCCR_COMPLEMENT;

	if (p4_system_has_htt)
		cccrvalue |= a->pm_md.pm_p4.pm_p4_cccrconfig &
		    P4_CCCR_ACTIVE_THREAD_MASK;
	else			/* no HTT; thread field should be '11b' */
		cccrvalue |= P4_CCCR_TO_ACTIVE_THREAD(0x3);

	if (caps & PMC_CAP_CASCADE)
		cccrvalue |= P4_CCCR_CASCADE;

	/* On HTT systems the PMI T0 field may get moved to T1 at pmc start */
	if (caps & PMC_CAP_INTERRUPT)
		cccrvalue |= P4_CCCR_OVF_PMI_T0;

	/* ESCR fields */
	if (caps & PMC_CAP_QUALIFIER)
		escrvalue |= a->pm_md.pm_p4.pm_p4_escrconfig &
		    P4_ESCR_EVENT_MASK_MASK;
	if (caps & PMC_CAP_TAGGING)
		escrvalue |= (a->pm_md.pm_p4.pm_p4_escrconfig &
		    P4_ESCR_TAG_VALUE_MASK) | P4_ESCR_TAG_ENABLE;

	/* HTT: T0_{OS,USR} bits may get moved to T1 at pmc start */
	tflags = 0;
	if (caps & PMC_CAP_SYSTEM)
		tflags |= P4_ESCR_T0_OS;
	if (caps & PMC_CAP_USER)
		tflags |= P4_ESCR_T0_USR;
	if (tflags == 0)
		tflags = (P4_ESCR_T0_OS|P4_ESCR_T0_USR);
	escrvalue |= tflags;

	pm->pm_md.pm_p4.pm_p4_cccrvalue = cccrvalue;
	pm->pm_md.pm_p4.pm_p4_escrvalue = escrvalue;

	PMCDBG(MDP,ALL,2, "p4-allocate cccrsel=0x%x cccrval=0x%x "
	    "escr=%d escrmsr=0x%x escrval=0x%x", pevent->pm_cccr_select,
	    cccrvalue, escr, pm->pm_md.pm_p4.pm_p4_escrmsr, escrvalue);

	return (0);
}
static int
p4_intr(int cpu, struct trapframe *tf)
{
	uint32_t cccrval, ovf_mask, ovf_partner;
	int did_interrupt, error, ri;
	struct p4_cpu *pc;
	struct pmc *pm;
	pmc_value_t v;

	PMCDBG(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf,
	    TRAPF_USERMODE(tf));

	pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];

	ovf_mask = P4_CPU_IS_HTT_SECONDARY(cpu) ?
	    P4_CCCR_OVF_PMI_T1 : P4_CCCR_OVF_PMI_T0;
	ovf_mask |= P4_CCCR_OVF;
	if (p4_system_has_htt)
		ovf_partner = P4_CPU_IS_HTT_SECONDARY(cpu) ?
		    P4_CCCR_OVF_PMI_T0 : P4_CCCR_OVF_PMI_T1;
	else
		ovf_partner = 0;
	did_interrupt = 0;

	if (p4_system_has_htt)
		P4_PCPU_ACQ_INTR_SPINLOCK(pc);

	/*
	 * Loop through all CCCRs, looking for ones that have
	 * interrupted this CPU.
	 */
	for (ri = 0; ri < P4_NPMCS; ri++) {

		/*
		 * Check if our partner logical CPU has already marked
		 * this PMC as having interrupted it.  If so, reset
		 * the flag and process the interrupt, but leave the
		 * hardware alone.
		 */
		if (p4_system_has_htt && P4_PCPU_GET_INTRFLAG(pc,ri)) {
			P4_PCPU_SET_INTRFLAG(pc,ri,0);
			did_interrupt = 1;

			/*
			 * Ignore de-configured or stopped PMCs.
			 * Ignore PMCs not in sampling mode.
			 */
			pm = pc->pc_p4pmcs[ri].phw_pmc;
			if (pm == NULL ||
			    pm->pm_state != PMC_STATE_RUNNING ||
			    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
				continue;
			}
			(void) pmc_process_interrupt(cpu, PMC_HR, pm, tf,
			    TRAPF_USERMODE(tf));
			continue;
		}

		/*
		 * Fresh interrupt.  Look for the CCCR_OVF bit
		 * and the OVF_Tx bit for this logical
		 * processor being set.
		 */
		cccrval = rdmsr(P4_CCCR_MSR_FIRST + ri);

		if ((cccrval & ovf_mask) != ovf_mask)
			continue;

		/*
		 * If the other logical CPU would also have been
		 * interrupted due to the PMC being shared, record
		 * this fact in the per-cpu saved interrupt flag
		 * bitmask.
		 */
		if (p4_system_has_htt && (cccrval & ovf_partner))
			P4_PCPU_SET_INTRFLAG(pc, ri, 1);

		v = rdmsr(P4_PERFCTR_MSR_FIRST + ri);

		PMCDBG(MDP,INT, 2, "ri=%d v=%jx", ri, v);

		/* Stop the counter, and reset the overflow bit */
		cccrval &= ~(P4_CCCR_OVF | P4_CCCR_ENABLE);
		wrmsr(P4_CCCR_MSR_FIRST + ri, cccrval);

		did_interrupt = 1;

		/*
		 * Ignore de-configured or stopped PMCs.  Ignore PMCs
		 * not in sampling mode.
		 */
		pm = pc->pc_p4pmcs[ri].phw_pmc;
		if (pm == NULL ||
		    pm->pm_state != PMC_STATE_RUNNING ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
			continue;
		}

		/*
		 * Process the interrupt.  Re-enable the PMC if
		 * processing was successful.
		 */
		error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
		    TRAPF_USERMODE(tf));

		/*
		 * Only the first processor executing the NMI handler
		 * in a HTT pair will restart a PMC, and that too
		 * only if there were no errors.
		 */
		v = P4_RELOAD_COUNT_TO_PERFCTR_VALUE(
		    pm->pm_sc.pm_reloadcount);
		wrmsr(P4_PERFCTR_MSR_FIRST + ri, v);
		if (error == 0)
			wrmsr(P4_CCCR_MSR_FIRST + ri,
			    cccrval | P4_CCCR_ENABLE);
	}

	/* allow the other CPU to proceed */
	if (p4_system_has_htt)
		P4_PCPU_REL_INTR_SPINLOCK(pc);

	/*
	 * On Intel P4 CPUs, the PMC 'pcint' entry in the LAPIC gets
	 * masked when a PMC interrupts the CPU.  We need to unmask
	 * the interrupt source explicitly.
	 */
	if (did_interrupt)
		lapic_reenable_pmc();

	atomic_add_int(did_interrupt ? &pmc_stats.pm_intr_processed :
	    &pmc_stats.pm_intr_ignored, 1);

	return (did_interrupt);
}