static int
ucf_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
    enum pmc_event ev;
    uint32_t caps, flags;

    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

    PMCDBG(MDP,ALL,1, "ucf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);

    if (ri < 0 || ri >= uncore_ucf_npmc)	/* was 'ri > npmc': off-by-one */
        return (EINVAL);

    caps = a->pm_caps;
    if (a->pm_class != PMC_CLASS_UCF ||
        (caps & UCF_PMC_CAPS) != caps)
        return (EINVAL);

    ev = pm->pm_event;
    if (ev < PMC_EV_UCF_FIRST || ev > PMC_EV_UCF_LAST)
        return (EINVAL);

    /* Each fixed counter owns a 4-bit control field in UCF_CTRL. */
    flags = UCF_EN;
    pm->pm_md.pm_ucf.pm_ucf_ctrl = (flags << (ri * 4));

    PMCDBG(MDP,ALL,2, "ucf-allocate config=0x%jx",
        (uintmax_t) pm->pm_md.pm_ucf.pm_ucf_ctrl);

    return (0);
}
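The `(flags << (ri * 4))` packing above gives each fixed counter a 4-bit
control field in the UCF_CTRL word. A minimal standalone sketch of that
layout; the value 0x1 used for the enable bit is an assumption standing in
for the driver's UCF_EN constant, which is defined outside this excerpt:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    const uint32_t en = 0x1;    /* assumed stand-in for UCF_EN */
    int ri;

    /* Counter ri owns bits [4*ri, 4*ri+3] of the control word. */
    for (ri = 0; ri < 3; ri++)
        printf("ri=%d ctrl=0x%x\n", ri, en << (ri * 4));
    /* Prints ctrl=0x1, ctrl=0x10, ctrl=0x100. */
    return (0);
}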
static int
ucf_start_pmc(int cpu, int ri)
{
    struct pmc *pm;
    struct uncore_cpu *ucfc;

    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
    KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
        ("[uncore,%d] illegal row-index %d", __LINE__, ri));

    PMCDBG(MDP,STA,1,"ucf-start cpu=%d ri=%d", cpu, ri);

    ucfc = uncore_pcpu[cpu];
    pm = ucfc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

    ucfc->pc_ucfctrl |= pm->pm_md.pm_ucf.pm_ucf_ctrl;
    wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

    /*
     * Retry the global-control update if a PMC interrupt raced with
     * it; the handler flags the race via pc_resync.
     */
    do {
        ucfc->pc_resync = 0;
        ucfc->pc_globalctrl |= (1ULL << (ri + UCF_OFFSET));
        wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);
    } while (ucfc->pc_resync != 0);

    PMCDBG(MDP,STA,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
        ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
        ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

    return (0);
}
static int
ucp_start_pmc(int cpu, int ri)
{
    struct pmc *pm;
    uint32_t evsel;
    struct uncore_cpu *cc;

    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
    KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
        ("[uncore,%d] illegal row-index %d", __LINE__, ri));

    cc = uncore_pcpu[cpu];
    pm = cc->pc_uncorepmcs[ri].phw_pmc;

    KASSERT(pm,
        ("[uncore,%d] starting cpu%d,ri%d with no pmc configured",
        __LINE__, cpu, ri));

    PMCDBG(MDP,STA,1, "ucp-start cpu=%d ri=%d", cpu, ri);

    evsel = pm->pm_md.pm_ucp.pm_ucp_evsel;

    PMCDBG(MDP,STA,2,
        "ucp-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%x",
        cpu, ri, SELECTSEL(uncore_cputype) + ri, evsel);

    /* Event specific configuration. */
    switch (pm->pm_event) {
    case PMC_EV_UCP_EVENT_0CH_04H_E:
    case PMC_EV_UCP_EVENT_0CH_08H_E:
        wrmsr(MSR_GQ_SNOOP_MESF, 0x2);
        break;
    case PMC_EV_UCP_EVENT_0CH_04H_F:
    case PMC_EV_UCP_EVENT_0CH_08H_F:
        wrmsr(MSR_GQ_SNOOP_MESF, 0x8);
        break;
    case PMC_EV_UCP_EVENT_0CH_04H_M:
    case PMC_EV_UCP_EVENT_0CH_08H_M:
        wrmsr(MSR_GQ_SNOOP_MESF, 0x1);
        break;
    case PMC_EV_UCP_EVENT_0CH_04H_S:
    case PMC_EV_UCP_EVENT_0CH_08H_S:
        wrmsr(MSR_GQ_SNOOP_MESF, 0x4);
        break;
    default:
        break;
    }

    wrmsr(SELECTSEL(uncore_cputype) + ri, evsel);

    do {
        cc->pc_resync = 0;
        cc->pc_globalctrl |= (1ULL << ri);
        wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);
    } while (cc->pc_resync != 0);

    return (0);
}
static int
p4_read_pmc(int cpu, int ri, pmc_value_t *v)
{
    struct pmc *pm;
    pmc_value_t tmp;
    struct p4_cpu *pc;
    enum pmc_mode mode;
    struct p4pmc_descr *pd;

    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
    KASSERT(ri >= 0 && ri < P4_NPMCS,
        ("[p4,%d] illegal row-index %d", __LINE__, ri));

    pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
    pm = pc->pc_p4pmcs[ri].phw_pmc;
    pd = &p4_pmcdesc[ri];

    KASSERT(pm != NULL,
        ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__, cpu, ri));
    KASSERT(pd->pm_descr.pd_class == PMC_TO_CLASS(pm),
        ("[p4,%d] class mismatch pd %d != id class %d", __LINE__,
        pd->pm_descr.pd_class, PMC_TO_CLASS(pm)));

    mode = PMC_TO_MODE(pm);

    PMCDBG(MDP,REA,1, "p4-read cpu=%d ri=%d mode=%d", cpu, ri, mode);

    KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
        ("[p4,%d] unknown PMC class %d", __LINE__, pd->pm_descr.pd_class));

    tmp = rdmsr(p4_pmcdesc[ri].pm_pmc_msr);

    if (PMC_IS_VIRTUAL_MODE(mode)) {
        if (tmp < P4_PCPU_HW_VALUE(pc,ri,cpu)) /* 40 bit overflow */
            tmp += (P4_PERFCTR_MASK + 1) -
                P4_PCPU_HW_VALUE(pc,ri,cpu);
        else
            tmp -= P4_PCPU_HW_VALUE(pc,ri,cpu);
        tmp += P4_PCPU_PMC_VALUE(pc,ri,cpu);
    }

    if (PMC_IS_SAMPLING_MODE(mode)) /* undo transformation */
        *v = P4_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
    else
        *v = tmp;

    PMCDBG(MDP,REA,2, "p4-read -> %jx", *v);

    return (0);
}
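The virtual-mode branch above is modular subtraction on a 40-bit counter:
if the raw value has gone backwards since the last saved hardware value,
the counter wrapped exactly once. A minimal sketch of the same arithmetic,
assuming P4_PERFCTR_MASK is the 40-bit all-ones value, which is what the
"(mask + 1) - old" correction implies; the names here are illustrative:

#include <stdint.h>
#include <stdio.h>

#define CTR_MASK    ((UINT64_C(1) << 40) - 1)   /* 40-bit counter */

static uint64_t
unwrap(uint64_t now, uint64_t saved)
{
    /* A raw value below the saved one means one wrap-around. */
    if (now < saved)
        return ((CTR_MASK + 1) - saved + now);
    return (now - saved);
}

int
main(void)
{
    /* Wrapped: (mask+1 - saved) ticks to overflow, plus 'now' after. */
    printf("%ju\n", (uintmax_t)unwrap(5, CTR_MASK - 2));   /* 8 */
    printf("%ju\n", (uintmax_t)unwrap(100, 40));           /* 60 */
    return (0);
}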
static int
p4_release_pmc(int cpu, int ri, struct pmc *pm)
{
    enum pmc_p4escr escr;
    struct p4_cpu *pc;

    KASSERT(ri >= 0 && ri < P4_NPMCS,
        ("[p4,%d] illegal row-index %d", __LINE__, ri));

    escr = pm->pm_md.pm_p4.pm_p4_escr;

    PMCDBG(MDP,REL,1, "p4-release cpu=%d ri=%d escr=%d", cpu, ri, escr);

    if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
        pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];

        KASSERT(pc->pc_p4pmcs[ri].phw_pmc == NULL,
            ("[p4,%d] releasing configured PMC ri=%d", __LINE__, ri));

        P4_ESCR_UNMARK_ROW_STANDALONE(escr);
        KASSERT(pc->pc_escrs[escr] == ri,
            ("[p4,%d] escr[%d] not allocated to ri %d", __LINE__,
            escr, ri));
        pc->pc_escrs[escr] = P4_INVALID_PMC_INDEX; /* mark as free */
    } else
        P4_ESCR_UNMARK_ROW_THREAD(escr);

    return (0);
}
static int
ucf_write_pmc(int cpu, int ri, pmc_value_t v)
{
    struct uncore_cpu *cc;
    struct pmc *pm;

    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
    KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
        ("[uncore,%d] illegal row-index %d", __LINE__, ri));

    cc = uncore_pcpu[cpu];
    pm = cc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

    KASSERT(pm,
        ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

    if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
        v = ucf_reload_count_to_perfctr_value(v);

    wrmsr(UCF_CTRL, 0);    /* Turn off fixed counters */
    wrmsr(UCF_CTR0 + ri, v);
    wrmsr(UCF_CTRL, cc->pc_ucfctrl);

    PMCDBG(MDP,WRI,1, "ucf-write cpu=%d ri=%d v=%jx ucfctrl=%jx",
        cpu, ri, v, (uintmax_t) rdmsr(UCF_CTRL));

    return (0);
}
static int
mpc7xxx_pcpu_init(struct pmc_mdep *md, int cpu)
{
    int first_ri, i;
    struct pmc_cpu *pc;
    struct powerpc_cpu *pac;
    struct pmc_hw *phw;

    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[powerpc,%d] wrong cpu number %d", __LINE__, cpu));
    PMCDBG(MDP,INI,1,"powerpc-init cpu=%d", cpu);

    powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu),
        M_PMC, M_WAITOK|M_ZERO);
    pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * MPC7XXX_MAX_PMCS,
        M_PMC, M_WAITOK|M_ZERO);
    pac->pc_class = PMC_CLASS_PPC7450;
    pc = pmc_pcpu[cpu];
    first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_PPC7450].pcd_ri;
    KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__));

    for (i = 0, phw = pac->pc_ppcpmcs; i < MPC7XXX_MAX_PMCS; i++, phw++) {
        phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
            PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
        phw->phw_pmc = NULL;
        pc->pc_hwpmcs[i + first_ri] = phw;
    }

    /* Clear the MMCRs, and set FC, to disable all PMCs. */
    mtspr(SPR_MMCR0, SPR_MMCR0_FC | SPR_MMCR0_PMXE |
        SPR_MMCR0_FCECE | SPR_MMCR0_PMC1CE | SPR_MMCR0_PMCNCE);
    mtspr(SPR_MMCR1, 0);

    return (0);
}
static int
ucp_read_pmc(int cpu, int ri, pmc_value_t *v)
{
    struct pmc *pm;
    pmc_value_t tmp;

    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
    KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
        ("[uncore,%d] illegal row-index %d", __LINE__, ri));

    pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

    KASSERT(pm,
        ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

    tmp = rdmsr(UCP_PMC0 + ri);
    if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
        *v = ucp_perfctr_value_to_reload_count(tmp);
    else
        *v = tmp;

    /* Print the MSR actually read, not the bare row index. */
    PMCDBG(MDP,REA,1, "ucp-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
        UCP_PMC0 + ri, *v);

    return (0);
}
static int
ucp_write_pmc(int cpu, int ri, pmc_value_t v)
{
    struct pmc *pm;
    struct uncore_cpu *cc;

    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
    KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
        ("[uncore,%d] illegal row index %d", __LINE__, ri));

    cc = uncore_pcpu[cpu];
    pm = cc->pc_uncorepmcs[ri].phw_pmc;

    KASSERT(pm,
        ("[uncore,%d] cpu%d ri%d no configured PMC to write", __LINE__,
        cpu, ri));

    PMCDBG(MDP,WRI,1, "ucp-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
        UCP_PMC0 + ri, v);

    if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
        v = ucp_reload_count_to_perfctr_value(v);

    /*
     * Write the new value to the counter. The counter will be in
     * a stopped state when the pcd_write() entry point is called.
     */
    wrmsr(UCP_PMC0 + ri, v);

    return (0);
}
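Sampling PMCs are programmed with the counter value that overflows after
"reload count" more events, and reads convert back. The ucp_*() conversion
helpers are not part of this excerpt, so the (mask - v + 1) form below is
an assumption based on the usual two's-complement convention; a minimal
sketch for a 48-bit counter:

#include <stdint.h>
#include <stdio.h>

#define W_MASK  ((UINT64_C(1) << 48) - 1)   /* 48-bit counter width */

static uint64_t
reload_to_perfctr(uint64_t reload)
{
    /* Program the value that reaches overflow after 'reload' events. */
    return ((W_MASK - reload + 1) & W_MASK);
}

static uint64_t
perfctr_to_reload(uint64_t v)
{
    /* Inverse transformation, applied on the read side. */
    return ((W_MASK - v + 1) & W_MASK);
}

int
main(void)
{
    uint64_t r = 1000;

    /* Round trip: write then read recovers the reload count. */
    printf("%ju\n", (uintmax_t)perfctr_to_reload(reload_to_perfctr(r)));
    return (0);
}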
void
pmclog_process_pmcallocate(struct pmc *pm)
{
    struct pmc_owner *po;
    struct pmc_soft *ps;

    po = pm->pm_owner;

    PMCDBG(LOG,ALL,1, "pm=%p", pm);

    if (PMC_TO_CLASS(pm) == PMC_CLASS_SOFT) {
        PMCLOG_RESERVE(po, PMCALLOCATEDYN,
            sizeof(struct pmclog_pmcallocatedyn));
        PMCLOG_EMIT32(pm->pm_id);
        PMCLOG_EMIT32(pm->pm_event);
        PMCLOG_EMIT32(pm->pm_flags);
        ps = pmc_soft_ev_acquire(pm->pm_event);
        if (ps != NULL)
            PMCLOG_EMITSTRING(ps->ps_ev.pm_ev_name, PMC_NAME_MAX);
        else
            PMCLOG_EMITNULLSTRING(PMC_NAME_MAX);
        pmc_soft_ev_release(ps);
        PMCLOG_DESPATCH(po);
    } else {
        PMCLOG_RESERVE(po, PMCALLOCATE,
            sizeof(struct pmclog_pmcallocate));
        PMCLOG_EMIT32(pm->pm_id);
        PMCLOG_EMIT32(pm->pm_event);
        PMCLOG_EMIT32(pm->pm_flags);
        PMCLOG_DESPATCH(po);
    }
}
int
pmc_uncore_initialize(struct pmc_mdep *md, int maxcpu)
{
    uncore_cputype = md->pmd_cputype;
    uncore_pmcmask = 0;

    /*
     * Initialize programmable counters.
     */
    uncore_ucp_npmc  = 8;
    uncore_ucp_width = 48;

    uncore_pmcmask |= ((1ULL << uncore_ucp_npmc) - 1);

    ucp_initialize(md, maxcpu, uncore_ucp_npmc, uncore_ucp_width);

    /*
     * Initialize fixed function counters, if present.
     */
    uncore_ucf_ri = uncore_ucp_npmc;
    uncore_ucf_npmc  = 1;
    uncore_ucf_width = 48;

    ucf_initialize(md, maxcpu, uncore_ucf_npmc, uncore_ucf_width);
    uncore_pmcmask |= ((1ULL << uncore_ucf_npmc) - 1) <<
        SELECTOFF(uncore_cputype);

    PMCDBG(MDP,INI,1,"uncore-init pmcmask=0x%jx ucfri=%d", uncore_pmcmask,
        uncore_ucf_ri);

    uncore_pcpu = malloc(sizeof(*uncore_pcpu) * maxcpu, M_PMC,
        M_ZERO | M_WAITOK);

    return (0);
}
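The resulting uncore_pmcmask has one bit per counter: programmable counters
in the low bits, the fixed counter shifted up to its enable position in the
global control register. A minimal sketch for 8 programmable plus 1 fixed
counter, assuming SELECTOFF() evaluates to 32 (the fixed-counter position
on Nehalem-era uncore hardware; the real value is CPU-type dependent):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t mask = 0;

    mask |= (UINT64_C(1) << 8) - 1;             /* UCP counters: bits 0-7 */
    mask |= ((UINT64_C(1) << 1) - 1) << 32;     /* UCF counter: bit 32 */
    printf("pmcmask=0x%jx\n", (uintmax_t)mask); /* 0x1000000ff */
    return (0);
}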
uint32_t
mips_get_perfctl(int cpu, int ri, uint32_t event, uint32_t caps)
{
    cvmx_core_perf_control_t control;

    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[mips,%d] illegal CPU value %d", __LINE__, cpu));
    KASSERT(ri >= 0 && ri < mips_npmcs,
        ("[mips,%d] illegal row index %d", __LINE__, ri));

    /* Start from a clean control word; only set the fields we need. */
    control.u32 = 0;
    control.s.event = event;

    if (caps & PMC_CAP_SYSTEM) {
        control.s.k = 1;
        control.s.s = 1;
        control.s.ex = 1;
    }
    if (caps & PMC_CAP_USER)
        control.s.u = 1;

    /* With no privilege restriction requested, count in all modes. */
    if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0) {
        control.s.k = 1;
        control.s.s = 1;
        control.s.u = 1;
        control.s.ex = 1;
    }

    if (caps & PMC_CAP_INTERRUPT)
        control.s.ie = 1;

    PMCDBG(MDP,ALL,2,"mips-allocate ri=%d -> config=0x%x", ri,
        control.u32);

    return (control.u32);
}
int
pmclog_close(struct pmc_owner *po)
{

    PMCDBG(LOG,CLO,1, "po=%p", po);

    mtx_lock(&pmc_kthread_mtx);

    /*
     * Schedule the current buffer.
     */
    mtx_lock_spin(&po->po_mtx);
    if (po->po_curbuf)
        pmclog_schedule_io(po);
    else
        wakeup_one(po);
    mtx_unlock_spin(&po->po_mtx);

    /*
     * Initiate shutdown: no new data queued,
     * thread will close file on last block.
     */
    po->po_flags |= PMC_PO_SHUTDOWN;

    mtx_unlock(&pmc_kthread_mtx);

    return (0);
}
static int
pmclog_get_buffer(struct pmc_owner *po)
{
    struct pmclog_buffer *plb;

    mtx_assert(&po->po_mtx, MA_OWNED);

    KASSERT(po->po_curbuf == NULL,
        ("[pmclog,%d] po=%p current buffer still valid", __LINE__, po));

    mtx_lock_spin(&pmc_bufferlist_mtx);
    if ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL)
        TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
    mtx_unlock_spin(&pmc_bufferlist_mtx);

    PMCDBG(LOG,GTB,1, "po=%p plb=%p", po, plb);

#ifdef DEBUG
    if (plb)
        KASSERT(plb->plb_ptr == plb->plb_base &&
            plb->plb_base < plb->plb_fence,
            ("[pmclog,%d] po=%p buffer invariants: ptr=%p "
            "base=%p fence=%p", __LINE__, po, plb->plb_ptr,
            plb->plb_base, plb->plb_fence));
#endif

    po->po_curbuf = plb;

    /* update stats */
    atomic_add_int(&pmc_stats.pm_buffer_requests, 1);
    if (plb == NULL)
        atomic_add_int(&pmc_stats.pm_buffer_requests_failed, 1);

    return (plb ? 0 : ENOMEM);
}
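The free-buffer pool above is a TAILQ of preallocated descriptors: getting
a buffer takes the list head, returning one reinserts it. A minimal
userland sketch of the same pattern with <sys/queue.h>; the struct and
variable names here are illustrative, not the driver's:

#include <sys/queue.h>
#include <stdio.h>

struct buf {
    int              id;
    TAILQ_ENTRY(buf) link;
};

static TAILQ_HEAD(, buf) pool = TAILQ_HEAD_INITIALIZER(pool);

int
main(void)
{
    struct buf bufs[2], *b;
    int i;

    /* Preallocate and push descriptors onto the free list. */
    for (i = 0; i < 2; i++) {
        bufs[i].id = i;
        TAILQ_INSERT_HEAD(&pool, &bufs[i], link);
    }

    /* "Get a buffer": take the head, as pmclog_get_buffer() does. */
    if ((b = TAILQ_FIRST(&pool)) != NULL)
        TAILQ_REMOVE(&pool, b, link);
    printf("got buf %d\n", b ? b->id : -1);
    return (0);
}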
static void
pmclog_schedule_io(struct pmc_owner *po)
{
    KASSERT(po->po_curbuf != NULL,
        ("[pmclog,%d] schedule_io with null buffer po=%p", __LINE__, po));

    KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base,
        ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
        po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base));
    KASSERT(po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
        ("[pmclog,%d] buffer invariants po=%p ptr=%p fence=%p", __LINE__,
        po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_fence));

    PMCDBG(LOG,SIO, 1, "po=%p", po);

    mtx_assert(&po->po_mtx, MA_OWNED);

    /*
     * Add the current buffer to the tail of the buffer list and
     * wakeup the helper.
     */
    TAILQ_INSERT_TAIL(&po->po_logbuffers, po->po_curbuf, plb_next);
    po->po_curbuf = NULL;
    wakeup_one(po);
}
static void
ucf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
    struct pmc_classdep *pcd;

    KASSERT(md != NULL, ("[ucf,%d] md is NULL", __LINE__));

    PMCDBG(MDP,INI,1, "%s", "ucf-initialize");

    pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF];

    pcd->pcd_caps  = UCF_PMC_CAPS;
    pcd->pcd_class = PMC_CLASS_UCF;
    pcd->pcd_num   = npmc;
    pcd->pcd_ri    = md->pmd_npmc;
    pcd->pcd_width = pmcwidth;

    pcd->pcd_allocate_pmc = ucf_allocate_pmc;
    pcd->pcd_config_pmc   = ucf_config_pmc;
    pcd->pcd_describe     = ucf_describe;
    pcd->pcd_get_config   = ucf_get_config;
    pcd->pcd_get_msr      = NULL;
    pcd->pcd_pcpu_fini    = uncore_pcpu_noop;
    pcd->pcd_pcpu_init    = uncore_pcpu_noop;
    pcd->pcd_read_pmc     = ucf_read_pmc;
    pcd->pcd_release_pmc  = ucf_release_pmc;
    pcd->pcd_start_pmc    = ucf_start_pmc;
    pcd->pcd_stop_pmc     = ucf_stop_pmc;
    pcd->pcd_write_pmc    = ucf_write_pmc;

    md->pmd_npmc += npmc;
}
static int
mpc7xxx_read_pmc(int cpu, int ri, pmc_value_t *v)
{
    struct pmc *pm;
    pmc_value_t tmp;

    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
    KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS,
        ("[powerpc,%d] illegal row index %d", __LINE__, ri));

    pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
    KASSERT(pm,
        ("[powerpc,%d] cpu %d ri %d pmc not configured", __LINE__,
        cpu, ri));

    tmp = mpc7xxx_pmcn_read(ri);
    PMCDBG(MDP,REA,2,"ppc-read id=%d -> %jd", ri, tmp);
    if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
        *v = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
    else
        *v = tmp;

    return (0);
}
static int
ucp_stop_pmc(int cpu, int ri)
{
    struct pmc *pm;
    struct uncore_cpu *cc;

    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
    KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
        ("[uncore,%d] illegal row index %d", __LINE__, ri));

    cc = uncore_pcpu[cpu];
    pm = cc->pc_uncorepmcs[ri].phw_pmc;

    KASSERT(pm,
        ("[uncore,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
        cpu, ri));

    PMCDBG(MDP,STO,1, "ucp-stop cpu=%d ri=%d", cpu, ri);

    /* stop hw. */
    wrmsr(SELECTSEL(uncore_cputype) + ri, 0);

    do {
        cc->pc_resync = 0;
        cc->pc_globalctrl &= ~(1ULL << ri);
        wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);
    } while (cc->pc_resync != 0);

    return (0);
}
static int
p4_pcpu_fini(struct pmc_mdep *md, int cpu)
{
    int first_ri, i;
    struct p4_cpu *p4c;
    struct pmc_cpu *pc;

    PMCDBG(MDP,INI,0, "p4-cleanup cpu=%d", cpu);

    pc = pmc_pcpu[cpu];
    first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_P4].pcd_ri;

    for (i = 0; i < P4_NPMCS; i++)
        pc->pc_hwpmcs[i + first_ri] = NULL;

    if (!pmc_cpu_is_primary(cpu) && (cpu & 1))
        return (0);

    p4c = p4_pcpu[cpu];

    KASSERT(p4c != NULL, ("[p4,%d] NULL pcpu", __LINE__));

    /* Turn off all PMCs on this CPU */
    for (i = 0; i < P4_NPMCS - 1; i++)
        wrmsr(P4_CCCR_MSR_FIRST + i,
            rdmsr(P4_CCCR_MSR_FIRST + i) & ~P4_CCCR_ENABLE);

    mtx_destroy(&p4c->pc_mtx);

    free(p4c, M_PMC);

    p4_pcpu[cpu] = NULL;

    return (0);
}
struct pmc_mdep *
pmc_mips24k_initialize(void)
{
    struct pmc_mdep *pmc_mdep;
    struct pmc_classdep *pcd;

    /*
     * Read the counter control registers from CP0
     * to determine the number of available PMCs.
     * The control registers use bit 31 as a "more" bit.
     *
     * XXX: With the current macros it is hard to read the
     * CP0 registers in any varied way.
     */
    mips24k_npmcs = 2;

    PMCDBG(MDP,INI,1,"mips-init npmcs=%d", mips24k_npmcs);

    /*
     * Allocate space for pointers to PMC HW descriptors and for
     * the MDEP structure used by MI code.
     */
    mips24k_pcpu = malloc(sizeof(struct mips24k_cpu *) * pmc_cpu_max(),
        M_PMC, M_WAITOK|M_ZERO);

    /* Just one class */
    pmc_mdep = malloc(sizeof(struct pmc_mdep) +
        sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO);

    pmc_mdep->pmd_cputype = PMC_CPU_MIPS_24K;
    pmc_mdep->pmd_nclass  = 1;

    pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_MIPS24K];

    pcd->pcd_caps  = MIPS24K_PMC_CAPS;
    pcd->pcd_class = PMC_CLASS_MIPS24K;
    pcd->pcd_num   = mips24k_npmcs;
    pcd->pcd_ri    = pmc_mdep->pmd_npmc;
    pcd->pcd_width = 32; /* XXX: Fix for 64 bit MIPS */

    pcd->pcd_allocate_pmc = mips24k_allocate_pmc;
    pcd->pcd_config_pmc   = mips24k_config_pmc;
    pcd->pcd_pcpu_fini    = mips24k_pcpu_fini;
    pcd->pcd_pcpu_init    = mips24k_pcpu_init;
    pcd->pcd_describe     = mips24k_describe;
    pcd->pcd_get_config   = mips24k_get_config;
    pcd->pcd_read_pmc     = mips24k_read_pmc;
    pcd->pcd_release_pmc  = mips24k_release_pmc;
    pcd->pcd_start_pmc    = mips24k_start_pmc;
    pcd->pcd_stop_pmc     = mips24k_stop_pmc;
    pcd->pcd_write_pmc    = mips24k_write_pmc;

    pmc_mdep->pmd_intr       = mips24k_intr;
    pmc_mdep->pmd_switch_in  = mips24k_switch_in;
    pmc_mdep->pmd_switch_out = mips24k_switch_out;

    pmc_mdep->pmd_npmc += mips24k_npmcs;

    return (pmc_mdep);
}
void
pmc_uncore_finalize(struct pmc_mdep *md)
{
    PMCDBG(MDP,INI,1, "%s", "uncore-finalize");

    free(uncore_pcpu, M_PMC);
    uncore_pcpu = NULL;
}
struct pmc_mdep *
pmc_mips_initialize(void)
{
    struct pmc_mdep *pmc_mdep;
    struct pmc_classdep *pcd;

    /*
     * TODO: Use the "more" bit of the PerfCntlX registers to detect
     * the actual number of counters.
     */
    mips_npmcs = 2;

    PMCDBG(MDP,INI,1,"mips-init npmcs=%d", mips_npmcs);

    /*
     * Allocate space for pointers to PMC HW descriptors and for
     * the MDEP structure used by MI code.
     */
    mips_pcpu = malloc(sizeof(struct mips_cpu *) * pmc_cpu_max(),
        M_PMC, M_WAITOK|M_ZERO);

    /* Just one class */
    pmc_mdep = malloc(sizeof(struct pmc_mdep) +
        sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO);

    pmc_mdep->pmd_cputype = mips_pmc_spec.ps_cputype;
    pmc_mdep->pmd_nclass  = 1;

    pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_MIPS];

    pcd->pcd_caps  = mips_pmc_spec.ps_capabilities;
    pcd->pcd_class = mips_pmc_spec.ps_cpuclass;
    pcd->pcd_num   = mips_npmcs;
    pcd->pcd_ri    = pmc_mdep->pmd_npmc;
    pcd->pcd_width = mips_pmc_spec.ps_counter_width;

    pcd->pcd_allocate_pmc = mips_allocate_pmc;
    pcd->pcd_config_pmc   = mips_config_pmc;
    pcd->pcd_pcpu_fini    = mips_pcpu_fini;
    pcd->pcd_pcpu_init    = mips_pcpu_init;
    pcd->pcd_describe     = mips_describe;
    pcd->pcd_get_config   = mips_get_config;
    pcd->pcd_read_pmc     = mips_read_pmc;
    pcd->pcd_release_pmc  = mips_release_pmc;
    pcd->pcd_start_pmc    = mips_start_pmc;
    pcd->pcd_stop_pmc     = mips_stop_pmc;
    pcd->pcd_write_pmc    = mips_write_pmc;

    pmc_mdep->pmd_intr       = mips_pmc_intr;
    pmc_mdep->pmd_switch_in  = mips_pmc_switch_in;
    pmc_mdep->pmd_switch_out = mips_pmc_switch_out;

    pmc_mdep->pmd_npmc += mips_npmcs;

    return (pmc_mdep);
}
static int
p4_get_msr(int ri, uint32_t *msr)
{
    KASSERT(ri >= 0 && ri < P4_NPMCS,
        ("[p4,%d] ri %d out of range", __LINE__, ri));

    *msr = p4_pmcdesc[ri].pm_pmc_msr - P4_PERFCTR_MSR_FIRST;

    PMCDBG(MDP,OPS, 1, "ri=%d getmsr=0x%x", ri, *msr);

    return (0);
}
void
pmclog_process_pmcdetach(struct pmc *pm, pid_t pid)
{
    struct pmc_owner *po;

    PMCDBG(LOG,ATT,1,"!pm=%p pid=%d", pm, pid);

    po = pm->pm_owner;

    PMCLOG_RESERVE(po, PMCDETACH, sizeof(struct pmclog_pmcdetach));
    PMCLOG_EMIT32(pm->pm_id);
    PMCLOG_EMIT32(pid);
    PMCLOG_DESPATCH(po);
}
static int
ucf_release_pmc(int cpu, int ri, struct pmc *pmc)
{
    PMCDBG(MDP,REL,1, "ucf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);

    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
    KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
        ("[uncore,%d] illegal row-index %d", __LINE__, ri));

    KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc
        == NULL, ("[uncore,%d] PHW pmc non-NULL", __LINE__));

    return (0);
}
int
pmclog_deconfigure_log(struct pmc_owner *po)
{
    int error;
    struct pmclog_buffer *lb;

    PMCDBG(LOG,CFG,1, "de-config po=%p", po);

    if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
        return (EINVAL);

    KASSERT(po->po_sscount == 0,
        ("[pmclog,%d] po=%p still owning SS PMCs", __LINE__, po));
    KASSERT(po->po_file != NULL,
        ("[pmclog,%d] po=%p no log file", __LINE__, po));

    /* stop the kthread, this will reset the 'OWNS_LOGFILE' flag */
    pmclog_stop_kthread(po);

    KASSERT(po->po_kthread == NULL,
        ("[pmclog,%d] po=%p kthread not stopped", __LINE__, po));

    /* return all queued log buffers to the global pool */
    while ((lb = TAILQ_FIRST(&po->po_logbuffers)) != NULL) {
        TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
        PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
        mtx_lock_spin(&pmc_bufferlist_mtx);
        TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
        mtx_unlock_spin(&pmc_bufferlist_mtx);
    }

    /* return the 'current' buffer to the global pool */
    if ((lb = po->po_curbuf) != NULL) {
        PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
        mtx_lock_spin(&pmc_bufferlist_mtx);
        TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
        mtx_unlock_spin(&pmc_bufferlist_mtx);
    }

    /* drop a reference to the fd */
    error = fdrop(po->po_file, curthread);
    po->po_file  = NULL;
    po->po_error = 0;

    return (error);
}
static int
mips24k_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
    enum pmc_event pe;
    uint32_t caps, config, counter;
    int i;

    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[mips,%d] illegal CPU value %d", __LINE__, cpu));
    KASSERT(ri >= 0 && ri < mips24k_npmcs,
        ("[mips,%d] illegal row index %d", __LINE__, ri));

    caps = a->pm_caps;
    if (a->pm_class != PMC_CLASS_MIPS24K)
        return (EINVAL);
    pe = a->pm_ev;
    for (i = 0; i < mips24k_event_codes_size; i++) {
        if (mips24k_event_codes[i].pe_ev == pe) {
            config = mips24k_event_codes[i].pe_code;
            counter = mips24k_event_codes[i].pe_counter;
            break;
        }
    }
    if (i == mips24k_event_codes_size)
        return (EINVAL);

    /* Some events can only be counted on a specific counter. */
    if ((counter != MIPS24K_ALL) && (counter != ri))
        return (EINVAL);

    config <<= MIPS24K_PMC_SELECT;

    if (caps & PMC_CAP_SYSTEM)
        config |= (MIPS24K_PMC_SUPER_ENABLE |
            MIPS24K_PMC_KERNEL_ENABLE);
    if (caps & PMC_CAP_USER)
        config |= MIPS24K_PMC_USER_ENABLE;
    if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
        config |= MIPS24K_PMC_ENABLE;

    pm->pm_md.pm_mips24k.pm_mips24k_evsel = config;

    PMCDBG(MDP,ALL,2,"mips-allocate ri=%d -> config=0x%x", ri, config);

    return (0);
}
int
pmclog_process_userlog(struct pmc_owner *po, struct pmc_op_writelog *wl)
{
    int error;

    PMCDBG(LOG,WRI,1, "writelog po=%p ud=0x%x", po, wl->pm_userdata);

    error = 0;

    PMCLOG_RESERVE_WITH_ERROR(po, USERDATA,
        sizeof(struct pmclog_userdata));
    PMCLOG_EMIT32(wl->pm_userdata);
    PMCLOG_DESPATCH(po);

 error:
    return (error);
}
static int
ucp_config_pmc(int cpu, int ri, struct pmc *pm)
{
    KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
        ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

    KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
        ("[uncore,%d] illegal row-index %d", __LINE__, ri));

    PMCDBG(MDP,CFG,1, "ucp-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

    KASSERT(uncore_pcpu[cpu] != NULL,
        ("[uncore,%d] null per-cpu %d", __LINE__, cpu));

    uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc = pm;

    return (0);
}