static void stop_pmu(void * param) { u32 pmcr_value; /* disable all counters */ pmcr_value = A9_Read_PMCR(); pmcr_value &= ~0x1; A9_Write_PMCR(pmcr_value); A9_Write_CNTENCLR(0xffffffff); /* clear overflow flags */ A9_Write_OVSR(0xffffffff); /* disable all interrupts */ A9_Write_INTENCLR(0xffffffff); /* We need to save the PMU counter value for calibration result before calling free_pmu() * because free_pmu() may cause these registers be modified by IPM */ reg_value[COUNTER_A9_PMU_CCNT] += A9_ReadPMUCounter(COUNTER_A9_PMU_CCNT); reg_value[COUNTER_A9_PMU_PMN0] += A9_ReadPMUCounter(COUNTER_A9_PMU_PMN0); reg_value[COUNTER_A9_PMU_PMN1] += A9_ReadPMUCounter(COUNTER_A9_PMU_PMN1); reg_value[COUNTER_A9_PMU_PMN2] += A9_ReadPMUCounter(COUNTER_A9_PMU_PMN2); reg_value[COUNTER_A9_PMU_PMN3] += A9_ReadPMUCounter(COUNTER_A9_PMU_PMN3); reg_value[COUNTER_A9_PMU_PMN4] += A9_ReadPMUCounter(COUNTER_A9_PMU_PMN4); reg_value[COUNTER_A9_PMU_PMN5] += A9_ReadPMUCounter(COUNTER_A9_PMU_PMN5); }
/*
 * resume_pmu - restart counting after a pause_pmu().
 *
 * Sets the PMCR global-enable bit; counter configuration and values are
 * left untouched.
 *
 * @param: unused (cross-call callback signature).
 */
static void resume_pmu(void *param)
{
	/* Read-modify-write PMCR: set bit 0 (global counter enable). */
	A9_Write_PMCR(A9_Read_PMCR() | 0x1);
}
/*
 * pause_pmu - temporarily stop counting; resume_pmu() restarts it.
 *
 * Clears the PMCR global-enable bit only; counter configuration and
 * values are preserved.
 *
 * @param: unused (cross-call callback signature).
 */
static void pause_pmu(void *param)
{
	/* Read-modify-write PMCR: clear bit 0 (global counter enable). */
	A9_Write_PMCR(A9_Read_PMCR() & ~0x1);
}
static irqreturn_t px_pmu_isr(unsigned int pid, unsigned int tid, struct pt_regs * const regs, unsigned int cpu, unsigned long long ts) { u32 pmcr_value; u32 flag_value; unsigned int i; unsigned int reg_id; bool buffer_full = false; char ** bt_buffer = &per_cpu(g_bt_buffer, cpu); PXD32_CSS_Call_Stack_V2 *css_data = (PXD32_CSS_Call_Stack_V2 *)*bt_buffer; /* disable the counters */ pmcr_value = A9_Read_PMCR(); pmcr_value &= ~0x1; A9_Write_PMCR(pmcr_value); /* clear the overflow flag */ flag_value = A9_Read_OVSR(); A9_Write_OVSR(0xffffffff); backtrace(regs, cpu, pid, tid, css_data); if ((flag_value & 0x80000000) && es[COUNTER_A9_PMU_CCNT].enabled) { reg_id = COUNTER_A9_PMU_CCNT; /* ccnt overflow */ if (es[reg_id].calibration == false) { /* write css data in non-calibration mode */ if (!buffer_full) { fill_css_data_head(css_data, pid, tid, reg_id, ts); buffer_full |= write_css_data(cpu, css_data); } } else { /* calculate the overflow count in calibration mode */ es[reg_id].overflow++; } A9_WritePMUCounter(reg_id, es[reg_id].reset_value); } for (i=0; i<A9_PMN_NUM; i++) { if (flag_value & (0x1 << i)) { switch (i) { case 0: reg_id = COUNTER_A9_PMU_PMN0; break; case 1: reg_id = COUNTER_A9_PMU_PMN1; break; case 2: reg_id = COUNTER_A9_PMU_PMN2; break; case 3: reg_id = COUNTER_A9_PMU_PMN3; break; case 4: reg_id = COUNTER_A9_PMU_PMN4; break; case 5: reg_id = COUNTER_A9_PMU_PMN5; break; default: break; } if (es[reg_id].calibration == false) { /* write css data in non-calibration mode */ if (!buffer_full) { fill_css_data_head(css_data, pid, tid, reg_id, ts); buffer_full |= write_css_data(cpu, css_data); } } else { /* calculate the overflow count in calibration mode */ es[reg_id].overflow++; } A9_WritePMUCounter(reg_id, es[reg_id].reset_value); } } if (!buffer_full) { /* enable the counters */ pmcr_value |= 0x1; pmcr_value &= ~0x6; A9_Write_PMCR(pmcr_value); } return IRQ_HANDLED; }
/*
 * start_pmu - program and (optionally) start the A9 PMU on this CPU.
 *
 * Sequence: disable the PMU and request a counter reset, translate the
 * configured EBS events into register images via set_ccnt_events() /
 * set_pmn_events(), quiesce everything (counters, interrupts, overflow
 * flags, user-mode access), load the counter start values, program each
 * event slot, and finally enable interrupts, counters, and — unless a
 * paused start was requested — the PMU itself.
 *
 * @data: pointer to a bool; true means program the PMU but leave the
 *        global enable bit clear (start in the paused state).
 *
 * NOTE(review): register write order matters here; do not reorder.
 */
static void start_pmu(void *data)
{
	int i;
	struct pmu_registers_a9 pmu_regs;
	bool is_start_paused;

	is_start_paused = *(bool *)data;

	/* Start from an all-zero register image. */
	pmu_regs.pmcr = 0x0;
	pmu_regs.ccnt = 0x0;
	pmu_regs.cntenset = 0x0;
	pmu_regs.cntenclr = 0x0;
	pmu_regs.intenset = 0x0;
	pmu_regs.intenclr = 0x0;
	pmu_regs.ovsr = 0x0;

	for (i=0; i<A9_PMN_NUM; i++) {
		pmu_regs.evtsel[i] = 0;
		pmu_regs.pmncnt[i] = 0;
	}

	/* disable PMU and clear CCNT & PMNx (PMCR bits 1-2 are the reset bits) */
	pmu_regs.pmcr = A9_Read_PMCR();
	pmu_regs.pmcr &= ~0x1;
	pmu_regs.pmcr |= 0x6;
	A9_Write_PMCR(pmu_regs.pmcr);

	/*
	 * Build the register image from the configured events. The helpers
	 * fill in evtsel/pmncnt/ccnt and the enable/interrupt masks.
	 */
	for (i=0; i<g_ebs_settings.eventNumber; i++) {
		switch (g_ebs_settings.event[i].registerId) {
		case COUNTER_A9_PMU_CCNT:
			set_ccnt_events(&g_ebs_settings.event[i], &pmu_regs);
			break;
		case COUNTER_A9_PMU_PMN0:
			set_pmn_events(0, &g_ebs_settings.event[i], &pmu_regs);
			break;
		case COUNTER_A9_PMU_PMN1:
			set_pmn_events(1, &g_ebs_settings.event[i], &pmu_regs);
			break;
		case COUNTER_A9_PMU_PMN2:
			set_pmn_events(2, &g_ebs_settings.event[i], &pmu_regs);
			break;
		case COUNTER_A9_PMU_PMN3:
			set_pmn_events(3, &g_ebs_settings.event[i], &pmu_regs);
			break;
		case COUNTER_A9_PMU_PMN4:
			set_pmn_events(4, &g_ebs_settings.event[i], &pmu_regs);
			break;
		case COUNTER_A9_PMU_PMN5:
			set_pmn_events(5, &g_ebs_settings.event[i], &pmu_regs);
			break;
		default:
			break;
		}
	}

	/* disable all counters */
	A9_Write_CNTENCLR(0xffffffff);

	/* disable all interrupts */
	A9_Write_INTENCLR(0xffffffff);

	/* clear overflow flags */
	A9_Write_OVSR(0xffffffff);

	/* deny user-mode access to the PMU registers */
	A9_Write_USEREN(0);

	/* write the counter values */
	A9_WritePMUCounter(COUNTER_A9_PMU_CCNT, pmu_regs.ccnt);
	A9_WritePMUCounter(COUNTER_A9_PMU_PMN0, pmu_regs.pmncnt[0]);
	A9_WritePMUCounter(COUNTER_A9_PMU_PMN1, pmu_regs.pmncnt[1]);
	A9_WritePMUCounter(COUNTER_A9_PMU_PMN2, pmu_regs.pmncnt[2]);
	A9_WritePMUCounter(COUNTER_A9_PMU_PMN3, pmu_regs.pmncnt[3]);
	A9_WritePMUCounter(COUNTER_A9_PMU_PMN4, pmu_regs.pmncnt[4]);
	A9_WritePMUCounter(COUNTER_A9_PMU_PMN5, pmu_regs.pmncnt[5]);

	/*
	 * set events: select each slot, then program its counter and event.
	 * NOTE(review): PMNCNT was already written above via
	 * A9_WritePMUCounter(); the rewrite here looks redundant but is
	 * kept as-is — confirm against the A9_Write* implementations.
	 */
	for (i=0; i<A9_PMN_NUM; i++) {
		A9_Write_PMNXSEL(i);
		A9_Write_PMNCNT(pmu_regs.pmncnt[i]);
		A9_Write_EVTSEL(pmu_regs.evtsel[i]);
	}

	/* drop the reset bits; set the enable bit unless starting paused */
	pmu_regs.pmcr &= ~0x6;

	if (is_start_paused) {
		pmu_regs.pmcr &= ~0x1;
	} else {
		pmu_regs.pmcr |= 0x1;
	}

	/* enable the interrupts */
	A9_Write_INTENSET(pmu_regs.intenset);

	/* enable the counters */
	A9_Write_CNTENSET(pmu_regs.cntenset);

	/* Enable PMU */
	A9_Write_PMCR(pmu_regs.pmcr);
}
static irqreturn_t px_pmu_isr(unsigned int pid, unsigned int tid, struct pt_regs * const regs, unsigned int cpu, unsigned long long ts) { u32 pmcr_value; u32 flag_value; unsigned int i; bool buffer_full = false; bool found = false; PXD32_Hotspot_Sample_V2 sample_rec; /* disable the counters */ pmcr_value = A9_Read_PMCR(); pmcr_value &= ~0x1; //pmcr_value |= 0x6; A9_Write_PMCR(pmcr_value); /* clear the overflow flag */ flag_value = A9_Read_OVSR(); A9_Write_OVSR(0xffffffff); /* add for PXA988 platform, Need to init CTI irq line */ #ifdef PX_SOC_PXA988 pxa988_ack_ctiint_func = (pxa988_ack_ctiint_t)kallsyms_lookup_name_func("pxa988_ack_ctiint"); pxa988_ack_ctiint_func(); //printk(KERN_EMERG "%s:%d, kallsyms_lookup_name_func is : 0x%x, pxa988_ack_ctiint_func is : 0x%x\n", __FILE__, __LINE__, (unsigned long)kallsyms_lookup_name_func, (unsigned long)pxa988_ack_ctiint_func); #endif if (flag_value == 0) { return IRQ_NONE; } sample_rec.pc = regs->ARM_pc; sample_rec.pid = pid; sample_rec.tid = tid; memcpy(sample_rec.timestamp, &ts, sizeof(sample_rec.timestamp)); if ((flag_value & 0x80000000) && es[COUNTER_A9_PMU_CCNT].enabled) { found = true; sample_rec.registerId = COUNTER_A9_PMU_CCNT; /* ccnt overflow */ if (es[sample_rec.registerId].calibration == false) { /* write sample record in non-calibration mode */ buffer_full |= write_sample(cpu, &sample_rec); } else { /* calculate the overflow count in calibration mode */ es[sample_rec.registerId].overflow++; } A9_WritePMUCounter(sample_rec.registerId, es[sample_rec.registerId].reset_value); } for (i=0; i<A9_PMN_NUM; i++) { if (flag_value & (0x1 << i)) { found = true; switch (i) { case 0: sample_rec.registerId = COUNTER_A9_PMU_PMN0; break; case 1: sample_rec.registerId = COUNTER_A9_PMU_PMN1; break; case 2: sample_rec.registerId = COUNTER_A9_PMU_PMN2; break; case 3: sample_rec.registerId = COUNTER_A9_PMU_PMN3; break; case 4: sample_rec.registerId = COUNTER_A9_PMU_PMN4; break; case 5: sample_rec.registerId = COUNTER_A9_PMU_PMN5; 
break; default: break; } if (es[sample_rec.registerId].calibration == false) { /* write sample record in non-calibration mode */ buffer_full |= write_sample(cpu, &sample_rec); } else { /* calculate the overflow count in calibration mode */ es[sample_rec.registerId].overflow++; } A9_WritePMUCounter(sample_rec.registerId, es[sample_rec.registerId].reset_value); } } if (!buffer_full) { /* enable the counters */ pmcr_value |= 0x1; pmcr_value &= ~0x6; A9_Write_PMCR(pmcr_value); } return IRQ_HANDLED; }