/*
 * As System PMUs are affine to CPU0, the fact that interrupts are disabled
 * during interrupt handling is enough to serialise our actions and make this
 * safe. We do not need to grab our pmu_lock here.
 */
static irqreturn_t l2x0pmu_handle_irq(int irq, void *dev)
{
	irqreturn_t status = IRQ_NONE;
	struct perf_sample_data data;
	struct pt_regs *regs;
	int idx;

	regs = get_irq_regs();

	for (idx = 0; idx < L2X0_NUM_COUNTERS; ++idx) {
		struct perf_event *event = l2x0pmu_hw_events.events[idx];
		struct hw_perf_event *hwc;

		if (!counter_is_saturated(idx))
			continue;

		status = IRQ_HANDLED;

		hwc = &event->hw;

		/*
		 * The armpmu_* functions expect counters to overflow, but
		 * L220/PL310 counters saturate instead. Fake the overflow
		 * here so the hardware is in sync with what the framework
		 * expects.
		 */
		l2x0pmu_write_counter(idx, 0);

		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;

		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			l2x0pmu_disable_counter(idx);
	}

	l2x0_clear_interrupts(L2X0_INTR_MASK_ECNTR);

	irq_work_run();

	return status;
}
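/*
 * For context, a minimal sketch of the saturation check used above. This is
 * an assumption based on the L220/PL310 behaviour described in the handler's
 * comment (event counters stick at all ones rather than wrapping); the
 * l2x0pmu_read_counter() accessor is inferred as the read-side counterpart
 * of l2x0pmu_write_counter() and may not match the driver's exact helper.
 */
static bool counter_is_saturated(int idx)
{
	/* A saturated L2X0 event counter reads back as 0xffffffff. */
	return l2x0pmu_read_counter(idx) == 0xFFFFFFFFU;
}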
static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
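/*
 * A minimal sketch of the overflow-flag helpers this handler relies on,
 * assuming the ARMv8 PMU overflow status register carries one flag bit per
 * counter. The mask value and the direct use of the perf index as the bit
 * position are illustrative assumptions; the real driver maps the perf
 * event index to a hardware counter number before testing the bit.
 */
#define ARMV8_OVERFLOWED_MASK	0xffffffffU	/* one flag bit per counter */

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	/* Any set flag bit means at least one counter overflowed. */
	return pmovsr & ARMV8_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_has_overflowed(u32 pmovsr, int idx)
{
	/* Test the flag bit belonging to this counter (mapping elided). */
	return pmovsr & BIT(idx);
}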