static void armv8pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv8pmu_disable_counter(idx);
		armv8pmu_disable_intens(idx);
	}

	/*
	 * Initialize & Reset PMNC. Request overflow interrupt for
	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
	 */
	armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
			    ARMV8_PMU_PMCR_LC);
}
static inline int armv8pmu_disable_event_irq(struct perf_event *event)
{
	return armv8pmu_disable_intens(event->hw.idx);
}