static void l2x0pmu_disable(struct hw_perf_event *event, int idx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0pmu_hw_events.pmu_lock, flags);
	l2x0pmu_disable_counter(idx);
	raw_spin_unlock_irqrestore(&l2x0pmu_hw_events.pmu_lock, flags);
}
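
/*
 * l2x0pmu_disable_counter() is defined earlier in the driver and is not
 * part of this excerpt. The contract assumed throughout: it clears the
 * counter's event-source configuration (which stops the counter) and
 * returns the previous configuration word so callers can restore it.
 * A rough sketch only; CFG_ADDR() and EVENT_SRC_MASK are illustrative
 * names, not the driver's actual identifiers:
 *
 *	static u32 l2x0pmu_disable_counter(int idx)
 *	{
 *		u32 oldcfg = readl_relaxed(CFG_ADDR(idx));
 *
 *		writel_relaxed(oldcfg & ~EVENT_SRC_MASK, CFG_ADDR(idx));
 *		return oldcfg;
 *	}
 */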

static void l2x0pmu_write_counter(int idx, u32 val)
{
	/*
	 * L2X0 counters can only be written to while they are disabled.
	 * The perf core does not disable counters before writing to them,
	 * even from interrupt context, so we must do so here.
	 */
	u32 cfg = l2x0pmu_disable_counter(idx);
	writel_relaxed(val, COUNTER_ADDR(idx));
	l2x0pmu_write_cfg(cfg, idx);
}
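
/*
 * counter_is_saturated() is used by the interrupt handler below but is
 * not part of this excerpt. A minimal sketch, assuming the counter is
 * read back with a plain readl_relaxed() of COUNTER_ADDR(idx):
 */
static int counter_is_saturated(int idx)
{
	/* L220/PL310 event counters stick at all-ones rather than wrap. */
	return readl_relaxed(COUNTER_ADDR(idx)) == 0xFFFFFFFF;
}
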
/*
 * As System PMUs are affine to CPU0, the fact that interrupts are disabled
 * during interrupt handling is enough to serialise our actions and make this
 * safe. We do not need to grab our pmu_lock here.
 */
static irqreturn_t l2x0pmu_handle_irq(int irq, void *dev)
{
	irqreturn_t status = IRQ_NONE;
	struct perf_sample_data data;
	struct pt_regs *regs;
	int idx;

	regs = get_irq_regs();

	/* Zero-initialise the sample record; data.period is set per event. */
	perf_sample_data_init(&data, 0);

	/* Check each counter; any that has saturated raised this IRQ. */
	for (idx = 0; idx < L2X0_NUM_COUNTERS; ++idx) {
		struct perf_event *event = l2x0pmu_hw_events.events[idx];
		struct hw_perf_event *hwc;

		if (!counter_is_saturated(idx))
			continue;

		status = IRQ_HANDLED;

		hwc = &event->hw;

		/*
		 * The armpmu_* functions expect counters to overflow, but
		 * L220/PL310 counters saturate instead. Fake the overflow
		 * here so the hardware is in sync with what the framework
		 * expects.
		 */
		l2x0pmu_write_counter(idx, 0);

		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;

		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		/*
		 * perf_event_overflow() returns non-zero when the event
		 * should be stopped, e.g. because it is being throttled.
		 */
		if (perf_event_overflow(event, &data, regs))
			l2x0pmu_disable_counter(idx);
	}

	/* Acknowledge the event counter overflow interrupts. */
	l2x0_clear_interrupts(L2X0_INTR_MASK_ECNTR);

	/* Process irq_work queued by the perf core (e.g. event wakeups). */
	irq_work_run();

	return status;
}
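
/*
 * Not shown in this excerpt: at probe time the driver requests the L2
 * controller's event counter interrupt and points it at the handler
 * above, along the lines of the following (sketch only; the irq
 * variable and flags are illustrative):
 *
 *	err = request_irq(irq, l2x0pmu_handle_irq, IRQF_NOBALANCING,
 *			  "l2x0-pmu", NULL);
 */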