Example #1
/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}
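For context, a rough sketch of the sending side that pairs with this arm64 handler. The names are modeled on arch/arm64/kernel/smp.c of the same era; smp_cross_call and __smp_cross_call are assumptions based on that file, and details vary by kernel version.

/*
 * Illustrative sketch only. __smp_cross_call is installed by the
 * interrupt controller driver and performs the actual hardware IPI.
 */
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	__smp_cross_call(target, ipinr);
}

void smp_send_reschedule(int cpu)
{
	/* Raised here, dispatched by handle_IPI() above as IPI_RESCHEDULE. */
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}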
Example #2
/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
    unsigned int cpu = smp_processor_id();
    struct pt_regs *old_regs = set_irq_regs(regs);

    if ((unsigned)ipinr < NR_IPI) {
        trace_ipi_entry(ipi_types[ipinr]);
        __inc_irq_stat(cpu, ipi_irqs[ipinr]);
    }

    switch (ipinr) {
    case IPI_RESCHEDULE:
        scheduler_ipi();
        break;

    case IPI_CALL_FUNC:
        irq_enter();
        generic_smp_call_function_interrupt();
        irq_exit();
        break;

    case IPI_CALL_FUNC_SINGLE:
        irq_enter();
        generic_smp_call_function_single_interrupt();
        irq_exit();
        break;

    case IPI_CPU_STOP:
        irq_enter();
        ipi_cpu_stop(cpu);
        irq_exit();
        break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
    case IPI_TIMER:
        irq_enter();
        tick_receive_broadcast();
        irq_exit();
        break;
#endif

#ifdef CONFIG_IRQ_WORK
    case IPI_IRQ_WORK:
        irq_enter();
        irq_work_run();
        irq_exit();
        break;
#endif

    default:
        pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
        break;
    }

    if ((unsigned)ipinr < NR_IPI)
        trace_ipi_exit(ipi_types[ipinr]);
    set_irq_regs(old_regs);
}
Example #3
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}
Example #4
/* Performance counter interrupts run unmasked at PIL level 15.
 * Therefore we can't do things like wakeups and other work
 * that expects IRQ disabling to be adhered to in locking etc.
 *
 * Therefore in such situations we defer the work by signalling
 * a lower level cpu IRQ.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_IRQ_WORK
	irq_work_run();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}
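The comment above explains the trick: at PIL 15 the handler cannot safely do wakeups, so the work is deferred by raising a lower-priority softint. The raise side is tiny; a sketch of what arch_irq_work_raise() looks like on sparc64 (modeled on arch/sparc/kernel/pcr.c; treat the exact form as an assumption):

/*
 * Sketch of the raise side: from PIL 15 we only set the
 * lower-priority softint; the queued irq_work itself runs later
 * in deferred_pcr_work_irq() above.
 */
void arch_irq_work_raise(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}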
Example #5
/*
 * As System PMUs are affine to CPU0, the fact that interrupts are disabled
 * during interrupt handling is enough to serialise our actions and make this
 * safe. We do not need to grab our pmu_lock here.
 */
static irqreturn_t l2x0pmu_handle_irq(int irq, void *dev)
{
	irqreturn_t status = IRQ_NONE;
	struct perf_sample_data data;
	struct pt_regs *regs;
	int idx;

	regs = get_irq_regs();

	for (idx = 0; idx < L2X0_NUM_COUNTERS; ++idx) {
		struct perf_event *event = l2x0pmu_hw_events.events[idx];
		struct hw_perf_event *hwc;

		if (!counter_is_saturated(idx))
			continue;

		status = IRQ_HANDLED;

		hwc = &event->hw;

		/*
		 * The armpmu_* functions expect counters to overflow, but
		 * L220/PL310 counters saturate instead. Fake the overflow
		 * here so the hardware is in sync with what the framework
		 * expects.
		 */
		l2x0pmu_write_counter(idx, 0);

		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;

		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			l2x0pmu_disable_counter(idx);
	}

	l2x0_clear_interrupts(L2X0_INTR_MASK_ECNTR);

	irq_work_run();

	return status;
}
Example #6
static irqreturn_t l2x0pmu_handle_irq(int irq, void *dev)
{
	irqreturn_t status = IRQ_NONE;
	struct perf_sample_data data;
	struct pt_regs *regs;
	int idx;

	regs = get_irq_regs();

	for (idx = 0; idx < L2X0_NUM_COUNTERS; ++idx) {
		struct perf_event *event = l2x0pmu_hw_events.events[idx];
		struct hw_perf_event *hwc;

		if (!counter_is_saturated(idx))
			continue;

		status = IRQ_HANDLED;

		hwc = &event->hw;

		/*
		 * The armpmu_* functions expect counters to overflow, but
		 * L220/PL310 counters saturate instead. Fake the overflow
		 * here so the hardware is in sync with what the framework
		 * expects.
		 */
		l2x0pmu_write_counter(idx, 0);

		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;

		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			l2x0pmu_disable_counter(idx);
	}

	l2x0_clear_interrupts(L2X0_INTR_MASK_ECNTR);

	irq_work_run();

	return status;
}
Example #7
static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
Example #8
static inline void __smp_irq_work_interrupt(void)
{
	inc_irq_stat(apic_irq_work_irqs);
	irq_work_run();
}
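All eight examples are the consumer side: irq_work_run() drains work that was queued from a context where wakeups and normal locking are unsafe (NMI, PMU overflow, PIL 15). For completeness, a minimal producer-side sketch using the generic API from <linux/irq_work.h>; my_work, my_work_func, my_setup and my_defer are hypothetical names:

#include <linux/irq_work.h>
#include <linux/printk.h>

/* Hypothetical callback: runs from irq_work_run() in hard-IRQ
 * context, where wakeups and locking are allowed again. */
static void my_work_func(struct irq_work *work)
{
	pr_info("deferred work executed\n");
}

static struct irq_work my_work;

/* One-time setup. */
static void my_setup(void)
{
	init_irq_work(&my_work, my_work_func);
}

/* Safe to call from NMI/PMU-overflow context: queues the work and,
 * via arch_irq_work_raise(), arranges for an interrupt whose handler
 * calls irq_work_run(), as in the examples above. */
static void my_defer(void)
{
	irq_work_queue(&my_work);
}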