irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t retval = IRQ_NONE;
	unsigned int random = 0, irq = desc->irq_data.irq;

	do {
		irqreturn_t res;

#ifdef CONFIG_SEC_DEBUG
		sec_debug_timer_log(4444, (int)irqs_disabled(),
				    (void *)action->handler);
#endif
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, res);
#ifdef CONFIG_SEC_DEBUG
		sec_debug_timer_log(5555, (int)irqs_disabled(),
				    (void *)action->handler);
		/* sec_debug_irq_sched_log(irq, (void *)action->handler, 2); */
#endif

		if (WARN_ONCE(!irqs_disabled(),
			      "irq %u handler %pF enabled interrupts\n",
			      irq, action->handler))
			local_irq_disable();

		switch (res) {
		case IRQ_WAKE_THREAD:
			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			irq_wake_thread(desc, action);

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			random |= action->flags;
			break;

		default:
			break;
		}

		retval |= res;
		action = action->next;
	} while (action);

	if (random & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	if (!noirqdebug)
		note_interrupt(irq, desc, retval);

	return retval;
}
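/*
 * Hedged example, not part of this source: it only illustrates the
 * IRQ_WAKE_THREAD branch above. A driver that returns IRQ_WAKE_THREAD from
 * its hardirq handler must have registered a thread function (e.g. via
 * request_threaded_irq()), otherwise the warn_no_thread() check fires.
 * All names below ("my_*", "my_dev") are hypothetical.
 */
#include <linux/interrupt.h>

static irqreturn_t my_hardirq(int irq, void *dev_id)
{
	/* minimal hardirq work: ack/quiesce the device, defer the rest */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	/* heavier work runs here in a kernel thread, with interrupts on */
	return IRQ_HANDLED;
}

static int my_setup_irq(unsigned int irq, void *dev)
{
	/* thread_fn is non-NULL, so returning IRQ_WAKE_THREAD is legitimate */
	return request_threaded_irq(irq, my_hardirq, my_thread_fn,
				    IRQF_ONESHOT, "my_dev", dev);
}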
static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	WARN_ON(!irqs_disabled());

	debug_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);
	fn = timer->function;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	raw_spin_unlock(&cpu_base->lock);
	trace_hrtimer_expire_entry(timer, now);
	sec_debug_timer_log(1111, (int)irqs_disabled(), (void *)fn);
	restart = fn(timer);
	sec_debug_timer_log(2222, (int)irqs_disabled(), (void *)fn);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 *
	 * Note: Because we dropped the cpu_base->lock above,
	 * hrtimer_start_range_ns() can have popped in and enqueued the timer
	 * for us already.
	 */
	if (restart != HRTIMER_NORESTART &&
	    !(timer->state & HRTIMER_STATE_ENQUEUED))
		enqueue_hrtimer(timer, base);

	WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));

	timer->state &= ~HRTIMER_STATE_CALLBACK;
}
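/*
 * Hedged sketch, not from this source: the real sec_debug_timer_log() is
 * part of Samsung's sec_debug support and is not shown here. The sketch
 * only illustrates the pattern the 1111/2222/4444/5555 markers above rely
 * on: a lockless per-CPU ring buffer of (timestamp, marker, irqs-disabled
 * flag, handler) records that a crash dump can replay to find the last
 * handler or timer callback that ran on each CPU. Every name, size and
 * field below is an assumption made for illustration.
 */
#include <linux/atomic.h>
#include <linux/sched.h>	/* local_clock() */
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/types.h>

#define TIMER_LOG_MAX	256	/* illustrative depth, power of two */

struct timer_log_entry {
	u64		time;		/* local_clock() at the marker */
	unsigned int	type;		/* 1111/2222/4444/5555 */
	int		irqs_off;	/* irqs_disabled() snapshot */
	void		*fn;		/* handler or timer callback */
};

static struct timer_log_entry timer_log[NR_CPUS][TIMER_LOG_MAX];
static atomic_t timer_log_idx[NR_CPUS];

void sec_debug_timer_log(unsigned int type, int irqs_off, void *fn)
{
	int cpu = raw_smp_processor_id();
	unsigned int i = atomic_inc_return(&timer_log_idx[cpu]) &
			 (TIMER_LOG_MAX - 1);

	timer_log[cpu][i].time = local_clock();
	timer_log[cpu][i].type = type;
	timer_log[cpu][i].irqs_off = irqs_off;
	timer_log[cpu][i].fn = fn;
}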