/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq: The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
#ifdef CONFIG_SEC_DEBUG
	if (desc->action)
		sec_debug_irq_sched_log(irq, (void *)desc->action->handler,
					irqs_disabled());
	else
		sec_debug_irq_sched_log(irq, (void *)desc->handle_irq,
					irqs_disabled());
#endif
	generic_handle_irq_desc(irq, desc);
	return 0;
}
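/*
 * Illustrative sketch, not part of the kernel source: the typical caller of
 * generic_handle_irq() is a chained (demultiplexing) handler, installed with
 * irq_set_chained_handler(), that reads a controller's pending register and
 * dispatches each child interrupt. All names here (struct demux_chip,
 * DEMUX_PENDING, demux_chained_handler) are hypothetical.
 */
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

struct demux_chip {
	void __iomem	*regs;		/* controller register base */
	unsigned int	irq_base;	/* first child Linux irq number */
};

#define DEMUX_PENDING	0x10		/* hypothetical pending register */

static void demux_chained_handler(unsigned int irq, struct irq_desc *desc)
{
	struct demux_chip *chip = irq_desc_get_handler_data(desc);
	unsigned long pending = readl(chip->regs + DEMUX_PENDING);
	int bit;

	/* each set bit maps to one child irq; hand it to the core */
	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(chip->irq_base + bit);
}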
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
#ifdef CONFIG_SEC_DEBUG
				sec_debug_irq_sched_log(-1, t->func, 3);
				t->func(t->data);
				sec_debug_irq_sched_log(-1, t->func, 4);
#else
				t->func(t->data);
#endif
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
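/*
 * Illustrative sketch, not part of the kernel source: the driver-side view of
 * the path above. tasklet_schedule() queues the tasklet on tasklet_vec and
 * raises TASKLET_SOFTIRQ; tasklet_action() later runs my_tasklet_fn() in
 * softirq context. The my_* names are hypothetical.
 */
#include <linux/interrupt.h>

static void my_tasklet_fn(unsigned long data)
{
	/* deferred work runs here, in softirq context */
	pr_info("tasklet ran with data %lu\n", data);
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static irqreturn_t my_hardirq_handler(int irq, void *dev_id)
{
	/* defer the heavy lifting out of hardirq context */
	tasklet_schedule(&my_tasklet);
	return IRQ_HANDLED;
}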
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t retval = IRQ_NONE;
	unsigned int flags = 0, irq = desc->irq_data.irq;

	do {
		irqreturn_t res;

		sec_debug_irq_sched_log(irq, (void *)action->handler, 1);
		sec_debug_timer_log(4444, (int)irqs_disabled(),
				    (void *)action->handler);
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, res);
		sec_debug_timer_log(5555, (int)irqs_disabled(),
				    (void *)action->handler);
		/* sec_debug_irq_sched_log(irq, (void *)action->handler, 2); */

		if (WARN_ONCE(!irqs_disabled(),
			      "irq %u handler %pF enabled interrupts\n",
			      irq, action->handler))
			local_irq_disable();

		switch (res) {
		case IRQ_WAKE_THREAD:
			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			irq_wake_thread(desc, action);

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			flags |= action->flags;
			break;

		default:
			break;
		}

		retval |= res;
		action = action->next;
	} while (action);

	add_interrupt_randomness(irq, flags);

	if (!noirqdebug)
		note_interrupt(irq, desc, retval);
	return retval;
}
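/*
 * Illustrative sketch, not part of the kernel source: the IRQ_WAKE_THREAD
 * branch above is what a handler registered with request_threaded_irq()
 * relies on. The primary handler acks the device and returns IRQ_WAKE_THREAD;
 * handle_irq_event_percpu() then calls irq_wake_thread() to run the threaded
 * part in process context. The my_* names are hypothetical.
 */
#include <linux/interrupt.h>

static irqreturn_t my_quick_check(int irq, void *dev_id)
{
	/* runs with interrupts disabled; keep it short */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	/* runs in process context, may sleep */
	return IRQ_HANDLED;
}

static int my_probe_irq(int irq, void *dev)
{
	return request_threaded_irq(irq, my_quick_check, my_thread_fn,
				    IRQF_ONESHOT, "my_dev", dev);
}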
/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq: the interrupt number
 *
 * __do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	raw_spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action)) {
		sec_debug_irq_sched_log(irq, 0, 0);
		goto out;
	}

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		raw_spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		raw_spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	raw_spin_unlock(&desc->lock);

	return 1;
}
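/*
 * Illustrative sketch, not part of the kernel source: on architectures that
 * still used this all-in-one flow, the low-level entry code decoded the irq
 * number and handed it to __do_IRQ(). The shape below is a hypothetical,
 * simplified arch entry point, not a real port.
 */
#include <linux/linkage.h>
#include <linux/irq.h>
#include <linux/hardirq.h>
#include <asm/irq_regs.h>

asmlinkage void do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	__do_IRQ(irq);		/* ack, dispatch the action chain, end */
	irq_exit();

	set_irq_regs(old_regs);
}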
/**
 * handle_IRQ_event - irq action chain handler
 * @irq: the interrupt number
 * @action: the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	do {
		sec_debug_irq_sched_log(irq, (void *)action->handler, 1);
		trace_irq_handler_entry(irq, action);
		ret = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, ret);
		sec_debug_irq_sched_log(irq, (void *)action->handler, 2);

		switch (ret) {
		case IRQ_WAKE_THREAD:
			/*
			 * Set result to handled so the spurious check
			 * does not trigger.
			 */
			ret = IRQ_HANDLED;

			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			/*
			 * Wake up the handler thread for this
			 * action. In case the thread crashed and was
			 * killed we just pretend that we handled the
			 * interrupt. The hardirq handler above has
			 * disabled the device interrupt, so no irq
			 * storm is lurking.
			 */
			if (likely(!test_bit(IRQTF_DIED,
					     &action->thread_flags))) {
				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
				wake_up_process(action->thread);
			}

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			status |= action->flags;
			break;

		default:
			break;
		}

		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
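/*
 * Illustrative sketch, not part of the kernel source: the IRQF_SAMPLE_RANDOM
 * check above only feeds the entropy pool for actions whose handlers were
 * registered with that flag, as in the hypothetical my_* example below.
 */
#include <linux/interrupt.h>

static irqreturn_t my_net_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_request(int irq, void *dev)
{
	/* interrupt timing from this device is mixed into the entropy pool */
	return request_irq(irq, my_net_handler,
			   IRQF_SAMPLE_RANDOM | IRQF_SHARED, "my_net", dev);
}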
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	int cpu;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler such as network RX might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
			   SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
#ifdef CONFIG_SEC_DEBUG
			sec_debug_irq_sched_log(-1, h->action, 5);
#endif
			h->action(h);
#ifdef CONFIG_SEC_DEBUG
			sec_debug_irq_sched_log(-1, h->action, 6);
#endif
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_exit();

	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
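/*
 * Illustrative sketch, not part of the kernel source: __do_softirq() only
 * dispatches handlers that have been registered in softirq_vec. The pattern
 * below mirrors how softirq_init() in kernel/softirq.c wires up the tasklet
 * entry shown earlier; the my_* wrapper names are hypothetical.
 */
#include <linux/interrupt.h>

static void my_softirq_setup(void)
{
	/* register the handler for the TASKLET_SOFTIRQ slot ... */
	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
}

static void my_trigger(void)
{
	/* ... later, mark it pending so __do_softirq() will run it */
	raise_softirq(TASKLET_SOFTIRQ);
}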