static inline void _local_bh_enable_ip(unsigned long ip)
{
    WARN_ON_ONCE(in_irq() || irqs_disabled());
    if (in_irq() || irqs_disabled())
        printk("in_irq()%d irqs_disabled()%d\n", in_irq(), irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
    local_irq_disable();
#endif
    /*
     * Are softirqs going to be turned on now:
     */
    if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
        trace_softirqs_on(ip);
    /*
     * Keep preemption disabled until we are done with
     * softirq processing:
     */
    sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

    if (unlikely(!in_interrupt() && local_softirq_pending()))
        do_softirq();

    dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
    local_irq_enable();
#endif
    preempt_check_resched();
}
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
    WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
    local_irq_disable();
#endif
    /*
     * Are softirqs going to be turned on now:
     */
    if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
        trace_softirqs_on(ip);
    /*
     * Keep preemption disabled until we are done with
     * softirq processing:
     */
    preempt_count_sub(cnt - 1);

    if (unlikely(!in_interrupt() && local_softirq_pending())) {
        /*
         * Run softirq if any pending. And do it in its own stack
         * as we may be calling this deep in a task call stack already.
         */
        do_softirq();
    }

    preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
    local_irq_enable();
#endif
    preempt_check_resched();
}
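/*
 * Hedged usage sketch (not taken from the snippets above): the
 * __local_bh_enable_ip()/_local_bh_enable_ip() paths are normally reached
 * through the local_bh_disable()/local_bh_enable() pair that serializes
 * process context against softirq handlers on the local CPU. The
 * example_counters structure and example_bump() function are hypothetical.
 */
#include <linux/bottom_half.h>

struct example_counters {           /* hypothetical per-CPU data */
    unsigned long packets;
};

static void example_bump(struct example_counters *c)
{
    local_bh_disable();             /* keep softirq handlers off this CPU */
    c->packets++;                   /* field also touched from softirq context */
    local_bh_enable();              /* may end up in do_softirq() if work is pending */
}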
void local_bh_enable_ip(unsigned long ip)
{
#ifdef CONFIG_TRACE_IRQFLAGS
    unsigned long flags;

    WARN_ON_ONCE(in_irq());

    local_irq_save(flags);
#endif
    /*
     * Are softirqs going to be turned on now:
     */
    if (softirq_count() == SOFTIRQ_OFFSET)
        trace_softirqs_on(ip);
    /*
     * Keep preemption disabled until we are done with
     * softirq processing:
     */
    sub_preempt_count(SOFTIRQ_OFFSET - 1);

    if (unlikely(!in_interrupt() && local_softirq_pending()))
        do_softirq();

    dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
    local_irq_restore(flags);
#endif
    preempt_check_resched();
}
asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
{
    struct irqaction *action;
    int do_random, cpu;

    cpu = smp_processor_id();
    irq_enter(cpu);
    kstat.irqs[cpu][irq]++;

    action = irq_action[irq];
    if (action) {
        if (!(action->flags & SA_INTERRUPT))
            __sti();
        action = irq_action[irq];
        do_random = 0;
        do {
            do_random |= action->flags;
            action->handler(irq, action->dev_id, regs);
            action = action->next;
        } while (action);
        if (do_random & SA_SAMPLE_RANDOM)
            add_interrupt_randomness(irq);
        __cli();
    }
    irq_exit(cpu);

    if (softirq_pending(cpu))
        do_softirq();

    /* unmasking and bottom half handling is done magically for us. */
}
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
    int cpu = smp_processor_id();

    /*
     * the NMI deadlock-detector uses this.
     */
    apic_timer_irqs[cpu]++;

    /*
     * NOTE! We'd better ACK the irq immediately,
     * because timer handling can be slow.
     */
    ack_APIC_irq();
    /*
     * update_process_times() expects us to have done irq_enter().
     * Besides, if we don't, timer interrupts ignore the global
     * interrupt lock, which is the WrongThing (tm) to do.
     */
    irq_enter(cpu, 0);
    smp_local_timer_interrupt(regs);
    irq_exit(cpu, 0);

    if (softirq_pending(cpu))
        do_softirq();
}
void irq_exit(void)
{
    account_system_vtime(current);
    trace_hardirq_exit();
    sub_preempt_count(IRQ_EXIT_OFFSET);
    if (!in_interrupt() && local_softirq_pending())
        do_softirq();
}
void __domain_crash_synchronous(void)
{
    __domain_crash(current->domain);

    vcpu_end_shutdown_deferral(current);

    for ( ; ; )
        do_softirq();
}
void indy_r4k_timer_interrupt(struct pt_regs *regs)
{
    static const int INDY_R4K_TIMER_IRQ = 7;
    int cpu = smp_processor_id();

    r4k_timer_interrupt(INDY_R4K_TIMER_IRQ, NULL, regs);

    if (softirq_pending(cpu))
        do_softirq();
}
static void softirq_task_function(void *context)
{
    while (true) {
        do_softirq();
        g_n_raises--;
        if (g_n_raises == 0 || local_softirq_pending() == 0) {
            g_n_raises = 0;
            lib_task_wait();
        }
    }
}
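/*
 * Hedged sketch (assumptions: the names example_tasklet_fn, example_tasklet
 * and example_kick are illustrative; the older unsigned-long tasklet
 * callback API is used): scheduling a tasklet marks TASKLET_SOFTIRQ
 * pending, and the callback is later executed by a do_softirq() pass such
 * as the softirq thread above.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>

static void example_tasklet_fn(unsigned long data)
{
    printk(KERN_INFO "tasklet ran with data %lu\n", data);
}

static DECLARE_TASKLET(example_tasklet, example_tasklet_fn, 42);

static void example_kick(void)
{
    /* Marks the tasklet pending and raises TASKLET_SOFTIRQ on this CPU. */
    tasklet_schedule(&example_tasklet);
}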
void local_bh_enable_ip(unsigned long ip)
{
    WARN_ON_ONCE(in_irq() || irqs_disabled());

    sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

    if (unlikely(!in_interrupt() && local_softirq_pending()))
        do_softirq();

    dec_preempt_count();
    preempt_check_resched();
}
void rt_timer_interrupt(struct pt_regs *regs)
{
    int cpu = smp_processor_id();
    int cpuA = ((cputoslice(cpu)) == 0);
    int irq = IP27_TIMER_IRQ;

    irq_enter(cpu, irq);
    write_lock(&xtime_lock);

again:
    LOCAL_HUB_S(cpuA ? PI_RT_PEND_A : PI_RT_PEND_B, 0);    /* Ack */
    ct_cur[cpu] += CYCLES_PER_JIFFY;
    LOCAL_HUB_S(cpuA ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, ct_cur[cpu]);

    if (LOCAL_HUB_L(PI_RT_COUNT) >= ct_cur[cpu])
        goto again;

    kstat.irqs[cpu][irq]++;     /* kstat only for bootcpu? */

    if (cpu == 0)
        do_timer(regs);

#ifdef CONFIG_SMP
    update_process_times(user_mode(regs));
#endif /* CONFIG_SMP */

    /*
     * If we have an externally synchronized Linux clock, then update
     * RTC clock accordingly every ~11 minutes. set_rtc_mmss() has to be
     * called as close as possible to when a second starts.
     */
    if ((time_status & STA_UNSYNC) == 0 &&
        xtime.tv_sec > last_rtc_update + 660) {
        if (xtime.tv_usec >= 1000000 - ((unsigned) tick) / 2) {
            if (set_rtc_mmss(xtime.tv_sec + 1) == 0)
                last_rtc_update = xtime.tv_sec;
            else
                last_rtc_update = xtime.tv_sec - 600;
        } else if (xtime.tv_usec <= ((unsigned) tick) / 2) {
            if (set_rtc_mmss(xtime.tv_sec) == 0)
                last_rtc_update = xtime.tv_sec;
            else
                last_rtc_update = xtime.tv_sec - 600;
        }
    }

    write_unlock(&xtime_lock);
    irq_exit(cpu, irq);

    if (softirq_pending(cpu))
        do_softirq();
}
void local_bh_enable(void)
{
    WARN_ON(irqs_disabled());
    /*
     * Keep preemption disabled until we are done with
     * softirq processing:
     */
    preempt_count() -= SOFTIRQ_OFFSET - 1;

    if (unlikely(!in_interrupt() && local_softirq_pending()))
        do_softirq();

    dec_preempt_count();
    preempt_check_resched();
}
/** Softirq thread function.
 *
 * Once started, a softirq thread waits for tasklets to be scheduled
 * and executes them.
 *
 * \param arg # of this softirq thread so that it grabs the correct lock
 *            if multiple softirq threads are running.
 */
void l4dde26_softirq_thread(void *arg)
{
    printk("Softirq daemon starting\n");
    l4dde26_process_add_worker();

    /* This thread will always be in a softirq, so set the
     * corresponding flag right now. */
    preempt_count() |= SOFTIRQ_MASK;

    while (1) {
        ddekit_sem_down(dde_softirq_sem);
        do_softirq();
    }
}
static inline void invoke_softirq(void)
{
    if (!force_irqthreads) {
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
        __do_softirq();
#else
        do_softirq();
#endif
    } else {
        __local_bh_disable((unsigned long)__builtin_return_address(0),
                           SOFTIRQ_OFFSET);
        wakeup_softirqd();
        __local_bh_enable(SOFTIRQ_OFFSET);
    }
}
asmlinkage void ll_timer_interrupt(int irq, struct pt_regs *regs)
{
    int cpu = smp_processor_id();

    irq_enter(cpu, irq);
    kstat.irqs[cpu][irq]++;

    /* we keep interrupt disabled all the time */
    timer_interrupt(irq, NULL, regs);

    irq_exit(cpu, irq);

    if (softirq_pending(cpu))
        do_softirq();
}
static inline void invoke_softirq(void)
{
    if (!force_irqthreads) {
        /*
         * We can safely execute softirq on the current stack if
         * it is the irq stack, because it should be near empty
         * at this stage. But we have no way to know if the arch
         * calls irq_exit() on the irq stack. So call softirq
         * in its own stack to prevent from any overrun on top
         * of a potentially deep task stack.
         */
        do_softirq();
    } else {
        wakeup_softirqd();
    }
}
static int ksoftirqd(void *__bind_cpu)
{
    set_user_nice(current, 19);
    current->flags |= PF_NOFREEZE;

    set_current_state(TASK_INTERRUPTIBLE);

    while (!kthread_should_stop()) {
        preempt_disable();
        if (!local_softirq_pending()) {
            preempt_enable_no_resched();
            schedule();
            preempt_disable();
        }

        __set_current_state(TASK_RUNNING);

        while (local_softirq_pending()) {
            /* Preempt disable stops cpu going offline.
               If already offline, we'll be on wrong CPU:
               don't process */
            if (cpu_is_offline((long)__bind_cpu))
                goto wait_to_die;
            do_softirq();
            preempt_enable_no_resched();
            cond_resched();
            preempt_disable();
            rcu_qsctr_inc((long)__bind_cpu);
        }
        preempt_enable();
        set_current_state(TASK_INTERRUPTIBLE);
    }
    __set_current_state(TASK_RUNNING);
    return 0;

wait_to_die:
    preempt_enable();
    /* Wait for kthread_stop */
    set_current_state(TASK_INTERRUPTIBLE);
    while (!kthread_should_stop()) {
        schedule();
        set_current_state(TASK_INTERRUPTIBLE);
    }
    __set_current_state(TASK_RUNNING);
    return 0;
}
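/*
 * Hedged sketch of what feeds the ksoftirqd loop above (assumptions:
 * EXAMPLE_SOFTIRQ would have to be a real slot in the softirq enum in
 * <linux/interrupt.h>, example_softirq_action()/example_trigger() are
 * illustrative, and the two-argument open_softirq() prototype matches
 * 2.6.25 and later kernels). raise_softirq() marks the softirq pending;
 * it then runs either from do_softirq() on interrupt exit or in ksoftirqd
 * when the load is too high to handle inline.
 */
#include <linux/init.h>
#include <linux/interrupt.h>

static void example_softirq_action(struct softirq_action *a)
{
    /* Deferred work runs here with hardirqs enabled; other softirqs
       will not nest on this CPU while it runs. */
}

static int __init example_softirq_init(void)
{
    open_softirq(EXAMPLE_SOFTIRQ, example_softirq_action);
    return 0;
}

static void example_trigger(void)
{
    raise_softirq(EXAMPLE_SOFTIRQ);     /* safe to call from hardirq context */
}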
void idle_loop(void)
{
    for ( ; ; ) {
        if ( cpu_is_offline(smp_processor_id()) )
            stop_cpu();

        local_irq_disable();
        if ( cpu_is_haltable(smp_processor_id()) ) {
            dsb(sy);
            wfi();
        }
        local_irq_enable();

        do_tasklet();
        do_softirq();
    }
}
asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
{
    struct irqaction *action;
    int do_random, cpu;
    int retval = 0;

    cpu = smp_processor_id();
    irq_enter();
    kstat_cpu(cpu).irqs[irq]++;

    action = irq_action[irq];
    if (action) {
        if (!(action->flags & SA_INTERRUPT))
            local_irq_enable();
        action = irq_action[irq];
        do_random = 0;
        do {
            do_random |= action->flags;
            retval |= action->handler(irq, action->dev_id, regs);
            action = action->next;
        } while (action);

        if (retval != 1) {
            if (retval) {
                printk("irq event %d: bogus retval mask %x\n",
                       irq, retval);
            } else {
                printk("irq %d: nobody cared\n", irq);
            }
        }

        if (do_random & SA_SAMPLE_RANDOM)
            add_interrupt_randomness(irq);
        local_irq_disable();
    }
    irq_exit();

    if (softirq_pending(cpu))
        do_softirq();

    /* unmasking and bottom half handling is done magically for us. */
}
// This gets called from our ASM interrupt handler stub.
void irq_handler(registers_t regs)
{
    // Send an EOI (end of interrupt) signal to the PICs.
    // If this interrupt involved the slave.
    if (regs.int_no >= 40) {
        // Send reset signal to slave.
        outb(0xA0, 0x20);
    }
    // Send reset signal to master. (As well as slave, if necessary.)
    outb(0x20, 0x20);

    if (interrupt_handlers[regs.int_no] != 0) {
        isr_t handler = interrupt_handlers[regs.int_no];
        handler(regs);
    }

    asm volatile("sti");
    do_softirq();
}
static int ksoftirqd(void *__bind_cpu)
{
    daemonize("ksoftirqd/%d", (int) (long) __bind_cpu);
    set_user_nice(current, 19);

    __set_current_state(TASK_INTERRUPTIBLE);
    mb();

    __get_cpu_var(ksoftirqd) = current;

    for (;;) {
        if (!local_softirq_pending())
            schedule();

        __set_current_state(TASK_RUNNING);

        while (local_softirq_pending()) {
            do_softirq();
            if (need_resched())
                schedule();
        }

        __set_current_state(TASK_INTERRUPTIBLE);
    }
}
void LinuxMode::doSoftIRQ(uval disabled)
{
    uval old = mode;

    if (disabled)
        Scheduler::Enable();

    while (softirq_pending(cpu)) {
        mode = Undefined;   // Thread mode is undefined to allow
                            // do_softirq to set thread mode via a
                            // local_bh_disable

        // bhDisabled should not be set --- it could be set accidentally
        // by syscalls on loopback device.
        flags &= ~LinuxMode::bhDisabled;

        TraceOSLinuxBH(cpu, softirq_pending(cpu), 0);

        // Now we know we're the only thread running on this thing
        do_softirq();
    }

    mode = old;

    if (disabled)
        Scheduler::Disable();
}
void mips_timer_interrupt(struct pt_regs *regs)
{
    int cpu = smp_processor_id();
    int irq = MIPS_CPU_TIMER_IRQ;

    irq_enter(cpu, irq);
    kstat.irqs[cpu][irq]++;
    timer_interrupt(irq, NULL, regs);

    if ((timer_tick_count++ % HZ) == 0) {
        mips_display_message(&display_string[display_count++]);
        if (display_count == MAX_DISPLAY_COUNT)
            display_count = 0;
    }

    irq_exit(cpu, irq);

    if (softirq_pending(cpu))
        do_softirq();
}
void mips_timer_interrupt(struct pt_regs *regs)
{
    int irq = 63;
    unsigned long count;
    int cpu = smp_processor_id();

    irq_enter(cpu, irq);
    kstat.irqs[cpu][irq]++;

#ifdef CONFIG_PM
    printk(KERN_ERR "Unexpected CP0 interrupt\n");
    regs->cp0_status &= ~IE_IRQ5;   /* disable CP0 interrupt */
    return;
#endif

    if (r4k_offset == 0)
        goto null;

    do {
        count = read_c0_count();
        timerhi += (count < timerlo);   /* Wrap around */
        timerlo = count;

        kstat.irqs[0][irq]++;
        do_timer(regs);
        r4k_cur += r4k_offset;
        ack_r4ktimer(r4k_cur);
    } while (((unsigned long)read_c0_count() - r4k_cur) < 0x7fffffff);

    irq_exit(cpu, irq);

    if (softirq_pending(cpu))
        do_softirq();

    return;

null:
    ack_r4ktimer(0);
}
/* FIXME: SMP, flags, bottom halves, rest */
void do_irq(struct irqaction *action, int irq, struct pt_regs *regs)
{
    int cpu = smp_processor_id();

    irq_enter(cpu, irq);

#ifdef DEBUG_IRQ
    if (irq != TIMER_IRQ)
#endif
        DBG_IRQ("do_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq));

    if (action->handler == NULL)
        printk(KERN_ERR "No handler for interrupt %d !\n", irq);

    for (; action && action->handler; action = action->next) {
        action->handler(irq, action->dev_id, regs);
    }

    irq_exit(cpu, irq);

    /* don't need to care about unmasking and stuff */
    do_softirq();
}
//------------------------------ LX_ServiceThread ------------------------------
void LX_ServiceThread(void)
{
    unsigned long f;

    // Do any outstanding SOFTIRQs
    if (atomic_read(&lx_softirq_wakes) != 0) {
        LX_enter_irq_current();
        atomic_set(&lx_softirq_wakes, 0);
        do_softirq();
        LX_leave_irq_current();
        LX_check_utl_freeme();
    }

    // Check if we have to start a new thread
    if (lx_started_task.eip || lx_started_task.esp || lx_started_task.prev)
        lxa_service_thread_block_time = 10;
    else if (!list_empty(&lx_task_to_start_list)) {
        struct lx_new_task_struct *p = 0;

        spin_lock_irqsave(&lx_task_to_start_lock, f);
        if (!list_empty(&lx_task_to_start_list)) {
            p = list_entry(lx_task_to_start_list.next,
                           struct lx_new_task_struct, list);
            list_del(&p->list);
            lx_started_task.prev = p->prev;
            lx_started_task.esp = p->esp;
            lx_started_task.eip = p->eip;   // This must be the last one!
        }
        spin_unlock_irqrestore(&lx_task_to_start_lock, f);

        if (p) {
            LXA_CreateThread((unsigned long)LXA_TaskStub32, &p->tid);
            if (!(p->flags & LX_STARTTASK_WAIT))
                kfree(p);
            else
                p->flags = LX_STARTTASK_STARTED;
            DevRun((unsigned long)&lx_task_to_start_list);
        }
        lxa_service_thread_block_time = 10;
    }
void idle_loop(void)
{
    for ( ; ; ) {
        if ( cpu_is_offline(smp_processor_id()) )
            stop_cpu();

        local_irq_disable();
        if ( cpu_is_haltable(smp_processor_id()) ) {
            dsb(sy);
            wfi();
        }
        local_irq_enable();

        do_tasklet();
        do_softirq();
        /*
         * We MUST be last (or before dsb, wfi). Otherwise after we get the
         * softirq we would execute dsb, wfi (and sleep) and not patch.
         */
        check_for_livepatch_work();
    }
}
static int ksoftirqd(void *__bind_cpu)
{
    int bind_cpu = (int) (long) __bind_cpu;
    int cpu = cpu_logical_map(bind_cpu);

    daemonize();
    current->nice = 19;
    sigfillset(&current->blocked);

    /* Migrate to the right CPU */
    current->cpus_allowed = 1UL << cpu;
    while (smp_processor_id() != cpu)
        schedule();

    sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu);

    __set_current_state(TASK_INTERRUPTIBLE);
    mb();

    ksoftirqd_task(cpu) = current;

    for (;;) {
        if (!softirq_pending(cpu))
            schedule();

        __set_current_state(TASK_RUNNING);

        while (softirq_pending(cpu)) {
            do_softirq();
            if (current->need_resched)
                schedule();
        }

        __set_current_state(TASK_INTERRUPTIBLE);
    }
}
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
    /*
     * We ack quickly, we don't want the irq controller
     * thinking we're snobs just because some other CPU has
     * disabled global interrupts (we have already done the
     * INT_ACK cycles, it's too late to try to pretend to the
     * controller that we aren't taking the interrupt).
     *
     * 0 return value means that this irq is already being
     * handled by some other CPU. (or is disabled)
     */
    int cpu = smp_processor_id();
    irq_desc_t *desc = irq_desc + irq;
    struct irqaction *action;
    unsigned int status;

    kstat.irqs[cpu][irq]++;
    spin_lock(&desc->lock);
    desc->handler->ack(irq);
    /*
     * REPLAY is when Linux resends an IRQ that was dropped earlier.
     * WAITING is used by probe to mark irqs that are being tested.
     */
    status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
    status |= IRQ_PENDING;  /* we _want_ to handle it */

    /*
     * If the IRQ is disabled for whatever reason, we cannot
     * use the action we have.
     */
    action = NULL;
    if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
        action = desc->action;
        status &= ~IRQ_PENDING;     /* we commit to handling */
        status |= IRQ_INPROGRESS;   /* we are handling it */
    }
    desc->status = status;

    /*
     * If there is no IRQ handler or it was disabled, exit early.
     * Since we set PENDING, if another processor is handling a
     * different instance of this same irq, the other processor
     * will take care of it.
     */
    if (!action)
        goto out;

    /*
     * Edge triggered interrupts need to remember pending events.
     * This applies to any hw interrupts that allow a second
     * instance of the same irq to arrive while we are in do_IRQ
     * or in the handler. But the code here only handles the _second_
     * instance of the irq, not the third or fourth. So it is mostly
     * useful for irq hardware that does not mask cleanly in an
     * SMP environment.
     */
    for (;;) {
        spin_unlock(&desc->lock);
        handle_IRQ_event(irq, regs, action);
        spin_lock(&desc->lock);

        if (!(desc->status & IRQ_PENDING))
            break;
        desc->status &= ~IRQ_PENDING;
    }
    desc->status &= ~IRQ_INPROGRESS;

out:
    /*
     * The ->end() handler has to deal with interrupts which got
     * disabled while the handler was running.
     */
    desc->handler->end(irq);
    spin_unlock(&desc->lock);

    if (softirq_pending(cpu))
        do_softirq();

    return 1;
}
void __domain_crash_synchronous(void)
{
    __domain_crash(current->domain);

    for ( ; ; )
        do_softirq();
}