/*
 * Enqueue the irq_work @work; returns true on success, false when
 * @work was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	/*
	 * Claiming succeeds only when the entry is not already pending;
	 * a failed claim means another CPU/context beat us to it.
	 */
	if (irq_work_claim(work)) {
		__irq_work_queue(work);
		return true;
	}

	/* Already enqueued by someone else — nothing to do. */
	return false;
}
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 *
 * Returns true if this call queued the work, false if it was already
 * pending (claimed by someone else).
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Arch remote IPI send/receive backend aren't NMI safe */
	WARN_ON_ONCE(in_nmi());

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/*
	 * Publish the entry on the remote CPU's raised list.  llist_add()
	 * returns true only when the list was previously empty, so the IPI
	 * is raised exactly once per batch of queued entries.
	 */
	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return true;
}
/*
 * Enqueue the irq work @work on the current CPU.
 *
 * Silently does nothing if the work is already pending (the claim
 * fails); it can still be re-enqueued once its callback has started.
 */
void irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return;

	/* Queue the entry and raise the IPI if needed. */
	/*
	 * Disable preemption so the per-CPU list we add to is the one of
	 * the CPU that will observe (and raise) the work.
	 */
	preempt_disable();

	/* If the work is "lazy", handle it from next tick if any */
	if (work->flags & IRQ_WORK_LAZY) {
		/*
		 * Lazy work normally waits for the next tick; only raise
		 * the interrupt when the tick is stopped (nohz) and the
		 * list just became non-empty (llist_add returned true).
		 */
		if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		/* Non-lazy: raise immediately on the empty->non-empty
		 * transition of the raised list. */
		if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
			arch_irq_work_raise();
	}

	preempt_enable();
}
/*
 * Enqueue the irq_work @entry unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
void irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return;

	/* Queue the entry and raise the IPI if needed. */
	/* Pin to this CPU so the list and the raised flag stay per-CPU
	 * consistent. */
	preempt_disable();

	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));

	/*
	 * If the work is not "lazy" or the tick is stopped, raise the irq
	 * work interrupt (if supported by the arch), otherwise, just wait
	 * for the next tick.
	 */
	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
		/*
		 * The per-CPU irq_work_raised flag ensures only the first
		 * queuer since the last drain actually raises the
		 * interrupt (cmpxchg returns the old value, 0 on the
		 * winning transition).
		 */
		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
			arch_irq_work_raise();
	}

	preempt_enable();
}