static void tasklet_hi_action(struct softirq_action *a)
{
        int cpu = smp_processor_id();
        struct tasklet_struct *list;

        /* Atomically detach this CPU's high-priority tasklet list. */
        local_irq_disable();
        list = tasklet_hi_vec[cpu].list;
        tasklet_hi_vec[cpu].list = NULL;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                /*
                 * The tasklet is disabled (count != 0) or running on another
                 * CPU: put it back on the list and re-raise the softirq so it
                 * is retried later.
                 */
                local_irq_disable();
                t->next = tasklet_hi_vec[cpu].list;
                tasklet_hi_vec[cpu].list = t;
                __cpu_raise_softirq(cpu, HI_SOFTIRQ);
                local_irq_enable();
        }
}

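/*
 * For reference, a minimal sketch of the tasklet_trylock()/tasklet_unlock()
 * helpers the loop above relies on, roughly as they appear in SMP kernels of
 * this vintage (include/linux/interrupt.h): TASKLET_STATE_RUN acts as a
 * per-tasklet lock bit, guaranteeing a tasklet never runs on two CPUs at
 * once. On !CONFIG_SMP builds both collapse to no-ops.
 */
#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
        return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
        smp_mb__before_clear_bit();
        clear_bit(TASKLET_STATE_RUN, &(t)->state);
}
#endif
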
static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, NULL);
        __this_cpu_write(tasklet_hi_vec.tail,
                         &__get_cpu_var(tasklet_hi_vec).head);
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_hi_vec.tail) = t;
                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}

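/*
 * A minimal usage sketch (not taken from the source above) of the driver
 * side of this machinery: declare a tasklet and schedule it onto the
 * high-priority vector from an interrupt handler. my_tasklet_fn, my_tasklet,
 * and my_irq_handler are hypothetical names for illustration only.
 */
#include <linux/interrupt.h>

static void my_tasklet_fn(unsigned long data)
{
        /* Deferred work; runs in softirq context via tasklet_hi_action(). */
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
        /* Acknowledge the hardware here, then defer the rest. */
        tasklet_hi_schedule(&my_tasklet);
        return IRQ_HANDLED;
}
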
static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        ddekit_lock_lock(&tasklet_hi_vec.lock);
        list = tasklet_hi_vec.list;
        tasklet_hi_vec.list = NULL;
        ddekit_lock_unlock(&tasklet_hi_vec.lock);

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                ddekit_lock_lock(&tasklet_hi_vec.lock);
                t->next = tasklet_hi_vec.list;
                tasklet_hi_vec.list = t;
                raise_softirq_irqoff_cpu(HI_SOFTIRQ, 0);
                ddekit_lock_unlock(&tasklet_hi_vec.lock);
        }
}

static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, NULL);
        __this_cpu_write(tasklet_hi_vec.tail,
                         &__get_cpu_var(tasklet_hi_vec).head);
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
#ifdef CONFIG_SEC_DEBUG_RT_THROTTLE_ACTIVE
                                unsigned long long start_time, end_time;

                                start_time = sec_debug_clock();
#endif
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                sec_debug_softirq_log(9998, t->func, 7);
                                t->func(t->data);
                                sec_debug_softirq_log(9998, t->func, 8);
#ifdef CONFIG_SEC_DEBUG_RT_THROTTLE_ACTIVE
                                end_time = sec_debug_clock();
                                if (start_time + 950000000 < end_time) {
                                        sec_debug_aux_log(SEC_DEBUG_AUXLOG_IRQ,
                                                          "TH:%llu %pf",
                                                          start_time, t->func);
                                }
#endif
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_hi_vec.tail) = t;
                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}

static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_vec.head);
        __this_cpu_write(tasklet_vec.head, NULL);
        __this_cpu_write(tasklet_vec.tail,
                         &__get_cpu_var(tasklet_vec).head);
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
#ifdef CONFIG_SEC_DEBUG
                                sec_debug_irq_sched_log(-1, t->func, 3);
                                t->func(t->data);
                                sec_debug_irq_sched_log(-1, t->func, 4);
#else
                                t->func(t->data);
#endif
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_vec.tail) = t;
                __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

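/*
 * A hedged sketch of the interplay the atomic_read(&t->count) test above
 * implements (reusing the hypothetical my_tasklet declared in the earlier
 * sketch): tasklet_disable() raises t->count, so tasklet_action() keeps
 * requeueing the tasklet instead of running it, until tasklet_enable()
 * drops the count back to zero.
 */
static void my_pause_and_resume(void)
{
        tasklet_disable(&my_tasklet);   /* count > 0; waits if it is running */
        tasklet_schedule(&my_tasklet);  /* queued, but only requeued for now */
        tasklet_enable(&my_tasklet);    /* count back to 0; the callback runs
                                           on the next softirq pass */
}
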
static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_hi_vec).list;
        __get_cpu_var(tasklet_hi_vec).list = NULL;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                trace_kernel_tasklet_entry(LTTNG_HIGH,
                                                           t->func, t->data);
                                t->func(t->data);
                                trace_kernel_tasklet_exit(LTTNG_HIGH,
                                                          t->func, t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = __get_cpu_var(tasklet_hi_vec).list;
                __get_cpu_var(tasklet_hi_vec).list = t;
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}

/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 */
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->pending_lock, flags);
        if (list_empty(&qp->piowait))
                list_add_tail(&qp->piowait, &dev->piowait);
        spin_unlock_irqrestore(&dev->pending_lock, flags);
        /*
         * Note that as soon as ipath_layer_want_buffer() is called and
         * possibly before it returns, ipath_ib_piobufavail()
         * could be called.  If we are still in the tasklet function,
         * tasklet_hi_schedule() will not call us until the next time
         * tasklet_hi_schedule() is called.
         * We clear the tasklet flag now since we are committing to return
         * from the tasklet function.
         */
        clear_bit(IPATH_S_BUSY, &qp->s_flags);
        tasklet_unlock(&qp->s_task);
        ipath_layer_want_buffer(dev->dd);
        dev->n_piowait++;
}

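/*
 * A hedged sketch (hypothetical, not taken from the ipath sources) of the
 * calling pattern the comment above describes: the QP's send tasklet detects
 * PIO-buffer exhaustion, calls ipath_no_bufs_available(), and must then
 * return immediately, because the tasklet has already been unlocked and the
 * IPATH_S_BUSY flag cleared on its behalf. out_of_pio_buffers() is an
 * invented predicate standing in for the driver's real check, and to_idev()
 * is assumed to be the driver's usual ib_device-to-ipath_ibdev conversion.
 */
static void my_send_tasklet(unsigned long data)
{
        struct ipath_qp *qp = (struct ipath_qp *)data;

        if (out_of_pio_buffers(qp)) {
                ipath_no_bufs_available(qp, to_idev(qp->ibqp.device));
                return;         /* s_task is already unlocked; just exit */
        }
        /* ... otherwise build and post the next send work request ... */
}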