static void blk_iopoll_softirq(struct softirq_action *h)
{
	struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
	int rearm = 0, budget = blk_iopoll_budget;
	unsigned long start_time = jiffies;

	local_irq_disable();

	while (!list_empty(list)) {
		struct blk_iopoll *iop;
		int work, weight;

		/*
		 * If softirq window is exhausted then punt.
		 */
		if (budget <= 0 || time_after(jiffies, start_time)) {
			rearm = 1;
			break;
		}

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		iop = list_entry(list->next, struct blk_iopoll, list);

		weight = iop->weight;
		work = 0;
		if (test_bit(IOPOLL_F_SCHED, &iop->state))
			work = iop->poll(iop, weight);

		budget -= work;

		local_irq_disable();

		/*
		 * Drivers must not modify the iopoll state if they
		 * consume their assigned weight (or more, some drivers can't
		 * easily just stop processing, they have to complete an
		 * entire mask of commands). In such cases this code
		 * still "owns" the iopoll instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (work >= weight) {
			if (blk_iopoll_disable_pending(iop))
				__blk_iopoll_complete(iop);
			else
				list_move_tail(&iop->list, list);
		}
	}

	if (rearm)
		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);

	local_irq_enable();
	preempt_check_resched_rt();
}
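/*
 * The loop above defines the contract a driver's ->poll() callback must
 * follow: it is invoked with a weight (budget), returns the number of
 * completions it processed, and must not touch the iopoll state once it
 * has used up its full weight. A minimal sketch of such a callback is
 * shown below; 'struct my_hba', my_hba_reap_one() and my_hba_enable_irq()
 * are hypothetical driver-side names, only blk_iopoll_complete() and
 * container_of() are real kernel APIs.
 */
static int my_hba_iopoll(struct blk_iopoll *iop, int budget)
{
	struct my_hba *hba = container_of(iop, struct my_hba, iopoll);
	int done = 0;

	/* Reap at most 'budget' completions from the hardware queue. */
	while (done < budget && my_hba_reap_one(hba))
		done++;

	if (done < budget) {
		/* Queue drained: clear IOPOLL_F_SCHED and re-arm the IRQ. */
		blk_iopoll_complete(iop);
		my_hba_enable_irq(hba);
	}

	/*
	 * Returning done >= budget keeps this instance on the poll list,
	 * per the work >= weight handling in blk_iopoll_softirq() above.
	 */
	return done;
}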
/**
 * blk_iopoll_sched - Schedule a run of the iopoll handler
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Add this blk_iopoll structure to the pending poll list and trigger the
 *     raise of the blk iopoll softirq. The driver must already have gotten a
 *     successful return from blk_iopoll_sched_prep() before calling this.
 **/
void blk_iopoll_sched(struct blk_iopoll *iop)
{
	unsigned long flags;

	local_irq_save(flags);

	list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
	__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);

	local_irq_restore(flags);
	preempt_check_resched_rt();
}
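/*
 * Illustrative caller for the pairing the kerneldoc above requires:
 * blk_iopoll_sched_prep() must succeed (it atomically sets IOPOLL_F_SCHED
 * and fails if a disable is pending) before blk_iopoll_sched() may be
 * called. The instance would have been set up earlier with something like
 * blk_iopoll_init(&hba->iopoll, MY_HBA_POLL_WEIGHT, my_hba_iopoll).
 * 'struct my_hba' and my_hba_disable_irq() are hypothetical driver names.
 */
static irqreturn_t my_hba_isr(int irq, void *dev_id)
{
	struct my_hba *hba = dev_id;

	if (blk_iopoll_sched_prep(&hba->iopoll)) {
		/* We now own IOPOLL_F_SCHED: mask the IRQ and poll instead. */
		my_hba_disable_irq(hba);
		blk_iopoll_sched(&hba->iopoll);
	}

	return IRQ_HANDLED;
}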
void __blk_complete_request(struct request *req)
{
	int ccpu, cpu;
	struct request_queue *q = req->q;
	unsigned long flags;
	bool shared = false;

	BUG_ON(!q->softirq_done_fn);

	local_irq_save(flags);
	cpu = smp_processor_id();

	/*
	 * Select completion CPU
	 */
	if (req->cpu != -1) {
		ccpu = req->cpu;
		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
			shared = cpus_share_cache(cpu, ccpu);
	} else
		ccpu = cpu;

	/*
	 * If the current CPU and the requested CPU share a cache, run the
	 * softirq on the current CPU. This may look equivalent to
	 * QUEUE_FLAG_SAME_FORCE, but it is not: blk_complete_request() runs
	 * in the interrupt handler, and since the I/O controller does not
	 * currently support multiple interrupts, the current CPU is in fact
	 * unique. This avoids sending an IPI from the current CPU to the
	 * first CPU of a group.
	 */
	if (ccpu == cpu || shared) {
		struct list_head *list;
do_local:
		list = this_cpu_ptr(&blk_cpu_done);
		list_add_tail(&req->csd.list, list);

		/*
		 * If the list only contains our just-added request, signal
		 * a raise of the softirq. If there are already entries
		 * there, someone already raised the irq but it hasn't run
		 * yet.
		 */
		if (list->next == &req->csd.list)
			raise_softirq_irqoff(BLOCK_SOFTIRQ);
	} else if (raise_blk_irq(ccpu, req))
		goto do_local;

	local_irq_restore(flags);
	preempt_check_resched_rt();
}
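/*
 * Sketch of how a driver feeds this completion path: it registers the
 * queue's softirq_done_fn with blk_queue_softirq_done() and then calls
 * blk_complete_request() from its hard IRQ handler, which routes through
 * __blk_complete_request() above. CPU steering is controlled from
 * userspace via /sys/block/<dev>/queue/rq_affinity (1 sets
 * QUEUE_FLAG_SAME_COMP, 2 additionally sets QUEUE_FLAG_SAME_FORCE).
 * my_hba_softirq_done(), my_hba_rq_error() and my_hba_pop_completed()
 * are hypothetical; the blk_* calls are the real APIs of this era.
 */
static void my_hba_softirq_done(struct request *rq)
{
	/* Runs in BLOCK_SOFTIRQ context on the CPU selected above. */
	blk_end_request_all(rq, my_hba_rq_error(rq));
}

static irqreturn_t my_hba_done_isr(int irq, void *dev_id)
{
	struct request *rq = my_hba_pop_completed(dev_id);

	if (!rq)
		return IRQ_NONE;

	/* Defer the real work to BLOCK_SOFTIRQ via __blk_complete_request(). */
	blk_complete_request(rq);
	return IRQ_HANDLED;
}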
static void trigger_softirq(void *data)
{
	struct request *rq = data;
	unsigned long flags;
	struct list_head *list;

	local_irq_save(flags);
	list = this_cpu_ptr(&blk_cpu_done);
	list_add_tail(&rq->csd.list, list);

	if (list->next == &rq->csd.list)
		raise_softirq_irqoff(BLOCK_SOFTIRQ);

	local_irq_restore(flags);
	preempt_check_resched_rt();
}
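/*
 * trigger_softirq() above is the IPI callback that raise_blk_irq() queues
 * on the remote completion CPU. A plausible sketch of that counterpart,
 * assuming the call_single_data based __smp_call_function_single() API of
 * this kernel generation: reuse the request's embedded csd to fire the
 * IPI, or return nonzero so the caller falls back to local completion.
 */
static int raise_blk_irq(int cpu, struct request *rq)
{
	if (cpu_online(cpu)) {
		struct call_single_data *data = &rq->csd;

		data->func = trigger_softirq;
		data->info = rq;
		data->flags = 0;

		__smp_call_function_single(cpu, data, 0);
		return 0;
	}

	return 1;	/* target CPU offline: complete locally instead */
}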
static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_done, cpu),
				 this_cpu_ptr(&blk_cpu_done));
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
		local_irq_enable();
		preempt_check_resched_rt();
	}

	return NOTIFY_OK;
}
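/*
 * Sketch of the init-time wiring for the hotplug notifier above, assuming
 * the register_hotcpu_notifier() API of this kernel generation. The
 * per-cpu blk_cpu_done lists are initialized and BLOCK_SOFTIRQ is bound
 * to blk_done_softirq(), the softirq handler that drains those lists
 * (not shown in this excerpt).
 */
static struct notifier_block blk_cpu_notifier = {
	.notifier_call	= blk_cpu_notify,
};

static __init int blk_softirq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	register_hotcpu_notifier(&blk_cpu_notifier);
	return 0;
}
subsys_initcall(blk_softirq_init);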
/*
 * Earlier variant of __blk_complete_request(): the completion CPU is
 * matched by CPU topology group via blk_cpu_to_group() rather than by
 * cache sharing, and QUEUE_FLAG_SAME_COMP gates the use of req->cpu.
 */
void __blk_complete_request(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long flags;
	int ccpu, cpu, group_cpu;

	BUG_ON(!q->softirq_done_fn);

	local_irq_save(flags);
	cpu = smp_processor_id();
	group_cpu = blk_cpu_to_group(cpu);

	/*
	 * Select completion CPU
	 */
	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1)
		ccpu = req->cpu;
	else
		ccpu = cpu;

	if (ccpu == cpu || ccpu == group_cpu) {
		struct list_head *list;
do_local:
		list = &__get_cpu_var(blk_cpu_done);
		list_add_tail(&req->csd.list, list);

		/*
		 * If the list only contains our just-added request, signal
		 * a raise of the softirq. If there are already entries
		 * there, someone already raised the irq but it hasn't run
		 * yet.
		 */
		if (list->next == &req->csd.list)
			raise_softirq_irqoff(BLOCK_SOFTIRQ);
	} else if (raise_blk_irq(ccpu, req))
		goto do_local;

	local_irq_restore(flags);
	preempt_check_resched_rt();
}
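/*
 * Plausible sketch of the blk_cpu_to_group() helper used above, assuming
 * the scheduler topology masks of this kernel generation: map a CPU to
 * the first CPU of its core (or SMT sibling) group, falling back to the
 * CPU itself when no topology information is configured.
 */
static inline int blk_cpu_to_group(int cpu)
{
	int group = NR_CPUS;
#ifdef CONFIG_SCHED_MC
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
	group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	group = cpumask_first(topology_thread_cpumask(cpu));
#else
	return cpu;
#endif
	if (group < NR_CPUS)
		return group;
	return cpu;
}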