/*
 * row_dispatch_insert() - move request to dispatch queue
 * @rd:	pointer to struct row_data
 * @rq:	the request to dispatch
 *
 * This function moves the given request to the dispatch queue
 *
 */
static void row_dispatch_insert(struct row_data *rd, struct request *rq)
{
	struct row_queue *rqueue = RQ_ROWQ(rq);

	row_remove_request(rd, rq);
	elv_dispatch_sort(rd->dispatch_queue, rq);
	if (rq->cmd_flags & REQ_URGENT) {
		WARN_ON(rd->urgent_in_flight);
		rd->urgent_in_flight = true;
	}
	rqueue->nr_dispatched++;
	row_clear_rowq_unserved(rd, rqueue->prio);
	row_log_rowq(rd, rqueue->prio,
		" Dispatched request %p nr_disp = %d", rq,
		rqueue->nr_dispatched);
	if (rqueue->prio < ROWQ_REG_PRIO_IDX) {
		rd->last_served_ioprio_class = IOPRIO_CLASS_RT;
		if (row_regular_req_pending(rd))
			rd->reg_prio_starvation.starvation_counter++;
		if (row_low_req_pending(rd))
			rd->low_prio_starvation.starvation_counter++;
	} else if (rqueue->prio < ROWQ_LOW_PRIO_IDX) {
		rd->last_served_ioprio_class = IOPRIO_CLASS_BE;
		rd->reg_prio_starvation.starvation_counter = 0;
		if (row_low_req_pending(rd))
			rd->low_prio_starvation.starvation_counter++;
	} else {
		rd->last_served_ioprio_class = IOPRIO_CLASS_IDLE;
		rd->low_prio_starvation.starvation_counter = 0;
	}
}
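/*
 * Illustrative sketch only, not part of the original driver: the starvation
 * bookkeeping performed at the end of row_dispatch_insert() restated as a
 * standalone helper. The struct name, the helper name and the bool parameters
 * are hypothetical; in the driver the same counters live in
 * rd->reg_prio_starvation and rd->low_prio_starvation and the branch is keyed
 * off rqueue->prio rather than an ioprio class.
 */
struct row_starvation_model {
	unsigned int reg_counter;	/* times BE work was passed over */
	unsigned int low_counter;	/* times IDLE work was passed over */
};

static inline void row_account_dispatch_model(struct row_starvation_model *m,
					      int served_class,
					      bool reg_pending,
					      bool low_pending)
{
	if (served_class == IOPRIO_CLASS_RT) {
		/* Serving RT starves both lower classes if they have work */
		if (reg_pending)
			m->reg_counter++;
		if (low_pending)
			m->low_counter++;
	} else if (served_class == IOPRIO_CLASS_BE) {
		/* BE was served, so it is no longer being starved */
		m->reg_counter = 0;
		if (low_pending)
			m->low_counter++;
	} else {
		/* IDLE was served; there is nothing below it to starve */
		m->low_counter = 0;
	}
}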
/*
 * row_get_ioprio_class_to_serve() - Return the next I/O priority
 *	class to dispatch requests from
 * @rd:	pointer to struct row_data
 * @force:	flag indicating if forced dispatch
 *
 * This function returns the next I/O priority class to serve
 * {IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE}.
 * If there are no more requests in scheduler or if we're idling on some queue
 * IOPRIO_CLASS_NONE will be returned.
 * If idling is scheduled on a lower priority queue than the one that needs
 * to be served, it will be canceled.
 *
 */
static int row_get_ioprio_class_to_serve(struct row_data *rd, int force)
{
	int i;
	int ret = IOPRIO_CLASS_NONE;

	if (!rd->nr_reqs[READ] && !rd->nr_reqs[WRITE]) {
		row_log(rd->dispatch_queue, "No more requests in scheduler");
		goto check_idling;
	}

	/* First, go over the high priority queues */
	for (i = 0; i < ROWQ_REG_PRIO_IDX; i++) {
		if (!list_empty(&rd->row_queues[i].fifo)) {
			if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
				if (hrtimer_try_to_cancel(
					&rd->rd_idle_data.hr_timer) >= 0) {
					row_log(rd->dispatch_queue,
					"Canceling delayed work on %d. RT pending",
					     rd->rd_idle_data.idling_queue_idx);
					rd->rd_idle_data.idling_queue_idx =
						ROWQ_MAX_PRIO;
				}
			}

			if (row_regular_req_pending(rd) &&
			    (rd->reg_prio_starvation.starvation_counter >=
			     rd->reg_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_BE;
			else if (row_low_req_pending(rd) &&
			    (rd->low_prio_starvation.starvation_counter >=
			     rd->low_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_IDLE;
			else
				ret = IOPRIO_CLASS_RT;

			goto done;
		}
	}

	/*
	 * At the moment idling is implemented only for READ queues.
	 * If enabled on WRITE, this needs updating
	 */
	if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
		row_log(rd->dispatch_queue, "Delayed work pending. Exiting");
		goto done;
	}
check_idling:
	/* Check for (high priority) idling and enable if needed */
	for (i = 0; i < ROWQ_REG_PRIO_IDX && !force; i++) {
		if (rd->row_queues[i].idle_data.begin_idling &&
		    row_queues_def[i].idling_enabled)
			goto initiate_idling;
	}

	/* Regular priority queues */
	for (i = ROWQ_REG_PRIO_IDX; i < ROWQ_LOW_PRIO_IDX; i++) {
		if (list_empty(&rd->row_queues[i].fifo)) {
			/* We can idle only if this is not a forced dispatch */
			if (rd->row_queues[i].idle_data.begin_idling &&
			    !force && row_queues_def[i].idling_enabled)
				goto initiate_idling;
		} else {
			if (row_low_req_pending(rd) &&
			    (rd->low_prio_starvation.starvation_counter >=
			     rd->low_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_IDLE;
			else
				ret = IOPRIO_CLASS_BE;
			goto done;
		}
	}

	if (rd->nr_reqs[READ] || rd->nr_reqs[WRITE])
		ret = IOPRIO_CLASS_IDLE;
	goto done;

initiate_idling:
	hrtimer_start(&rd->rd_idle_data.hr_timer,
		ktime_set(0, rd->rd_idle_data.idle_time_ms * NSEC_PER_MSEC),
		HRTIMER_MODE_REL);

	rd->rd_idle_data.idling_queue_idx = i;
	row_log_rowq(rd, i, "Scheduled delayed work on %d. exiting", i);

done:
	return ret;
}
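/*
 * Illustrative sketch only, not part of the original driver: the class
 * selection above reduced to its starvation checks, ignoring the idle timer,
 * forced dispatch and per-queue FIFO state. The helper name and the plain
 * bool/unsigned parameters are hypothetical; in the driver the equivalent
 * state is read from struct row_data.
 */
static inline int row_pick_class_model(bool rt_pending, bool reg_pending,
				       bool low_pending,
				       unsigned int reg_counter,
				       unsigned int reg_limit,
				       unsigned int low_counter,
				       unsigned int low_limit)
{
	if (rt_pending) {
		/* RT normally wins, unless a lower class hit its limit */
		if (reg_pending && reg_counter >= reg_limit)
			return IOPRIO_CLASS_BE;
		if (low_pending && low_counter >= low_limit)
			return IOPRIO_CLASS_IDLE;
		return IOPRIO_CLASS_RT;
	}
	if (reg_pending) {
		/* Same check one level down: don't starve the low queues */
		if (low_pending && low_counter >= low_limit)
			return IOPRIO_CLASS_IDLE;
		return IOPRIO_CLASS_BE;
	}
	/* Only low priority work (or nothing at all) is left */
	return low_pending ? IOPRIO_CLASS_IDLE : IOPRIO_CLASS_NONE;
}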