/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);
	s64 diff_ms;
	bool queue_was_empty = list_empty(&rqueue->fifo);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;
	rq_set_fifo_time(rq, jiffies); /* for statistics */

	/*
	 * A newly inserted request should never already be marked
	 * REQ_URGENT: warn, dump its flags and clear the flag.
	 */
	if (rq->cmd_flags & REQ_URGENT) {
		WARN_ON(1);
		blk_dump_rq_flags(rq, "");
		rq->cmd_flags &= ~REQ_URGENT;
	}

	if (row_queues_def[rqueue->prio].idling_enabled) {
		if (rd->rd_idle_data.idling_queue_idx == rqueue->prio &&
		    hrtimer_active(&rd->rd_idle_data.hr_timer)) {
			if (hrtimer_try_to_cancel(
				&rd->rd_idle_data.hr_timer) >= 0) {
				row_log_rowq(rd, rqueue->prio,
				    "Canceled delayed work on %d",
				    rd->rd_idle_data.idling_queue_idx);
				rd->rd_idle_data.idling_queue_idx =
					ROWQ_MAX_PRIO;
			}
		}
		diff_ms = ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time));
		if (unlikely(diff_ms < 0)) {
			pr_err("%s(): time delta error: diff_ms < 0\n",
				__func__);
			rqueue->idle_data.begin_idling = false;
			return;
		}
		if (diff_ms < rd->rd_idle_data.freq_ms) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling (%ldms)",
				(long)diff_ms);
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	if (row_queues_def[rqueue->prio].is_urgent &&
	    !rd->pending_urgent_rq && !rd->urgent_in_flight) {
		/* Handle High Priority queues */
		if (rqueue->prio < ROWQ_REG_PRIO_IDX &&
		    rd->last_served_ioprio_class != IOPRIO_CLASS_RT &&
		    queue_was_empty) {
			row_log_rowq(rd, rqueue->prio,
				"added (high prio) urgent request");
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		} else if (row_rowq_unserved(rd, rqueue->prio)) {
			/* Handle regular priority queues */
			row_log_rowq(rd, rqueue->prio,
				"added urgent request (total on queue=%d)",
				rqueue->nr_req);
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		}
	} else
		row_log_rowq(rd, rqueue->prio,
			"added request (total on queue=%d)", rqueue->nr_req);
}
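
/*
 * Note: RQ_ROWQ() above maps a request back to its per-priority
 * struct row_queue. A minimal sketch of how that mapping is typically
 * established, assuming the queue pointer is stashed in the request's
 * elevator-private data at set_request time. The field name elv.priv[0]
 * and the helper row_get_queue_prio() are assumptions, not necessarily
 * this driver's exact implementation.
 */
#define RQ_ROWQ(rq)	((struct row_queue *)((rq)->elv.priv[0]))

static int row_set_request(struct request_queue *q, struct request *rq,
			   gfp_t gfp_mask)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* Remember which row_queue this request was classified into */
	rq->elv.priv[0] =
		(void *)(&rd->row_queues[row_get_queue_prio(rq, rd)]);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
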
/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:		requests queue
 * @force:	flag indicating if this is a forced dispatch
 *
 * Return 0 if no requests were moved to the dispatch queue.
 *	  1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	int ret = 0, currq, i;

	currq = rd->curr_queue;

	/*
	 * Find the first unserved queue (with higher priority than currq)
	 * that is not empty
	 */
	for (i = 0; i < currq; i++) {
		if (row_rowq_unserved(rd, i) &&
		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
			row_log_rowq(rd, currq,
				"Preempting for unserved rowq%d", i);
			rd->curr_queue = i;
			row_dispatch_insert(rd);
			ret = 1;
			goto done;
		}
	}

	if (rd->row_queues[currq].rqueue.nr_dispatched >=
	    rd->row_queues[currq].disp_quantum) {
		rd->row_queues[currq].rqueue.nr_dispatched = 0;
		row_log_rowq(rd, currq, "Expiring rqueue");
		ret = row_choose_queue(rd);
		if (ret)
			row_dispatch_insert(rd);
		goto done;
	}

	/* Dispatch from curr_queue */
	if (list_empty(&rd->row_queues[currq].rqueue.fifo)) {
		/* check idling */
		if (delayed_work_pending(&rd->read_idle.idle_work)) {
			if (force) {
				(void)cancel_delayed_work(
				&rd->read_idle.idle_work);
				row_log_rowq(rd, currq,
					"Canceled delayed work - forced dispatch");
			} else {
				row_log_rowq(rd, currq,
						 "Delayed work pending. Exiting");
				goto done;
			}
		}

		if (!force && queue_idling_enabled[currq] &&
		    rd->row_queues[currq].rqueue.idle_data.begin_idling) {
			if (!queue_delayed_work(rd->read_idle.idle_workqueue,
						&rd->read_idle.idle_work,
						rd->read_idle.idle_time)) {
				row_log_rowq(rd, currq,
					     "Work already on queue!");
				pr_err("ROW_BUG: Work already on queue!");
			} else
				row_log_rowq(rd, currq,
				     "Scheduled delayed work. exiting");
			goto done;
		} else {
			row_log_rowq(rd, currq,
				     "Currq empty. Choose next queue");
			ret = row_choose_queue(rd);
			if (!ret)
				goto done;
		}
	}

	ret = 1;
	row_dispatch_insert(rd);

done:
	return ret;
}
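
/*
 * Note: row_dispatch_requests() above relies on row_choose_queue() to
 * advance rd->curr_queue when the current queue is empty or has used up
 * its dispatch quantum. A minimal round-robin sketch of such a helper,
 * assuming ROWQ_MAX_PRIO bounds the queue array and that empty queues
 * are simply skipped; the wrap-around policy shown here is an
 * assumption, not the driver's definitive implementation.
 */
static int row_choose_queue(struct row_data *rd)
{
	int prev = rd->curr_queue;

	if (!rd->nr_reqs[READ] && !rd->nr_reqs[WRITE]) {
		row_log(rd->dispatch_queue, "No more requests in scheduler");
		return 0;
	}

	/* Advance to the next non-empty queue, wrapping at the last prio */
	do {
		rd->curr_queue = (rd->curr_queue + 1) % ROWQ_MAX_PRIO;
		if (!list_empty(&rd->row_queues[rd->curr_queue].rqueue.fifo))
			return 1;
	} while (rd->curr_queue != prev);

	/* Should not be reached while nr_reqs is non-zero */
	return 0;
}
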
/*
 * row_get_ioprio_class_to_serve() - Return the next I/O priority
 *				      class to dispatch requests from
 * @rd:	pointer to struct row_data
 * @force:	flag indicating if forced dispatch
 *
 * This function returns the next I/O priority class to serve
 * {IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE}.
 * If there are no more requests in scheduler or if we're idling on some queue
 * IOPRIO_CLASS_NONE will be returned.
 * If idling is scheduled on a lower priority queue than the one that needs
 * to be served, it will be canceled.
 *
 */
static int row_get_ioprio_class_to_serve(struct row_data *rd, int force)
{
	int i;
	int ret = IOPRIO_CLASS_NONE;

	if (!rd->nr_reqs[READ] && !rd->nr_reqs[WRITE]) {
		row_log(rd->dispatch_queue, "No more requests in scheduler");
		goto check_idling;
	}

	/* First, go over the high priority queues */
	for (i = 0; i < ROWQ_REG_PRIO_IDX; i++) {
		if (!list_empty(&rd->row_queues[i].fifo)) {
			if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
				if (hrtimer_try_to_cancel(
					&rd->rd_idle_data.hr_timer) >= 0) {
					row_log(rd->dispatch_queue,
					"Canceling delayed work on %d. RT pending",
					     rd->rd_idle_data.idling_queue_idx);
					rd->rd_idle_data.idling_queue_idx =
						ROWQ_MAX_PRIO;
				}
			}

			if (row_regular_req_pending(rd) &&
			    (rd->reg_prio_starvation.starvation_counter >=
			     rd->reg_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_BE;
			else if (row_low_req_pending(rd) &&
			    (rd->low_prio_starvation.starvation_counter >=
			     rd->low_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_IDLE;
			else
				ret = IOPRIO_CLASS_RT;

			goto done;
		}
	}

	/*
	 * At the moment idling is implemented only for READ queues.
	 * If enabled on WRITE, this needs updating
	 */
	if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
		row_log(rd->dispatch_queue, "Delayed work pending. Exiting");
		goto done;
	}
check_idling:
	/* Check for (high priority) idling and enable if needed */
	for (i = 0; i < ROWQ_REG_PRIO_IDX && !force; i++) {
		if (rd->row_queues[i].idle_data.begin_idling &&
		    row_queues_def[i].idling_enabled)
			goto initiate_idling;
	}

	/* Regular priority queues */
	for (i = ROWQ_REG_PRIO_IDX; i < ROWQ_LOW_PRIO_IDX; i++) {
		if (list_empty(&rd->row_queues[i].fifo)) {
			/* We can idle only if this is not a forced dispatch */
			if (rd->row_queues[i].idle_data.begin_idling &&
			    !force && row_queues_def[i].idling_enabled)
				goto initiate_idling;
		} else {
			if (row_low_req_pending(rd) &&
			    (rd->low_prio_starvation.starvation_counter >=
			     rd->low_prio_starvation.starvation_limit))
				ret = IOPRIO_CLASS_IDLE;
			else
				ret = IOPRIO_CLASS_BE;
			goto done;
		}
	}

	if (rd->nr_reqs[READ] || rd->nr_reqs[WRITE])
		ret = IOPRIO_CLASS_IDLE;
	goto done;

initiate_idling:
	hrtimer_start(&rd->rd_idle_data.hr_timer,
		ktime_set(0, rd->rd_idle_data.idle_time_ms * NSEC_PER_MSEC),
		HRTIMER_MODE_REL);

	rd->rd_idle_data.idling_queue_idx = i;
	row_log_rowq(rd, i, "Scheduled delayed work on %d. exiting", i);

done:
	return ret;
}
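
/*
 * Note: the loops above assume the row queue indices are ordered by
 * priority class: high priority (RT) queues first, then regular (BE),
 * then low priority (IDLE). A sketch of one plausible enum layout; the
 * exact queue names and counts are assumptions, only the relative
 * ordering of ROWQ_REG_PRIO_IDX, ROWQ_LOW_PRIO_IDX and ROWQ_MAX_PRIO
 * matters to the code above.
 */
enum row_queue_prio {
	ROWQ_PRIO_HIGH_READ = 0,	/* served as IOPRIO_CLASS_RT */
	ROWQ_PRIO_HIGH_SWRITE,
	ROWQ_PRIO_REG_READ,		/* served as IOPRIO_CLASS_BE */
	ROWQ_PRIO_REG_SWRITE,
	ROWQ_PRIO_REG_WRITE,
	ROWQ_PRIO_LOW_READ,		/* served as IOPRIO_CLASS_IDLE */
	ROWQ_PRIO_LOW_SWRITE,
	ROWQ_MAX_PRIO,
};

#define ROWQ_REG_PRIO_IDX	ROWQ_PRIO_REG_READ
#define ROWQ_LOW_PRIO_IDX	ROWQ_PRIO_LOW_READ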