Example #1
/*
 * row_dispatch_insert() - move request to dispatch queue
 * @rd:		pointer to struct row_data
 * @rq:		the request to dispatch
 *
 * This function moves the given request to the dispatch queue
 *
 */
static void row_dispatch_insert(struct row_data *rd, struct request *rq)
{
	struct row_queue *rqueue = RQ_ROWQ(rq);

	row_remove_request(rd, rq);
	elv_dispatch_sort(rd->dispatch_queue, rq);
	if (rq->cmd_flags & REQ_URGENT) {
		WARN_ON(rd->urgent_in_flight);
		rd->urgent_in_flight = true;
	}
	rqueue->nr_dispatched++;
	row_clear_rowq_unserved(rd, rqueue->prio);
	row_log_rowq(rd, rqueue->prio,
		" Dispatched request %p nr_disp = %d", rq,
		rqueue->nr_dispatched);
	if (rqueue->prio < ROWQ_REG_PRIO_IDX) {
		rd->last_served_ioprio_class = IOPRIO_CLASS_RT;
		if (row_regular_req_pending(rd))
			rd->reg_prio_starvation.starvation_counter++;
		if (row_low_req_pending(rd))
			rd->low_prio_starvation.starvation_counter++;
	} else if (rqueue->prio < ROWQ_LOW_PRIO_IDX) {
		rd->last_served_ioprio_class = IOPRIO_CLASS_BE;
		rd->reg_prio_starvation.starvation_counter = 0;
		if (row_low_req_pending(rd))
			rd->low_prio_starvation.starvation_counter++;
	} else {
		rd->last_served_ioprio_class = IOPRIO_CLASS_IDLE;
		rd->low_prio_starvation.starvation_counter = 0;
	}
}
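These examples call helpers that are never shown. For context, here is a minimal sketch of the supporting definitions they appear to assume: the priority-queue indices that row_dispatch_insert() compares against and the RQ_ROWQ() accessor. The enum values, the index macros and the use of rq->elv.priv[0] are assumptions inferred from the code above, not taken from the driver itself.

/*
 * Assumed supporting definitions (sketch, not part of the examples).
 * ROW keeps one FIFO per priority queue: high-priority queues first,
 * then regular, then low, which is what the index comparisons rely on.
 */
enum row_queue_prio {
	ROWQ_PRIO_HIGH_READ = 0,	/* served as IOPRIO_CLASS_RT */
	ROWQ_PRIO_HIGH_SWRITE,
	ROWQ_PRIO_REG_READ,		/* served as IOPRIO_CLASS_BE */
	ROWQ_PRIO_REG_SWRITE,
	ROWQ_PRIO_REG_WRITE,
	ROWQ_PRIO_LOW_READ,		/* served as IOPRIO_CLASS_IDLE */
	ROWQ_PRIO_LOW_SWRITE,
	ROWQ_MAX_PRIO,
};

#define ROWQ_REG_PRIO_IDX	ROWQ_PRIO_REG_READ	/* first non-RT queue */
#define ROWQ_LOW_PRIO_IDX	ROWQ_PRIO_LOW_READ	/* first idle-class queue */

/* Per-request back-pointer to the owning row_queue (assumed storage). */
#define RQ_ROWQ(rq)	((struct row_queue *)((rq)->elv.priv[0]))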
Example #2
/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rq_set_fifo_time(rq, jiffies); /* for statistics */

	if (queue_idling_enabled[rqueue->prio]) {
		if (delayed_work_pending(&rd->read_idle.idle_work))
			(void)cancel_delayed_work(
				&rd->read_idle.idle_work);
		if (ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time)) <
				rd->read_idle.freq) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling");
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	row_log_rowq(rd, rqueue->prio, "added request");
}
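This older row_add_request() variant gates idling with a delayed work item instead of an hrtimer. As an illustration of that mechanism only, a hedged sketch of how the read_idle machinery might be set up and armed; the idle_workqueue and idle_time fields, the row_idle_work_fn callback and the workqueue name are assumptions, not code from the driver.

/* One-time setup of the idle delayed work (assumed names). */
rd->read_idle.idle_workqueue = alloc_workqueue("row_idle_wq",
					       WQ_MEM_RECLAIM, 0);
INIT_DELAYED_WORK(&rd->read_idle.idle_work, row_idle_work_fn);

/*
 * Armed from the dispatch path once begin_idling is set; canceled in
 * row_add_request() as soon as the next request for that queue arrives.
 */
queue_delayed_work(rd->read_idle.idle_workqueue, &rd->read_idle.idle_work,
		   msecs_to_jiffies(rd->read_idle.idle_time));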
Example #3
/*
 * row_merged_requests() - Called when 2 requests are merged
 * @q:		requests queue
 * @rq:		request the two requests were merged into
 * @next:	request that was merged
 */
static void row_merged_requests(struct request_queue *q, struct request *rq,
				 struct request *next)
{
	struct row_queue   *rqueue = RQ_ROWQ(next);

	list_del_init(&next->queuelist);

	rqueue->rdata->nr_reqs[rq_data_dir(rq)]--;
}
Example #4
/**
 * row_remove_request() -  Remove given request from scheduler
 * @q:	requests queue
 * @rq:	request to remove
 *
 */
static void row_remove_request(struct request_queue *q,
			       struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	rq_fifo_clear(rq);
	rqueue->nr_req--;
	rd->nr_reqs[rq_data_dir(rq)]--;
}
Example #5
/**
 * row_remove_request() -  Remove given request from scheduler
 * @rd:	pointer to struct row_data
 * @rq:	request to remove
 *
 */
static void row_remove_request(struct row_data *rd,
			       struct request *rq)
{
	struct row_queue *rqueue = RQ_ROWQ(rq);

	list_del_init(&(rq)->queuelist);
	if (rd->pending_urgent_rq == rq)
		rd->pending_urgent_rq = NULL;
	else
		BUG_ON(rq->cmd_flags & REQ_URGENT);
	rqueue->nr_req--;
	rd->nr_reqs[rq_data_dir(rq)]--;
}
Example #6
/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);
	s64 diff_ms;

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;
	rq_set_fifo_time(rq, jiffies); /* for statistics */

	if (row_queues_def[rqueue->prio].idling_enabled) {
		if (rd->rd_idle_data.idling_queue_idx == rqueue->prio &&
		    hrtimer_active(&rd->rd_idle_data.hr_timer)) {
			(void)hrtimer_cancel(&rd->rd_idle_data.hr_timer);
			row_log_rowq(rd, rqueue->prio,
				"Canceled delayed work on %d",
				rd->rd_idle_data.idling_queue_idx);
			rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
		}
		diff_ms = ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time));
		if (unlikely(diff_ms < 0)) {
			pr_err("ROW BUG: %s diff_ms < 0", __func__);
			rqueue->idle_data.begin_idling = false;
			return;
		}
		if (diff_ms < rd->rd_idle_data.freq_ms) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling (%ldms)",
				(long)diff_ms);
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	if (row_queues_def[rqueue->prio].is_urgent &&
	    row_rowq_unserved(rd, rqueue->prio)) {
		row_log_rowq(rd, rqueue->prio,
			"added urgent request (total on queue=%d)",
			rqueue->nr_req);
		rq->cmd_flags |= REQ_URGENT;
	} else
		row_log_rowq(rd, rqueue->prio,
			"added request (total on queue=%d)", rqueue->nr_req);
}
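The newer variant above replaces the delayed work with a high-resolution timer plus per-queue idling state. A rough sketch of the bookkeeping it appears to rely on; the struct layout, the idle_work member and the row_idle_hrtimer_fn callback name are guesses inferred from the fields the function touches.

/* Per-queue idling state, rqueue->idle_data (sketch). */
struct rowq_idling_data {
	ktime_t		last_insert_time;	/* when the last request arrived */
	bool		begin_idling;		/* idle once this queue empties? */
};

/* Scheduler-wide idling state, rd->rd_idle_data (sketch). */
struct idling_data {
	s64		freq_ms;	/* inserts closer than this keep idling enabled */
	struct hrtimer	hr_timer;	/* expires when the idle window ends */
	struct work_struct idle_work;	/* resumes dispatching after the idle window */
	int		idling_queue_idx; /* ROWQ_MAX_PRIO when nothing is idling */
};

/* One-time setup, e.g. in the elevator init callback (assumed): */
hrtimer_init(&rd->rd_idle_data.hr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rd->rd_idle_data.hr_timer.function = row_idle_hrtimer_fn;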
Example #7
/*
 * row_merged_requests() - Called when 2 requests are merged
 * @q:		requests queue
 * @rq:		request the two requests were merged into
 * @next:	request that was merged
 */
static void row_merged_requests(struct request_queue *q, struct request *rq,
				 struct request *next)
{
	struct row_queue   *rqueue = RQ_ROWQ(next);

	list_del_init(&next->queuelist);
	rqueue->nr_req--;
	if (rqueue->rdata->pending_urgent_rq == next) {
		pr_err("\n\nROW_WARNING: merging pending urgent!");
		rqueue->rdata->pending_urgent_rq = rq;
		rq->cmd_flags |= REQ_URGENT;
		WARN_ON(!(next->cmd_flags & REQ_URGENT));
		next->cmd_flags &= ~REQ_URGENT;
	}
	rqueue->rdata->nr_reqs[rq_data_dir(rq)]--;
}
Example #8
/**
 * row_reinsert_req() - Reinsert request back to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 * Reinsert the given request back to the queue it was
 * dispatched from as if it was never dispatched.
 *
 * Returns 0 on success, error code otherwise
 */
static int row_reinsert_req(struct request_queue *q,
			    struct request *rq)
{
	struct row_data    *rd = q->elevator->elevator_data;
	struct row_queue   *rqueue = RQ_ROWQ(rq);

	if (!rqueue || rqueue->prio >= ROWQ_MAX_PRIO)
		return -EIO;

	list_add(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;

	row_log_rowq(rd, rqueue->prio,
		"%s request reinserted (total on queue=%d)",
		(rq_data_dir(rq) == READ ? "READ" : "WRITE"), rqueue->nr_req);

	if (rq->cmd_flags & REQ_URGENT) {
		/*
		 * It's not compliant with the design to re-insert
		 * urgent requests. We want to be able to track this
		 * down.
		 */
		WARN_ON(1);
		if (!rd->urgent_in_flight) {
			pr_err("%s(): no urgent in flight", __func__);
		} else {
			rd->urgent_in_flight = false;
			pr_err("%s(): reinserting URGENT %s req",
				__func__,
				(rq_data_dir(rq) == READ ? "READ" : "WRITE"));
			if (rd->pending_urgent_rq) {
				pr_err("%s(): urgent rq is pending",
					__func__);
				rd->pending_urgent_rq->cmd_flags &= ~REQ_URGENT;
			}
			rd->pending_urgent_rq = rq;
		}
	}
	return 0;
}
Example #9
/**
 * row_reinsert_req() - Reinsert request back to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 * Reinsert the given request back to the queue it was
 * dispatched from as if it was never dispatched.
 *
 * Returns 0 on success, error code otherwise
 */
static int row_reinsert_req(struct request_queue *q,
			    struct request *rq)
{
	struct row_data    *rd = q->elevator->elevator_data;
	struct row_queue   *rqueue = RQ_ROWQ(rq);

	/* Verify rqueue is legitimate */
	if (rqueue->prio >= ROWQ_MAX_PRIO) {
		pr_err("\n\nROW BUG: row_reinsert_req() rqueue->prio = %d\n",
			   rqueue->prio);
		blk_dump_rq_flags(rq, "");
		return -EIO;
	}

	list_add(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;

	row_log_rowq(rd, rqueue->prio, "request reinserted");

	return 0;
}
Example #10
/**
 * row_reinsert_req() - Reinsert request back to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 * Reinsert the given request back to the queue it was
 * dispatched from as if it was never dispatched.
 *
 * Returns 0 on success, error code otherwise
 */
static int row_reinsert_req(struct request_queue *q,
			    struct request *rq)
{
	struct row_data    *rd = q->elevator->elevator_data;
	struct row_queue   *rqueue = RQ_ROWQ(rq);

	if (rqueue->prio >= ROWQ_MAX_PRIO) {
		pr_err("\n\n%s:ROW BUG: row_reinsert_req() rqueue->prio = %d\n",
			   rq->rq_disk->disk_name, rqueue->prio);
		blk_dump_rq_flags(rq, "");
		return -EIO;
	}

	list_add(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;

	row_log_rowq(rd, rqueue->prio,
		"request reinserted (total on queue=%d)", rqueue->nr_req);

	return 0;
}
Example #11
/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);
	s64 diff_ms;
	bool queue_was_empty = list_empty(&rqueue->fifo);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;
	rq_set_fifo_time(rq, jiffies); /* for statistics */

	if (rq->cmd_flags & REQ_URGENT) {
		WARN_ON(1);
		blk_dump_rq_flags(rq, "");
		rq->cmd_flags &= ~REQ_URGENT;
	}

	if (row_queues_def[rqueue->prio].idling_enabled) {
		if (rd->rd_idle_data.idling_queue_idx == rqueue->prio &&
		    hrtimer_active(&rd->rd_idle_data.hr_timer)) {
			if (hrtimer_try_to_cancel(
				&rd->rd_idle_data.hr_timer) >= 0) {
				row_log_rowq(rd, rqueue->prio,
				    "Canceled delayed work on %d",
				    rd->rd_idle_data.idling_queue_idx);
				rd->rd_idle_data.idling_queue_idx =
					ROWQ_MAX_PRIO;
			}
		}
		diff_ms = ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time));
		if (unlikely(diff_ms < 0)) {
			pr_err("%s(): time delta error: diff_ms < 0",
				__func__);
			rqueue->idle_data.begin_idling = false;
			return;
		}
		if (diff_ms < rd->rd_idle_data.freq_ms) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling (%ldms)",
				(long)diff_ms);
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	if (row_queues_def[rqueue->prio].is_urgent &&
	    !rd->pending_urgent_rq && !rd->urgent_in_flight) {
		/* Handle High Priority queues */
		if (rqueue->prio < ROWQ_REG_PRIO_IDX &&
		    rd->last_served_ioprio_class != IOPRIO_CLASS_RT &&
		    queue_was_empty) {
			row_log_rowq(rd, rqueue->prio,
				"added (high prio) urgent request");
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		} else if (row_rowq_unserved(rd, rqueue->prio)) {
			/* Handle Regular priority queues */
			row_log_rowq(rd, rqueue->prio,
				"added urgent request (total on queue=%d)",
				rqueue->nr_req);
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		}
	} else
		row_log_rowq(rd, rqueue->prio,
			"added request (total on queue=%d)", rqueue->nr_req);
}
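For completeness, a rough sketch of how callbacks like these would be registered with the legacy (pre-blk-mq) elevator framework that ROW targets. The elevator_reinsert_req_fn hook is a vendor extension rather than a mainline elevator_ops field, and row_dispatch_requests(), row_init_queue() and row_exit_queue() are not shown above, so treat this block as an assumed wiring, not the driver's actual registration code.

static struct elevator_type iosched_row = {
	.ops = {
		.elevator_merge_req_fn		= row_merged_requests,
		.elevator_dispatch_fn		= row_dispatch_requests,
		.elevator_add_req_fn		= row_add_request,
		.elevator_reinsert_req_fn	= row_reinsert_req,	/* vendor-specific hook */
		.elevator_init_fn		= row_init_queue,
		.elevator_exit_fn		= row_exit_queue,
	},
	.elevator_name = "row",
	.elevator_owner = THIS_MODULE,
};

static int __init row_init(void)
{
	elv_register(&iosched_row);
	return 0;
}

static void __exit row_exit(void)
{
	elv_unregister(&iosched_row);
}

module_init(row_init);
module_exit(row_exit);

A set_req/put_req pair (not shown) would be responsible for allocating and storing the per-request row_queue pointer that RQ_ROWQ() reads back.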