Example 1
/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:		request queue
 * @force:		flag indicating a forced dispatch
 *
 * Return 0 if no requests were moved to the dispatch queue.
 *	  1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	int ret = 0, currq, ioprio_class_to_serve, start_idx, end_idx;

	if (force && hrtimer_active(&rd->rd_idle_data.hr_timer)) {
		if (hrtimer_try_to_cancel(&rd->rd_idle_data.hr_timer) >= 0) {
			row_log(rd->dispatch_queue,
				"Canceled delayed work on %d - forced dispatch",
				rd->rd_idle_data.idling_queue_idx);
			rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
		}
	}

	if (rd->pending_urgent_rq) {
		row_log(rd->dispatch_queue, "dispatching urgent request");
		row_dispatch_insert(rd, rd->pending_urgent_rq);
		ret = 1;
		goto done;
	}

	ioprio_class_to_serve = row_get_ioprio_class_to_serve(rd, force);
	row_log(rd->dispatch_queue, "Dispatching from %d priority class",
		ioprio_class_to_serve);

	switch (ioprio_class_to_serve) {
	case IOPRIO_CLASS_NONE:
		rd->last_served_ioprio_class = IOPRIO_CLASS_NONE;
		goto done;
	case IOPRIO_CLASS_RT:
		start_idx = ROWQ_HIGH_PRIO_IDX;
		end_idx = ROWQ_REG_PRIO_IDX;
		break;
	case IOPRIO_CLASS_BE:
		start_idx = ROWQ_REG_PRIO_IDX;
		end_idx = ROWQ_LOW_PRIO_IDX;
		break;
	case IOPRIO_CLASS_IDLE:
		start_idx = ROWQ_LOW_PRIO_IDX;
		end_idx = ROWQ_MAX_PRIO;
		break;
	default:
		pr_err("%s(): Invalid I/O priority class\n", __func__);
		goto done;
	}

	currq = row_get_next_queue(q, rd, start_idx, end_idx);

	/* Dispatch */
	if (currq >= 0) {
		row_dispatch_insert(rd,
			rq_entry_fifo(rd->row_queues[currq].fifo.next));
		ret = 1;
	}
done:
	return ret;
}
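
For context, here is a minimal sketch of how a dispatch callback like this one is wired into the legacy (pre-blk-mq) elevator framework. struct elevator_type, the elevator_dispatch_fn hook, and elv_register() are the real legacy API; the iosched_row table and the exact set of callbacks shown are assumptions modeled on the ROW scheduler, not code taken from these examples.

#include <linux/elevator.h>
#include <linux/module.h>

/* Hypothetical registration sketch for the legacy elevator API */
static struct elevator_type iosched_row = {
	.ops = {
		/* the block layer calls this hook to pull requests */
		.elevator_dispatch_fn	= row_dispatch_requests,
		/* other ROW callbacks (add_req, init, exit, ...) omitted */
	},
	.elevator_name	= "row",
	.elevator_owner	= THIS_MODULE,
};

static int __init row_init(void)
{
	/* makes "row" selectable via /sys/block/<dev>/queue/scheduler */
	return elv_register(&iosched_row);
}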
Example 2
/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:		request queue
 * @force:	ignored
 *
 * Return 0 if no requests were moved to the dispatch queue.
 *	  1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	int ret = 0, currq, i;

	currq = rd->curr_queue;

	/*
	 * Find the first unserved queue (with higher priority than currq)
	 * that is not empty
	 */
	for (i = 0; i < currq; i++) {
		if (row_rowq_unserved(rd, i) &&
		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
			row_log_rowq(rd, currq,
				" Preempting for unserved rowq%d", i);
			rd->curr_queue = i;
			row_dispatch_insert(rd);
			ret = 1;
			goto done;
		}
	}

	if (rd->row_queues[currq].rqueue.nr_dispatched >=
	    rd->row_queues[currq].disp_quantum) {
		rd->row_queues[currq].rqueue.nr_dispatched = 0;
		row_log_rowq(rd, currq, "Expiring rqueue");
		ret = row_choose_queue(rd);
		if (ret)
			row_dispatch_insert(rd);
		goto done;
	}

	/* Dispatch from curr_queue */
	if (list_empty(&rd->row_queues[currq].rqueue.fifo)) {
		/* check idling */
		if (delayed_work_pending(&rd->read_idle.idle_work)) {
			row_log_rowq(rd, currq,
				     "Delayed work pending. Exiting");
			goto done;
		}

		if (queue_idling_enabled[currq] &&
		    rd->row_queues[currq].rqueue.idle_data.begin_idling) {
			if (!kblockd_schedule_delayed_work(rd->dispatch_queue,
			     &rd->read_idle.idle_work,
			     msecs_to_jiffies(rd->read_idle.idle_time))) {
				row_log_rowq(rd, currq,
					     "Work already on queue!");
				pr_err("ROW_BUG: Work already on queue!\n");
			} else
				row_log_rowq(rd, currq,
				     "Scheduled delayed work. exiting");
			goto done;
		} else {
			row_log_rowq(rd, currq,
				     "Currq empty. Choose next queue");
			ret = row_choose_queue(rd);
			if (!ret)
				goto done;
		}
	}

	ret = 1;
	row_dispatch_insert(rd);

done:
	return ret;
}
Example 3
/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:		request queue
 * @force:	flag indicating a forced dispatch
 *
 * Return 0 if no requests were moved to the dispatch queue.
 *	  1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
    struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
    int ret = 0, currq, i;

    currq = rd->curr_queue;

    /*
     * Find the first unserved queue (with higher priority than currq)
     * that is not empty
     */
    for (i = 0; i < currq; i++) {
        if (row_rowq_unserved(rd, i) &&
                !list_empty(&rd->row_queues[i].rqueue.fifo)) {
            rd->curr_queue = i;
            row_dispatch_insert(rd);
            ret = 1;
            goto done;
        }
    }

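    /* curr_queue exhausted its dispatch quantum: reset it and re-choose */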
    if (rd->row_queues[currq].rqueue.nr_dispatched >=
            rd->row_queues[currq].disp_quantum) {
        rd->row_queues[currq].rqueue.nr_dispatched = 0;
        ret = row_choose_queue(rd);
        if (ret)
            row_dispatch_insert(rd);
        goto done;
    }

    /* Dispatch from curr_queue */
    if (list_empty(&rd->row_queues[currq].rqueue.fifo)) {
        /* check idling */
        if (delayed_work_pending(&rd->read_idle.idle_work)) {
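            /* a forced dispatch must not wait out the idle timer */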
            if (force) {
                (void)cancel_delayed_work(
                    &rd->read_idle.idle_work);
            } else {
                goto done;
            }
        }

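        /* not forced: arm the idle timer rather than switching queues */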
        if (!force && queue_idling_enabled[currq] &&
                rd->row_queues[currq].rqueue.idle_data.begin_idling) {
            if (!queue_delayed_work(rd->read_idle.idle_workqueue,
                                    &rd->read_idle.idle_work,
                                    rd->read_idle.idle_time))
                pr_err("ROW_BUG: Work already on queue!\n");
            goto done;
        } else {
            ret = row_choose_queue(rd);
            if (!ret)
                goto done;
        }
    }

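    /* dispatch the next request from the (possibly re-chosen) queue */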
    ret = 1;
    row_dispatch_insert(rd);

done:
    return ret;
}
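
The field accesses in examples 2 and 3 imply roughly the following per-queue bookkeeping. This is a reconstruction from usage alone, not the real header: any field, type, or comment not directly visible in the code above is an assumption.

#include <linux/blkdev.h>
#include <linux/workqueue.h>

/* Hypothetical layout inferred from the accesses in examples 2 and 3 */
struct rowq_idling_data {
	bool			begin_idling;	/* arm idle timer when queue empties */
};

struct row_queue {
	struct list_head	fifo;		/* pending requests, FIFO order */
	unsigned int		nr_dispatched;	/* dispatched in the current quantum */
	struct rowq_idling_data	idle_data;
};

struct idling_data {
	unsigned long		idle_time;	/* ms in example 2, jiffies in example 3 */
	struct delayed_work	idle_work;
	struct workqueue_struct	*idle_workqueue;
};

struct row_data {
	struct request_queue	*dispatch_queue;
	struct {
		struct row_queue rqueue;
		int		 disp_quantum;	/* max dispatches per round */
	} row_queues[ROWQ_MAX_PRIO];
	int			curr_queue;	/* index of the queue being serviced */
	struct idling_data	read_idle;
};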