Example 1
0
/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:		requests queue
 * @force:	ignored
 *
 * Return 0 if no requests were moved to the dispatch queue.
 *	  1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	int ret = 0, currq, i;

	currq = rd->curr_queue;

	/*
	 * Find the first unserved queue (with higher priority than currq)
	 * that is not empty
	 */
	for (i = 0; i < currq; i++) {
		if (row_rowq_unserved(rd, i) &&
		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
			/* Preempt currq: a higher-priority queue was skipped */
			row_log_rowq(rd, currq,
				" Preempting for unserved rowq%d", i);
			rd->curr_queue = i;
			row_dispatch_insert(rd);
			ret = 1;
			goto done;
		}
	}

	/*
	 * currq has used up its dispatch quantum: reset its counter and
	 * move on to the next eligible queue (if any has requests).
	 */
	if (rd->row_queues[currq].rqueue.nr_dispatched >=
	    rd->row_queues[currq].disp_quantum) {
		rd->row_queues[currq].rqueue.nr_dispatched = 0;
		row_log_rowq(rd, currq, "Expiring rqueue");
		ret = row_choose_queue(rd);
		if (ret)
			row_dispatch_insert(rd);
		goto done;
	}

	/* Dispatch from curr_queue */
	if (list_empty(&rd->row_queues[currq].rqueue.fifo)) {
		/*
		 * check idling: if idle work is already pending, keep
		 * waiting for more requests rather than switching queues.
		 */
		if (delayed_work_pending(&rd->read_idle.idle_work)) {
			row_log_rowq(rd, currq,
				     "Delayed work pending. Exiting");
			goto done;
		}

		/*
		 * Queue is empty but marked for idling: schedule the idle
		 * timer and exit without dispatching, anticipating that
		 * more requests will arrive on this queue shortly.
		 */
		if (queue_idling_enabled[currq] &&
		    rd->row_queues[currq].rqueue.idle_data.begin_idling) {
			if (!kblockd_schedule_delayed_work(rd->dispatch_queue,
			     &rd->read_idle.idle_work, jiffies +
			     msecs_to_jiffies(rd->read_idle.idle_time))) {
				row_log_rowq(rd, currq,
					     "Work already on queue!");
				pr_err("ROW_BUG: Work already on queue!");
			} else
				row_log_rowq(rd, currq,
				     "Scheduled delayed work. exiting");
			goto done;
		} else {
			/* Not idling: pick another non-empty queue */
			row_log_rowq(rd, currq,
				     "Currq empty. Choose next queue");
			ret = row_choose_queue(rd);
			if (!ret)
				goto done;
		}
	}

	/* curr_queue (possibly just re-chosen) has a request to dispatch */
	ret = 1;
	row_dispatch_insert(rd);

done:
	return ret;
}
Example 2
0
/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:		requests queue
 * @force:	cancels pending idling and skips new idling, forcing a
 *		dispatch attempt
 *
 * Return 0 if no requests were moved to the dispatch queue.
 *	  1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
    struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
    int ret = 0, currq, i;

    currq = rd->curr_queue;

    /*
     * Find the first unserved queue (with higher priority than currq)
     * that is not empty
     */
    for (i = 0; i < currq; i++) {
        if (row_rowq_unserved(rd, i) &&
                !list_empty(&rd->row_queues[i].rqueue.fifo)) {
            /* Preempt currq in favor of the skipped higher-priority queue */
            rd->curr_queue = i;
            row_dispatch_insert(rd);
            ret = 1;
            goto done;
        }
    }

    /*
     * currq exhausted its dispatch quantum: reset the counter and
     * switch to the next eligible non-empty queue, if any.
     */
    if (rd->row_queues[currq].rqueue.nr_dispatched >=
            rd->row_queues[currq].disp_quantum) {
        rd->row_queues[currq].rqueue.nr_dispatched = 0;
        ret = row_choose_queue(rd);
        if (ret)
            row_dispatch_insert(rd);
        goto done;
    }

    /* Dispatch from curr_queue */
    if (list_empty(&rd->row_queues[currq].rqueue.fifo)) {
        /* check idling */
        if (delayed_work_pending(&rd->read_idle.idle_work)) {
            if (force) {
                /* Forced dispatch overrides a pending idle period */
                (void)cancel_delayed_work(
                    &rd->read_idle.idle_work);
            } else {
                goto done;
            }
        }

        /*
         * Queue is empty but marked for idling (and dispatch was not
         * forced): arm the idle timer and exit without dispatching.
         */
        if (!force && queue_idling_enabled[currq] &&
                rd->row_queues[currq].rqueue.idle_data.begin_idling) {
            if (queue_delayed_work(rd->read_idle.idle_workqueue,
                                   &rd->read_idle.idle_work,
                                   rd->read_idle.idle_time))
                goto done;
            /*
             * Work was already queued; fall through and try to
             * dispatch from another queue instead.
             */
        } else {
            /* Not idling: pick another non-empty queue */
            ret = row_choose_queue(rd);
            if (!ret)
                goto done;
        }
    }

    /* curr_queue (possibly just re-chosen) has a request to dispatch */
    ret = 1;
    row_dispatch_insert(rd);

done:
    return ret;
}