Example 1
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq;
	int data_dir;

	if (dd->next_rq[WRITE])
		rq = dd->next_rq[WRITE];
	else
		rq = dd->next_rq[READ];

	if (rq && dd->batching < dd->fifo_batch)
		goto dispatch_request;

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	} else {
		rq = dd->next_rq[data_dir];
	}

	dd->batching = 0;

dispatch_request:
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}
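
The dispatch routine above leans on a deadline_check_fifo() helper that is not listed in these examples. For reference, here is a minimal sketch of that expiry check, assuming the rq_entry_fifo()/rq_fifo_time() accessors of the mainline deadline-iosched.c of that era (the exact names differ across kernel versions):

static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/* the oldest request in this direction has blown its deadline */
	if (time_after(jiffies, rq_fifo_time(rq)))
		return 1;

	return 0;
}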
Example 2
static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(dd, rq);
	struct request *__alias;

	while (unlikely(__alias = elv_rb_add(root, rq)))
		deadline_move_request(dd, __alias);
}
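
The alias handling here hinges on elv_rb_add() returning the request that already occupies the same sort key instead of inserting a duplicate (later kernels changed it to return void and BUG on an alias). A sketch of that behaviour, assuming the classic sector-keyed rbtree:

struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (rq->sector < __rq->sector)
			p = &(*p)->rb_left;
		else if (rq->sector > __rq->sector)
			p = &(*p)->rb_right;
		else
			return __rq;	/* alias: same sort key already queued */
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}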
Example 3
static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
    struct rb_root *root = RQ_RB_ROOT(dd, rq);
    struct request *__alias;

retry:
    __alias = elv_rb_add(root, rq);
    if (unlikely(__alias)) {
        deadline_move_request(dd, __alias);
        goto retry;
    }
}
Example 4
void ElvDeadline::deadline_add_rq_rb(request *rq)
{
	Rbtree *tree = RQ_RB_ROOT(rq);
	request *__alias;

retry:
	__alias = elv_rb_add(tree, rq);
	if (__alias) {
		/* TODO: move to dispatch queue */
		assert(0);
		deadline_move_request(__alias);
		goto retry;
	}
}
Example 5
static void
deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct deadline_rq *__alias;

	drq->rb_key = rq_rb_key(drq->request);

retry:
	__alias = __deadline_add_drq_rb(dd, drq);
	if (!__alias) {
		rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
		return;
	}

	deadline_move_request(dd, __alias);
	goto retry;
}
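
In this older drq-based variant, __deadline_add_drq_rb() only links the node into the tree and reports an alias, which is why the caller finishes the insertion with rb_insert_color() itself. A sketch of that helper, assuming the rb_key/rb_entry_drq() layout of the 2.6-era scheduler:

static struct deadline_rq *
__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
	struct rb_node *parent = NULL;
	struct deadline_rq *__drq;

	while (*p) {
		parent = *p;
		__drq = rb_entry_drq(parent);

		if (drq->rb_key < __drq->rb_key)
			p = &(*p)->rb_left;
		else if (drq->rb_key > __drq->rb_key)
			p = &(*p)->rb_right;
		else
			return __drq;	/* alias with the same key already queued */
	}

	/* link only; the caller runs rb_insert_color() to rebalance */
	rb_link_node(&drq->rb_node, parent, p);
	return NULL;
}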
Example 6
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_rq[WRITE])
		rq = dd->next_rq[WRITE];
	else
		rq = dd->next_rq[READ];

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = dd->next_rq[data_dir];
	}

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}
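
Every variant of the dispatch path finishes by calling deadline_move_request(), which caches the next request in sort order so a batch can continue sequentially, then hands the chosen request to the dispatch queue. A minimal sketch, assuming the elv_dispatch_add_tail() interface of the single-queue elevator API:

static void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
	struct request_queue *q = rq->q;

	/* take rq off the fifo and sort lists, then queue it for the driver */
	deadline_remove_request(q, rq);
	elv_dispatch_add_tail(q, rq);
}

static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	/* remember the follow-on request so the current batch can continue */
	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	if (rbnext)
		dd->next_rq[data_dir] = rb_entry_rq(rbnext);

	deadline_move_to_dispatch(dd, rq);
}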
Example 7
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
    struct deadline_data *dd = q->elevator->elevator_data;
    const int reads = !list_empty(&dd->fifo_list[READ]);
    const int writes = !list_empty(&dd->fifo_list[WRITE]);
    struct request *rq;
    int data_dir;

    /*
     * batches are currently reads XOR writes
     */
    if (dd->next_rq[WRITE])
        rq = dd->next_rq[WRITE];
    else
        rq = dd->next_rq[READ];

    if (rq) {
        /* we have a "next request" */

        if (dd->last_sector != rq->sector)
            /* end the batch on a non sequential request */
            dd->batching += dd->fifo_batch;

        if (dd->batching < dd->fifo_batch)
            /* we are still entitled to batch */
            goto dispatch_request;
    }

    /*
     * at this point we are not running a batch. select the appropriate
     * data direction (read / write)
     */

    if (reads) {
        BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

        if (writes && (dd->starved++ >= dd->writes_starved))
            goto dispatch_writes;

        data_dir = READ;

        goto dispatch_find_request;
    }

    /*
     * there are either no reads or writes have been starved
     */

    if (writes) {
dispatch_writes:
        BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

        dd->starved = 0;

        data_dir = WRITE;

        goto dispatch_find_request;
    }

    return 0;

dispatch_find_request:
    /*
     * we are not running a batch, find best request for selected data_dir
     */
    if (deadline_check_fifo(dd, data_dir)) {
        /* An expired request exists - satisfy it */
        dd->batching = 0;
        rq = rq_entry_fifo(dd->fifo_list[data_dir].next);

    } else if (dd->next_rq[data_dir]) {
        /*
         * The last req was the same dir and we have a next request in
         * sort order. No expired requests so continue on from here.
         */
        rq = dd->next_rq[data_dir];
    } else {
        struct rb_node *node;
        /*
         * The last req was the other direction or we have run out of
         * higher-sectored requests. Go back to the lowest sectored
         * request (1 way elevator) and start a new batch.
         */
        dd->batching = 0;
        node = rb_first(&dd->sort_list[data_dir]);
        if (node)
            rq = rb_entry_rq(node);
    }

dispatch_request:
    /*
     * rq is the selected appropriate request.
     */
    dd->batching++;
    deadline_move_request(dd, rq);

    return 1;
}
Example 8
int ElvDeadline::Dispatch(int force)
{
	int reads = !fifo_list[READ].Empty();
	int writes = !fifo_list[WRITE].Empty();
	request *rq;
	int rw;

	/* Simplified path: do not account for writes starving reads. */
	if (writes)
		rw = WRITE;
	else if (reads)
		rw = READ;
	else
		return 0;

	goto dispatch_simple;

	/*
	 * batches are currently reads XOR writes
	 */
	if (next_rq[WRITE])
		rq = next_rq[WRITE];
	else
		rq = next_rq[READ];
	
	if (rq) {
		/* we have a "next request" */
		if (headpos != rq->off)
			/* end the batch on a non sequential request */
			batching += fifo_batch;
		if (batching < fifo_batch)
			goto dispatch_request;
	}

	/*
	 * at this point we are not running a batch, select the appropriate
	 * data direction (read/write)
	 */
	if (reads) {
		if (writes && (starved++ >= writes_starved))
			goto dispatch_writes;

		rw = READ;
		goto dispatch_find_request;
	}
	
	/*
	 * there are either no reads or writes have been starved
	 */
	if (writes) {
dispatch_writes:
		starved = 0;
		rw = WRITE;
		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected rw
	 */
	if (deadline_check_fifo(rw)) {
		/* an expired request exists - satisfy it */
		batching = 0;
		rq = (request *) fifo_list[rw].suc;
	} else if (next_rq[rw]) {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq[rw];
	} else {
dispatch_simple:
		rb_node *node;
		/*
		 * The last req was the other direction or we have run out of
		 * higher-offset requests. Go back to the lowest-offset request
		 * (1 way elevator) and start a new batch.
		 */
		batching = 0;
		node = sort_list[rw].rb_first();
		if (node)
			rq = rb_entry_rq(node);
	}

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	batching++;
	deadline_move_request(rq);
	
	return 1;
}