Example #1
/*
 * get the request after `rq' in sorted order
 */
request *ElvDeadline::ElvLatterRequest(request *rq)
{
	rb_node *next = Rbtree::rb_next(&rq->rbnode);
	
	if (next)
		return rb_entry_rq(next);

	return NULL;
}
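
Every example on this page leans on rb_entry_rq() to map an rb_node embedded in a request back to the enclosing request. A minimal sketch of the assumed helper, mirroring the kernel's definition in <linux/elevator.h> (the C++ port presumably carries an equivalent):

#include <linux/rbtree.h>	/* rb_entry(), rb_next(), rb_prev() */

/* recover the struct request that embeds the given rb_node */
#define rb_entry_rq(node)	rb_entry((node), struct request, rb_node)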
Example #2
/*
 * get the request before `rq' in sorted order
 */
request *ElvDeadline::ElvFormerRequest(request *rq)
{
	rb_node *prev = Rbtree::rb_prev(&rq->rbnode);

	if (prev)
		return rb_entry_rq(prev);

	return NULL;
}
Example #3
/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}
Example #4
void ElvDeadline::deadline_del_rq_rb(request *rq)
{
	/* if rq is the cached next request, advance the cache before deleting */
	if (next_rq[rq->cmd] == rq) {
		rb_node *next = Rbtree::rb_next(&rq->rbnode);

		next_rq[rq->cmd] = NULL;
		if (next)
			next_rq[rq->cmd] = rb_entry_rq(next);
	}

	elv_rb_del(RQ_RB_ROOT(rq), rq);
}
Example #5
static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
    const int data_dir = rq_data_dir(rq);

    if (dd->next_rq[data_dir] == rq) {
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        dd->next_rq[data_dir] = NULL;
        if (rbnext)
            dd->next_rq[data_dir] = rb_entry_rq(rbnext);
    }

    elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
}
Example #6
void ElvDeadline::deadline_move_request(request *rq)
{
	int rw = rq->cmd;
	rb_node *next = Rbtree::rb_next(&rq->rbnode);

	next_rq[READ] = NULL;
	next_rq[WRITE] = NULL;
	
	if (next)
		next_rq[rw] = rb_entry_rq(next);

	headpos = rq->off + rq->count;

	/*
	 * take it off the sort and fifo list, 
	 * move to dispatch queue.
	 */
	deadline_move_to_dispatch(rq);
}
Example #7
/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
    const int data_dir = rq_data_dir(rq);
    struct rb_node *rbnext = rb_next(&rq->rb_node);

    dd->next_rq[READ] = NULL;
    dd->next_rq[WRITE] = NULL;

    if (rbnext)
        dd->next_rq[data_dir] = rb_entry_rq(rbnext);

    dd->last_sector = rq->sector + rq->nr_sectors;

    /*
     * take it off the sort and fifo list, move
     * to dispatch queue
     */
    deadline_move_to_dispatch(dd, rq);
}
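
For context, deadline_move_to_dispatch(), called at the end of Examples #6 and #7 but not shown on this page, is essentially the following in deadline-iosched.c of the same vintage (a sketch, not verbatim):

static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
	struct request_queue *q = rq->q;

	/* drop rq from the scheduler's sort tree and FIFO */
	deadline_remove_request(q, rq);
	/* hand it over to the device's dispatch queue */
	elv_dispatch_add_tail(q, rq);
}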
Example #8
/*
 * C-SCAN dispatch: serve the current sweep in ascending sector order;
 * when it runs dry, wrap around and continue from the other tree.
 */
static int cscan_dispatch(struct request_queue *q, int force)
{
	struct request *rq;
	struct cscan_data *cd = q->elevator->elevator_data;

	struct rb_node *node = rb_first(&cd->sort_list[cd->curr]);
	if (!node) {
		/* current sweep exhausted - flip to the other list */
		cd->curr = 1 - cd->curr;
		node = rb_first(&cd->sort_list[cd->curr]);
	}

	if (node) {
		rq = rb_entry_rq(node);
		cd->last_sector = rq_end_sector(rq);
		elv_rb_del(&cd->sort_list[cd->curr], rq);
		elv_dispatch_add_tail(q, rq);
		return 1;
	}
	return 0;
}
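
Example #8 writes rq_end_sector(rq) where Example #7 spells out rq->sector + rq->nr_sectors; in the elevator.h of that era the macro is simply the first sector past the end of the request (sketch):

/* first sector past the end of the request */
#define rq_end_sector(rq)	((rq)->sector + (rq)->nr_sectors)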
Example #9
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
    struct deadline_data *dd = q->elevator->elevator_data;
    const int reads = !list_empty(&dd->fifo_list[READ]);
    const int writes = !list_empty(&dd->fifo_list[WRITE]);
    struct request *rq;
    int data_dir;

    /*
     * batches are currently reads XOR writes
     */
    if (dd->next_rq[WRITE])
        rq = dd->next_rq[WRITE];
    else
        rq = dd->next_rq[READ];

    if (rq) {
        /* we have a "next request" */

        if (dd->last_sector != rq->sector)
            /* end the batch on a non sequential request */
            dd->batching += dd->fifo_batch;

        if (dd->batching < dd->fifo_batch)
            /* we are still entitled to batch */
            goto dispatch_request;
    }

    /*
     * at this point we are not running a batch. select the appropriate
     * data direction (read / write)
     */

    if (reads) {
        BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

        if (writes && (dd->starved++ >= dd->writes_starved))
            goto dispatch_writes;

        data_dir = READ;

        goto dispatch_find_request;
    }

    /*
     * there are either no reads or writes have been starved
     */

    if (writes) {
dispatch_writes:
        BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

        dd->starved = 0;

        data_dir = WRITE;

        goto dispatch_find_request;
    }

    return 0;

dispatch_find_request:
    /*
     * we are not running a batch, find best request for selected data_dir
     */
    if (deadline_check_fifo(dd, data_dir)) {
        /* An expired request exists - satisfy it */
        dd->batching = 0;
        rq = rq_entry_fifo(dd->fifo_list[data_dir].next);

    } else if (dd->next_rq[data_dir]) {
        /*
         * The last req was the same dir and we have a next request in
         * sort order. No expired requests so continue on from here.
         */
        rq = dd->next_rq[data_dir];
    } else {
        struct rb_node *node;
        /*
         * The last req was the other direction or we have run out of
         * higher-sectored requests. Go back to the lowest sectored
         * request (1 way elevator) and start a new batch.
         */
        dd->batching = 0;
        node = rb_first(&dd->sort_list[data_dir]);
        if (node)
            rq = rb_entry_rq(node);
    }

dispatch_request:
    /*
     * rq is the selected appropriate request.
     */
    dd->batching++;
    deadline_move_request(dd, rq);

    return 1;
}
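
The deadline_check_fifo() test used by both dispatch functions is not shown on this page; a sketch based on deadline-iosched.c of the same vintage, reporting whether the oldest request on the given FIFO has passed its expire time (rq_fifo_time() reads back the jiffies deadline stamped on the request at insertion):

static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/* rq is expired! */
	if (time_after(jiffies, rq_fifo_time(rq)))
		return 1;

	return 0;
}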
Example #10
int ElvDeadline::Dispatch(int force)
{
	int reads = !fifo_list[READ].Empty();
	int writes = !fifo_list[WRITE].Empty();
	request *rq;
	int rw;

	/* Simplified: always prefer writes; do not track read/write starvation. */
	if (writes)
		rw = WRITE;
	else if (reads)
		rw = READ;
	else
		return 0;

	goto dispatch_simple;	/* bypass the unused batching logic below */

	/*
	 * batches are currently reads XOR writes
	 */
	if (next_rq[WRITE])
		rq = next_rq[WRITE];
	else
		rq = next_rq[READ];
	
	if (rq) {
		/* we have a "next request" */
		if (headpos != rq->off)
			/* end the batch on a non sequential request */
			batching += fifo_batch;
		if (batching < fifo_batch)
			goto dispatch_request;
	}

	/*
	 * at this point we are not running a batch, select the appropriate
	 * data direction (read/write)
	 */
	if (reads) {
		if (writes && (starved++ >= writes_starved))
			goto dispatch_writes;

		rw = READ;
		goto dispatch_find_request;
	}
	
	/*
	 * there are either no reads or writes have been starved
	 */
	if (writes) {
dispatch_writes:
		starved = 0;
		rw = WRITE;
		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected rw
	 */
	if (deadline_check_fifo(rw)) {
		/* an expired request exists - satisfy it */
		batching = 0;
		rq = (request *) fifo_list[rw].suc;
	} else if (next_rq[rw]) {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq[rw];
	} else {
dispatch_simple:
		rb_node *node;
		/*
		 * The last req was the other direction or we have run out of
		 * higher-offset requests. Go back to the lowest-offset request
		 * (1 way elevator) and start a new batch.
		 */
		batching = 0;
		node = sort_list[rw].rb_first();
		if (node)
			rq = rb_entry_rq(node);
	}

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	batching++;
	deadline_move_request(rq);
	
	return 1;
}