Example no. 1
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_rq[WRITE])
		rq = dd->next_rq[WRITE];
	else
		rq = dd->next_rq[READ];

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch; select the appropriate
	 * data direction (read / write)
	 */
	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * either there are no reads, or writes have been starved
	 */
	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch; find the best request for data_dir
	 */
	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
		/* a deadline expired, or there is no next request in sort
		 * order: restart from the request with the earliest expiry */
		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	} else {
		/* same direction and a next request in sort order: batch on */
		rq = dd->next_rq[data_dir];
	}

	dd->batching = 0;

dispatch_request:
	/* rq is the selected request; account for it and dispatch */
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}
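The snippet above relies on two helpers it does not show. For reference, a
sketch of how they look in the mainline deadline scheduler
(block/deadline-iosched.c); the FIFO-timestamp accessor rq_fifo_time() has
varied between kernel versions, so treat the body as indicative rather than
exact:

#define rq_entry_fifo(ptr)	list_entry((ptr), struct request, queuelist)

/* the FIFO is kept in insertion order, so only the head request can
 * hold the earliest deadline; the expiry check is O(1) */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/* expired if the head request's deadline has passed */
	if (time_after(jiffies, rq_fifo_time(rq)))
		return 1;

	return 0;
}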
Example no. 2
/*
 * Simple dispatching algorithm for now:
 * prioritize the sync queue, but issue at most async_starved sync
 * requests in a row; if no async write has expired, issue bundled
 * writes instead; return 1 if a request was dispatched, 0 otherwise.
 */
static int flash_dispatch_requests(struct request_queue *q, int force)
{
	struct flash_data *fd = q->elevator->elevator_data;
	/* syncs is 1 when the sync queue (fifo_list[0]) is not empty */
	const int syncs = !list_empty(&fd->fifo_list[0]);
	const int asyncs = !list_empty(&fd->fifo_list[1]);
	const int bundles = !list_empty(&fd->bundle_list);
	struct request *rq;

	/* sync queue not empty and syncs have not yet starved async reqs */
	if (syncs && (fd->sync_issued < fd->async_starved)) {
		rq = rq_entry_fifo(fd->fifo_list[0].next);
		fd->sync_issued++;
		goto dispatch_request;
	}

	/* schedule a bundle: reset the sync counter first */
	fd->sync_issued = 0;
	/* bundles pending and no async write has expired */
	if (bundles && !deadline_check_fifo(fd, 1)) {
		rq = rq_entry_fifo(fd->bundle_list.next);
		goto dispatch_request;
	}

	/*
	 * async write queue not empty: dispatch the oldest async write.
	 * FIXME: gating this on expiry, as in the anticipatory algorithm,
	 * might cause a system hang, so the expiry check is disabled:
	 * if (asyncs && deadline_check_fifo(fd, 1))
	 */
	if (asyncs) {
		rq = rq_entry_fifo(fd->fifo_list[1].next);
		goto dispatch_request;
	}

	/* nothing suitable: dispatch not successful */
	return 0;

dispatch_request:
	/*
	 * rq is the selected request; move it to the dispatch queue
	 * (the device's request queue)
	 */
	flash_move_to_dispatch(fd, rq);

	return 1;
}
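flash_move_to_dispatch() is not shown. A minimal sketch of what it plausibly
does, modeled on the deadline scheduler's deadline_move_to_dispatch();
flash_remove_request() is an assumed helper, not confirmed by the source:

static void flash_move_to_dispatch(struct flash_data *fd, struct request *rq)
{
	struct request_queue *q = rq->q;

	/* unlink rq from whichever FIFO/bundle list holds it
	 * (flash_remove_request() is hypothetical) ... */
	flash_remove_request(q, rq);
	/* ... then hand it to the block layer's dispatch queue */
	elv_dispatch_add_tail(q, rq);
}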
Example no. 3
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_rq[WRITE])
		rq = dd->next_rq[WRITE];
	else
		rq = dd->next_rq[READ];

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * either there are no reads, or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = dd->next_rq[data_dir];
	}

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}
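For context, deadline_move_request() is what keeps the batching test at the
top of the function meaningful: it records the next request in sector order
before dispatching. A sketch after mainline block/deadline-iosched.c; exact
accessors (rq->sector vs blk_rq_pos(rq)) differ across kernel versions:

static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);
	struct rb_node *next = rb_next(&rq->rb_node);

	/* remember the next request in sort order for this direction */
	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = next ? rb_entry_rq(next) : NULL;

	/* remember where the head will be after this request completes */
	dd->last_sector = rq->sector + rq->nr_sectors;

	/* take rq off the sort and fifo lists, move to dispatch queue */
	deadline_move_to_dispatch(dd, rq);
}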
Example no. 4
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
    struct deadline_data *dd = q->elevator->elevator_data;
    const int reads = !list_empty(&dd->fifo_list[READ]);
    const int writes = !list_empty(&dd->fifo_list[WRITE]);
    struct request *rq;
    int data_dir;

    /*
     * batches are currently reads XOR writes
     */
    if (dd->next_rq[WRITE])
        rq = dd->next_rq[WRITE];
    else
        rq = dd->next_rq[READ];

    if (rq) {
        /* we have a "next request" */

        if (dd->last_sector != rq->sector)
            /* end the batch on a non sequential request */
            dd->batching += dd->fifo_batch;

        if (dd->batching < dd->fifo_batch)
            /* we are still entitled to batch */
            goto dispatch_request;
    }

    /*
     * at this point we are not running a batch. select the appropriate
     * data direction (read / write)
     */

    if (reads) {
        BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

        if (writes && (dd->starved++ >= dd->writes_starved))
            goto dispatch_writes;

        data_dir = READ;

        goto dispatch_find_request;
    }

    /*
     * either there are no reads, or writes have been starved
     */

    if (writes) {
dispatch_writes:
        BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

        dd->starved = 0;

        data_dir = WRITE;

        goto dispatch_find_request;
    }

    return 0;

dispatch_find_request:
    /*
     * we are not running a batch, find best request for selected data_dir
     */
    if (deadline_check_fifo(dd, data_dir)) {
        /* An expired request exists - satisfy it */
        dd->batching = 0;
        rq = rq_entry_fifo(dd->fifo_list[data_dir].next);

    } else if (dd->next_rq[data_dir]) {
        /*
         * The last req was the same dir and we have a next request in
         * sort order. No expired requests so continue on from here.
         */
        rq = dd->next_rq[data_dir];
    } else {
        struct rb_node *node;
        /*
         * The last req was the other direction or we have run out of
         * higher-sectored requests. Go back to the lowest sectored
         * request (1 way elevator) and start a new batch.
         */
        dd->batching = 0;
        node = rb_first(&dd->sort_list[data_dir]);
        if (node)
            rq = rb_entry_rq(node);
    }

dispatch_request:
    /*
     * rq is the selected appropriate request.
     */
    dd->batching++;
    deadline_move_request(dd, rq);

    return 1;
}
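Both C variants above end up in deadline_move_request(), whose final hop to
the block layer looks roughly like this in mainline (sketch;
deadline_remove_request() drops rq from the scheduler's rbtree and FIFO):

static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
	struct request_queue *q = rq->q;

	/* forget rq on the scheduler side ... */
	deadline_remove_request(q, rq);
	/* ... and append it to the device's dispatch queue */
	elv_dispatch_add_tail(q, rq);
}

The ElvDeadline::Dispatch() method below appears to be a C++ port of the same
dispatch logic, with sector offsets (rq->off) in place of rq->sector.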
int ElvDeadline::Dispatch(int force)
{
	int reads = !fifo_list[READ].Empty();
	int writes = !fifo_list[WRITE].Empty();
	request *rq;
	int rw;

	/* Simplified path: do not account for writes starving reads. */
	if (writes)
		rw = WRITE;
	else if (reads)
		rw = READ;
	else
		return 0;

	/* jump straight to the simple one-way elevator below; the full
	 * deadline batching logic that follows is currently bypassed */
	goto dispatch_simple;

	/*
	 * batches are currently reads XOR writes
	 */
	if (next_rq[WRITE])
		rq = next_rq[WRITE];
	else
		rq = next_rq[READ];
	
	if (rq) {
		/* we have a "next request" */
		if (headpos != rq->off)
			/* end the batch on a non sequential request */
			batching += fifo_batch;
		if (batching < fifo_batch)
			goto dispatch_request;
	}

	/*
	 * at this point we are not running a batch, select the appropriate
	 * data direction (read/write)
	 */
	if (reads) {
		if (writes && (starved++ >= writes_starved))
			goto dispatch_writes;

		rw = READ;
		goto dispatch_find_request;
	}
	
	/*
	 * there are either no reads or writes have been starved
	 */
	if (writes) {
dispatch_writes:
		starved = 0;
		rw = WRITE;
		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected rw
	 */
	if (deadline_check_fifo(rw)) {
		/* an expired request exists - satisfy it */
		batching = 0;
		rq = (request *) fifo_list[rw].suc;
	} else if (next_rq[rw]) {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq[rw];
	} else {
dispatch_simple:
		rb_node *node;
		/*
		 * The last req was the other direction or we have run out of
		 * higher-offset requests. Go back to the lowest-offset
		 * request (1 way elevator) and start a new batch.
		 */
		batching = 0;
		node = sort_list[rw].rb_first();
		if (node)
			rq = rb_entry_rq(node);
	}

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	batching++;
	deadline_move_request(rq);
	
	return 1;
}