/*
 * get the first expired request in direction ddir
 */
static struct request *
zen_expired_request(struct zen_data *zdata, int ddir)
{
        struct request *rq;

        if (list_empty(&zdata->fifo_list[ddir]))
                return NULL;

        rq = rq_entry_fifo(zdata->fifo_list[ddir].next);
        if (time_after_eq(jiffies, rq_fifo_time(rq)))
                return rq;

        return NULL;
}
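
The check above relies on the kernel's wraparound-safe time macros rather than a plain numeric comparison. Below is a self-contained userspace sketch of the same idiom; the macro bodies mirror the signed-difference trick used by include/linux/jiffies.h, while the harness around them is purely illustrative.

#include <stdio.h>

/* same signed-difference trick as the kernel's time_after()/time_after_eq() */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long fifo_time = -5UL;	/* deadline stamped just before wraparound */
	unsigned long now = 3;		/* the counter has since wrapped past zero */

	/* a naive 'now >= fifo_time' is false here; the signed cast gets it right */
	printf("expired (time_after_eq): %d\n", time_after_eq(now, fifo_time)); /* 1 */
	printf("expired (time_after):    %d\n", time_after(now, fifo_time));    /* 1 */
	return 0;
}
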
Example #2
/*
 * get the first expired request in direction ddir
 */
static struct request *
vr_expired_request(struct vr_data *vd, int ddir)
{
	struct request *rq;

	if (list_empty(&vd->fifo_list[ddir]))
		return NULL;

	rq = rq_entry_fifo(vd->fifo_list[ddir].next);
	if (time_after(jiffies, rq_fifo_time(rq)))
		return rq;

	return NULL;
}
/*
 * row_dispatch_insert() - move request to dispatch queue
 * @rd:		pointer to struct row_data
 * @queue_idx:	index of the row_queue to dispatch from
 *
 * This function moves the next request to dispatch from
 * the given queue (row_queues[queue_idx]) to the dispatch queue
 */
static void row_dispatch_insert(struct row_data *rd, int queue_idx)
{
	struct request *rq;

	rq = rq_entry_fifo(rd->row_queues[queue_idx].fifo.next);
	row_remove_request(rd->dispatch_queue, rq);
	elv_dispatch_add_tail(rd->dispatch_queue, rq);
	rd->row_queues[queue_idx].nr_dispatched++;
	row_clear_rowq_unserved(rd, queue_idx);
	row_log_rowq(rd, queue_idx, " Dispatched request nr_disp = %d",
		     rd->row_queues[queue_idx].nr_dispatched);
	if (rq->cmd_flags & REQ_URGENT)
		rd->nr_urgent_in_flight++;
}
Example #4
static struct request *
sio_choose_request(struct sio_data *sd, int data_dir)
{
	struct list_head *sync = sd->fifo_list[SYNC];
	struct list_head *async = sd->fifo_list[ASYNC];

	/*
	 * Retrieve request from available fifo list.
	 * Synchronous requests have priority over asynchronous.
	 * Read requests have priority over write.
	 */
	if (!list_empty(&sync[data_dir]))
		return rq_entry_fifo(sync[data_dir].next);
	if (!list_empty(&async[data_dir]))
		return rq_entry_fifo(async[data_dir].next);

	if (!list_empty(&sync[!data_dir]))
		return rq_entry_fifo(sync[!data_dir].next);
	if (!list_empty(&async[!data_dir]))
		return rq_entry_fifo(async[!data_dir].next);

	return NULL;
}
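
In sio, fifo_list is a two-dimensional array indexed as [SYNC|ASYNC][READ|WRITE], so taking a single index yields a row that decays to a struct list_head pointer. The self-contained sketch below, with a stripped-down list implementation and illustrative names of its own, demonstrates that decay and the resulting four-way priority scan.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

enum { ASYNC, SYNC };	/* first index  */
enum { READ, WRITE };	/* second index */

struct toy_sio_data { struct list_head fifo_list[2][2]; };

/* mirrors the scan order above: sync[dir], async[dir], sync[!dir], async[!dir] */
static const char *choose(struct toy_sio_data *sd, int data_dir)
{
	struct list_head *sync = sd->fifo_list[SYNC];	/* row decays to a pointer */
	struct list_head *async = sd->fifo_list[ASYNC];

	if (!list_empty(&sync[data_dir]))
		return "sync, same direction";
	if (!list_empty(&async[data_dir]))
		return "async, same direction";
	if (!list_empty(&sync[!data_dir]))
		return "sync, other direction";
	if (!list_empty(&async[!data_dir]))
		return "async, other direction";
	return "nothing queued";
}

int main(void)
{
	struct toy_sio_data sd;
	struct list_head rq;
	int s, d;

	for (s = 0; s < 2; s++)
		for (d = 0; d < 2; d++)
			INIT_LIST_HEAD(&sd.fifo_list[s][d]);

	printf("%s\n", choose(&sd, READ));	/* nothing queued */
	list_add_tail(&rq, &sd.fifo_list[ASYNC][WRITE]);
	printf("%s\n", choose(&sd, READ));	/* async, other direction */
	return 0;
}
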
Example #5
static struct request *
sio_expired_request(struct sio_data *sd, int sync, int data_dir)
{
	struct list_head *list = &sd->fifo_list[sync][data_dir];
	struct request *rq;

	if (list_empty(list))
		return NULL;

	/* Retrieve request */
	rq = rq_entry_fifo(list->next);

	/* Request has expired */
	if (time_after_eq(jiffies, rq_fifo_time(rq)))
		return rq;

	return NULL;
}
/* return vios dispatched */
static u64 fiops_dispatch_request(struct fiops_data *fiopsd,
	struct fiops_ioc *ioc)
{
	struct request *rq;
	struct request_queue *q = fiopsd->queue;

	rq = rq_entry_fifo(ioc->fifo.next);

	fiops_remove_request(rq);
	elv_dispatch_add_tail(q, rq);

	fiopsd->in_flight[rq_is_sync(rq)]++;
	ioc->in_flight++;

	return fiops_scaled_vios(fiopsd, ioc, rq);
}
static struct request *
sio_expired_request(struct sio_data *sd, int sync)
{
	struct request *rq;

	if (list_empty(&sd->fifo_list[sync]))
		return NULL;

	/* Retrieve request */
	rq = rq_entry_fifo(sd->fifo_list[sync].next);

	/* Request has expired */
	if (time_after(jiffies, rq_fifo_time(rq)))
		return rq;

	return NULL;
}
/*
 * deadline_check_fifo returns 1 if there is an expired request on the fifo
 * in direction ddir, 0 otherwise. Unlike the stock deadline helper, this
 * variant handles an empty fifo itself instead of requiring !list_empty().
 */
static inline int deadline_check_fifo(struct flash_data *fd, int ddir)
{
	struct request *rq;
	
	/* no request on the given list, so nothing can have expired */
	if (list_empty(&fd->fifo_list[ddir]))
		return 0;

	rq = rq_entry_fifo(fd->fifo_list[ddir].next);

	/* the head request has expired */
	if (time_after(jiffies, rq_fifo_time(rq)))
		return 1;

	return 0;
}
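
All of these expiry helpers inspect only the head of the list. That is sound because each request is stamped with jiffies + expire when it is queued, so deadlines within one fifo are non-decreasing and the head is always the first to expire. A hypothetical stamp-and-check sketch of that invariant (toy array-backed fifo, names invented here):

#include <stdio.h>

#define EXPIRE_TICKS 500UL

struct toy_req { unsigned long fifo_time; };

/* toy fifo: index 0 is the head, i.e. the oldest request */
static struct toy_req fifo[8];
static int nfifo;

static void queue_rq(unsigned long now)
{
	/* stamped on arrival, so deadlines are non-decreasing along the list */
	fifo[nfifo++].fifo_time = now + EXPIRE_TICKS;
}

static int check_fifo(unsigned long now)
{
	if (nfifo == 0)
		return 0;
	/* the head holds the earliest deadline, so one check suffices */
	return (long)(now - fifo[0].fifo_time) >= 0;
}

int main(void)
{
	queue_rq(100);	/* deadline 600 */
	queue_rq(300);	/* deadline 800 */
	printf("at 550: %d\n", check_fifo(550));	/* 0: head not yet due */
	printf("at 650: %d\n", check_fifo(650));	/* 1: head has expired */
	return 0;
}
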
Example #9
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_rq[WRITE])
		rq = dd->next_rq[WRITE];
	else
		rq = dd->next_rq[READ];

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = dd->next_rq[data_dir];
	}

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}
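
The flow above turns on two knobs: fifo_batch caps how many requests a batch may dispatch in one direction, and writes_starved caps how many read batches may go by while writes wait. A toy trace with assumed values (fifo_batch = 2, writes_starved = 2) in which both directions always have work queued:

#include <stdio.h>

#define FIFO_BATCH	2	/* max dispatches per batch */
#define WRITES_STARVED	2	/* read batches a write may sit out */

int main(void)
{
	int starved = 0;
	int batching = FIFO_BATCH;	/* no batch in progress yet */
	int step;

	for (step = 0; step < 9; step++) {
		const char *dir;

		if (batching < FIFO_BATCH) {
			dir = "same dir";	/* still entitled to batch */
		} else if (starved++ >= WRITES_STARVED) {
			dir = "WRITE";		/* writes finally win */
			starved = 0;
			batching = 0;
		} else {
			dir = "READ";		/* reads keep priority */
			batching = 0;
		}
		batching++;
		printf("step %d: dispatch %s\n", step, dir);
	}
	return 0;
}
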
Example #10
/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:		requests queue
 * @force:		flag indicating if forced dispatch
 *
 * Return 0 if no requests were moved to the dispatch queue.
 *	  1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	int ret = 0, currq, ioprio_class_to_serve, start_idx, end_idx;
	int expire_index = -1;

	if (force && hrtimer_active(&rd->rd_idle_data.hr_timer)) {
		if (hrtimer_try_to_cancel(&rd->rd_idle_data.hr_timer) >= 0) {
			row_log(rd->dispatch_queue,
				"Canceled delayed work on %d - forced dispatch",
				rd->rd_idle_data.idling_queue_idx);
			rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
		}
	}

	if (rd->pending_urgent_rq) {
		row_log(rd->dispatch_queue, "dispatching urgent request");
		row_dispatch_insert(rd, rd->pending_urgent_rq);
		ret = 1;
		goto done;
	}

	ioprio_class_to_serve = row_get_ioprio_class_to_serve(rd, force);
	row_log(rd->dispatch_queue, "Dispatching from %d priority class",
		ioprio_class_to_serve);

	if (ioprio_class_to_serve == IOPRIO_CLASS_RT) {
		expire_index = row_be_expire_adjust(rd);
		if (expire_index >= ROWQ_REG_PRIO_IDX)
			ioprio_class_to_serve = IOPRIO_CLASS_BE;
	}

	switch (ioprio_class_to_serve) {
	case IOPRIO_CLASS_NONE:
		rd->last_served_ioprio_class = IOPRIO_CLASS_NONE;
		goto done;
	case IOPRIO_CLASS_RT:
		if (expire_index >= 0) {
			start_idx = expire_index;
			end_idx = expire_index + 1;
			expire_index = -1;
		} else {
			start_idx = ROWQ_HIGH_PRIO_IDX;
			end_idx = ROWQ_REG_PRIO_IDX;
		}
		break;
	case IOPRIO_CLASS_BE:
		if (expire_index > 0) {
			start_idx = expire_index;
			end_idx = expire_index + 1;
			expire_index = -1;
		} else {
			start_idx = ROWQ_REG_PRIO_IDX;
			end_idx = ROWQ_LOW_PRIO_IDX;
		}
		break;
	case IOPRIO_CLASS_IDLE:
		start_idx = ROWQ_LOW_PRIO_IDX;
		end_idx = ROWQ_MAX_PRIO;
		break;
	default:
		pr_err("%s(): Invalid I/O priority class\n", __func__);
		goto done;
	}

	currq = row_get_next_queue(q, rd, start_idx, end_idx);

	/* Dispatch */
	if (currq >= 0) {
		row_dispatch_insert(rd,
			rq_entry_fifo(rd->row_queues[currq].fifo.next));
		ret = 1;
	}
done:
	return ret;
}
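
The switch above reduces each I/O priority class to a half-open window [start_idx, end_idx) of row queues to scan, and an expired queue narrows that window to a single entry. A simplified sketch of that reduction with made-up index values (the real ROWQ_* constants differ, and the RT-to-BE demotion is folded into the generic expired-queue case here):

#include <stdio.h>

/* illustrative queue layout: high-priority queues, then regular, then low */
enum {
	Q_HIGH_IDX = 0,	/* stands in for ROWQ_HIGH_PRIO_IDX */
	Q_REG_IDX  = 2,	/* stands in for ROWQ_REG_PRIO_IDX  */
	Q_LOW_IDX  = 5,	/* stands in for ROWQ_LOW_PRIO_IDX  */
	Q_MAX      = 7,	/* stands in for ROWQ_MAX_PRIO      */
};

enum { CLASS_RT, CLASS_BE, CLASS_IDLE };

static void scan_window(int prio_class, int expire_index, int *start, int *end)
{
	if (expire_index >= 0) {
		/* an expired queue shrinks the window to just that queue */
		*start = expire_index;
		*end = expire_index + 1;
		return;
	}
	switch (prio_class) {
	case CLASS_RT:
		*start = Q_HIGH_IDX; *end = Q_REG_IDX; break;
	case CLASS_BE:
		*start = Q_REG_IDX; *end = Q_LOW_IDX; break;
	case CLASS_IDLE:
		*start = Q_LOW_IDX; *end = Q_MAX; break;
	}
}

int main(void)
{
	int s, e;

	scan_window(CLASS_BE, -1, &s, &e);
	printf("BE scans queues [%d, %d)\n", s, e);		/* [2, 5) */
	scan_window(CLASS_BE, 3, &s, &e);
	printf("expired queue 3 gives [%d, %d)\n", s, e);	/* [3, 4) */
	return 0;
}
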
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
    struct deadline_data *dd = q->elevator->elevator_data;
    const int reads = !list_empty(&dd->fifo_list[READ]);
    const int writes = !list_empty(&dd->fifo_list[WRITE]);
    struct request *rq;
    int data_dir;

    /*
     * batches are currently reads XOR writes
     */
    if (dd->next_rq[WRITE])
        rq = dd->next_rq[WRITE];
    else
        rq = dd->next_rq[READ];

    if (rq) {
        /* we have a "next request" */

        if (dd->last_sector != rq->sector)
            /* end the batch on a non-sequential request */
            dd->batching += dd->fifo_batch;

        if (dd->batching < dd->fifo_batch)
            /* we are still entitled to batch */
            goto dispatch_request;
    }

    /*
     * at this point we are not running a batch. select the appropriate
     * data direction (read / write)
     */

    if (reads) {
        BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

        if (writes && (dd->starved++ >= dd->writes_starved))
            goto dispatch_writes;

        data_dir = READ;

        goto dispatch_find_request;
    }

    /*
     * there are either no reads or writes have been starved
     */

    if (writes) {
dispatch_writes:
        BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

        dd->starved = 0;

        data_dir = WRITE;

        goto dispatch_find_request;
    }

    return 0;

dispatch_find_request:
    /*
     * we are not running a batch, find best request for selected data_dir
     */
    if (deadline_check_fifo(dd, data_dir)) {
        /* An expired request exists - satisfy it */
        dd->batching = 0;
        rq = rq_entry_fifo(dd->fifo_list[data_dir].next);

    } else if (dd->next_rq[data_dir]) {
        /*
         * The last req was the same dir and we have a next request in
         * sort order. No expired requests so continue on from here.
         */
        rq = dd->next_rq[data_dir];
    } else {
        struct rb_node *node;
        /*
         * The last req was the other direction or we have run out of
         * higher-sectored requests. Go back to the lowest sectored
         * request (1 way elevator) and start a new batch.
         */
        dd->batching = 0;
        node = rb_first(&dd->sort_list[data_dir]);
        if (node)
            rq = rb_entry_rq(node);
    }

dispatch_request:
    /*
     * rq is the selected appropriate request.
     */
    dd->batching++;
    deadline_move_request(dd, rq);

    return 1;
}
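
Unlike the newer variant above, this version ends a batch without an explicit flag: once last_sector no longer matches the next request, batching += fifo_batch poisons the counter so the batching < fifo_batch entitlement test can never pass again. A minimal illustration with assumed values:

#include <stdio.h>

int main(void)
{
	int fifo_batch = 16;
	int batching = 5;				/* mid-batch */
	long last_sector = 2048, next_sector = 4096;	/* not contiguous */

	if (last_sector != next_sector)
		batching += fifo_batch;	/* poison the counter... */

	/* ...so the entitlement test can never pass again for this batch */
	printf("still batching: %d\n", batching < fifo_batch);	/* 0 */
	return 0;
}
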
Example #12
static int osio_dispatch(struct request_queue *q, int force)
{
	struct osio_data *od = q->elevator->elevator_data;
	const unsigned int non_empty[3] = {!list_empty(&od->fifo_head[OSIO_DIR_READ]),
					   !list_empty(&od->fifo_head[OSIO_DIR_SYNC_WRITE]),
					   !list_empty(&od->fifo_head[OSIO_DIR_ASYNC_WRITE]),};
	struct request *rq = NULL;

	osio_dbg("1, od->fifo_dir = %d\n", od->fifo_dir);
	osio_dbg("1, non_empty[0] = %d\n", non_empty[0]);
	osio_dbg("1, non_empty[1] = %d\n", non_empty[1]);
	osio_dbg("1, non_empty[2] = %d\n", non_empty[2]);

	/* dispatch a batch of rq */
	if (od->fifo_dir != OSIO_DIR_UNDEF) {
		if ((od->batching >= od->fifo_batch[od->fifo_dir]) || (!non_empty[od->fifo_dir])) {
			od->fifo_dir = OSIO_DIR_UNDEF;
		} else {
			goto dispatch_request;
		}
	}

	/* redecide the direction */
	if (non_empty[OSIO_DIR_READ]) {
		goto dir_read;
	}

	if (non_empty[OSIO_DIR_SYNC_WRITE]) {
		goto dir_sync_write;
	}

	if (non_empty[OSIO_DIR_ASYNC_WRITE]) {
		goto dir_async_write;
	}

	return 0;

dir_read:
	/* find a starved write rq */
	if ((od->write_starved[OSIO_SYNC] > od->write_starved_line[OSIO_SYNC]) && non_empty[OSIO_DIR_SYNC_WRITE]) {
		goto dir_sync_write;
	} else if ((od->write_starved[OSIO_ASYNC] > od->write_starved_line[OSIO_ASYNC]) && non_empty[OSIO_DIR_ASYNC_WRITE]) {
		goto dir_async_write;
	}

	od->fifo_dir = OSIO_DIR_READ;
	od->batching = 0;
	od->write_starved[OSIO_SYNC] += non_empty[OSIO_DIR_SYNC_WRITE];
	od->write_starved[OSIO_ASYNC] += non_empty[OSIO_DIR_ASYNC_WRITE];
	goto dispatch_request;

dir_sync_write:
	if ((od->write_starved[OSIO_ASYNC] > od->write_starved_line[OSIO_ASYNC]) && non_empty[OSIO_DIR_ASYNC_WRITE]) {
		goto dir_async_write;
	}

	od->fifo_dir = OSIO_DIR_SYNC_WRITE;
	od->batching = 0;
	od->write_starved[OSIO_SYNC] = 0;
	od->write_starved[OSIO_ASYNC] += non_empty[OSIO_DIR_ASYNC_WRITE];
	goto dispatch_request;

dir_async_write:
	od->fifo_dir = OSIO_DIR_ASYNC_WRITE;
	od->batching = 0;
	od->write_starved[OSIO_ASYNC] = 0;
	od->write_starved[OSIO_SYNC] += non_empty[OSIO_DIR_SYNC_WRITE];
	goto dispatch_request;

dispatch_request:
	/* dispatch req */
	osio_dbg("2, od->fifo_dir = %d\n", od->fifo_dir);
	osio_dbg("2, od->batching = %d\n", od->batching);
	rq = rq_entry_fifo(od->fifo_head[od->fifo_dir].next);
	list_del_init(&rq->queuelist);
	elv_dispatch_add_tail(q, rq);
	od->batching++;
	return 1;
}
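
osio splits deadline's single starved counter into one per write class: serving reads ages both write classes (when they have work queued), serving sync writes still ages async writes, and a class's counter resets when it is served. Following the gotos above, a sufficiently starved async write ends up outranking even a starved sync write. A toy trace of just that accounting (thresholds assumed, per-direction batching omitted, all queues assumed non-empty):

#include <stdio.h>

enum { OSIO_SYNC, OSIO_ASYNC };

int main(void)
{
	int starved[2] = { 0, 0 };
	int line[2] = { 2, 4 };	/* assumed write_starved_line thresholds */
	int step;

	for (step = 0; step < 12; step++) {
		const char *dir;

		if (starved[OSIO_ASYNC] > line[OSIO_ASYNC]) {
			/* net effect of dir_read plus the dir_sync_write
			 * re-check: a starved async write outranks everything */
			dir = "async write";
			starved[OSIO_ASYNC] = 0;
			starved[OSIO_SYNC]++;
		} else if (starved[OSIO_SYNC] > line[OSIO_SYNC]) {
			dir = "sync write";
			starved[OSIO_SYNC] = 0;
			starved[OSIO_ASYNC]++;
		} else {
			dir = "read";
			starved[OSIO_SYNC]++;
			starved[OSIO_ASYNC]++;
		}
		printf("step %2d: %s\n", step, dir);
	}
	return 0;
}
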