static int greedy_dispatch(struct request_queue *q, int force)
{
	struct request *rq, *next_upper, *next_lower;
	struct greedy_data *nd = q->elevator->elevator_data;
	sector_t lower_sector, upper_sector;

	if (list_empty(&nd->lower_queue) && list_empty(&nd->upper_queue))
		return 0;
	if (list_empty(&nd->lower_queue)) {
		rq = greedy_next_upper(nd);
		goto end;
	}
	if (list_empty(&nd->upper_queue)) {
		rq = greedy_next_lower(nd);
		goto end;
	}

	next_lower = greedy_next_lower(nd);
	next_upper = greedy_next_upper(nd);

	lower_sector = blk_rq_pos(next_lower);
	upper_sector = blk_rq_pos(next_upper);

	/* dispatch whichever candidate is closer to the current head position */
	if ((upper_sector - nd->head_sector) < (nd->head_sector - lower_sector)) {
		rq = next_upper;
		goto end;
	}

	rq = next_lower;

end:
	nd->head_sector = rq_end_sector(rq);
	list_del_init(&rq->queuelist);
	elv_dispatch_sort(q, rq);
	return 1;
}
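/*
 * Minimal sketch (assumed, not taken from the original source) of the
 * elevator-private data and peek helpers that greedy_dispatch() above relies
 * on.  Pending requests sit on two sector-sorted lists split around the last
 * head position; each helper returns the closest candidate from its side and
 * must only be called on a non-empty list, as the dispatch code guarantees.
 */
struct greedy_data {
	struct list_head lower_queue;	/* requests below head_sector, nearest first */
	struct list_head upper_queue;	/* requests at or above head_sector, nearest first */
	sector_t head_sector;		/* sector where the last dispatched request ended */
};

static struct request *greedy_next_lower(struct greedy_data *nd)
{
	return list_first_entry(&nd->lower_queue, struct request, queuelist);
}

static struct request *greedy_next_upper(struct greedy_data *nd)
{
	return list_first_entry(&nd->upper_queue, struct request, queuelist);
}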
Example #2
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	if (!orig->q->mq_ops)
		blk_start_request(orig);
	else
		blk_mq_start_request(orig);
	atomic_inc(&md->pending[rq_data_dir(orig)]);

	if (md->seq_rq_merge_deadline_usecs) {
		md->last_rq_pos = rq_end_sector(orig);
		md->last_rq_rw = rq_data_dir(orig);
		md->last_rq_start_time = ktime_get();
	}

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}
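/*
 * dm_start_request() above, like the dispatch functions in the other
 * examples, records where a request ends via rq_end_sector().  That helper
 * is a thin macro over the standard accessors from <linux/blkdev.h>;
 * roughly (a sketch, matching how it is used here):
 */
#define rq_end_sector(rq)	(blk_rq_pos(rq) + blk_rq_sectors(rq))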
Example #3
/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	dd->last_sector = rq_end_sector(rq);

	/*
	 * take it off the sort and fifo list, move
	 * to dispatch queue
	 */
	deadline_move_to_dispatch(dd, rq);
}
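/*
 * deadline_latter_request() used above returns the request that follows rq
 * in sector order on the deadline scheduler's red-black tree, or NULL at the
 * end of the tree.  A sketch of that helper, modeled on the stock
 * deadline-iosched implementation and shown here only for context:
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}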
static int cscan_dispatch(struct request_queue *q, int force)
{
	struct request *rq;
	struct cscan_data *cd = q->elevator->elevator_data;
	
	struct rb_node *node = rb_first(&(cd->sort_list[cd->curr]));
	if(!node) {
		cd->curr = 1-cd->curr;
		node = rb_first(&(cd->sort_list[cd->curr]));
	}

	if(node) {
		rq = rb_entry_rq(node);
		cd->last_sector = rq_end_sector(rq);
		elv_rb_del(&(cd->sort_list[cd->curr]), rq);
		elv_dispatch_add_tail(q, rq);
		return 1;
	}	
	return 0;
}
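/*
 * Minimal sketch (assumed, not taken from the original source) of the
 * elevator-private data used by cscan_dispatch() above: two sector-sorted
 * red-black trees, one for the sweep currently being serviced and one for
 * requests that must wait for the next sweep, plus the index of the active
 * tree and the sector where the last dispatch ended.
 */
struct cscan_data {
	struct rb_root sort_list[2];	/* sort_list[curr] is drained first */
	int curr;			/* index of the tree being dispatched */
	sector_t last_sector;		/* end sector of the last dispatched request */
};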
//if the shouldBuild dirty bit is set, combine the C-SCAN lists into one list and call buildTable()
//use the head position and the built table to compute the cheapest dispatch path
//clear the shouldBuild dirty bit
//dispatch the first request on the path
//record the new head position
static int optimal_dispatch(struct request_queue *q, int force)
{
	int size = 0, ndx = 1;
	long cost1, cost2;
	struct list_head *entry;
	struct requestList *scan;
	struct optimal_data *nd = q->elevator->elevator_data;
	if(nd->shouldBuild){
		//copy the requests on the arrival queue into an array,
		//in C-SCAN order, capped at max_requests
		entry = &nd->arrival_queue;
		while(((entry = entry->next)!=&nd->arrival_queue)&&size<nd->max_requests){
			nd->mylist[size] = list_entry_rq(entry);
			size++;
		}
		if(size == 0){
			nd->dispatchSize = 0;
			nd->currentNdx = 0;
			return 0;
		}
		//defensive: the copy loop above already caps size at max_requests
		if(size > nd->max_requests)
			size = nd->max_requests;

		buildTable(nd->mylist, size, nd->C);

		//compare the total cost of starting the dispatch path at one end of
		//the C-SCAN-ordered array (cost1) with starting it at the other end (cost2)
		cost1 = ((size-1) + 1) * distance(blk_rq_pos(&nd->headpos), blk_rq_pos(nd->mylist[0])) + nd->C[0][size-1][0].cost;
		cost2 = ((size-1) + 1) * distance(blk_rq_pos(&nd->headpos), blk_rq_pos(nd->mylist[size-1])) + nd->C[0][size-1][1].cost;
		if(cost1 < cost2){
			nd->dispatch_head[0] = nd->mylist[0];
			//for each item in C[0][size-1][0]'s path, add to dispatch_head
			scan = nd->C[0][size-1][0].head;
			while(scan != NULL && scan->data != NULL){
				nd->dispatch_head[ndx] = scan->data;
				ndx++;
				scan = scan->next;
			}
		}
		else{
			nd->dispatch_head[0] = nd->mylist[size-1];
			scan = nd->C[0][size-1][1].head;
			while(scan != NULL && scan->data != NULL){
				nd->dispatch_head[ndx] = scan->data;
				ndx++;	
				scan = scan->next;
			}
		}
		nd->dispatchSize = size;
		nd->currentNdx = 0;
		nd->shouldBuild = 0;
	}

	/*
	if (!list_empty(&nd->arrival_queue)) {
		struct request *rq;
		rq = list_entry(nd->arrival_queue.next, struct request, queuelist);
		nd->headpos.__sector =rq_end_sector(rq);
		list_del_init(&rq->queuelist);
		elv_dispatch_add_tail(q, rq);
		nd->currentNdx++;
		return 1;
	}
	*/
	if(nd->currentNdx < nd->dispatchSize){
		struct request *rq;
		rq = nd->dispatch_head[nd->currentNdx];
		nd->headpos.__sector = rq_end_sector(rq);
		list_del_init(&rq->queuelist);
		elv_dispatch_add_tail(q, rq);
		nd->currentNdx++;
		return 1;
	}
	return 0;
}
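/*
 * optimal_dispatch() relies on scheduler-private helpers that are not shown.
 * The definitions below are assumptions inferred from how they are used, not
 * the original source: distance() is the absolute sector gap between two
 * positions, and each entry of the C table (given the hypothetical name
 * pathCost here) carries the cost of servicing a contiguous run of requests
 * ending at its left (index 0) or right (index 1) end, together with the
 * corresponding dispatch path as a NULL-terminated linked list.
 */
struct requestList {
	struct request *data;		/* request at this step of the path */
	struct requestList *next;	/* next step, NULL at the end */
};

struct pathCost {
	long cost;			/* accumulated seek cost of this sub-path */
	struct requestList *head;	/* remainder of the path after the first request */
};

static long distance(sector_t a, sector_t b)
{
	return a > b ? a - b : b - a;
}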