Example no. 1
static struct request *blk_flush_complete_seq(struct request_queue *q,
					      unsigned seq, int error)
{
	struct request *next_rq = NULL;

	if (error && !q->flush_err)
		q->flush_err = error;

	BUG_ON(q->flush_seq & seq);
	q->flush_seq |= seq;
	q->ordseq = q->flush_seq;

	if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) {
		/* not complete yet, queue the next flush sequence */
		next_rq = queue_next_fseq(q);
	} else {
		/* complete this flush request */
		__blk_end_request_all(q->orig_flush_rq, q->flush_err);
		q->orig_flush_rq = NULL;
		q->flush_seq = 0;
		q->ordseq = q->flush_seq;

		/* dispatch the next flush if there's one */
		if (!list_empty(&q->pending_flushes)) {
			next_rq = list_entry_rq(q->pending_flushes.next);
			list_move(&next_rq->queuelist, &q->queue_head);
		}
	}
	return next_rq;
}
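For context, q->flush_seq accumulates one stage bit at a time, which is what the BUG_ON above polices. A minimal sketch of the stage flags, following the naming this era of blk-flush.c used (treat the exact values as illustrative):

enum {
	QUEUE_FSEQ_STARTED	= (1 << 0),	/* flushing in progress */
	QUEUE_FSEQ_PREFLUSH	= (1 << 1),	/* pre-flushing in progress */
	QUEUE_FSEQ_DATA		= (1 << 2),	/* data write in progress */
	QUEUE_FSEQ_POSTFLUSH	= (1 << 3),	/* post-flushing in progress */
	QUEUE_FSEQ_DONE		= (1 << 4),
};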
Example no. 2
/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
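The Notes above pin down the locking contract, so a hypothetical call site (the handler name is made up for illustration) would look like:

/* hypothetical error handler: requeue every tagged request after a bus
 * reset; blk_queue_invalidate_tags() must run under q->queue_lock */
static void example_reset_requeue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_queue_invalidate_tags(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}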
Example no. 3
/* return 1 if the head request of any FIFO in [start_idx, end_idx) has
 * waited longer than ROW_IO_MAX_LATENCY_MS */
static int row_queue_is_expired(struct row_data *rd, int start_idx, int end_idx)
{
	int i;

	for (i = start_idx; i < end_idx; i++) {
		struct request *check_req;

		if (list_empty(&rd->row_queues[i].fifo))
			continue;

		check_req = list_entry_rq(rd->row_queues[i].fifo.next);
		if (check_req && time_after(jiffies,
		    (unsigned long)check_req->fifo_time +
		    msecs_to_jiffies(ROW_IO_MAX_LATENCY_MS)))
			return 1;
	}
	return 0;
}
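A hedged usage sketch: a dispatch path could use this helper to decide where to start scanning. The wrapper below is hypothetical; the index bounds are the ones that appear in Example no. 6.

/* hypothetical helper: start from the high-priority range whenever one of
 * its FIFOs has exceeded ROW_IO_MAX_LATENCY_MS */
static int row_pick_start_idx(struct row_data *rd)
{
	if (row_queue_is_expired(rd, ROWQ_HIGH_PRIO_IDX, ROWQ_REG_PRIO_IDX))
		return ROWQ_HIGH_PRIO_IDX;
	return ROWQ_REG_PRIO_IDX;
}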
Example no. 4
static void optimal_add_request(struct request_queue *q, struct request *rq)
{
	struct optimal_data *nd = q->elevator->elevator_data;
	int head_before_req, req_before_entry, entry_before_head;
	struct list_head *entry;

	nd->shouldBuild = 1;
	entry = &nd->arrival_queue;

	/*
	 * Insert rq into the arrival queue in C-SCAN order relative to the
	 * current head position (&nd->headpos is a dummy request carrying
	 * the head sector).
	 */
	head_before_req = INORDER(&nd->headpos, rq);
	while ((entry = entry->next) != &nd->arrival_queue) {
		req_before_entry = INORDER(rq, list_entry_rq(entry));
		entry_before_head = INORDER(list_entry_rq(entry), &nd->headpos);
		if (head_before_req && (req_before_entry || entry_before_head))
			break;
		if (!head_before_req && req_before_entry && entry_before_head)
			break;
	}

	/* list_add_tail() links rq immediately before entry */
	list_add_tail(&rq->queuelist, entry);
}
Example no. 5
static void greedy_sorted_add(struct list_head *entry, struct list_head *head)
{
	struct request *entry_request = list_entry_rq(entry);
	sector_t entry_sector = blk_rq_pos(entry_request);
	struct request *comparison;

	/*
	 * Walk the sorted list and splice entry in before the first request
	 * with a smaller starting sector; list_add_tail(node, pos) links
	 * node immediately before pos, keeping the list in descending
	 * sector order.
	 */
	list_for_each_entry(comparison, head, queuelist) {
		if (entry_sector > blk_rq_pos(comparison)) {
			list_add_tail(entry, &comparison->queuelist);
			return;
		}
	}
	/* smallest sector seen so far: append at the tail */
	list_add_tail(entry, head);
}
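A hedged wiring sketch: how an elevator add_request hook might feed greedy_sorted_add(). The gd->sorted field is an assumption for illustration; only queuelist and elevator_data come from the excerpts above.

/* hypothetical add_request hook; gd->sorted is an assumed list_head
 * holding the sector-sorted pending requests */
static void greedy_add_request(struct request_queue *q, struct request *rq)
{
	struct greedy_data *gd = q->elevator->elevator_data;

	greedy_sorted_add(&rq->queuelist, &gd->sorted);
}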
Example no. 6
/*
 * row_be_expire_adjust() - find the queue with the largest expire overrun
 * @rd:		pointer to struct row_data
 *
 * Returns the index of the queue whose head request has overrun its expire
 * window the longest, or -1 if no queue has expired.
 */
static int row_be_expire_adjust(struct row_data *rd)
{
	int start_idx = ROWQ_HIGH_PRIO_IDX;
	int end_idx = ROWQ_LOW_PRIO_IDX;
	int i = 0;
	unsigned int max_expire_time = 0;
	int expire_number = 0;
	unsigned int timeout = 0;
	int expire_index = -1;
	unsigned int expire_time = 0;
	unsigned long temp_jiffies = jiffies;
	struct request *check_req = NULL;

	for (i = start_idx; i < end_idx; i++) {
		if (list_empty(&rd->row_queues[i].fifo))
			continue;
		check_req = list_entry_rq(rd->row_queues[i].fifo.next);
		expire_time = jiffies_to_msecs(temp_jiffies -
			check_req->fifo_time);
		if (i < ROWQ_REG_PRIO_IDX && expire_time > HP_EXPIRE_TIME)
			timeout = expire_time - HP_EXPIRE_TIME;
		else if (expire_time > RP_EXPIRE_TIME)
			timeout = expire_time - RP_EXPIRE_TIME;
		if (timeout > 0) {
			expire_number++;
			if (timeout > max_expire_time) {
				max_expire_time = timeout;
				expire_index = i;
			}
			timeout = 0;
		}
	}
	if (expire_number <= 0)
		expire_index = -1;
	else if (printk_ratelimit())
		pr_crit("ROW_LOG:max expire time:%u in Q%d(%d-%d-%d-%d-%d)!!!\n",
			max_expire_time, expire_index, rd->row_queues[0].nr_req,
			rd->row_queues[1].nr_req, rd->row_queues[2].nr_req,
			rd->row_queues[3].nr_req, rd->row_queues[4].nr_req);
	return expire_index;
}
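A hypothetical caller, consistent with the -1 convention documented above (row_get_next_queue() is the scan shown in Example no. 13; the wrapper name is made up):

/* prefer the most-starved queue; otherwise fall back to the normal
 * priority scan */
static int row_pick_queue(struct request_queue *q, struct row_data *rd)
{
	int idx = row_be_expire_adjust(rd);

	if (idx >= 0)
		return idx;
	return row_get_next_queue(q, rd, ROWQ_HIGH_PRIO_IDX, ROWQ_LOW_PRIO_IDX);
}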
Example no. 7
static struct request *deadline_next_request(request_queue_t *q)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *rq;

	/*
	 * if there are still requests on the dispatch queue, grab the first one
	 */
	if (!list_empty(dd->dispatch)) {
dispatch:
		rq = list_entry_rq(dd->dispatch->next);
		return rq;
	}

	if (deadline_dispatch_requests(dd))
		goto dispatch;

	return NULL;
}
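A hedged usage note: the function only peeks at the head of the dispatch list, so a consumer must remove the request itself. A hypothetical drain loop (the removal step is illustrative, not this scheduler's real consumer):

/* hypothetical consumer: drain whatever the scheduler exposes next */
static void example_drain(request_queue_t *q)
{
	struct request *rq;

	while ((rq = deadline_next_request(q)) != NULL) {
		list_del_init(&rq->queuelist);
		/* hand rq to the driver here */
	}
}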
Example no. 8
static inline void __maybe_unused row_dump_reg_and_low_stat(struct row_data *rd)
{
	int i;
	unsigned int total_dispatch_cnt = 0;
	bool print_statistics = false;
	unsigned int diff;

	for (i = ROWQ_PRIO_REG_SWRITE; i <= ROWQ_PRIO_REG_WRITE; i++) {
		if (!list_empty(&rd->row_queues[i].fifo)) {
			struct request *check_req = list_entry_rq(rd->row_queues[i].fifo.next);
			/* the enqueue timestamp is stashed in the request's
			 * csd field while it sits in the FIFO */
			unsigned long check_jiffies = (unsigned long)check_req->csd.list.next;

			if (time_after(jiffies,
			    check_jiffies + msecs_to_jiffies(ROW_DUMP_REQ_STAT_MSECS))) {
				printk("ROW scheduler: request(pid:%d)"
				    " stays in queue[%d][nr_reqs:%d] for %ums\n",
				    check_req->pid, i, rd->row_queues[i].nr_req,
				    jiffies_to_msecs(jiffies - check_jiffies));
				print_statistics = true;
			}
		}
	}

	if (!print_statistics)
		return;

	diff = jiffies_to_msecs(jiffies - rd->last_update_jiffies);
	if (diff < 10 * 1000)
		return;

	printk("ROW scheduler: dispatched request statistics:");

	for (i = 0; i < ROWQ_MAX_PRIO; i++) {
		printk(" Q[%d]: %u;", i, rd->row_queues[i].dispatch_cnt);
		total_dispatch_cnt += rd->row_queues[i].dispatch_cnt;
		rd->row_queues[i].dispatch_cnt = 0;
	}
	printk("\n%u requests dispatched in %umsec\n",
			total_dispatch_cnt, diff);
	rd->last_update_jiffies = jiffies;
}
Example no. 9
/* peek the next candidate at the tail of the upper list */
static struct request *greedy_next_upper(struct greedy_data *greedy_data)
{
	return list_entry_rq(greedy_data->upper->prev);
}
Example no. 10
/* peek the next candidate at the head of the lower list */
static struct request *greedy_next_lower(struct greedy_data *greedy_data)
{
	return list_entry_rq(greedy_data->lower->next);
}
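The pair above implies the pending queue is split around the current head position, with the nearest candidate in each direction sitting at one end of each list. A sketch of the assumed layout (types inferred from the pointer dereferences; the ordering notes are assumptions):

struct greedy_data {
	struct list_head *upper;	/* requests above the head; candidate at the tail */
	struct list_head *lower;	/* requests below the head; candidate at the head */
};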
Example no. 11
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
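For context, this show callback receives a queuelist node from a seq_file iterator walking a request list, which is why list_entry_rq() recovers the request from v. A sketch of the typical wiring, following the pattern used in blk-mq-debugfs.c (the start/next/stop handlers are placeholders here):

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};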
Example no. 12
/*
 * If the shouldBuild dirty bit is set, combine the C-SCAN lists into one
 * array and call buildTable(); use the head position and the resulting
 * table to pick the cheapest service path, then clear the dirty bit. Each
 * call dispatches the next request on the path and records the new head
 * position.
 */
static int optimal_dispatch(struct request_queue *q, int force)
{
	int size = 0, ndx = 1;
	long cost1, cost2;
	struct list_head *entry;
	struct requestList *scan;
	struct optimal_data *nd = q->elevator->elevator_data;

	if (nd->shouldBuild) {
		/*
		 * Flatten the arrival queue into an array of requests,
		 * already in C-SCAN order.
		 */
		entry = &nd->arrival_queue;
		while ((entry = entry->next) != &nd->arrival_queue &&
		       size < nd->max_requests) {
			nd->mylist[size] = list_entry_rq(entry);
			size++;
		}
		if (size == 0) {
			nd->dispatchSize = 0;
			nd->currentNdx = 0;
			return 0;
		}
		/* defensive clamp; the loop condition already enforces this */
		if (size > nd->max_requests)
			size = nd->max_requests;

		buildTable(nd->mylist, size, nd->C);

		/*
		 * Candidate cost of starting the tour at either end of the
		 * sorted batch: the first seek is paid by all ((size-1) + 1)
		 * pending requests, and C[0][size-1][0/1] memoizes the best
		 * cost of the remaining interval entered from its left/right
		 * end.
		 */
		cost1 = ((size-1) + 1) * distance(blk_rq_pos(&nd->headpos), blk_rq_pos(nd->mylist[0])) + nd->C[0][size-1][0].cost;
		cost2 = ((size-1) + 1) * distance(blk_rq_pos(&nd->headpos), blk_rq_pos(nd->mylist[size-1])) + nd->C[0][size-1][1].cost;
		if (cost1 < cost2) {
			/* start at the leftmost request, then follow the
			 * memoized path for the rest of the interval */
			nd->dispatch_head[0] = nd->mylist[0];
			scan = nd->C[0][size-1][0].head;
			while (scan != NULL && scan->data != NULL) {
				nd->dispatch_head[ndx] = scan->data;
				ndx++;
				scan = scan->next;
			}
		} else {
			/* start at the rightmost request instead */
			nd->dispatch_head[0] = nd->mylist[size-1];
			scan = nd->C[0][size-1][1].head;
			while (scan != NULL && scan->data != NULL) {
				nd->dispatch_head[ndx] = scan->data;
				ndx++;
				scan = scan->next;
			}
		}
		nd->dispatchSize = size;
		nd->currentNdx = 0;
		nd->shouldBuild = 0;
	}

	if (nd->currentNdx < nd->dispatchSize) {
		struct request *rq;

		rq = nd->dispatch_head[nd->currentNdx];
		nd->headpos.__sector = rq_end_sector(rq);
		list_del_init(&rq->queuelist);
		elv_dispatch_add_tail(q, rq);
		nd->currentNdx++;
		return 1;
	}
	return 0;
}
Example no. 13
static int row_get_next_queue(struct request_queue *q, struct row_data *rd,
				int start_idx, int end_idx)
{
	int i = start_idx;
	bool restart = true;
	int ret = -EIO;
	bool print_debug_log = false;

	do {
		if (list_empty(&rd->row_queues[i].fifo) ||
		    rd->row_queues[i].nr_dispatched >=
		    rd->row_queues[i].disp_quantum) {
			if ((i == ROWQ_PRIO_HIGH_READ || i == ROWQ_PRIO_REG_READ)
			    && rd->row_queues[i].nr_dispatched >=
			    rd->row_queues[i].disp_quantum)
				print_debug_log = true;

			i++;
			if (i == end_idx && restart) {
				/* scanned the whole range without finding a
				 * dispatchable queue: restart the cycle and
				 * rescan once */
				row_restart_cycle(rd, start_idx, end_idx);
				i = start_idx;
				restart = false;
			}
		} else {
			ret = i;
			break;
		}
	} while (i < end_idx);

	
	if (print_debug_log)
		row_dump_reg_and_low_stat(rd);

#define EXPIRE_REQUEST_THRESHOLD       20

	/*
	 * Lower-priority starvation control: if a queue below REG_READ has
	 * both overrun its expire window and piled up more than
	 * EXPIRE_REQUEST_THRESHOLD requests, halve REG_READ's quantum;
	 * restore the full quantum once the pressure clears.
	 */
	if (ret == ROWQ_PRIO_REG_READ) {
		struct request *check_req;
		bool reset_quantum = false;

		for (i = ret + 1; i < end_idx; i++) {
			if (!row_queues_def[i].expire)
				continue;

			if (list_empty(&rd->row_queues[i].fifo)) {
				reset_quantum = true;
				continue;
			}

			check_req = list_entry_rq(rd->row_queues[i].fifo.next);

			/* the enqueue timestamp is stashed in the csd field,
			 * as in Example no. 8 */
			if (time_after(jiffies,
			    ((unsigned long) (check_req)->csd.list.next)
			    + row_queues_def[i].expire) &&
			    rd->row_queues[i].nr_req > EXPIRE_REQUEST_THRESHOLD) {
				rd->row_queues[ret].disp_quantum =
					row_queues_def[ret].quantum / 2;
				reset_quantum = false;
				break;
			} else
				reset_quantum = true;
		}

		if (reset_quantum)
			rd->row_queues[ret].disp_quantum =
				row_queues_def[ret].quantum;
	}

	return ret;
}