/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
	struct request_queue *q = rq->q;

	deadline_remove_request(q, rq);
	elv_dispatch_add_tail(q, rq);
}
static void zen_dispatch(struct zen_data *zdata, struct request *rq)
{
	/* Remove request from list and dispatch it */
	rq_fifo_clear(rq);
	elv_dispatch_add_tail(rq->q, rq);

	/* Increment # of sequential requests */
	zdata->batching++;
}
/*
 * row_dispatch_insert() - move request to dispatch queue
 * @rd: pointer to struct row_data
 *
 * This function moves the next request to dispatch from
 * rd->curr_queue to the dispatch queue
 *
 */
static void row_dispatch_insert(struct row_data *rd)
{
	struct request *rq;

	rq = rq_entry_fifo(rd->row_queues[rd->curr_queue].rqueue.fifo.next);
	row_remove_request(rd->dispatch_queue, rq);
	elv_dispatch_add_tail(rd->dispatch_queue, rq);
	rd->row_queues[rd->curr_queue].rqueue.nr_dispatched++;
	row_clear_rowq_unserved(rd, rd->curr_queue);
}
static inline void
sio_dispatch_request(struct sio_data *sd, struct request *rq)
{
	/*
	 * Remove the request from the fifo list
	 * and dispatch it.
	 */
	rq_fifo_clear(rq);
	elv_dispatch_add_tail(rq->q, rq);

	sd->batched++;
}
/*
 * move request from additional fifo list to dispatch queue.
 */
static inline void
flash_move_to_dispatch(struct flash_data *dd, struct request *rq)
{
	struct request_queue *q = rq->q;

	/* remove rq from its associated fifo queue and reinit */
	rq_fifo_clear(rq);
	elv_dispatch_add_tail(q, rq);
#ifdef DEBUG_FLASH
	/* __data_len is unsigned int: print with %u, via the blk_rq_bytes() accessor */
	printk(KERN_DEBUG "req of size %u is moved to dispatch queue\n",
	       blk_rq_bytes(rq));
#endif
}
/*
 * row_dispatch_insert() - move request to dispatch queue
 * @rd:        pointer to struct row_data
 * @queue_idx: index of the row_queue to dispatch from
 *
 * This function moves the next request to dispatch from
 * the given queue (row_queues[queue_idx]) to the dispatch queue
 *
 */
static void row_dispatch_insert(struct row_data *rd, int queue_idx)
{
	struct request *rq;

	rq = rq_entry_fifo(rd->row_queues[queue_idx].fifo.next);
	row_remove_request(rd->dispatch_queue, rq);
	elv_dispatch_add_tail(rd->dispatch_queue, rq);
	rd->row_queues[queue_idx].nr_dispatched++;
	row_clear_rowq_unserved(rd, queue_idx);
	row_log_rowq(rd, queue_idx, " Dispatched request nr_disp = %d",
		     rd->row_queues[queue_idx].nr_dispatched);
}
static inline void
sio_dispatch_request(struct sio_data *sd, struct request *rq)
{
	rq_fifo_clear(rq);
	elv_dispatch_add_tail(rq->q, rq);

	if (rq_data_dir(rq)) {
		/* dispatched a write: writes are no longer starved */
		sd->starved = 0;
	} else {
		/* dispatched a read while writes are pending: bump starvation */
		if (!list_empty(&sd->fifo_list[SYNC][WRITE]) ||
		    !list_empty(&sd->fifo_list[ASYNC][WRITE]))
			sd->starved++;
	}
}
/* return vios dispatched */
static u64 fiops_dispatch_request(struct fiops_data *fiopsd,
				  struct fiops_ioc *ioc)
{
	struct request *rq;
	struct request_queue *q = fiopsd->queue;

	rq = rq_entry_fifo(ioc->fifo.next);

	fiops_remove_request(rq);
	elv_dispatch_add_tail(q, rq);

	fiopsd->in_flight[rq_is_sync(rq)]++;
	ioc->in_flight++;

	return fiops_scaled_vios(fiopsd, ioc, rq);
}
static inline void
sio_dispatch_request(struct sio_data *sd, struct request *rq)
{
	/* Remove the request from the fifo list and dispatch it. */
	rq_fifo_clear(rq);
	elv_dispatch_add_tail(rq->q, rq);

	sd->batched++;

	if (rq_data_dir(rq)) {
		sd->starved = 0;
	} else {
		if (!list_empty(&sd->fifo_list[SYNC][WRITE]) ||
		    !list_empty(&sd->fifo_list[ASYNC][WRITE]))
			sd->starved++;
	}
}
/*
 * move an entry to dispatch queue
 */
static void vr_move_request(struct vr_data *vd, struct request *rq)
{
	struct request_queue *q = rq->q;

	if (blk_rq_pos(rq) > vd->last_sector)
		vd->head_dir = FORWARD;
	else
		vd->head_dir = BACKWARD;

	vd->last_sector = blk_rq_pos(rq);
	vd->next_rq = elv_rb_latter_request(NULL, rq);
	vd->prev_rq = elv_rb_former_request(NULL, rq);

	BUG_ON(vd->next_rq && vd->next_rq == vd->prev_rq);

	vr_remove_request(q, rq);
	elv_dispatch_add_tail(q, rq);

	vd->nbatched++;
}
static int cscan_dispatch(struct request_queue *q, int force)
{
	struct request *rq;
	struct cscan_data *cd = q->elevator->elevator_data;
	struct rb_node *node = rb_first(&cd->sort_list[cd->curr]);

	/* current sweep exhausted: switch to the other sort list */
	if (!node) {
		cd->curr = 1 - cd->curr;
		node = rb_first(&cd->sort_list[cd->curr]);
	}

	if (node) {
		rq = rb_entry_rq(node);
		cd->last_sector = rq_end_sector(rq);
		elv_rb_del(&cd->sort_list[cd->curr], rq);
		elv_dispatch_add_tail(q, rq);
		return 1;
	}

	return 0;
}
/*
 * If the shouldBuild dirty bit is set, combine the c-scan lists into one
 * list and build the cost table; use the head position and the built table
 * to find the best path, then clear the shouldBuild dirty bit.
 * Dispatch the first request on the path and set the new head position.
 */
static int optimal_dispatch(struct request_queue *q, int force)
{
	int size = 0, ndx = 1;
	long cost1, cost2;
	struct list_head *entry;
	struct requestList *scan;
	struct optimal_data *nd = q->elevator->elevator_data;

	if (nd->shouldBuild) {
		/* copy the arrival queue into an array of requests, in c-scan order */
		entry = &nd->arrival_queue;
		while (((entry = entry->next) != &nd->arrival_queue) &&
		       size < nd->max_requests) {
			nd->mylist[size] = list_entry_rq(entry);
			size++;
		}

		if (size == 0) {
			nd->dispatchSize = 0;
			nd->currentNdx = 0;
			return 0;
		}

		/* might be redundant: the loop above already caps size */
		if (size > nd->max_requests)
			size = nd->max_requests;

		buildTable(nd->mylist, size, nd->C);

		/* cost of serving the batch starting from either end of the array */
		cost1 = ((size - 1) + 1) *
			distance(blk_rq_pos(&nd->headpos),
				 blk_rq_pos(nd->mylist[0])) +
			nd->C[0][size - 1][0].cost;
		cost2 = ((size - 1) + 1) *
			distance(blk_rq_pos(&nd->headpos),
				 blk_rq_pos(nd->mylist[size - 1])) +
			nd->C[0][size - 1][1].cost;

		if (cost1 < cost2) {
			nd->dispatch_head[0] = nd->mylist[0];
			/* for each item on C[0][size-1][0]'s path, add to dispatch_head */
			scan = nd->C[0][size - 1][0].head;
			while (scan != NULL && scan->data != NULL) {
				nd->dispatch_head[ndx] = scan->data;
				ndx++;
				scan = scan->next;
			}
		} else {
			nd->dispatch_head[0] = nd->mylist[size - 1];
			scan = nd->C[0][size - 1][1].head;
			while (scan != NULL && scan->data != NULL) {
				nd->dispatch_head[ndx] = scan->data;
				ndx++;
				scan = scan->next;
			}
		}
		nd->dispatchSize = size;
		nd->currentNdx = 0;
		nd->shouldBuild = 0;
	}

	/* dispatch the next request on the precomputed path */
	if (nd->currentNdx < nd->dispatchSize) {
		struct request *rq;

		rq = nd->dispatch_head[nd->currentNdx];
		nd->headpos.__sector = rq_end_sector(rq);
		list_del_init(&rq->queuelist);
		elv_dispatch_add_tail(q, rq);
		nd->currentNdx++;
		return 1;
	}

	return 0;
}
static int osio_dispatch(struct request_queue *q, int force)
{
	struct osio_data *od = q->elevator->elevator_data;
	const unsigned int non_empty[3] = {
		!list_empty(&od->fifo_head[OSIO_DIR_READ]),
		!list_empty(&od->fifo_head[OSIO_DIR_SYNC_WRITE]),
		!list_empty(&od->fifo_head[OSIO_DIR_ASYNC_WRITE]),
	};
	struct request *rq = NULL;

	osio_dbg("1, od->fifo_dir = %d\n", od->fifo_dir);
	osio_dbg("1, non_empty[0] = %d\n", non_empty[0]);
	osio_dbg("1, non_empty[1] = %d\n", non_empty[1]);
	osio_dbg("1, non_empty[2] = %d\n", non_empty[2]);

	/* dispatch a batch of rq */
	if (od->fifo_dir != OSIO_DIR_UNDEF) {
		if ((od->batching >= od->fifo_batch[od->fifo_dir]) ||
		    (!non_empty[od->fifo_dir]))
			od->fifo_dir = OSIO_DIR_UNDEF;
		else
			goto dispatch_request;
	}

	/* re-decide the direction */
	if (non_empty[OSIO_DIR_READ])
		goto dir_read;
	if (non_empty[OSIO_DIR_SYNC_WRITE])
		goto dir_sync_write;
	if (non_empty[OSIO_DIR_ASYNC_WRITE])
		goto dir_async_write;

	return 0;

dir_read:
	/* find a starved write rq */
	if ((od->write_starved[OSIO_SYNC] > od->write_starved_line[OSIO_SYNC]) &&
	    non_empty[OSIO_DIR_SYNC_WRITE])
		goto dir_sync_write;
	else if ((od->write_starved[OSIO_ASYNC] > od->write_starved_line[OSIO_ASYNC]) &&
		 non_empty[OSIO_DIR_ASYNC_WRITE])
		goto dir_async_write;

	od->fifo_dir = OSIO_DIR_READ;
	od->batching = 0;
	od->write_starved[OSIO_SYNC] += non_empty[OSIO_DIR_SYNC_WRITE];
	od->write_starved[OSIO_ASYNC] += non_empty[OSIO_DIR_ASYNC_WRITE];
	goto dispatch_request;

dir_sync_write:
	if ((od->write_starved[OSIO_ASYNC] > od->write_starved_line[OSIO_ASYNC]) &&
	    non_empty[OSIO_DIR_ASYNC_WRITE])
		goto dir_async_write;

	od->fifo_dir = OSIO_DIR_SYNC_WRITE;
	od->batching = 0;
	od->write_starved[OSIO_SYNC] = 0;
	od->write_starved[OSIO_ASYNC] += non_empty[OSIO_DIR_ASYNC_WRITE];
	goto dispatch_request;

dir_async_write:
	od->fifo_dir = OSIO_DIR_ASYNC_WRITE;
	od->batching = 0;
	od->write_starved[OSIO_ASYNC] = 0;
	od->write_starved[OSIO_SYNC] += non_empty[OSIO_DIR_SYNC_WRITE];

dispatch_request:
	/* dispatch req */
	osio_dbg("2, od->fifo_dir = %d\n", od->fifo_dir);
	osio_dbg("2, od->batching = %d\n", od->batching);
	rq = rq_entry_fifo(od->fifo_head[od->fifo_dir].next);
	list_del_init(&rq->queuelist);
	elv_dispatch_add_tail(q, rq);
	od->batching++;

	return 1;
}
void ElvDeadline::deadline_move_to_dispatch(request *rq)
{
	deadline_remove_request(rq);
	elv_dispatch_add_tail(rq);
}
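/*
 * All of the hooks above share the same legacy-elevator dispatch contract:
 * pick the next request from the scheduler's private structures, unlink it
 * from them, hand it to the device's dispatch queue with
 * elv_dispatch_add_tail(), and (for the elevator_dispatch_fn entry points)
 * return 1 if a request was dispatched, 0 otherwise. A minimal sketch of
 * that common pattern, assuming a private struct holding a single FIFO
 * list; the struct name fifo_data and its field names are hypothetical:
 */
static int fifo_dispatch(struct request_queue *q, int force)
{
	struct fifo_data *fd = q->elevator->elevator_data;	/* hypothetical */
	struct request *rq;

	if (list_empty(&fd->queue))
		return 0;

	/* oldest request first */
	rq = list_entry(fd->queue.next, struct request, queuelist);
	list_del_init(&rq->queuelist);	/* essentially what rq_fifo_clear() does */
	elv_dispatch_add_tail(q, rq);
	return 1;
}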