static void controller_smart_access_complete (controller *currctlr, ioreq_event *curr)
{
    struct ioq *queue = currctlr->devices[curr->devno].queue;
    ioreq_event *done = ioreq_copy(curr);
    int devno = curr->devno;
    int numout;

    /* Responds to completion interrupt */

    done->type = IO_INTERRUPT_COMPLETE;
    currctlr->outbusowned = controller_get_downward_busno(currctlr, done, NULL);
    controller_send_event_down_path(currctlr, done, currctlr->ovrhd_disk_complete);
    currctlr->outbusowned = -1;

    /* Handles request completion, including call-backs into cache */

    curr = ioqueue_physical_access_done(queue, curr);
    while ((done = curr)) {
        curr = curr->next;
        /* call back into cache with completion -- let it do request_complete */
        controller_smart_wakeup(currctlr, currctlr->cache->cache_disk_access_complete(currctlr->cache, done));
    }

    /* Initiate another request, if any pending */

    numout = ioqueue_get_reqoutstanding(queue);
    if ((numout < currctlr->devices[devno].maxoutstanding) && (curr = ioqueue_get_next_request(queue))) {
        controller_send_event_down_path(currctlr, curr, currctlr->ovrhd_disk_request);
    }
}
static void ssd_access_complete_element(ioreq_event *curr)
{
    ssd_t *currdisk;
    int elem_num;
    ssd_element *elem;
    ioreq_event *x;

    currdisk = getssd(curr->devno);
    elem_num = currdisk->timing_t->choose_element(currdisk->timing_t, curr->blkno);
    ASSERT(elem_num == curr->ssd_elem_num);
    elem = &currdisk->elements[elem_num];

    if ((x = ioqueue_physical_access_done(elem->queue, curr)) == NULL) {
        fprintf(stderr, "ssd_access_complete: ioreq_event not found by ioqueue_physical_access_done call\n");
        exit(1);
    }

    // all the reqs are over
    if (ioqueue_get_reqoutstanding(elem->queue) == 0) {
        elem->media_busy = FALSE;
    }

    ssd_complete_parent(curr, currdisk);
    addtoextraq((event *) curr);
    ssd_activate_elem(currdisk, elem_num);
}
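/*
 * Illustrative sketch only: one plausible shape for ssd_complete_parent,
 * which is called above but defined elsewhere in ssd.c.  A request that
 * spans several elements is split into child ioreq_events; the parent is
 * finished when its last child completes.  The field names `ssd_intrp`
 * (back-pointer to the parent) and `tempint2` (outstanding-child count),
 * and the `bus_transaction_latency` field (by analogy with simpledisk_t),
 * are assumptions for illustration, not verified DiskSim definitions.
 */
static void ssd_complete_parent_sketch (ioreq_event *child, ssd_t *currdisk)
{
    ioreq_event *parent = child->ssd_intrp;   /* assumed back-pointer */

    parent->tempint2--;                       /* assumed child counter */
    if (parent->tempint2 == 0) {
        /* last child done: report completion up the bus path */
        parent->type = IO_INTERRUPT_ARRIVE;
        parent->cause = COMPLETION;
        ssd_send_event_up_path(parent, currdisk->bus_transaction_latency);
    }
}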
static void simpledisk_request_arrive (ioreq_event *curr)
{
    ioreq_event *intrp;
    simpledisk_t *currdisk;

#ifdef DEBUG_SIMPLEDISK
    fprintf (outputfile, "*** %f: simpledisk_request_arrive - devno %d, blkno %d, bcount %d, flags 0x%x\n", simtime, curr->devno, curr->blkno, curr->bcount, curr->flags);
#endif

    currdisk = getsimpledisk(curr->devno);

    /* verify that request is valid. */
    if ((curr->blkno < 0) || (curr->bcount <= 0) || ((curr->blkno + curr->bcount) > currdisk->numblocks)) {
        fprintf(stderr, "Invalid set of blocks requested from simpledisk - blkno %lld, bcount %d, numblocks %lld\n", curr->blkno, curr->bcount, currdisk->numblocks);
        exit(1);
    }

    /* create a new request, set it up for initial interrupt */
    currdisk->busowned = simpledisk_get_busno(curr);

    if (ioqueue_get_reqoutstanding(currdisk->queue) == 0) {
        ioqueue_add_new_request(currdisk->queue, curr);
        curr = ioqueue_get_next_request(currdisk->queue);
        intrp = curr;

        /* initiate media access if request is a READ */
        if (curr->flags & READ) {
            ioreq_event *tmp = ioreq_copy(curr);
            currdisk->media_busy = TRUE;
            stat_update(&currdisk->stat.acctimestats, currdisk->acctime);
            tmp->time = simtime + currdisk->acctime;
            tmp->type = DEVICE_ACCESS_COMPLETE;
            addtointq((event *)tmp);
        }

        /* if not disconnecting, then the READY_TO_TRANSFER is like a RECONNECT */
        currdisk->reconnect_reason = IO_INTERRUPT_ARRIVE;
        if (curr->flags & READ) {
            intrp->cause = (currdisk->neverdisconnect) ? READY_TO_TRANSFER : DISCONNECT;
        } else {
            intrp->cause = READY_TO_TRANSFER;
        }
    } else {
        intrp = ioreq_copy(curr);
        ioqueue_add_new_request(currdisk->queue, curr);
        intrp->cause = DISCONNECT;
    }

    intrp->type = IO_INTERRUPT_ARRIVE;
    simpledisk_send_event_up_path(intrp, currdisk->bus_transaction_latency);
}
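/*
 * simpledisk_get_busno is called above but not shown here.  A sketch by
 * analogy with the other DiskSim device models (e.g. disk_get_busno in
 * disk.c), assuming the same encoding: the request's busno packs one bus
 * number per level of the path, and the device's depth selects its byte.
 * Treat the exact field names as assumptions, not verified definitions.
 */
static int simpledisk_get_busno (ioreq_event *curr)
{
    simpledisk_t *currdisk;
    intchar busno;
    int depth;

    currdisk = getsimpledisk(curr->devno);
    busno.value = curr->busno;
    depth = currdisk->depth[0];       /* device's depth in the bus tree */
    return (busno.byte[depth]);       /* bus number at that level */
}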
static void ssd_access_complete_element(ioreq_event *curr)
{
    ssd_t *currdisk;
    int elem_num;
    ssd_element *elem;
    ioreq_event *x;
    int lba;

    currdisk = getssd(curr->devno);
    elem_num = ssd_choose_element(currdisk->user_params, curr->blkno);
    ASSERT(elem_num == curr->ssd_elem_num);
    elem = &currdisk->elements[elem_num];

    /* trace record: simtime, logical page number, element number, physical blkno
       (reads go to outputfile5, writes to outputfile4) */
    lba = ssd_logical_pageno(curr->blkno, currdisk);
    if (curr->flags & READ) {
        fprintf(outputfile5, "%10.6f %d %d %d\n", simtime, lba, elem_num, curr->blkno);
    } else {
        fprintf(outputfile4, "%10.6f %d %d %d\n", simtime, lba, elem_num, curr->blkno);
    }

    if ((x = ioqueue_physical_access_done(elem->queue, curr)) == NULL) {
        fprintf(stderr, "ssd_access_complete: ioreq_event not found by ioqueue_physical_access_done call\n");
        exit(1);
    }

    ssd_dpower(currdisk, 0);

    // all the reqs are over
    if (ioqueue_get_reqoutstanding(elem->queue) == 0) {
        elem->media_busy = FALSE;
    }

    ssd_complete_parent(curr, currdisk);
    addtoextraq((event *) curr);

    // added by tiel
    // activate request create simtime, type, elem_num
    //{
    //    ioreq_event *temp = (ioreq_event *)getfromextraq();
    //    temp->type = SSD_ACTIVATE_ELEM;
    //    temp->time = simtime + (2*currdisk->params.channel_switch_delay);
    //    //temp->time = simtime;
    //    temp->ssd_elem_num = elem_num;
    //    addtointq ((event *)temp);
    //}
    //printf("time %f \n", simtime);

    ssd_activate_elem(currdisk, elem_num);
}
static void controller_smart_issue_access (void *issuefuncparam, ioreq_event *curr)
{
    controller *currctlr = issuefuncparam;
    struct ioq *queue = currctlr->devices[curr->devno].queue;
    int numout = ioqueue_get_reqoutstanding(queue);

    /* in case the cache changes to which device the request is sent */
    //fprintf (stderr, "busno %x, buspath %x, slotno %x, slotpath %x\n", curr->busno, currctlr->devices[curr->devno].buspath.value, curr->slotno, currctlr->devices[curr->devno].slotpath.value);
    curr->busno = currctlr->devices[curr->devno].buspath.value;
    curr->slotno = currctlr->devices[curr->devno].slotpath.value;

    ioqueue_add_new_request(queue, curr);
    if (numout < currctlr->devices[curr->devno].maxoutstanding) {
        ioreq_event *sched = ioqueue_get_next_request(queue);
        controller_send_event_down_path(currctlr, sched, currctlr->ovrhd_disk_request);
    }
}
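/*
 * Hypothetical refactoring sketch, not part of the original source: both
 * controller_smart_access_complete and controller_smart_issue_access end
 * with the same throttle pattern (dispatch the next queued request only
 * while fewer than maxoutstanding requests are at the device).  It could
 * be factored out as below; only calls already used above are assumed.
 */
static void controller_smart_try_dispatch (controller *currctlr, int devno)
{
    struct ioq *queue = currctlr->devices[devno].queue;

    if (ioqueue_get_reqoutstanding(queue) < currctlr->devices[devno].maxoutstanding) {
        ioreq_event *sched = ioqueue_get_next_request(queue);
        if (sched != NULL) {
            controller_send_event_down_path(currctlr, sched, currctlr->ovrhd_disk_request);
        }
    }
}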
static int check_send_out_request (iodriver *curriodriver, int devno)
{
    int numout;

    if ((curriodriver->consttime == IODRIVER_TRACED_QUEUE_TIMES) || (curriodriver->consttime == IODRIVER_TRACED_BOTH_TIMES)) {
        return (FALSE);
    }

    numout = ioqueue_get_reqoutstanding(curriodriver->devices[devno].queue);

    if (curriodriver->usequeue == TRUE) {
        int queuectlr = curriodriver->devices[devno].queuectlr;
        if (queuectlr != -1) {
            numout = curriodriver->ctlrs[queuectlr].numoutstanding;
            /* fprintf (outputfile, "Check send_out_req: queuectlr %d, numout %d, maxout %d, send %d\n", queuectlr, numout, curriodriver->ctlrs[queuectlr].maxoutstanding, (numout < curriodriver->ctlrs[queuectlr].maxoutstanding)); */
            return (numout < curriodriver->ctlrs[queuectlr].maxoutstanding);
        } else {
            return (numout < curriodriver->devices[devno].maxoutstanding);
        }
    } else {
        return (!numout);
    }
}
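/*
 * Hypothetical usage sketch (not the actual iodriver code path): the
 * driver consults check_send_out_request before dispatching the next
 * queued request for a device.  iodriver_send_down_path is a stand-in
 * name for whatever dispatch routine the real driver uses.
 */
static void iodriver_try_dispatch_next (iodriver *curriodriver, int devno)
{
    if (check_send_out_request(curriodriver, devno)) {
        ioreq_event *req = ioqueue_get_next_request(curriodriver->devices[devno].queue);
        if (req != NULL) {
            iodriver_send_down_path(curriodriver, req);   /* hypothetical dispatch hook */
        }
    }
}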
static void ssd_access_complete_element(ioreq_event *curr)
{
    ssd_t *currdisk;
    int elem_num;
    ssd_element *elem;
    ioreq_event *x;
    int lba;

    currdisk = getssd(curr->devno);
    elem_num = ssd_choose_element(currdisk->user_params, curr->blkno);
    ASSERT(elem_num == curr->ssd_elem_num);
    elem = &currdisk->elements[elem_num];

    /* trace record: simtime, logical block number, element number, physical blkno
       (reads go to outputfile5, writes to outputfile4) */
    lba = ssd_logical_blockno(curr->blkno, currdisk);
    if (curr->flags & READ) {
        fprintf(outputfile5, "%10.6f %d %d %d\n", simtime, lba, elem_num, curr->blkno);
    } else {
        fprintf(outputfile4, "%10.6f %d %d %d\n", simtime, lba, elem_num, curr->blkno);
    }

    if ((x = ioqueue_physical_access_done(elem->queue, curr)) == NULL) {
        fprintf(stderr, "ssd_access_complete: ioreq_event not found by ioqueue_physical_access_done call\n");
        exit(1);
    }

    ssd_dpower(currdisk, 0);

    // all the reqs are over
    if (ioqueue_get_reqoutstanding(elem->queue) == 0) {
        elem->media_busy = FALSE;
    }

    ssd_complete_parent(curr, currdisk);
    addtoextraq((event *) curr);
    ssd_activate_elem(currdisk, elem_num);
}
/*
 * collects 1 request from each chip in the gang
 */
static void ssd_collect_req_in_gang (ssd_t *s, int gang_num, ssd_req ***rd_q, ssd_req ***wr_q, int *rd_total, int *wr_total)
{
    int i;
    int start;
    gang_metadata *g;
    g = &s->gang_meta[gang_num];

    // start from the first element of the gang
    start = gang_num * s->params.elements_per_gang;
    i = start;

    *rd_total = 0;
    *wr_total = 0;

    do {
        ssd_element *elem;
        ioreq_event *req;
        int tot_rd_reqs;
        int tot_wr_reqs;
        int j;

        elem = &s->elements[i];
        ASSERT(ioqueue_get_reqoutstanding(elem->queue) == 0);
        j = i % s->params.elements_per_gang;

        // collect the requests
        tot_rd_reqs = 0;
        tot_wr_reqs = 0;
        if ((req = ioqueue_get_next_request(elem->queue)) != NULL) {
            int found;

            if (req->flags & READ) {
                found = ssd_already_present(rd_q[j], tot_rd_reqs, req);
            } else {
                found = ssd_already_present(wr_q[j], tot_wr_reqs, req);
            }

            if (!found) {
                // this is a valid request
                ssd_req *r = malloc(sizeof(ssd_req));
                r->blk = req->blkno;
                r->count = req->bcount;
                r->is_read = req->flags & READ;
                r->org_req = req;
                r->plane_num = -1; // we don't know to which plane this req will be directed at

                if (req->flags & READ) {
                    rd_q[j][tot_rd_reqs] = r;
                    tot_rd_reqs++;
                } else {
                    wr_q[j][tot_wr_reqs] = r;
                    tot_wr_reqs++;
                }
            } else {
                // throw this request -- it doesn't make sense
                stat_update(&s->stat.acctimestats, 0);
                req->time = simtime;
                req->ssd_elem_num = i;
                req->ssd_gang_num = gang_num;
                req->type = DEVICE_ACCESS_COMPLETE;
                addtointq((event *)req);
            }

            ASSERT((tot_rd_reqs < MAX_REQS) && (tot_wr_reqs < MAX_REQS));
        }

        *rd_total = *rd_total + tot_rd_reqs;
        *wr_total = *wr_total + tot_wr_reqs;

        // go to the next element
        i = ssd_next_elem_in_gang(s, gang_num, i);
    } while (i != start);
}
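/*
 * ssd_already_present and ssd_next_elem_in_gang are called above and in
 * ssd_activate_elem below but defined elsewhere in ssd.c.  Two minimal
 * sketches consistent with their call sites; matching a "same request"
 * on blkno and flags, and a plain round-robin walk over the gang, are
 * assumptions about the real routines.
 */
static int ssd_already_present (ssd_req **reqs, int total, ioreq_event *req)
{
    int i;

    // a request is "already present" if an entry wrapping the same
    // block in the same direction has been collected before (assumed)
    for (i = 0; i < total; i++) {
        if ((req->blkno == reqs[i]->org_req->blkno) &&
            (req->flags == reqs[i]->org_req->flags)) {
            return 1;
        }
    }
    return 0;
}

static int ssd_next_elem_in_gang (ssd_t *s, int gang_num, int elem_num)
{
    // wrap around within the gang, so the do-while loop above
    // terminates when it returns to its starting element (assumed)
    int start = gang_num * s->params.elements_per_gang;
    return start + ((elem_num - start + 1) % s->params.elements_per_gang);
}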
static void ssd_activate_elem(ssd_t *currdisk, int elem_num)
{
    ioreq_event *req;
    ssd_req **read_reqs;
    ssd_req **write_reqs;
    int i;
    int read_total = 0;
    int write_total = 0;
    double schtime = 0;
    int max_reqs;
    int tot_reqs_issued;
    double max_time_taken = 0;

    ssd_element *elem = &currdisk->elements[elem_num];

    // if the media is busy, we can't do anything, so return
    if (elem->media_busy == TRUE) {
        return;
    }

    ASSERT(ioqueue_get_reqoutstanding(elem->queue) == 0);

    // we can invoke cleaning in the background whether there
    // is request waiting or not
    if (currdisk->params.cleaning_in_background) {
        // if cleaning was invoked, wait until it is over ...
        if (ssd_invoke_element_cleaning(elem_num, currdisk)) {
            return;
        }
    }

    ASSERT(elem->metadata.reqs_waiting == ioqueue_get_number_in_queue(elem->queue));

    if (elem->metadata.reqs_waiting > 0) {

        // invoke cleaning in foreground when there are requests waiting
        if (!currdisk->params.cleaning_in_background) {
            // if cleaning was invoked, wait until it is over ...
            if (ssd_invoke_element_cleaning(elem_num, currdisk)) {
                return;
            }
        }

        // how many reqs can we issue at once
        if (currdisk->params.copy_back == SSD_COPY_BACK_DISABLE) {
            max_reqs = 1;
        } else {
            if (currdisk->params.num_parunits == 1) {
                max_reqs = 1;
            } else {
                max_reqs = MAX_REQS_ELEM_QUEUE;
            }
        }

        // ideally, we should issue one req per plane, overlapping them all.
        // in order to simplify the overlapping strategy, let's issue
        // requests of the same type together.
        read_reqs = (ssd_req **) malloc(max_reqs * sizeof(ssd_req *));
        write_reqs = (ssd_req **) malloc(max_reqs * sizeof(ssd_req *));

        // collect the requests
        while ((req = ioqueue_get_next_request(elem->queue)) != NULL) {
            int found = 0;

            elem->metadata.reqs_waiting--;

            // see if we already have the same request in the list.
            // this usually doesn't happen -- but on synthetic traces
            // this weird case can occur.
            if (req->flags & READ) {
                found = ssd_already_present(read_reqs, read_total, req);
            } else {
                found = ssd_already_present(write_reqs, write_total, req);
            }

            if (!found) {
                // this is a valid request
                ssd_req *r = malloc(sizeof(ssd_req));
                r->blk = req->blkno;
                r->count = req->bcount;
                r->is_read = req->flags & READ;
                r->org_req = req;
                r->plane_num = -1; // we don't know to which plane this req will be directed at

                if (req->flags & READ) {
                    read_reqs[read_total] = r;
                    read_total++;
                } else {
                    write_reqs[write_total] = r;
                    write_total++;
                }

                // if we have more reqs than we can handle, quit
                if ((read_total >= max_reqs) || (write_total >= max_reqs)) {
                    break;
                }
            } else {
                // throw this request -- it doesn't make sense
                stat_update(&currdisk->stat.acctimestats, 0);
                req->time = simtime;
                req->ssd_elem_num = elem_num;
                req->type = DEVICE_ACCESS_COMPLETE;
                addtointq((event *)req);
            }
        }

        if (read_total > 0) {
            // first issue all the read requests (it doesn't matter what we
            // issue first). i chose read because reads are mostly synchronous.
            // find the time taken to serve these requests.
            ssd_compute_access_time(currdisk, elem_num, read_reqs, read_total);

            // add an event for each request completion
            for (i = 0; i < read_total; i++) {
                elem->media_busy = TRUE;

                // find the maximum time taken by a request
                if (schtime < read_reqs[i]->schtime) {
                    schtime = read_reqs[i]->schtime;
                }

                stat_update(&currdisk->stat.acctimestats, read_reqs[i]->acctime);
                read_reqs[i]->org_req->time = simtime + read_reqs[i]->schtime;
                read_reqs[i]->org_req->ssd_elem_num = elem_num;
                read_reqs[i]->org_req->type = DEVICE_ACCESS_COMPLETE;

                //printf("R: blk %d elem %d acctime %f simtime %f\n", read_reqs[i]->blk,
                //    elem_num, read_reqs[i]->acctime, read_reqs[i]->org_req->time);

                addtointq((event *)read_reqs[i]->org_req);
                free(read_reqs[i]);
            }
        }
        free(read_reqs);

        max_time_taken = schtime;

        if (write_total > 0) {
            // next issue the write requests
            ssd_compute_access_time(currdisk, elem_num, write_reqs, write_total);

            // add an event for each request completion.
            // note that we can issue the writes only after all the reads above are
            // over. so, include the maximum read time when creating the event.
            for (i = 0; i < write_total; i++) {
                elem->media_busy = TRUE;

                stat_update(&currdisk->stat.acctimestats, write_reqs[i]->acctime);
                write_reqs[i]->org_req->time = simtime + schtime + write_reqs[i]->schtime;
                //printf("blk %d elem %d acc time %f\n", write_reqs[i]->blk, elem_num, write_reqs[i]->acctime);

                if (max_time_taken < (schtime + write_reqs[i]->schtime)) {
                    max_time_taken = (schtime + write_reqs[i]->schtime);
                }

                write_reqs[i]->org_req->ssd_elem_num = elem_num;
                write_reqs[i]->org_req->type = DEVICE_ACCESS_COMPLETE;

                //printf("W: blk %d elem %d acctime %f simtime %f\n", write_reqs[i]->blk,
                //    elem_num, write_reqs[i]->acctime, write_reqs[i]->org_req->time);

                addtointq((event *)write_reqs[i]->org_req);
                free(write_reqs[i]);
            }
        }
        free(write_reqs);

        // statistics
        tot_reqs_issued = read_total + write_total;
        ASSERT(tot_reqs_issued > 0);
        currdisk->elements[elem_num].stat.tot_reqs_issued += tot_reqs_issued;
        currdisk->elements[elem_num].stat.tot_time_taken += max_time_taken;
    }
}
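/*
 * For context, a simplified sketch of how the DEVICE_ACCESS_COMPLETE
 * events scheduled by ssd_activate_elem flow back into the completion
 * path.  The real dispatcher in ssd.c (ssd_event_arrive) handles more
 * event types and state checks; this only illustrates the round trip
 * between ssd_activate_elem and ssd_access_complete_element.
 */
static void ssd_event_arrive_sketch (ioreq_event *curr)
{
    switch (curr->type) {
        case DEVICE_ACCESS_COMPLETE:
            // media access finished: retire the request on its element
            // queue and try to start the next batch on that element
            ssd_access_complete_element(curr);
            break;

        default:
            fprintf(stderr, "ssd_event_arrive_sketch: unhandled event type %d\n", curr->type);
            exit(1);
    }
}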