static void controller_smart_access_complete (controller *currctlr, ioreq_event *curr)
{
   struct ioq *queue = currctlr->devices[curr->devno].queue;
   ioreq_event *done = ioreq_copy(curr);
   int devno = curr->devno;
   int numout;

   /* Responds to completion interrupt */

   done->type = IO_INTERRUPT_COMPLETE;
   currctlr->outbusowned = controller_get_downward_busno(currctlr, done, NULL);
   controller_send_event_down_path(currctlr, done, currctlr->ovrhd_disk_complete);
   currctlr->outbusowned = -1;

   /* Handles request completion, including call-backs into cache */

   curr = ioqueue_physical_access_done(queue, curr);
   while ((done = curr)) {
      curr = curr->next;
      /* call back into cache with completion -- let it do request_complete */
      controller_smart_wakeup(currctlr, currctlr->cache->cache_disk_access_complete(currctlr->cache, done));
   }

   /* Initiate another request, if any pending */

   numout = ioqueue_get_reqoutstanding(queue);
   if ((numout < currctlr->devices[devno].maxoutstanding) &&
       (curr = ioqueue_get_next_request(queue))) {
      controller_send_event_down_path(currctlr, curr, currctlr->ovrhd_disk_request);
   }
}
static ioreq_event * handle_new_request (iodriver *curriodriver, ioreq_event *curr)
{
   struct ioq *queue = curriodriver->devices[(curr->devno)].queue;
   ioreq_event *ret = NULL;

/*
fprintf(outputfile, "\n*** handle_new_request:: time %f, devno %d, blkno %d, bcount %d\n\n", simtime, curr->devno, curr->blkno, curr->bcount);
fprintf(outputfile, "handle_new_request:: calling ioqueue_add_new_request\n");
*/

   ioqueue_add_new_request(queue, curr);

   if (check_send_out_request(curriodriver, curr->devno)) {
      /* fprintf(outputfile, "handle_new_request:: calling ioqueue_get_next_request\n"); */
      ret = ioqueue_get_next_request(queue);
      if (ret != NULL) {
         schedule_disk_access(curriodriver, ret);
         ret->time = IODRIVER_IMMEDSCHED_TIME * curriodriver->scale;
      }
   }
   return(ret);
}
static void simpledisk_completion_done (ioreq_event *curr)
{
   simpledisk_t *currdisk = getsimpledisk (curr->devno);

   // fprintf (outputfile, "Entering simpledisk_completion for disk %d: %12.6f\n", currdisk->devno, simtime);

   addtoextraq((event *) curr);

   if (currdisk->busowned != -1) {
      bus_ownership_release(currdisk->busowned);
      currdisk->busowned = -1;
   }

   /* check for and start next queued request, if any */
   curr = ioqueue_get_next_request(currdisk->queue);
   if (curr != NULL) {
      ASSERT (currdisk->media_busy == FALSE);
      if (curr->flags & READ) {
         /* READ: start the media access and schedule its completion */
         currdisk->media_busy = TRUE;
         stat_update (&currdisk->stat.acctimestats, currdisk->acctime);
         curr->time = simtime + currdisk->acctime;
         curr->type = DEVICE_ACCESS_COMPLETE;
         addtointq ((event *)curr);
      } else {
         /* WRITE: reconnect to the bus to receive the data */
         curr->type = IO_INTERRUPT_ARRIVE;
         curr->cause = RECONNECT;
         simpledisk_send_event_up_path (curr, currdisk->bus_transaction_latency);
         currdisk->reconnect_reason = IO_INTERRUPT_ARRIVE;
      }
   }
}
static void ssd_check_channel_activity (ssd_t *currdisk)
{
   while (1) {
      ioreq_event *curr = currdisk->completion_queue;
      currdisk->channel_activity = curr;

      if (curr != NULL) {
         currdisk->completion_queue = curr->next;
         if (curr->flags & READ) {
            /* transfer data up the line: curr->bcount, which is still set to */
            /* original requested value, indicates how many blks to transfer. */
            curr->type = DEVICE_DATA_TRANSFER_COMPLETE;
            ssd_send_event_up_path(curr, (double) 0.0);
         } else {
            ssd_request_complete (curr);
         }
      } else {
         curr = ioqueue_get_next_request(currdisk->queue);
         currdisk->channel_activity = curr;
         if (curr != NULL) {
            if (curr->flags & READ) {
               ssd_media_access_request(curr);
               continue;
            } else {
               curr->cause = RECONNECT;
               curr->type = IO_INTERRUPT_ARRIVE;
               currdisk->reconnect_reason = IO_INTERRUPT_ARRIVE;
               ssd_send_event_up_path (curr, currdisk->bus_transaction_latency);
            }
         }
      }
      break;
   }
}
static void ssd_request_arrive (ioreq_event *curr)
{
   ssd_t *currdisk;

   // fprintf (outputfile, "Entering ssd_request_arrive: %12.6f\n", simtime);
   // fprintf (outputfile, "ssd = %d, blkno = %d, bcount = %d, read = %d\n",curr->devno, curr->blkno, curr->bcount, (READ & curr->flags));

   currdisk = getssd(curr->devno);

   // verify that request is valid.
   if ((curr->blkno < 0) || (curr->bcount <= 0) ||
       ((curr->blkno + curr->bcount) > currdisk->numblocks)) {
      fprintf(stderr, "Invalid set of blocks requested from ssd - blkno %d, bcount %d, numblocks %d\n",
              curr->blkno, curr->bcount, currdisk->numblocks);
      exit(1);
   }

   /* create a new request, set it up for initial interrupt */
   ioqueue_add_new_request(currdisk->queue, curr);

   /* if the channel is idle, start servicing the request immediately */
   if (currdisk->channel_activity == NULL) {
      curr = ioqueue_get_next_request(currdisk->queue);
      currdisk->busowned = ssd_get_busno(curr);
      currdisk->channel_activity = curr;
      currdisk->reconnect_reason = IO_INTERRUPT_ARRIVE;

      if (curr->flags & READ) {
         ssd_media_access_request (curr);
         ssd_check_channel_activity(currdisk);
      } else {
         curr->cause = READY_TO_TRANSFER;
         curr->type = IO_INTERRUPT_ARRIVE;
         ssd_send_event_up_path(curr, currdisk->bus_transaction_latency);
      }
   }
}
static void simpledisk_request_arrive (ioreq_event *curr)
{
   ioreq_event *intrp;
   simpledisk_t *currdisk;

#ifdef DEBUG_SIMPLEDISK
   fprintf (outputfile, "*** %f: simpledisk_request_arrive - devno %d, blkno %d, bcount %d, flags 0x%x\n",
            simtime, curr->devno, curr->blkno, curr->bcount, curr->flags );
#endif

   currdisk = getsimpledisk(curr->devno);

   /* verify that request is valid. */
   if ((curr->blkno < 0) || (curr->bcount <= 0) ||
       ((curr->blkno + curr->bcount) > currdisk->numblocks)) {
      fprintf(stderr, "Invalid set of blocks requested from simpledisk - blkno %lld, bcount %d, numblocks %lld\n",
              curr->blkno, curr->bcount, currdisk->numblocks);
      exit(1);
   }

   /* create a new request, set it up for initial interrupt */
   currdisk->busowned = simpledisk_get_busno(curr);

   if (ioqueue_get_reqoutstanding (currdisk->queue) == 0) {
      ioqueue_add_new_request(currdisk->queue, curr);
      curr = ioqueue_get_next_request (currdisk->queue);
      intrp = curr;

      /* initiate media access if request is a READ */
      if (curr->flags & READ) {
         ioreq_event *tmp = ioreq_copy (curr);
         currdisk->media_busy = TRUE;
         stat_update (&currdisk->stat.acctimestats, currdisk->acctime);
         tmp->time = simtime + currdisk->acctime;
         tmp->type = DEVICE_ACCESS_COMPLETE;
         addtointq ((event *)tmp);
      }

      /* if not disconnecting, then the READY_TO_TRANSFER is like a RECONNECT */
      currdisk->reconnect_reason = IO_INTERRUPT_ARRIVE;
      if (curr->flags & READ) {
         intrp->cause = (currdisk->neverdisconnect) ? READY_TO_TRANSFER : DISCONNECT;
      } else {
         intrp->cause = READY_TO_TRANSFER;
      }
   } else {
      /* another request is already outstanding, so disconnect from the bus */
      intrp = ioreq_copy(curr);
      ioqueue_add_new_request(currdisk->queue, curr);
      intrp->cause = DISCONNECT;
   }

   intrp->type = IO_INTERRUPT_ARRIVE;
   simpledisk_send_event_up_path(intrp, currdisk->bus_transaction_latency);
}
static void controller_smart_issue_access (void *issuefuncparam, ioreq_event *curr)
{
   controller *currctlr = issuefuncparam;
   struct ioq *queue = currctlr->devices[curr->devno].queue;
   int numout = ioqueue_get_reqoutstanding(queue);

   /* in case the cache changes to which device the request is sent */
   //fprintf (stderr, "busno %x, buspath %x, slotno %x, slotpath %x\n", curr->busno, currctlr->devices[curr->devno].buspath.value, curr->slotno, currctlr->devices[curr->devno].slotpath.value);
   curr->busno = currctlr->devices[curr->devno].buspath.value;
   curr->slotno = currctlr->devices[curr->devno].slotpath.value;

   ioqueue_add_new_request(queue, curr);
   if (numout < currctlr->devices[curr->devno].maxoutstanding) {
      ioreq_event *sched = ioqueue_get_next_request(queue);
      controller_send_event_down_path(currctlr, sched, currctlr->ovrhd_disk_request);
   }
}
void iodriver_access_complete (int iodriverno, intr_event *intrp)
{
   int i;
   int numreqs;
   ioreq_event *tmp;
   ioreq_event *del;
   ioreq_event *req;
   int devno;
   int skip = 0;
   ctlr *ctl = NULL;
   time_t now;

   if (iodrivers[iodriverno]->type == STANDALONE) {
      req = ioreq_copy((ioreq_event *) intrp->infoptr);
   } else {
      req = (ioreq_event *) intrp->infoptr;
   }

#ifdef DEBUG_IODRIVER
   fprintf (outputfile, "*** %f: iodriver_access_complete - devno %d, blkno %d, bcount %d, read %d\n",
            simtime, req->devno, req->blkno, req->bcount, (req->flags & READ));
   fflush(outputfile);
#endif

   time( & now );
   disksim_exectrace( "Request completion: simtime %f, devno %d, blkno %lld, bcount %d, flags %X, time %s\n",
                      simtime, req->devno, req->blkno, req->bcount, req->flags, asctime( localtime(& now)) );

   if (iodrivers[iodriverno]->devices[(req->devno)].queuectlr != -1) {
      int ctlrno = iodrivers[iodriverno]->devices[(req->devno)].queuectlr;
      ctl = &iodrivers[iodriverno]->ctlrs[ctlrno];

      /* walk the oversized-request list looking for the piece just completed */
      tmp = ctl->oversized;
      numreqs = 1;
      while (((numreqs) || (tmp != ctl->oversized))
             && (tmp) && (tmp->next)
             && ((tmp->next->devno != req->devno)
                 || (tmp->next->opid != req->opid)
                 || (req->blkno < tmp->next->blkno)
                 || (req->blkno >= (tmp->next->blkno + tmp->next->bcount)))) {
         // fprintf (outputfile, "oversized request in list: opid %d, blkno %lld, bcount %d\n", tmp->opid, tmp->blkno, tmp->bcount);
         numreqs = 0;
         tmp = tmp->next;
      }

      if ((tmp) && (tmp->next->devno == req->devno)
          && (tmp->next->opid == req->opid)
          && (req->blkno >= tmp->next->blkno)
          && (req->blkno < (tmp->next->blkno + tmp->next->bcount))) {
         fprintf (outputfile, "%f, part of oversized request completed: opid %d, blkno %lld, bcount %d, maxreqsize %d\n",
                  simtime, req->opid, req->blkno, req->bcount, ctl->maxreqsize);
         if ((req->blkno + ctl->maxreqsize) < (tmp->next->blkno + tmp->next->bcount)) {
            /* more pieces remain; advance the request and issue the next piece */
            fprintf (outputfile, "more to go\n");
            req->blkno += ctl->maxreqsize;
            req->bcount = min(ctl->maxreqsize, (tmp->next->blkno + tmp->next->bcount - req->blkno));
            goto schedule_next;
         } else {
            /* last piece done; unlink and complete the oversized request */
            fprintf (outputfile, "done for real\n");
            addtoextraq((event *) req);
            req = tmp->next;
            tmp->next = tmp->next->next;
            if (ctl->oversized == req) {
               ctl->oversized = (req != req->next) ? req->next : NULL;
            }
            req->next = NULL;
         }
      }
   }

   devno = req->devno;
   req = ioqueue_physical_access_done(iodrivers[iodriverno]->devices[devno].queue, req);
   if (ctl) {
      ctl->numoutstanding--;
   }

   // special case for validate:
   if (disksim->traceformat == VALIDATE) {
      tmp = (ioreq_event *) getfromextraq();
      io_validate_do_stats1();
      tmp = iotrace_validate_get_ioreq_event(disksim->iotracefile, tmp);
      if (tmp) {
         io_validate_do_stats2(tmp);
         tmp->type = IO_REQUEST_ARRIVE;
         addtointq((event *) tmp);
         disksim_exectrace("Request issue: simtime %f, devno %d, blkno %lld, time %f\n",
                           simtime, tmp->devno, tmp->blkno, tmp->time);
      } else {
         disksim_simstop();
      }
   } else if (disksim->closedios) {
      tmp = (ioreq_event *) io_get_next_external_event(disksim->iotracefile);
      if (tmp) {
         io_using_external_event ((event *)tmp);
         tmp->time = simtime + disksim->closedthinktime;
         tmp->type = IO_REQUEST_ARRIVE;
         addtointq((event *) tmp);
      } else {
         disksim_simstop();
      }
   }

   while (req) {
      tmp = req;
      req = req->next;
      tmp->next = NULL;
      update_iodriver_statistics();
      if ((numreqs = logorg_mapcomplete(sysorgs, numsysorgs, tmp)) == COMPLETE) {
         /* update the overall I/O system stats for this completed request */
         ioreq_event *temp = ioqueue_get_specific_request (OVERALLQUEUE, tmp);
         ioreq_event *temp2 = ioqueue_physical_access_done (OVERALLQUEUE, temp);
         ASSERT (temp2 != NULL);
         addtoextraq((event *)temp);
         temp = NULL;

         if (iodrivers[iodriverno]->type != STANDALONE) {
            iodriver_add_to_intrp_eventlist(intrp, io_done_notify(tmp), iodrivers[iodriverno]->scale);
         } else {
            io_done_notify (tmp);
         }
      } else if (numreqs > 0) {
         for (i = 0; i < numreqs; i++) {
            del = tmp->next;
            tmp->next = del->next;
            del->next = NULL;
            del->type = IO_REQUEST_ARRIVE;
            del->flags |= MAPPED;
            skip |= (del->devno == devno);
            if (iodrivers[iodriverno]->type == STANDALONE) {
               del->time += simtime + 0.0000000001;   /* to effect an ordering */
               addtointq((event *) del);
            } else {
               iodriver_add_to_intrp_eventlist(intrp, (event *) del, iodrivers[iodriverno]->scale);
            }
         }
      }
      addtoextraq((event *) tmp);
   }

   if ((iodrivers[iodriverno]->consttime == IODRIVER_TRACED_QUEUE_TIMES)
       || (iodrivers[iodriverno]->consttime == IODRIVER_TRACED_BOTH_TIMES)) {
      if (ioqueue_get_number_in_queue(iodrivers[iodriverno]->devices[devno].queue) > 0) {
         iodrivers[iodriverno]->devices[devno].flag = 1;
         iodrivers[iodriverno]->devices[devno].lastevent = simtime;
      }
      return;
   }

   if (skip) {
      return;
   }

   // fprintf(outputfile, "iodriver_access_complete:: calling ioqueue_get_next_request\n");

   req = ioqueue_get_next_request(iodrivers[iodriverno]->devices[devno].queue);

   // fprintf (outputfile, "next scheduled: req %p, req->blkno %d, req->flags %x\n", req, ((req) ? req->blkno : 0), ((req) ? req->flags : 0));

schedule_next:
   if (req) {
      req->type = IO_ACCESS_ARRIVE;
      req->next = NULL;
      if (ctl) {
         ctl->numoutstanding++;
      }
      if (iodrivers[iodriverno]->type == STANDALONE) {
         req->time = simtime;
         addtointq((event *) req);
      } else {
         iodriver_add_to_intrp_eventlist(intrp, (event *) req, iodrivers[iodriverno]->scale);
      }
   }
}
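/*
 * Worked example of the oversized-request handling above (illustrative
 * values, not from the original source): suppose ctl->maxreqsize is 1024
 * and an oversized request spans blkno 0, bcount 2048.  The first completed
 * piece covers blocks 0..1023; since (0 + 1024) < (0 + 2048), the driver
 * advances req->blkno to 1024, sets req->bcount = min(1024, 2048 - 1024)
 * = 1024, and jumps to schedule_next to issue the second piece.  When that
 * piece completes, (1024 + 1024) is no longer < (0 + 2048), so the
 * "done for real" branch unlinks the oversized entry and completes the
 * full request.
 */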
/*
 * collects 1 request from each chip in the gang
 */
static void ssd_collect_req_in_gang (ssd_t *s, int gang_num, ssd_req ***rd_q, ssd_req ***wr_q, int *rd_total, int *wr_total)
{
   int i;
   int start;
   gang_metadata *g;
   g = &s->gang_meta[gang_num];

   // start from the first element of the gang
   start = gang_num * s->params.elements_per_gang;
   i = start;

   *rd_total = 0;
   *wr_total = 0;

   do {
      ssd_element *elem;
      ioreq_event *req;
      int tot_rd_reqs;
      int tot_wr_reqs;
      int j;

      elem = &s->elements[i];
      ASSERT(ioqueue_get_reqoutstanding(elem->queue) == 0);
      j = i % s->params.elements_per_gang;

      // collect the requests
      tot_rd_reqs = 0;
      tot_wr_reqs = 0;
      if ((req = ioqueue_get_next_request(elem->queue)) != NULL) {
         int found;
         if (req->flags & READ) {
            found = ssd_already_present(rd_q[j], tot_rd_reqs, req);
         } else {
            found = ssd_already_present(wr_q[j], tot_wr_reqs, req);
         }

         if (!found) {
            // this is a valid request
            ssd_req *r = malloc(sizeof(ssd_req));
            r->blk = req->blkno;
            r->count = req->bcount;
            r->is_read = req->flags & READ;
            r->org_req = req;
            r->plane_num = -1; // we don't know to which plane this req will be directed at

            if (req->flags & READ) {
               rd_q[j][tot_rd_reqs] = r;
               tot_rd_reqs ++;
            } else {
               wr_q[j][tot_wr_reqs] = r;
               tot_wr_reqs ++;
            }
         } else {
            // throw this request -- it doesn't make sense
            stat_update (&s->stat.acctimestats, 0);
            req->time = simtime;
            req->ssd_elem_num = i;
            req->ssd_gang_num = gang_num;
            req->type = DEVICE_ACCESS_COMPLETE;
            addtointq ((event *)req);
         }

         ASSERT((tot_rd_reqs < MAX_REQS) && (tot_wr_reqs < MAX_REQS));
      }

      *rd_total = *rd_total + tot_rd_reqs;
      *wr_total = *wr_total + tot_wr_reqs;

      // go to the next element
      i = ssd_next_elem_in_gang(s, gang_num, i);
   } while (i != start);
}
static void ssd_activate_elem (ssd_t *currdisk, int elem_num)
{
   ioreq_event *req;
   ssd_req **read_reqs;
   ssd_req **write_reqs;
   int i;
   int read_total = 0;
   int write_total = 0;
   double schtime = 0;
   int max_reqs;
   int tot_reqs_issued;
   double max_time_taken = 0;

   ssd_element *elem = &currdisk->elements[elem_num];

   // if the media is busy, we can't do anything, so return
   if (elem->media_busy == TRUE) {
      return;
   }

   ASSERT(ioqueue_get_reqoutstanding(elem->queue) == 0);

   // we can invoke cleaning in the background whether there
   // is a request waiting or not
   if (currdisk->params.cleaning_in_background) {
      // if cleaning was invoked, wait until it is over ...
      if (ssd_invoke_element_cleaning(elem_num, currdisk)) {
         return;
      }
   }

   ASSERT(elem->metadata.reqs_waiting == ioqueue_get_number_in_queue(elem->queue));

   if (elem->metadata.reqs_waiting > 0) {

      // invoke cleaning in foreground when there are requests waiting
      if (!currdisk->params.cleaning_in_background) {
         // if cleaning was invoked, wait until it is over ...
         if (ssd_invoke_element_cleaning(elem_num, currdisk)) {
            return;
         }
      }

      // how many reqs can we issue at once
      if (currdisk->params.copy_back == SSD_COPY_BACK_DISABLE) {
         max_reqs = 1;
      } else {
         if (currdisk->params.num_parunits == 1) {
            max_reqs = 1;
         } else {
            max_reqs = MAX_REQS_ELEM_QUEUE;
         }
      }

      // ideally, we should issue one req per plane, overlapping them all.
      // in order to simplify the overlapping strategy, let's issue
      // requests of the same type together.
      read_reqs = (ssd_req **) malloc(max_reqs * sizeof(ssd_req *));
      write_reqs = (ssd_req **) malloc(max_reqs * sizeof(ssd_req *));

      // collect the requests
      while ((req = ioqueue_get_next_request(elem->queue)) != NULL) {
         int found = 0;

         elem->metadata.reqs_waiting --;

         // see if we already have the same request in the list.
         // this usually doesn't happen -- but on synthetic traces
         // this weird case can occur.
         if (req->flags & READ) {
            found = ssd_already_present(read_reqs, read_total, req);
         } else {
            found = ssd_already_present(write_reqs, write_total, req);
         }

         if (!found) {
            // this is a valid request
            ssd_req *r = malloc(sizeof(ssd_req));
            r->blk = req->blkno;
            r->count = req->bcount;
            r->is_read = req->flags & READ;
            r->org_req = req;
            r->plane_num = -1; // we don't know to which plane this req will be directed at

            if (req->flags & READ) {
               read_reqs[read_total] = r;
               read_total ++;
            } else {
               write_reqs[write_total] = r;
               write_total ++;
            }

            // if we have more reqs than we can handle, quit
            if ((read_total >= max_reqs) || (write_total >= max_reqs)) {
               break;
            }
         } else {
            // throw this request -- it doesn't make sense
            stat_update (&currdisk->stat.acctimestats, 0);
            req->time = simtime;
            req->ssd_elem_num = elem_num;
            req->type = DEVICE_ACCESS_COMPLETE;
            addtointq ((event *)req);
         }
      }

      if (read_total > 0) {
         // first issue all the read requests (it doesn't matter what we
         // issue first). i chose read because reads are mostly synchronous.
         // find the time taken to serve these requests.
         ssd_compute_access_time(currdisk, elem_num, read_reqs, read_total);

         // add an event for each request completion
         for (i = 0; i < read_total; i ++) {
            elem->media_busy = TRUE;

            // find the maximum time taken by a request
            if (schtime < read_reqs[i]->schtime) {
               schtime = read_reqs[i]->schtime;
            }

            stat_update (&currdisk->stat.acctimestats, read_reqs[i]->acctime);
            read_reqs[i]->org_req->time = simtime + read_reqs[i]->schtime;
            read_reqs[i]->org_req->ssd_elem_num = elem_num;
            read_reqs[i]->org_req->type = DEVICE_ACCESS_COMPLETE;

            //printf("R: blk %d elem %d acctime %f simtime %f\n", read_reqs[i]->blk,
            //    elem_num, read_reqs[i]->acctime, read_reqs[i]->org_req->time);

            addtointq ((event *)read_reqs[i]->org_req);
            free(read_reqs[i]);
         }
      }

      free(read_reqs);

      max_time_taken = schtime;

      if (write_total > 0) {
         // next issue the write requests
         ssd_compute_access_time(currdisk, elem_num, write_reqs, write_total);

         // add an event for each request completion.
         // note that we can issue the writes only after all the reads above are
         // over. so, include the maximum read time when creating the event.
         for (i = 0; i < write_total; i ++) {
            elem->media_busy = TRUE;

            stat_update (&currdisk->stat.acctimestats, write_reqs[i]->acctime);
            write_reqs[i]->org_req->time = simtime + schtime + write_reqs[i]->schtime;
            //printf("blk %d elem %d acc time %f\n", write_reqs[i]->blk, elem_num, write_reqs[i]->acctime);

            if (max_time_taken < (schtime + write_reqs[i]->schtime)) {
               max_time_taken = (schtime + write_reqs[i]->schtime);
            }

            write_reqs[i]->org_req->ssd_elem_num = elem_num;
            write_reqs[i]->org_req->type = DEVICE_ACCESS_COMPLETE;

            //printf("W: blk %d elem %d acctime %f simtime %f\n", write_reqs[i]->blk,
            //    elem_num, write_reqs[i]->acctime, write_reqs[i]->org_req->time);

            addtointq ((event *)write_reqs[i]->org_req);
            free(write_reqs[i]);
         }
      }

      free(write_reqs);

      // statistics
      tot_reqs_issued = read_total + write_total;
      ASSERT(tot_reqs_issued > 0);
      currdisk->elements[elem_num].stat.tot_reqs_issued += tot_reqs_issued;
      currdisk->elements[elem_num].stat.tot_time_taken += max_time_taken;
   }
}
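/*
 * ssd_already_present() is used by both collection routines above to drop
 * duplicate requests, but its definition is not shown in this section.
 * The sketch below is a minimal reconstruction based solely on how it is
 * called (array of collected ssd_req pointers, current count, candidate
 * request): a request is treated as a duplicate if a collected entry has
 * the same starting block and flags.  The actual implementation may
 * compare differently; the name is suffixed _sketch to mark it as
 * hypothetical.
 */
static int ssd_already_present_sketch (ssd_req **reqs, int total, ioreq_event *req)
{
   int i;
   for (i = 0; i < total; i ++) {
      if ((req->blkno == reqs[i]->org_req->blkno) &&
          (req->flags == reqs[i]->org_req->flags)) {
         return 1;   /* duplicate of an already-collected request */
      }
   }
   return 0;   /* not present in the collected list */
}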