/* Resolves the dependency record matching the just-completed request `curr`
 * for outstanding request `req`, and releases any requests that were waiting
 * only on it.  Newly-runnable requests are spliced onto the circular list
 * rooted at curr->next; the count of released requests is returned.
 *
 * NOTE(review): the depends records appear to be chunked — deps[] is indexed
 * modulo 10 and tmpdep->cont looks like a continuation pointer to the next
 * chunk; confirm against the depends struct definition. */
int logorg_check_dependencies (logorg *currlogorg, outstand *req, ioreq_event *curr)
{
   int i;
   depends *tmpdep;
   depends *del = NULL;
   int numreqs = 0;
   int devno = 0;          /* index within the current deps[] chunk */
   int numdeps;
   ioreq_event *temp;

   tmpdep = req->depend;
   if (tmpdep == NULL) {
      return(0);
   }
   /* Unlink the depends record matching (devno, blkno) from req's list:
    * either the head, or an interior node found by the scan below. */
   if ((tmpdep->devno == curr->devno) && (tmpdep->blkno == curr->blkno)) {
      req->depend = tmpdep->next;
   } else {
      while (tmpdep->next) {
         if ((tmpdep->next->devno == curr->devno) && (tmpdep->next->blkno == curr->blkno)) {
            del = tmpdep->next;
            tmpdep->next = del->next;
            tmpdep = del;      /* continue with the unlinked record */
            break;
         }
         tmpdep = tmpdep->next;
      }
      if (del == NULL) {
         /* completed request had no dependency record here */
         return(0);
      }
   }
   curr->next = curr;          /* start a circular list of released requests */
   numdeps = tmpdep->numdeps;
   i = 0;
   while (i < numdeps) {
      temp = tmpdep->deps[devno];
      temp->opid--;            /* opid is used as a pending-dependency count */
      if (temp->opid == 0) {
         /* last dependency satisfied: release this waiter */
         numreqs++;
         temp->next = curr->next;
         curr->next = temp;
         temp->opid = req->opid;
         temp->time = 0.0;
         if (req->flags & TIME_CRITICAL) {
            temp->flags |= TIME_CRITICAL;
         }
      }
      /* NOTE(review): this guard looks redundant — the loop condition already
       * ensures i < numdeps before entry, so (i++) >= numdeps can only stop
       * the final chunk-advance below on the last iteration; confirm intent. */
      if ((i++) >= numdeps) {
         break;
      }
      /* deps[] holds at most 10 entries per chunk; wrap to next chunk */
      devno = logorg_modulus_update(1, devno, 10);
      if (devno == 0) {
         del = tmpdep;
         tmpdep = tmpdep->cont;
         addtoextraq((event *) del);
      }
   }
   addtoextraq((event *) tmpdep);
   return(numreqs);
}
/* Release a clean block's hold on the memory-cache buffer.  A plain read
 * descriptor is retired and its buffer space returned; a combined
 * populate+read descriptor is demoted to populate-only so the in-flight
 * cache-device write can still finish. */
static void cachedev_free_block_clean (struct cache_if *c, ioreq_event *req)
{
   struct cache_dev *cache = (struct cache_dev *) c;
   struct cache_dev_event *desc;

#ifdef DEBUG_CACHEDEV
   fprintf (outputfile, "*** %f: Entered cachedev::cache_free_block_clean: blkno %d, bcount %d, devno %d\n", simtime, req->blkno, req->bcount, req->devno);
#endif

   /* For now, just find relevant rwdesc and free it. */
   /* Later, write it to the cache device (and update the cache map thusly. */
   desc = cachedev_find_ongoing_request (cache, req);
   ASSERT (desc != NULL);

   if (desc->type != CACHE_EVENT_READ) {
      /* an ongoing populate still needs the descriptor; drop the read half */
      ASSERT (desc->type == CACHE_EVENT_POPULATE_ALSO);
      desc->type = CACHE_EVENT_POPULATE_ONLY;
      return;
   }

   cache->bufferspace -= req->bcount;
   cachedev_remove_ongoing_request (cache, desc);
   addtoextraq ((event *) desc);
}
/* Wakeup-completion callback for the cache device.  Currently a stub: the
 * ASSERT(0) fires if it is ever invoked, and the dispatch skeleton below is
 * compiled out with #if 0 — apparently kept as a template for a future
 * implementation. */
static void cachedev_wakeup_complete (struct cache_if *c, void *d) // really struct cache_dev_event
{
   struct cache_dev_event *desc = (struct cache_dev_event *)d;
   struct cache_dev *cache = (struct cache_dev *)c;

   ASSERT (0);   /* not expected to be reached in the current design */

#ifdef DEBUG_CACHEDEV
   fprintf (outputfile, "*** %f: Entered cachedev::cachedev_wakeup_complete (does nothing)\n", simtime );
#endif

   // ???
#if 0
   switch(desc->type) {
   case CACHE_EVENT_READ:
      cache_read_continue(cache, desc);
      break;
   case CACHE_EVENT_WRITE:
      cache_write_continue(cache, desc);
      break;
   case CACHE_EVENT_FLUSH:
      (*desc->donefunc)(desc->doneparam, desc->req);
      addtoextraq((event *) desc);
      break;
   default:
      ddbg_assert2(0, "Unknown cachedev event type");
      break;
   }
#endif
}
/* Bus reconnect finished for a simpledisk.  Reads either begin the data
 * transfer now or (if the disk never disconnects) just drop the event and
 * wait for the media access; writes either complete the request or begin
 * the data transfer, depending on why the reconnect happened. */
static void simpledisk_reconnect_done (ioreq_event *curr)
{
   simpledisk_t *currdisk = getsimpledisk (curr->devno);

   if (curr->flags & READ) {
      if (currdisk->neverdisconnect) {
         /* Just holding on to bus; data transfer will be initiated when */
         /* media access is complete. */
         addtoextraq ((event *) curr);
      } else {
         /* data transfer: curr->bcount, which is still set to original */
         /* requested value, indicates how many blks to transfer. */
         curr->type = DEVICE_DATA_TRANSFER_COMPLETE;
         simpledisk_send_event_up_path (curr, 0.0);
      }
      return;
   }

   if (currdisk->reconnect_reason == DEVICE_ACCESS_COMPLETE) {
      simpledisk_request_complete (curr);
      return;
   }

   /* write data transfer; bcount still holds the full requested size */
   curr->type = DEVICE_DATA_TRANSFER_COMPLETE;
   simpledisk_send_event_up_path (curr, 0.0);
}
/* A sub-transfer finished on the 53c700 controller.  Deduct it from the
 * parent transfer (stashed in tempptr1); when the parent reaches zero,
 * unlink it from the active data-transfer list and requeue it.  Partial
 * leftovers are not supported and abort the simulation. */
static void controller_53c700_data_transfer_complete (controller *currctlr, ioreq_event *curr)
{
   ioreq_event *parent;

#ifdef DEBUG_CTLRDUMB
   fprintf (outputfile, "*** %f: controller_53c700_data_transfer_complete - devno %d, blkno %d\n, bcount %d, flags 0x%x\n", simtime, curr->devno, curr->blkno, curr->bcount, curr->flags );
   fflush(outputfile );
#endif

   parent = (ioreq_event *) curr->tempptr1;
   parent->bcount -= curr->bcount;
   addtoextraq ((event *) curr);

   if (parent->bcount < 0) {
      fprintf(stderr, "Transfered more than requested at controller_data_transfer_done\n");
      exit(1);
   }
   if (parent->bcount > 0) {
      fprintf(stderr, "Haven't required less than all out transfer at controller_data_transfer_done\n");
      exit(1);
   }

   /* whole transfer done: remove parent from the doubly-linked list */
   if (parent->next) {
      parent->next->prev = parent->prev;
   }
   if (parent->prev) {
      parent->prev->next = parent->next;
   } else {
      currctlr->datatransfers = parent->next;
   }
   parent->time = simtime;
   addtointq ((event *) parent);
}
/* Completion handshake finished: free the event, give up bus ownership if
 * held, and start the next queued request (reads go straight to the media;
 * writes reconnect to the host first). */
static void simpledisk_completion_done (ioreq_event *curr)
{
   simpledisk_t *currdisk = getsimpledisk (curr->devno);

   addtoextraq ((event *) curr);

   if (currdisk->busowned != -1) {
      bus_ownership_release (currdisk->busowned);
      currdisk->busowned = -1;
   }

   /* check for and start next queued request, if any */
   curr = ioqueue_get_next_request (currdisk->queue);
   if (curr == NULL) {
      return;
   }

   ASSERT (currdisk->media_busy == FALSE);
   if ((curr->flags & READ) == 0) {
      /* write: announce reconnection to the host before transferring data */
      curr->type = IO_INTERRUPT_ARRIVE;
      curr->cause = RECONNECT;
      simpledisk_send_event_up_path (curr, currdisk->bus_transaction_latency);
      currdisk->reconnect_reason = IO_INTERRUPT_ARRIVE;
   } else {
      /* read: begin the (fixed-latency) media access immediately */
      currdisk->media_busy = TRUE;
      stat_update (&currdisk->stat.acctimestats, currdisk->acctime);
      curr->time = simtime + currdisk->acctime;
      curr->type = DEVICE_ACCESS_COMPLETE;
      addtointq ((event *) curr);
   }
}
/* Bus reset finished on the 53c700.  The controller must have been waiting
 * on a completion or disconnect; it goes FREE, and if any reconnections are
 * queued the first one is started immediately. */
static void controller_53c700_reset_complete (controller *currctlr, ioreq_event *curr)
{
#ifdef DEBUG_CTLRDUMB
   fprintf (outputfile, "*** %f: controller_53c700_reset_complete - devno %d, blkno %d, bcount %d, flags %X\n", simtime, curr->devno, curr->blkno, curr->bcount, curr->flags );
   fflush( outputfile);
#endif

   if ((currctlr->state != COMPLETION_PENDING) && (currctlr->state != DISCONNECT_PENDING)) {
      fprintf(stderr, "Controller not in appropriate state for reset completion\n");
      exit(1);
   }

   currctlr->state = FREE;
   addtoextraq ((event *) curr);

   if (currctlr->connections != NULL) {
      /* a device is waiting to reconnect; service it now */
      ioreq_event *pending = currctlr->connections;
      currctlr->connections = pending->next;
      currctlr->state = RECONNECTING;
      controller_send_event_up_path (currctlr, pending, currctlr->ovrhd_reset);
   }
}
/* Begins servicing a traced request: charges the device's idle gap to the
 * appropriate statistic, pulls the matching queued request, and schedules
 * its disk access. */
void iodriver_trace_request_start (int iodriverno, ioreq_event *curr)
{
   ioreq_event *tmp;
   device *currdev = &iodrivers[iodriverno]->devices[(curr->devno)];
   double tdiff = simtime - currdev->lastevent;

   /* attribute the idle interval to why the device went idle:
    * flag 1 = waiting to initiate next, flag 2 = queue was empty */
   if (currdev->flag == 1) {
      stat_update(&initiatenextstats, tdiff);
   } else if (currdev->flag == 2) {
      stat_update(&emptyqueuestats, tdiff);
   }
   currdev->flag = 0;

   /* swap the trace placeholder for the real queued request */
   tmp = ioqueue_get_specific_request(currdev->queue, curr);
   addtoextraq((event *) curr);
   ASSERT(tmp != NULL);
   schedule_disk_access(iodrivers[iodriverno], tmp);
   tmp->time = simtime;
   tmp->type = IO_ACCESS_ARRIVE;
   tmp->slotno = 0;
   /* NOTE(review): tmp->time was just assigned simtime above, so this test is
    * always true and the addtointq branch appears unreachable — possibly the
    * assignment was once expected to happen inside schedule_disk_access.
    * Confirm intent before relying on the else path. */
   if (tmp->time == simtime) {
      iodriver_schedule(iodriverno, tmp);
   } else {
      addtointq((event *) tmp);
   }
}
/* Media access finished on one SSD element: retire the request from the
 * element's queue, mark the element idle if nothing is outstanding, notify
 * the parent request, and kick the element to issue its next operation. */
static void ssd_access_complete_element(ioreq_event *curr)
{
   ssd_t *currdisk = getssd (curr->devno);
   int elem_num = currdisk->timing_t->choose_element(currdisk->timing_t, curr->blkno);
   ssd_element *elem;
   ioreq_event *done;

   ASSERT(elem_num == curr->ssd_elem_num);
   elem = &currdisk->elements[elem_num];

   done = ioqueue_physical_access_done(elem->queue, curr);
   if (done == NULL) {
      fprintf(stderr, "ssd_access_complete: ioreq_event not found by ioqueue_physical_access_done call\n");
      exit(1);
   }

   /* element is idle once every outstanding request has drained */
   if (ioqueue_get_reqoutstanding(elem->queue) == 0) {
      elem->media_busy = FALSE;
   }

   ssd_complete_parent(curr, currdisk);
   addtoextraq((event *) curr);
   ssd_activate_elem(currdisk, elem_num);
}
/* No-op completion callback: simply recycles the finished request event.
 * Used where a donefunc is required but nothing remains to be done. */
static void cachedev_empty_donefunc (void *doneparam, ioreq_event *req)
{
#ifdef DEBUG_CACHEDEV
   fprintf(outputfile, "*** %f: Entered cachedev::cachedev_empty_donefunc - adding to extraq type %d, devno %d, blkno %d, bcount %d, flags 0x%x\n", simtime, req->type, req->devno, req->blkno, req->bcount, req->flags );
   fflush(outputfile);
#endif

   addtoextraq((event *) req);
}
/* Tracks 53c700-style controller busy state across interrupt arrival and
 * completion, and manages the controller's pending-I/O list.  No-op unless
 * the device sits behind a DRIVER_C700 controller.
 *
 * NOTE(review): ctl->pendio appears to be a circular singly-linked list whose
 * tail is the pendio pointer itself (see how it is built in iodriver_schedule);
 * pendio->next is therefore the head. */
static void iodriver_check_c700_based_status (iodriver *curriodriver, int devno, int cause, int type, LBA_t blkno)
{
   ctlr *ctl;
   ioreq_event *tmp;

   ctl = curriodriver->devices[devno].ctl;
   if ((ctl == NULL) || (!(ctl->flags & DRIVER_C700))) {
      return;   /* only the C700 model needs this bookkeeping */
   }
   if (type == IO_INTERRUPT_ARRIVE) {
      /* interrupt for the head pending request: retire it from the list */
      if (ctl->pendio != NULL) {
         tmp = ctl->pendio->next;
         if ((tmp->devno == devno) && (tmp->blkno == blkno)) {
            if (ctl->pendio == tmp) {
               ctl->pendio = NULL;      /* list had exactly one entry */
            } else {
               ctl->pendio->next = tmp->next;
            }
            addtoextraq((event *) tmp);
         }
      }
      /* any recognized interrupt marks the controller busy */
      switch (cause) {
      case COMPLETION:
      case DISCONNECT:
      case RECONNECT:
      case READY_TO_TRANSFER:
         ctl->flags |= DRIVER_CTLR_BUSY;
         break;
      default:
         fprintf(stderr, "Unknown interrupt cause at iodriver_check_c700_based_status - cause %d\n", cause);
         exit(1);
      }
   } else {
      /* interrupt completion (IO_INTERRUPT_COMPLETE path) */
      switch (cause) {
      case COMPLETION:
      case DISCONNECT:
         /* controller frees up; reissue the head pending request, if any */
         ctl->flags &= ~DRIVER_CTLR_BUSY;
         if (ctl->pendio != NULL) {
            tmp = ioreq_copy(ctl->pendio->next);
            tmp->time = simtime;
            addtointq((event *) tmp);
         }
         break;
      case RECONNECT:
      case READY_TO_TRANSFER:
         ctl->flags |= DRIVER_CTLR_BUSY;
         break;
      default:
         fprintf(stderr, "Unknown interrupt cause at iodriver_check_c700_based_status - cause %d\n", cause);
         exit(1);
      }
   }
}
/* Media access finished on one SSD element (logging variant): records the
 * completion in the read/write trace files, retires the request from the
 * element queue, updates power accounting, notifies the parent request, and
 * re-activates the element. */
static void ssd_access_complete_element(ioreq_event *curr)
{
   ssd_t *currdisk = getssd (curr->devno);
   int elem_num = ssd_choose_element(currdisk->user_params, curr->blkno);
   ssd_element *elem;
   ioreq_event *done;
   int lba;

   ASSERT(elem_num == curr->ssd_elem_num);
   elem = &currdisk->elements[elem_num];

   /* trace the completion: reads to outputfile5, writes to outputfile4 */
   lba = ssd_logical_pageno(curr->blkno, currdisk);
   if (curr->flags & READ) {
      fprintf(outputfile5, "%10.6f %d %d %d\n", simtime, lba, elem_num, curr->blkno);
   } else {
      fprintf(outputfile4, "%10.6f %d %d %d\n", simtime, lba, elem_num, curr->blkno);
   }

   done = ioqueue_physical_access_done(elem->queue, curr);
   if (done == NULL) {
      fprintf(stderr, "ssd_access_complete: ioreq_event not found by ioqueue_physical_access_done call\n");
      exit(1);
   }

   ssd_dpower(currdisk, 0);

   /* element goes idle once all outstanding requests have drained */
   if (ioqueue_get_reqoutstanding(elem->queue) == 0) {
      elem->media_busy = FALSE;
   }

   ssd_complete_parent(curr, currdisk);
   addtoextraq((event *) curr);
   ssd_activate_elem(currdisk, elem_num);
}
/* Handles an SSD_ACTIVATE-style event: recycles the trigger event and pokes
 * the targeted element so it can issue its next queued operation. */
void ssd_activate(ioreq_event *curr)
{
   ssd_t *currdisk = getssd(curr->devno);
   int elem_num = curr->ssd_elem_num;

   /* the trigger event itself carries no further information */
   addtoextraq((event *) curr);

   ssd_activate_elem(currdisk, elem_num);
}
/* Interrupt-completion handler for the smart controller: only COMPLETION
 * interrupts are expected here; anything else aborts the simulation. */
static void controller_smart_interrupt_complete (controller *currctlr, ioreq_event *curr)
{
   if (curr->cause != COMPLETION) {
      fprintf(stderr, "Unknown interrupt cause in smart_interrupt_complete: %d\n", curr->cause);
      exit(1);
   }
   addtoextraq((event *) curr);
}
/* Finishes processing an interrupt event.  For standalone drivers, a
 * COMPLETION cause first runs access-completion handling, then the driver
 * responds to the device; the interrupt event is recycled either way. */
void iodriver_interrupt_complete (int iodriverno, intr_event *intrp)
{
   if (iodrivers[iodriverno]->type == STANDALONE) {
      ioreq_event *info = (ioreq_event *) intrp->infoptr;
      if (info->cause == COMPLETION) {
         iodriver_access_complete(iodriverno, intrp);
      }
      iodriver_respond_to_device(iodriverno, intrp);
   }
   addtoextraq((event *) intrp);
}
/* Sends the driver's response to a device interrupt back down the I/O path,
 * then updates C700 controller status.  Short-circuits for the constant-
 * access-time driver model, where no device path exists to respond on. */
void iodriver_respond_to_device (int iodriverno, intr_event *intrp)
{
   ioreq_event *req = NULL;
   int devno;
   int cause;

   /* constant-time model: nothing below the driver to talk to */
   if ((iodrivers[iodriverno]->consttime != 0.0) && (iodrivers[iodriverno]->consttime != IODRIVER_TRACED_QUEUE_TIMES)) {
      if (iodrivers[iodriverno]->type == STANDALONE) {
         addtoextraq((event *) intrp->infoptr);
      }
      return;
   }
   req = (ioreq_event *) intrp->infoptr;
   req->type = IO_INTERRUPT_COMPLETE;
   devno = req->devno;
   cause = req->cause;
   switch (cause) {
   case COMPLETION:
      /* non-standalone drivers still own infoptr, so respond with a copy */
      if (iodrivers[iodriverno]->type != STANDALONE) {
         req = ioreq_copy((ioreq_event *) intrp->infoptr);
      }
      /* fallthrough */
   case DISCONNECT:
   case RECONNECT:
      iodriver_send_event_down_path(req);
      break;
   case READY_TO_TRANSFER:
      /* no response needed; just recycle the event */
      addtoextraq((event *) req);
      break;
   default:
      fprintf(stderr, "Unknown io_interrupt cause - %d\n", req->cause);
      exit(1);
   }
   iodriver_check_c700_based_status(iodrivers[iodriverno], devno, cause, IO_INTERRUPT_COMPLETE, 0);
}
/* Bus disconnect finished: recycle the event and release bus ownership if
 * this disk currently holds a bus. */
static void simpledisk_disconnect_done (ioreq_event *curr)
{
   simpledisk_t *currdisk = getsimpledisk (curr->devno);

   addtoextraq((event *) curr);

   if (currdisk->busowned == -1) {
      return;   /* no bus held */
   }
   bus_ownership_release(currdisk->busowned);
   currdisk->busowned = -1;
}
/*
 * Cleaning (garbage collection) on an element has finished: the element is
 * marked idle again and re-activated so it can serve queued requests.
 */
static void ssd_clean_element_complete(ioreq_event *curr)
{
   ssd_t *currdisk = getssd (curr->devno);
   int elem_num = curr->ssd_elem_num;

   ASSERT(currdisk->elements[elem_num].media_busy == TRUE);

   /* the completion event itself is no longer needed */
   addtoextraq((event *) curr);

   /* element is free; let it pick up the next set of requests */
   currdisk->elements[elem_num].media_busy = 0;
   ssd_activate_elem(currdisk, elem_num);
}
/* Completion disconnect finished on the SSD: recycle the event, give up the
 * bus if held, and see whether the channel has more work to start. */
static void ssd_completion_done (ioreq_event *curr)
{
   ssd_t *currdisk = getssd (curr->devno);

   ssd_assert_current_activity(currdisk, curr);

   addtoextraq((event *) curr);

   if (currdisk->busowned != -1) {
      bus_ownership_release(currdisk->busowned);
      currdisk->busowned = -1;
   }

   ssd_check_channel_activity (currdisk);
}
/* Bus disconnect finished (debug-instrumented variant): recycle the event
 * and release the bus if this disk currently owns one. */
static void simpledisk_disconnect_done (ioreq_event *curr)
{
   simpledisk_t *currdisk = getsimpledisk (curr->devno);

#ifdef DEBUG_SIMPLEDISK
   fprintf (outputfile, "*** %f: simpledisk_disconnect_done - devno %d, blkno %d, bcount %d, flags 0x%x\n", simtime, curr->devno, curr->blkno, curr->bcount, curr->flags );
#endif

   addtoextraq((event *) curr);

   if (currdisk->busowned == -1) {
      return;   /* nothing to release */
   }
   bus_ownership_release(currdisk->busowned);
   currdisk->busowned = -1;
}
/* Coalesces sequential same-direction requests in `reqlist` (sorted by blkno)
 * whose inter-request gap is at most `seqgive` blocks, then splices every
 * surviving request onto the circular list rooted at curr->next.  Returns the
 * number of requests spliced in.
 *
 * NOTE(review): `temp->opid |= del->opid` merges opids as a bitmask when two
 * requests are joined — presumably opids here are one-hot sub-request tags;
 * confirm against the caller. */
static int logorg_join_seqreqs (ioreq_event *reqlist, ioreq_event *curr, int seqgive)
{
   ioreq_event *temp;
   ioreq_event *del;
   int numreqs = 0;
   int distance;

   temp = reqlist;
   if (temp) {
      while (temp->next) {
         /* gap (in blocks) between this request's end and the next's start */
         distance = temp->next->blkno - temp->blkno - temp->bcount;
/*
fprintf (outputfile, "In logorg_join_seqreqs, devno %d, blkno %d, bcount %d, read %d, distance %d\n", temp->devno, temp->blkno, temp->bcount, (temp->flags & READ), distance);
*/
         if (distance < 0) {
            /* list must be sorted and non-overlapping */
            fprintf(stderr, "Integrity check failure at logorg_join_seqreqs - blkno %d, bcount %d, blkno %d, read %d\n", temp->blkno, temp->bcount, temp->next->blkno, (temp->flags & READ));
            exit(1);
         }
         if (((temp->flags & READ) == (temp->next->flags & READ)) && (distance <= seqgive)) {
            /* same direction and close enough: absorb the next request,
             * including the gap, into this one */
            del = temp->next;
            temp->next = del->next;
            temp->bcount += del->bcount + distance;
            temp->opid |= del->opid;
            addtoextraq((event *) del);
         } else {
            /* cannot merge: emit `temp` onto curr's circular list and
             * continue scanning from its successor */
            del = temp;
            temp = del->next;
            del->next = curr->next;
            curr->next = del;
            del->time = curr->time;
            del->buf = curr->buf;
            numreqs++;
         }
      }
      /* emit the final (possibly merged) request */
      numreqs++;
      temp->next = curr->next;
      curr->next = temp;
      temp->time = curr->time;
      temp->buf = curr->buf;
   }
   return(numreqs);
}
/* Probes down the I/O path with an IO_QLEN_MAXCHECK event to discover which
 * controller queues for this device (tempint1) and its queue-depth limit
 * (tempint2) / max request size (bcount), then records the answers on the
 * device and controller structures. */
static void get_device_maxoutstanding (iodriver *curriodriver, device * dev)
{
   ioreq_event *probe = (ioreq_event *) getfromextraq();

   probe->busno = dev->buspath.value;
   probe->slotno = dev->slotpath.value;
   probe->devno = dev->devno;
   probe->type = IO_QLEN_MAXCHECK;
   iodriver_send_event_down_path(probe);

   /* tempint1 == -1 means no queueing controller: the device itself limits */
   dev->queuectlr = probe->tempint1;
   dev->maxoutstanding = (probe->tempint1 == -1) ? probe->tempint2 : -1;
   if (probe->tempint1 != -1) {
      curriodriver->ctlrs[probe->tempint1].maxoutstanding = probe->tempint2;
      curriodriver->ctlrs[probe->tempint1].maxreqsize = probe->bcount;
   }

   addtoextraq((event *) probe);
}
/* A new request reaches the 53c700 controller.  A RECONNECTING controller
 * silently drops it (the reconnect path will handle it); only a FREE
 * controller may forward it to the disk. */
static void controller_53c700_request_arrive (controller *currctlr, ioreq_event *curr)
{
#ifdef DEBUG_CTLRDUMB
   fprintf (outputfile, "*** %f: controller_53c700_request_arrive - devno %d, blkno %d\n, bcount %d, flags 0x%x\n", simtime, curr->devno, curr->blkno, curr->bcount, curr->flags );
   fflush(outputfile );
#endif

   if (currctlr->state == RECONNECTING) {
      addtoextraq((event *) curr);
      return;
   }
   if (currctlr->state != FREE) {
      fprintf(stderr, "Request arriving at non-FREE controller\n");
      exit(1);
   }

   controller_send_event_down_path(currctlr, curr, currctlr->ovrhd_disk_request);
   currctlr->state = REQUEST_PENDING;
}
/* Returns an entire event list to the extra queue and nulls the caller's
 * head pointer.  Handles both NULL-terminated and circular lists: traversal
 * stops on NULL or when it wraps back to the original head. */
void addlisttoextraq (event **headptr)
{
   event *head = *headptr;
   event *curr = head;
   event *next;

   if (curr == NULL) {
      return;
   }

   do {
      next = curr->next;   /* save before addtoextraq reuses the node */
      addtoextraq(curr);
      curr = next;
   } while (curr && (curr != head));

   *headptr = NULL;
}
/* A sub-transfer finished on the smart controller.  Deduct it from the
 * parent transfer (tempptr1); when the parent reaches zero, unlink it from
 * the active transfer list and requeue it.  Partial leftovers abort. */
static void controller_smart_disk_data_transfer_complete (controller *currctlr, ioreq_event *curr)
{
   ioreq_event *parent = (ioreq_event *) curr->tempptr1;

   parent->bcount -= curr->bcount;
   addtoextraq((event *) curr);
   ASSERT(parent->bcount >= 0);

   if (parent->bcount != 0) {
      fprintf(stderr, "Haven't required less than all out transfer at controller_smart_disk_data_transfer_complete\n");
      exit(1);
   }

   /* whole transfer done: remove parent from the doubly-linked list */
   if (parent->next) {
      parent->next->prev = parent->prev;
   }
   if (parent->prev) {
      parent->prev->next = parent->next;
   } else {
      currctlr->datatransfers = parent->next;
   }
   parent->time = simtime;
   addtointq((event *) parent);
}
/* Media access finished on one SSD element (logical-blockno logging variant):
 * records the completion in the read/write trace files, retires the request
 * from the element queue, updates power accounting, notifies the parent, and
 * re-activates the element. */
static void ssd_access_complete_element(ioreq_event *curr)
{
   ssd_t *currdisk = getssd (curr->devno);
   int elem_num = ssd_choose_element(currdisk->user_params, curr->blkno);
   ssd_element *elem;
   ioreq_event *done;
   int lba;

   ASSERT(elem_num == curr->ssd_elem_num);
   elem = &currdisk->elements[elem_num];

   /* trace the completion: reads to outputfile5, writes to outputfile4 */
   lba = ssd_logical_blockno(curr->blkno, currdisk);
   if (curr->flags & READ) {
      fprintf(outputfile5, "%10.6f %d %d %d\n", simtime, lba, elem_num, curr->blkno);
   } else {
      fprintf(outputfile4, "%10.6f %d %d %d\n", simtime, lba, elem_num, curr->blkno);
   }

   done = ioqueue_physical_access_done(elem->queue, curr);
   if (done == NULL) {
      fprintf(stderr, "ssd_access_complete: ioreq_event not found by ioqueue_physical_access_done call\n");
      exit(1);
   }

   ssd_dpower(currdisk, 0);

   /* element goes idle once all outstanding requests have drained */
   if (ioqueue_get_reqoutstanding(elem->queue) == 0) {
      elem->media_busy = FALSE;
   }

   ssd_complete_parent(curr, currdisk);
   addtoextraq((event *) curr);
   ssd_activate_elem(currdisk, elem_num);
}
/* Bus reconnect finished on the SSD.  Reads just free the event and let the
 * channel pick up more work; writes either complete the request or begin the
 * data transfer, depending on why the reconnect happened. */
static void ssd_reconnect_done (ioreq_event *curr)
{
   ssd_t *currdisk = getssd (curr->devno);

   ssd_assert_current_activity(currdisk, curr);

   if (curr->flags & READ) {
      addtoextraq((event *) curr);
      ssd_check_channel_activity (currdisk);
      return;
   }

   if (currdisk->reconnect_reason == DEVICE_ACCESS_COMPLETE) {
      ssd_request_complete (curr);
   } else {
      /* data transfer: curr->bcount, which is still set to original */
      /* requested value, indicates how many blks to transfer. */
      curr->type = DEVICE_DATA_TRANSFER_COMPLETE;
      ssd_send_event_up_path(curr, 0.0);
   }
}
/* Disk access finished for a cache-device operation.  Dispatches on the
 * descriptor's state (stashed in curr->buf) to advance the read/write/
 * populate/flush state machine: reads may trigger a cache population,
 * writes may trigger a write-through destage, populate/flush states retire
 * the descriptor and return its buffer space.  Returns the cacheevent to
 * forward (always NULL in the current code). */
static void * cachedev_disk_access_complete (struct cache_if *c, ioreq_event *curr)
{
   struct cache_dev *cache = (struct cache_dev *)c;
   struct cache_dev_event *rwdesc = (struct cache_dev_event *)curr->buf;
   struct cache_dev_event *tmp = NULL;

#ifdef DEBUG_CACHEDEV
   fprintf (outputfile, "*** %f: Entered cachedev::cache_disk_access_complete: cacheDevEventType %d, buf %p, type %d, devno %d, blkno %d, bcount %d, flags 0x%x\n", simtime, rwdesc->type, curr->buf, curr->type, curr->devno, curr->blkno, curr->bcount, curr->flags);
#endif

   switch(rwdesc->type) {
   case CACHE_EVENT_READ:
      /* Consider writing same buffer to cache_devno, in order to populate it.*/
      /* Not clear whether it is more appropriate to do it from here or from */
      /* "free_block_clean" -- do it here for now to get more overlap. */
      if (curr->devno == cache->real_devno) {
         ioreq_event *flushreq = ioreq_copy(rwdesc->req);
         flushreq->type = IO_ACCESS_ARRIVE;
         flushreq->buf = rwdesc;
         flushreq->flags = WRITE;
         flushreq->devno = cache->cache_devno;
         rwdesc->type = CACHE_EVENT_POPULATE_ALSO;
#ifdef DEBUG_CACHEDEV
         fprintf (outputfile, "*** %f: Entered cachedev::cache_disk_access_complete - flushing memory cache to disk: type %d, devno %d, blkno %d, bcount %d, flags 0x%x, buf %p\n", simtime, flushreq->type, flushreq->devno, flushreq->blkno, flushreq->bcount, flushreq->flags, flushreq->buf);
#endif
         (*cache->issuefunc)(cache->issueparam, flushreq);
         cache->stat.popwrites++;
         cache->stat.popwriteblocks += rwdesc->req->bcount;
      }
      /* Ongoing read request can now proceed, so call donefunc from get_block*/
      (*rwdesc->donefunc)(rwdesc->doneparam,rwdesc->req);
      break;
   case CACHE_EVENT_WRITE:
      /* finished writing to cache-device */
      if (curr->devno == cache->cache_devno) {
         cachedev_setbits (cache->validmap, curr);
         cachedev_setbits (cache->dirtymap, curr);
         if (cache->writescheme == CACHE_WRITE_THRU) {
            /* write-through: immediately destage the same data to the
             * real device, turning the descriptor into a FLUSH */
            ioreq_event *flushreq = ioreq_copy(rwdesc->req);
            flushreq->type = IO_ACCESS_ARRIVE;
            flushreq->buf = rwdesc;
            flushreq->flags = WRITE;
            flushreq->devno = cache->real_devno;
            rwdesc->type = CACHE_EVENT_FLUSH;
            (*cache->issuefunc)(cache->issueparam, flushreq);
            cache->stat.destagewrites++;
            cache->stat.destagewriteblocks += rwdesc->req->bcount;
         }
      }
      (*rwdesc->donefunc)(rwdesc->doneparam,rwdesc->req);
      /* unless a FLUSH is still in flight, the descriptor is finished */
      if (rwdesc->type != CACHE_EVENT_FLUSH) {
         cachedev_remove_ongoing_request (cache, rwdesc);
         addtoextraq ((event *) rwdesc);
         cache->bufferspace -= curr->bcount;
      }
      break;
   case CACHE_EVENT_POPULATE_ONLY:
      /* cache population done and nobody is waiting on the data */
      cachedev_setbits (cache->validmap, curr);
      cachedev_remove_ongoing_request (cache, rwdesc);
      addtoextraq ((event *) rwdesc);
      cache->bufferspace -= curr->bcount;
      break;
   case CACHE_EVENT_POPULATE_ALSO:
      /* population done but the originating read is still active */
      cachedev_setbits (cache->validmap, curr);
      rwdesc->type = CACHE_EVENT_READ;
      break;
   case CACHE_EVENT_FLUSH:
      /* destage to the real device done: block is clean again */
      cachedev_clearbits (cache->dirtymap, curr);
      cachedev_remove_ongoing_request (cache, rwdesc);
      addtoextraq ((event *) rwdesc);
      cache->bufferspace -= curr->bcount;
      break;
   case CACHE_EVENT_IDLEFLUSH_READ:
      {
         /* idle-time destage, phase 1 complete: dirty data read from the
          * cache device; now write it to the real device */
         ioreq_event *flushreq = ioreq_copy (curr);
         flushreq->type = IO_ACCESS_ARRIVE;
         flushreq->flags = WRITE;
         flushreq->devno = cache->real_devno;
         rwdesc->type = CACHE_EVENT_IDLEFLUSH_FLUSH;
         (*cache->issuefunc)(cache->issueparam, flushreq);
         cache->stat.destagewrites++;
         cache->stat.destagewriteblocks += curr->bcount;
      }
      break;
   case CACHE_EVENT_IDLEFLUSH_FLUSH:
      /* idle-time destage, phase 2 complete: block clean; look for more
       * idle work */
      cachedev_clearbits (cache->dirtymap, curr);
      cachedev_remove_ongoing_request (cache, rwdesc);
      addtoextraq ((event *) rwdesc);
      cachedev_idlework_callback (cache, curr->devno);
      cache->bufferspace -= curr->bcount;
      break;
   default:
      ddbg_assert2(0, "Unknown cachedev event type");
      break;
   }
   addtoextraq((event *) curr);
   /* returned cacheevent will get forwarded to cachedev_wakeup_continue... */
   return(tmp);
}
/* Handles completion of a physical device access: resolves oversized-request
 * splitting, retires the request from the device queue, feeds the validate /
 * closed-I/O trace engines, maps the completion back through the logical
 * organizations (possibly releasing follow-on requests), and finally
 * schedules the device's next request. */
void iodriver_access_complete (int iodriverno, intr_event *intrp)
{
   int i;
   int numreqs;
   ioreq_event *tmp;
   ioreq_event *del;
   ioreq_event *req;
   int devno;
   int skip = 0;          /* set if a released request targets this device */
   ctlr *ctl = NULL;
   time_t now;

   /* standalone drivers keep infoptr for the interrupt path, so work on a copy */
   if (iodrivers[iodriverno]->type == STANDALONE) {
      req = ioreq_copy((ioreq_event *) intrp->infoptr);
   } else {
      req = (ioreq_event *) intrp->infoptr;
   }

#ifdef DEBUG_IODRIVER
   fprintf (outputfile, "*** %f: iodriver_access_complete - devno %d, blkno %d, bcount %d, read %d\n", simtime, req->devno, req->blkno, req->bcount, (req->flags & READ));
   fflush(outputfile);
#endif

   time( & now );
   disksim_exectrace( "Request completion: simtime %f, devno %d, blkno %lld, bcount %d, flags %X, time %s\n", simtime, req->devno, req->blkno, req->bcount, req->flags, asctime( localtime(& now)) );

   /* If this device is behind a queueing controller, the completed piece may
    * be part of an "oversized" request that was split to honor the
    * controller's max request size.  ctl->oversized is a circular list;
    * scan it for the parent of this piece. */
   if (iodrivers[iodriverno]->devices[(req->devno)].queuectlr != -1) {
      int ctlrno = iodrivers[iodriverno]->devices[(req->devno)].queuectlr;
      ctl = &iodrivers[iodriverno]->ctlrs[ctlrno];
      tmp = ctl->oversized;
      numreqs = 1;   /* doubles as "first iteration" flag for the circular scan */
      while (((numreqs) || (tmp != ctl->oversized)) && (tmp) && (tmp->next) && ((tmp->next->devno != req->devno) || (tmp->next->opid != req->opid) || (req->blkno < tmp->next->blkno) || (req->blkno >= (tmp->next->blkno + tmp->next->bcount)))) {
         numreqs = 0;
         tmp = tmp->next;
      }
      if ((tmp) && (tmp->next->devno == req->devno) && (tmp->next->opid == req->opid) && (req->blkno >= tmp->next->blkno) && (req->blkno < (tmp->next->blkno + tmp->next->bcount))) {
         fprintf (outputfile, "%f, part of oversized request completed: opid %d, blkno %lld, bcount %d, maxreqsize %d\n", simtime, req->opid, req->blkno, req->bcount, ctl->maxreqsize);
         if ((req->blkno + ctl->maxreqsize) < (tmp->next->blkno + tmp->next->bcount)) {
            /* more pieces remain: advance the window and issue the next piece */
            fprintf (outputfile, "more to go\n");
            req->blkno += ctl->maxreqsize;
            req->bcount = min(ctl->maxreqsize, (tmp->next->blkno + tmp->next->bcount - req->blkno));
            goto schedule_next;
         } else {
            /* last piece: unlink the parent and complete it below */
            fprintf (outputfile, "done for real\n");
            addtoextraq((event *) req);
            req = tmp->next;
            tmp->next = tmp->next->next;
            if (ctl->oversized == req) {
               ctl->oversized = (req != req->next) ? req->next : NULL;
            }
            req->next = NULL;
         }
      }
   }

   devno = req->devno;
   req = ioqueue_physical_access_done(iodrivers[iodriverno]->devices[devno].queue, req);
   if (ctl) {
      ctl->numoutstanding--;
   }

   // special case for validate:
   if (disksim->traceformat == VALIDATE) {
      /* validate traces are self-clocking: issue the next traced request now */
      tmp = (ioreq_event *) getfromextraq();
      io_validate_do_stats1();
      tmp = iotrace_validate_get_ioreq_event(disksim->iotracefile, tmp);
      if (tmp) {
         io_validate_do_stats2(tmp);
         tmp->type = IO_REQUEST_ARRIVE;
         addtointq((event *) tmp);
         disksim_exectrace("Request issue: simtime %f, devno %d, blkno %lld, time %f\n", simtime, tmp->devno, tmp->blkno, tmp->time);
      } else {
         disksim_simstop();
      }
   } else if (disksim->closedios) {
      /* closed workload: each completion admits one new request after the
       * configured think time */
      tmp = (ioreq_event *) io_get_next_external_event(disksim->iotracefile);
      if (tmp) {
         io_using_external_event ((event *)tmp);
         tmp->time = simtime + disksim->closedthinktime;
         tmp->type = IO_REQUEST_ARRIVE;
         addtointq((event *) tmp);
      } else {
         disksim_simstop();
      }
   }

   /* Map each completed physical request back through the logical orgs.
    * COMPLETE means the whole logical request finished; a positive count
    * means dependent requests were released and must be (re)issued. */
   while (req) {
      tmp = req;
      req = req->next;
      tmp->next = NULL;
      update_iodriver_statistics();
      if ((numreqs = logorg_mapcomplete(sysorgs, numsysorgs, tmp)) == COMPLETE) {
         /* update up overall I/O system stats for this completed request */
         ioreq_event *temp = ioqueue_get_specific_request (OVERALLQUEUE, tmp);
         ioreq_event *temp2 = ioqueue_physical_access_done (OVERALLQUEUE, temp);
         ASSERT (temp2 != NULL);
         addtoextraq((event *)temp);
         temp = NULL;
         if (iodrivers[iodriverno]->type != STANDALONE) {
            iodriver_add_to_intrp_eventlist(intrp, io_done_notify(tmp), iodrivers[iodriverno]->scale);
         } else {
            io_done_notify (tmp);
         }
      } else if (numreqs > 0) {
         /* released requests are chained off tmp->next; issue each */
         for (i = 0; i < numreqs; i++) {
            del = tmp->next;
            tmp->next = del->next;
            del->next = NULL;
            del->type = IO_REQUEST_ARRIVE;
            del->flags |= MAPPED;
            skip |= (del->devno == devno);
            if (iodrivers[iodriverno]->type == STANDALONE) {
               del->time += simtime + 0.0000000001; /* to affect an ordering */
               addtointq((event *) del);
            } else {
               iodriver_add_to_intrp_eventlist(intrp, (event *) del, iodrivers[iodriverno]->scale);
            }
         }
      }
      addtoextraq((event *) tmp);
   }

   /* traced-queue-time model: don't self-schedule; just remember when the
    * queue went non-empty so the trace timing can be applied */
   if ((iodrivers[iodriverno]->consttime == IODRIVER_TRACED_QUEUE_TIMES) || (iodrivers[iodriverno]->consttime == IODRIVER_TRACED_BOTH_TIMES)) {
      if (ioqueue_get_number_in_queue(iodrivers[iodriverno]->devices[devno].queue) > 0) {
         iodrivers[iodriverno]->devices[devno].flag = 1;
         iodrivers[iodriverno]->devices[devno].lastevent = simtime;
      }
      return;
   }
   if (skip) {
      /* a just-released request for this device will drive the next access */
      return;
   }

   req = ioqueue_get_next_request(iodrivers[iodriverno]->devices[devno].queue);

schedule_next:
   if (req) {
      req->type = IO_ACCESS_ARRIVE;
      req->next = NULL;
      if (ctl) {
         ctl->numoutstanding++;
      }
      if (iodrivers[iodriverno]->type == STANDALONE) {
         req->time = simtime;
         addtointq((event *) req);
      } else {
         iodriver_add_to_intrp_eventlist(intrp, (event *) req, iodrivers[iodriverno]->scale);
      }
   }
}
/* Issues a request down the I/O path.  Handles three special cases first:
 * (1) the constant-access-time driver model, which skips the path entirely
 * and schedules a synthetic completion interrupt; (2) C700 controllers,
 * which serialize requests through a circular pending list and defer issue
 * while the controller is busy; (3) queueing controllers with a maximum
 * request size, where an oversized request is clipped and its full extent
 * remembered on the controller's circular `oversized` list. */
void iodriver_schedule (int iodriverno, ioreq_event *curr)
{
   ctlr *ctl;

#ifdef DEBUG_IODRIVER
   fprintf (outputfile, "%f: iodriver_schedule - devno %d, blkno %lld, bcount %d, read %d\n", simtime, curr->devno, curr->blkno, curr->bcount, (curr->flags & READ));
#endif

   ASSERT1(curr->type == IO_ACCESS_ARRIVE, "curr->type", curr->type);

   /* constant-time model: fabricate the completion interrupt directly */
   if ((iodrivers[iodriverno]->consttime != 0.0) && (iodrivers[iodriverno]->consttime != IODRIVER_TRACED_QUEUE_TIMES)) {
      curr->type = IO_INTERRUPT;
      if (iodrivers[iodriverno]->consttime > 0.0) {
         curr->time = iodrivers[iodriverno]->consttime;
      } else {
         /* negative consttime: per-request time carried in tempint2 (usecs) */
         curr->time = ((double) curr->tempint2 / (double) 1000);
      }
      curr->cause = COMPLETION;
      intr_request((event *) curr);
      return;
   }

   ctl = iodrivers[iodriverno]->devices[(curr->devno)].ctl;
   if ((ctl) && (ctl->flags & DRIVER_C700)) {
      /* ctl->pendio is a circular list; pendio points at the tail and
       * pendio->next is the head (the request currently being issued) */
      if ((ctl->pendio) && ((curr->devno != ctl->pendio->next->devno) || (curr->opid != ctl->pendio->next->opid) || (curr->blkno != ctl->pendio->next->blkno))) {
         /* a different request is in flight: queue this one at the tail */
         curr->next = ctl->pendio->next;
         ctl->pendio->next = curr;
         ctl->pendio = curr;
         return;
      } else if (ctl->pendio == NULL) {
         /* first pending request: record a copy as the one-entry ring */
         ctl->pendio = ioreq_copy(curr);
         ctl->pendio->next = ctl->pendio;
      }
      if (ctl->flags & DRIVER_CTLR_BUSY) {
         /* controller busy; the pending copy will be reissued later */
         addtoextraq((event *) curr);
         return;
      }
   }

   curr->busno = iodrivers[iodriverno]->devices[(curr->devno)].buspath.value;
   curr->slotno = iodrivers[iodriverno]->devices[(curr->devno)].slotpath.value;

   if (iodrivers[iodriverno]->devices[(curr->devno)].queuectlr != -1) {
      int ctlrno = iodrivers[iodriverno]->devices[(curr->devno)].queuectlr;
      ctl = &iodrivers[iodriverno]->ctlrs[ctlrno];
      if ((ctl->maxreqsize) && (curr->bcount > ctl->maxreqsize)) {
         /* clip to maxreqsize; stash the full request on the circular
          * oversized list so completion handling can issue the remainder */
         ioreq_event *totalreq = ioreq_copy(curr);
         curr->bcount = ctl->maxreqsize;
         if (ctl->oversized) {
            totalreq->next = ctl->oversized->next;
            ctl->oversized->next = totalreq;
         } else {
            totalreq->next = totalreq;
            ctl->oversized = totalreq;
         }
      }
   }

   iodriver_send_event_down_path(curr);
}