/*
 * Fan a request out to the flash elements (packages): the parent request
 * is split into page-sized, page-aligned sub-requests, each queued at the
 * element that owns its blocks.
 */
static void ssd_media_access_request_element (ioreq_event *curr)
{
    ssd_t *currdisk = getssd(curr->devno);
    int blkno = curr->blkno;
    int count = curr->bcount;

    /* **** CAREFUL ... HIJACKING tempint2 and tempptr2 fields here **** */
    curr->tempint2 = count;

    while (count != 0) {

        // find the element (package) to direct the request
        int elem_num = currdisk->timing_t->choose_element(currdisk->timing_t, blkno);
        ssd_element *elem = &currdisk->elements[elem_num];

        // create a new sub-request for the element
        ioreq_event *tmp = (ioreq_event *)getfromextraq();
        tmp->devno = curr->devno;
        tmp->busno = curr->busno;
        tmp->flags = curr->flags;
        tmp->blkno = blkno;
        tmp->bcount = ssd_choose_aligned_count(currdisk->params.page_size, blkno, count);
        ASSERT(tmp->bcount == currdisk->params.page_size);

        tmp->tempptr2 = curr;
        blkno += tmp->bcount;
        count -= tmp->bcount;

        elem->metadata.reqs_waiting ++;

        // add the request to the corresponding element's queue
        ioqueue_add_new_request(elem->queue, (ioreq_event *)tmp);
        ssd_activate_elem(currdisk, elem_num);
    }
}
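/*
 * A new I/O request has arrived at the device: validate the requested
 * block range, enqueue the request, and (if the channel is idle) start
 * servicing it immediately.
 */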
static void ssd_request_arrive (ioreq_event *curr)
{
    ssd_t *currdisk;

    // fprintf (outputfile, "Entering ssd_request_arrive: %12.6f\n", simtime);
    // fprintf (outputfile, "ssd = %d, blkno = %d, bcount = %d, read = %d\n", curr->devno, curr->blkno, curr->bcount, (READ & curr->flags));

    currdisk = getssd(curr->devno);

    // verify that the request is valid
    if ((curr->blkno < 0) || (curr->bcount <= 0) ||
        ((curr->blkno + curr->bcount) > currdisk->numblocks)) {
        fprintf(outputfile3, "Invalid set of blocks requested from ssd - blkno %d, bcount %d, numblocks %d\n",
            curr->blkno, curr->bcount, currdisk->numblocks);
        exit(1);
    }

    /* create a new request, set it up for initial interrupt */
    ioqueue_add_new_request(currdisk->queue, curr);

    if (currdisk->channel_activity == NULL) {
        curr = ioqueue_get_next_request(currdisk->queue);
        currdisk->busowned = ssd_get_busno(curr);
        currdisk->channel_activity = curr;
        currdisk->reconnect_reason = IO_INTERRUPT_ARRIVE;

        if (curr->flags & READ) {
            ssd_media_access_request (curr);
            ssd_check_channel_activity(currdisk);
        } else {
            curr->cause = READY_TO_TRANSFER;
            curr->type = IO_INTERRUPT_ARRIVE;
            ssd_send_event_up_path(curr, currdisk->bus_transaction_latency);
        }
    }
}
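/*
 * Called when a bus reconnect completes. Reads either hold the bus or
 * start the data transfer; writes either complete the request or start
 * the transfer, depending on why we reconnected.
 */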
static void ssd_reconnect_done (ioreq_event *curr)
{
    ssd_t *currdisk;

    // fprintf (outputfile, "Entering ssd_reconnect_done for disk %d: %12.6f\n", curr->devno, simtime);

    currdisk = getssd (curr->devno);
    ssd_assert_current_activity(currdisk, curr);

    if (curr->flags & READ) {
        if (currdisk->neverdisconnect) {
            /* Just holding on to bus; data transfer will be initiated when */
            /* media access is complete.                                    */
            addtoextraq((event *) curr);
            ssd_check_channel_activity (currdisk);
        } else {
            /* data transfer: curr->bcount, which is still set to original */
            /* requested value, indicates how many blks to transfer.       */
            curr->type = DEVICE_DATA_TRANSFER_COMPLETE;
            ssd_send_event_up_path(curr, (double) 0.0);
        }
    } else {
        if (currdisk->reconnect_reason == DEVICE_ACCESS_COMPLETE) {
            ssd_request_complete (curr);
        } else {
            /* data transfer: curr->bcount, which is still set to original */
            /* requested value, indicates how many blks to transfer.       */
            curr->type = DEVICE_DATA_TRANSFER_COMPLETE;
            ssd_send_event_up_path(curr, (double) 0.0);
        }
    }
}
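/*
 * Look up a device by name and return its generic header, optionally
 * reporting its global number, type-specific number, and device type.
 */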
struct device_header *getdevbyname(char *name,
                                   int *gdevnum,  /* global device number */
                                   int *ldevnum,  /* type-specific device number */
                                   int *type)     /* device type */
{
    int c;

    for (c = 0; c < disksim->deviceinfo->devs_len; c++) {
        if (!disksim->deviceinfo->devicenames[c]) continue;

        if (!strcmp(name, disksim->deviceinfo->devicenames[c])) {
            if (gdevnum) *gdevnum = c;
            if (ldevnum) *ldevnum = devicenos[c];
            if (type) *type = devicetypes[c];

            switch (devicetypes[c]) {
            case DEVICETYPE_DISK:
                return (struct device_header *)getdisk(devicenos[c]);
            case DEVICETYPE_SIMPLEDISK:
                return (struct device_header *)getsimpledisk(devicenos[c]);
            case DEVICETYPE_MEMS:
                return (struct device_header *)getmems(devicenos[c]);
            case DEVICETYPE_SSD:
                return (struct device_header *)getssd(devicenos[c]);
            }
        }
    }

    return 0;
}
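/*
 * Print aggregate queue, access-time, and bus statistics for a set of ssds.
 */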
void ssd_printsetstats (int *set, int setsize, char *sourcestr)
{
    int i;
    struct ioq *queueset[MAXDEVICES*SSD_MAX_ELEMENTS];
    int queuecnt = 0;
    int reqcnt = 0;
    char prefix[80];

    // using more secure functions
    sprintf_s4(prefix, 80, "%sssd ", sourcestr);

    for (i = 0; i < setsize; i++) {
        ssd_t *currdisk = getssd (set[i]);
        struct ioq *q = currdisk->queue;
        queueset[queuecnt] = q;
        queuecnt++;
        reqcnt += ioqueue_get_number_of_requests(q);
    }

    if (reqcnt == 0) {
        fprintf (outputfile, "\nNo ssd requests for members of this set\n\n");
        return;
    }

    ioqueue_printstats(queueset, queuecnt, prefix);
    ssd_acctime_printstats(set, setsize, prefix);
    ssd_other_printstats(set, setsize, prefix);
}
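/*
 * Main event dispatcher for the ssd: routes each incoming simulator
 * event to its handler based on the event type.
 */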
void ssd_event_arrive (ioreq_event *curr)
{
    ssd_t *currdisk;

    // fprintf (outputfile, "Entered ssd_event_arrive: time %f (simtime %f)\n", curr->time, simtime);
    // fprintf (outputfile, " - devno %d, blkno %d, type %d, cause %d, read = %d\n", curr->devno, curr->blkno, curr->type, curr->cause, curr->flags & READ);

    currdisk = getssd (curr->devno);

    switch (curr->type) {

        case IO_ACCESS_ARRIVE:
            curr->time = simtime + currdisk->overhead;
            curr->type = DEVICE_OVERHEAD_COMPLETE;
            addtointq((event *) curr);
            break;

        case DEVICE_OVERHEAD_COMPLETE:
            ssd_request_arrive(curr);
            break;

        case DEVICE_ACCESS_COMPLETE:
            ssd_access_complete (curr);
            break;

        case DEVICE_DATA_TRANSFER_COMPLETE:
            ssd_bustransfer_complete(curr);
            break;

        case IO_INTERRUPT_COMPLETE:
            ssd_interrupt_complete(curr);
            break;

        case IO_QLEN_MAXCHECK:
            /* Used only at initialization time to set up queue stuff */
            curr->tempint1 = -1;
            curr->tempint2 = ssd_get_maxoutstanding(curr->devno);
            curr->bcount = 0;
            break;

        case SSD_CLEAN_GANG:
            ssd_clean_gang_complete(curr);
            break;

        case SSD_CLEAN_ELEMENT:
            ssd_clean_element_complete(curr);
            break;

        default:
            fprintf(stderr, "Unrecognized event type at ssd_event_arrive\n");
            exit(1);
    }

    // fprintf (outputfile, "Exiting ssd_event_arrive\n");
}
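/*
 * Extract the bus number for this request at the device's depth in the
 * interconnect hierarchy.
 */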
int ssd_get_busno (ioreq_event *curr)
{
    ssd_t *currdisk;
    intchar busno;
    int depth;

    currdisk = getssd (curr->devno);
    busno.value = curr->busno;
    depth = currdisk->depth[0];
    return(busno.byte[depth]);
}
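/*
 * Per-block transfer time: the bus transfer time, bounded below by the
 * device's own block transfer time.
 */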
double ssd_get_blktranstime (ioreq_event *curr)
{
    ssd_t *currdisk;
    double tmptime;

    currdisk = getssd (curr->devno);
    tmptime = bus_get_transfer_time(ssd_get_busno(curr), 1, (curr->flags & READ));
    if (tmptime < currdisk->blktranstime) {
        tmptime = currdisk->blktranstime;
    }
    return(tmptime);
}
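/*
 * Handle a deferred element activation (SSD_ACTIVATE_ELEM): release the
 * event and kick the target element.
 */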
void ssd_activate(ioreq_event *curr)
{
    ssd_t *currdisk;
    int elem_num;

    currdisk = getssd(curr->devno);
    elem_num = curr->ssd_elem_num;

    // release this event
    addtoextraq((event *) curr);

    ssd_activate_elem(currdisk, elem_num);
}
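/*
 * Finalize queue statistics for every ssd and each of its elements.
 */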
void ssd_cleanstats (void)
{
    int i, j;

    for (i = 0; i < MAXDEVICES; i++) {
        ssd_t *currdisk = getssd (i);

        if (currdisk) {
            ioqueue_cleanstats(currdisk->queue);
            for (j = 0; j < currdisk->params.nelements; j++)
                ioqueue_cleanstats(currdisk->elements[j].queue);
        }
    }
}
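/*
 * Print access-time statistics for a set of ssds, if enabled.
 */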
static void ssd_acctime_printstats (int *set, int setsize, char *prefix)
{
    int i;
    statgen *statset[MAXDEVICES];

    if (device_printacctimestats) {
        for (i = 0; i < setsize; i++) {
            ssd_t *currdisk = getssd (set[i]);
            statset[i] = &currdisk->stat.acctimestats;
        }
        stat_print_set(statset, setsize, prefix);
    }
}
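/*
 * Print bus wait statistics accumulated across a set of ssds.
 */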
static void ssd_other_printstats (int *set, int setsize, char *prefix)
{
    int i;
    int numbuswaits = 0;
    double waitingforbus = 0.0;

    for (i = 0; i < setsize; i++) {
        ssd_t *currdisk = getssd (set[i]);
        numbuswaits += currdisk->stat.numbuswaits;
        waitingforbus += currdisk->stat.waitingforbus;
    }

    fprintf(outputfile, "%sTotal bus wait time: %f\n", prefix, waitingforbus);
    fprintf(outputfile, "%sNumber of bus waits: %d\n", prefix, numbuswaits);
}
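/*
 * Initialize (on first use) or reset the per-device statistics.
 */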
static void ssd_statinit (int devno, int firsttime)
{
    ssd_t *currdisk;

    currdisk = getssd (devno);
    if (firsttime) {
        stat_initialize(statdeffile, statdesc_acctimestats, &currdisk->stat.acctimestats);
    } else {
        stat_reset(&currdisk->stat.acctimestats);
    }

    currdisk->stat.requestedbus = 0.0;
    currdisk->stat.waitingforbus = 0.0;
    currdisk->stat.numbuswaits = 0;
}
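/*
 * Alternate event dispatcher covering both disksim I/O events and
 * ssd-internal events (element activation, cleaning, log-block cleaning).
 */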
void ssd_process_event(ioreq_event *curr)
{
    ssd_t *currdisk;

    currdisk = getssd (curr->devno);

    switch (curr->type) {

        // disksim IO events
        case DEVICE_OVERHEAD_COMPLETE:
            ssd_request_arrive(curr);
            break;

        case DEVICE_ACCESS_COMPLETE:
            ssd_access_complete (curr);
            break;

        case DEVICE_DATA_TRANSFER_COMPLETE:
            ssd_bustransfer_complete(curr);
            break;

        case IO_INTERRUPT_COMPLETE:
            ssd_interrupt_complete(curr);
            break;

        // SSD IO events
        // added by tiel
        case SSD_ACTIVATE_ELEM:
            ssd_activate(curr);
            break;

        case SSD_CLEAN_GANG:
            ssd_clean_gang_complete(curr);
            break;

        case SSD_CLEAN_ELEMENT:
            ssd_clean_element_complete(curr);
            break;

        case SSD_CLEAN_LOG:
            ssd_clean_logblock_complete(curr);
            break;

        default:
            fprintf(outputfile3, "Unrecognized event type at ssd_process_event: %d\n", curr->type);
            exit(1);
    }
}
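/*
 * Reset statistics for every ssd and each of its element queues.
 */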
void ssd_resetstats (void)
{
    int i;

    for (i = 0; i < MAXDEVICES; i++) {
        ssd_t *currdisk = getssd (i);
        if (currdisk) {
            int j;
            ioqueue_resetstats(currdisk->queue);
            for (j = 0; j < currdisk->params.nelements; j++) {
                ioqueue_resetstats(currdisk->elements[j].queue);
            }
            ssd_statinit(i, 0);
        }
    }
}
/*
 * cleaning in an element is over.
 */
static void ssd_clean_element_complete(ioreq_event *curr)
{
    ssd_t *currdisk;
    int elem_num;

    currdisk = getssd (curr->devno);
    elem_num = curr->ssd_elem_num;
    ASSERT(currdisk->elements[elem_num].media_busy == TRUE);

    // release this event
    addtoextraq((event *) curr);

    // mark the element free and activate it to serve the next set of requests
    currdisk->elements[elem_num].media_busy = FALSE;
    ssd_activate_elem(currdisk, elem_num);
}
/* completion disconnect done */
static void ssd_completion_done (ioreq_event *curr)
{
    ssd_t *currdisk = getssd (curr->devno);
    ssd_assert_current_activity(currdisk, curr);

    // fprintf (outputfile, "Entering ssd_completion for disk %d: %12.6f\n", currdisk->devno, simtime);

    addtoextraq((event *) curr);

    if (currdisk->busowned != -1) {
        bus_ownership_release(currdisk->busowned);
        currdisk->busowned = -1;
    }

    ssd_check_channel_activity (currdisk);
}
/*
 * ssd_send_event_up_path()
 *
 * Acquires the bus (if not already acquired), then uses bus_delay to
 * send the event up the path.
 *
 * If the bus is already owned by this device or can be acquired
 * immediately (interleaved bus), the event is sent immediately.
 * Otherwise, ssd_bus_ownership_grant will later send the event.
 */
static void ssd_send_event_up_path (ioreq_event *curr, double delay)
{
    ssd_t *currdisk;
    int busno;
    int slotno;

    // fprintf (outputfile, "ssd_send_event_up_path - devno %d, type %d, cause %d, blkno %d\n", curr->devno, curr->type, curr->cause, curr->blkno);

    currdisk = getssd (curr->devno);
    ssd_assert_current_activity(currdisk, curr);

    busno = ssd_get_busno(curr);
    slotno = currdisk->slotno[0];

    /* Put new request at head of buswait queue */
    curr->next = currdisk->buswait;
    currdisk->buswait = curr;

    curr->tempint1 = busno;
    curr->time = delay;

    if (currdisk->busowned == -1) {
        // fprintf (outputfile, "Must get ownership of the bus first\n");

        if (curr->next) {
            //fprintf(stderr,"Multiple bus requestors detected in ssd_send_event_up_path\n");
            /* This should be ok -- counting on the bus module to sequence 'em */
        }

        if (bus_ownership_get(busno, slotno, curr) == FALSE) {
            /* Remember when we started waiting (only place this is written) */
            currdisk->stat.requestedbus = simtime;
        } else {
            currdisk->busowned = busno;
            bus_delay(busno, DEVICE, curr->devno, delay, curr); /* Never for SCSI */
        }
    } else if (currdisk->busowned == busno) {
        //fprintf (outputfile, "Already own bus - so send it on up\n");
        bus_delay(busno, DEVICE, curr->devno, delay, curr);
    } else {
        fprintf(stderr, "Wrong bus owned for transfer desired\n");
        exit(1);
    }
}
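/*
 * Record an inbus/depth/slot binding for this ssd, growing its list of
 * attached input buses.
 */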
int ssd_set_depth (int devno, int inbusno, int depth, int slotno)
{
    ssd_t *currdisk;
    int cnt;

    currdisk = getssd (devno);
    assert(currdisk);

    cnt = currdisk->numinbuses;
    currdisk->numinbuses++;
    if ((cnt + 1) > MAXINBUSES) {
        fprintf(stderr, "Too many inbuses specified for ssd %d - %d\n", devno, (cnt+1));
        exit(1);
    }
    currdisk->inbuses[cnt] = inbusno;
    currdisk->depth[cnt] = depth;
    currdisk->slotno[cnt] = slotno;
    return(0);
}
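/*
 * Trivial logical-to-physical mapping reported to disksim: the block
 * number is returned as the "cylinder", with surface and block zero.
 */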
void ssd_get_mapping (int maptype, int devno, int blkno,
                      int *cylptr, int *surfaceptr, int *blkptr)
{
    ssd_t *currdisk = getssd (devno);

    if ((blkno < 0) || (blkno >= currdisk->numblocks)) {
        fprintf(stderr, "Invalid blkno at ssd_get_mapping: %d\n", blkno);
        exit(1);
    }

    if (cylptr) {
        *cylptr = blkno;
    }
    if (surfaceptr) {
        *surfaceptr = 0;
    }
    if (blkptr) {
        *blkptr = 0;
    }
}
/*
 * send completion up the line
 */
static void ssd_request_complete(ioreq_event *curr)
{
    ssd_t *currdisk;
    ioreq_event *x;

    // fprintf (outputfile, "Entering ssd_request_complete: %12.6f\n", simtime);

    currdisk = getssd (curr->devno);
    ssd_assert_current_activity(currdisk, curr);

    if ((x = ioqueue_physical_access_done(currdisk->queue, curr)) == NULL) {
        fprintf(stderr, "ssd_request_complete: ioreq_event not found by ioqueue_physical_access_done call\n");
        exit(1);
    }

    /* send completion interrupt */
    curr->type = IO_INTERRUPT_ARRIVE;
    curr->cause = COMPLETION;
    ssd_send_event_up_path(curr, currdisk->bus_transaction_latency);
}
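/*
 * A bus delay for this request has elapsed: unlink the request from the
 * buswait list and deliver it to the next level (or raise the interrupt
 * at depth 0).
 */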
void ssd_bus_delay_complete (int devno, ioreq_event *curr, int sentbusno)
{
    ssd_t *currdisk;
    intchar slotno;
    intchar busno;
    int depth;

    currdisk = getssd (devno);
    ssd_assert_current_activity(currdisk, curr);

    // fprintf (outputfile, "Entered ssd_bus_delay_complete\n");

    // EPW: I think the buswait logic doesn't do anything, is confusing, and risks
    // overusing the "next" field, although an item shouldn't currently be a queue.
    if (curr == currdisk->buswait) {
        currdisk->buswait = curr->next;
    } else {
        ioreq_event *tmp = currdisk->buswait;
        while ((tmp->next != NULL) && (tmp->next != curr)) {
            tmp = tmp->next;
        }
        if (tmp->next != curr) {
            fprintf(stderr, "Bus delay complete for unknown ssd request - devno %d, busno %d\n",
                devno, curr->busno);
            exit(1);
        }
        tmp->next = curr->next;
    }

    busno.value = curr->busno;
    slotno.value = curr->slotno;
    depth = currdisk->depth[0];
    slotno.byte[depth] = slotno.byte[depth] >> 4;
    curr->time = 0.0;

    if (depth == 0) {
        intr_request ((event *)curr);
    } else {
        bus_deliver_event(busno.byte[depth], slotno.byte[depth], curr);
    }
}
/*
 * ssd_bus_ownership_grant
 *
 * Calls bus_delay to handle the event that the disk has been granted the bus.
 * I believe this is always initiated by a call to ssd_send_event_up_path.
 */
void ssd_bus_ownership_grant (int devno, ioreq_event *curr, int busno, double arbdelay)
{
    ssd_t *currdisk;
    ioreq_event *tmp;

    currdisk = getssd (devno);
    ssd_assert_current_activity(currdisk, curr);

    tmp = currdisk->buswait;
    while ((tmp != NULL) && (tmp != curr)) {
        tmp = tmp->next;
    }
    if (tmp == NULL) {
        fprintf(stderr, "Bus ownership granted to unknown ssd request - devno %d, busno %d\n",
            devno, busno);
        exit(1);
    }

    currdisk->busowned = busno;
    currdisk->stat.waitingforbus += arbdelay;
    //ASSERT (arbdelay == (simtime - currdisk->stat.requestedbus));
    currdisk->stat.numbuswaits++;
    bus_delay(busno, DEVICE, devno, tmp->time, tmp);
}
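/*
 * Bus data transfer finished: a read is now complete; a write starts its
 * media access (disconnecting from the bus first unless neverdisconnect
 * is set).
 */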
static void ssd_bustransfer_complete (ioreq_event *curr)
{
    // fprintf (outputfile, "Entering ssd_bustransfer_complete for disk %d: %12.6f\n", curr->devno, simtime);

    if (curr->flags & READ) {
        ssd_request_complete (curr);
    } else {
        ssd_t *currdisk = getssd (curr->devno);
        ssd_assert_current_activity(currdisk, curr);

        if (currdisk->neverdisconnect == FALSE) {
            /* disconnect from bus */
            ioreq_event *tmp = ioreq_copy (curr);
            tmp->type = IO_INTERRUPT_ARRIVE;
            tmp->cause = DISCONNECT;
            ssd_send_event_up_path (tmp, currdisk->bus_transaction_latency);
            ssd_media_access_request (curr);
        } else {
            ssd_media_access_request (curr);
            ssd_check_channel_activity (currdisk);
        }
    }
}
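/*
 * Media access finished: dispatch to the element- or gang-level
 * completion handler according to the allocation pool logic.
 */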
static void ssd_access_complete(ioreq_event *curr)
{
    ssd_t *currdisk = getssd (curr->devno);

    switch (currdisk->params.alloc_pool_logic) {
        case SSD_ALLOC_POOL_PLANE:
        case SSD_ALLOC_POOL_CHIP:
            ssd_access_complete_element(curr);
            break;

        case SSD_ALLOC_POOL_GANG:
#if SYNC_GANG
            ssd_access_complete_gang_sync(curr);
#else
            ssd_access_complete_gang(curr);
#endif
            break;

        default:
            printf("Unknown alloc pool logic %d\n", currdisk->params.alloc_pool_logic);
            ASSERT(0);
    }
}
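/*
 * Media access finished at a single element: log the access, retire the
 * request from the element queue, propagate completion to the parent
 * request, and re-activate the element.
 */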
static void ssd_access_complete_element(ioreq_event *curr)
{
    ssd_t *currdisk;
    int elem_num;
    ssd_element *elem;
    ioreq_event *x;
    int lba;

    currdisk = getssd (curr->devno);
    elem_num = ssd_choose_element(currdisk->user_params, curr->blkno);
    ASSERT(elem_num == curr->ssd_elem_num);
    elem = &currdisk->elements[elem_num];

    lba = ssd_logical_blockno(curr->blkno, currdisk);
    if (curr->flags & READ) {
        fprintf(outputfile5, "%10.6f %d %d %d\n", simtime, lba, elem_num, curr->blkno);
    } else {
        fprintf(outputfile4, "%10.6f %d %d %d\n", simtime, lba, elem_num, curr->blkno);
    }

    if ((x = ioqueue_physical_access_done(elem->queue, curr)) == NULL) {
        fprintf(stderr, "ssd_access_complete: ioreq_event not found by ioqueue_physical_access_done call\n");
        exit(1);
    }

    ssd_dpower(currdisk, 0);

    // all the reqs are over
    if (ioqueue_get_reqoutstanding(elem->queue) == 0) {
        elem->media_busy = FALSE;
    }

    ssd_complete_parent(curr, currdisk);
    addtoextraq((event *) curr);
    ssd_activate_elem(currdisk, elem_num);
}
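/*
 * One-time ssd module initialization: set up per-device parameters,
 * queues, gangs, elements, and metadata for every configured ssd.
 */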
void ssd_initialize (void)
{
    int i, j;

    if (disksim->ssdinfo == NULL) {
        ssd_initialize_diskinfo ();
    }

    initialize_device_physical_parameters();   // initialize the physical parameters of the device
    ssd_setcallbacks();

    // fprintf(stdout, "MAXDEVICES = %d, numssds %d\n", MAXDEVICES, numssds);

    // vp - changing the MAXDEVICES in the below 'for' loop to numssds
    for (i = 0; i < numssds; i++) {
        int exp_size;
        ssd_t *currdisk = getssd (i);
        if (!currdisk) continue;

        ssd_alloc_queues(currdisk);   // also allocates the refresh queue

        currdisk->params.refresh_interval *= 1000;   // in ms
        currdisk->next_refresh_time = currdisk->params.refresh_interval;
        currdisk->params.refresh_service_time = currdisk->params.block_erase_latency +
            ((currdisk->params.page_write_latency + currdisk->params.page_read_latency) *
             currdisk->params.pages_per_block);

        if (currdisk->params.checkpoint_time == 0)
            currdisk->params.checkpoint_time = 0.5;
        currdisk->params.checkpoint_time *= (60 * 60 * 1000);   // hours to milliseconds

        // vp - some verifications:
        ssd_verify_parameters(currdisk);

        // vp - this was not initialized and caused so many bugs
        currdisk->devno = i;

        currdisk->numblocks = currdisk->params.nelements *
            currdisk->params.blocks_per_element *
            currdisk->params.pages_per_block *
            currdisk->params.page_size;

        currdisk->reconnect_reason = -1;
        addlisttoextraq ((event **) &currdisk->buswait);
        currdisk->busowned = -1;
        currdisk->completion_queue = NULL;

        /* hack to init queue structure */
        ioqueue_initialize (currdisk->queue, i);
        ssd_statinit(i, TRUE);

        currdisk->timing_t = ssd_new_timing_t(&currdisk->params);

        // initialize the gangs
        exp_size = ssd_elem_export_size(currdisk);
        for (j = 0; j < SSD_NUM_GANG(currdisk); j++) {
            int tot_pages = exp_size * currdisk->params.elements_per_gang;

            currdisk->gang_meta[j].busy = 0;
            currdisk->gang_meta[j].cleaning = 0;
            currdisk->gang_meta[j].reqs_waiting = 0;
            currdisk->gang_meta[j].oldest = 0;
            currdisk->gang_meta[j].pg2elem = (ssd_elem_number *)malloc(sizeof(ssd_elem_number) * tot_pages);
            memset(currdisk->gang_meta[j].pg2elem, 0, sizeof(ssd_elem_number) * tot_pages);
            ioqueue_initialize (currdisk->gang_meta[j].queue, i);
        }

        for (j = 0; j < currdisk->params.nelements; j++) {
            ssd_element *elem = &currdisk->elements[j];

            /* hack to init queue structure */
            ioqueue_initialize (elem->queue, i);
            elem->media_busy = FALSE;
            // vp - pins are also free
            elem->pin_busy = FALSE;

            // vp - initialize the planes in the element
            ssd_plane_init(elem, currdisk, i);

            // vp - initialize the ssd element metadata
            // FIXME: where to free these data?
            if (currdisk->params.write_policy == DISKSIM_SSD_WRITE_POLICY_OSR) {
                ssd_element_metadata_init(j, &(elem->metadata), currdisk);
            }

            // vp - initialize the stat structure
            memset(&elem->stat, 0, sizeof(ssd_element_stat));
        }
    }

    fprintf(stderr, "Finished loading state from snapshot\n");
}