static double ssd_write_one_active_page(int blkno, int count, int elem_num, ssd_t *s)
{
    double cost = 0;
    int cleaning_invoked = 0;
    ssd_element_metadata *metadata;
    ssd_power_element_stat *power_stat;
    int lbn;

    metadata = &(s->elements[elem_num].metadata);
    power_stat = &(s->elements[elem_num].power_stat);

    // get the logical page number corresponding to this blkno
    lbn = ssd_logical_pageno(blkno, s);

    // see if there are any free pages left inside the active block.
    // as per the osr design, the last page is used as a summary page.
    // so if the active_page is already pointing to the summary page,
    // then we need to find another free block to act as active block.
    if (ssd_last_page_in_block(metadata->active_block, s)) {
        // do we need to create more free blocks for future writes?
        if (ssd_start_cleaning(-1, elem_num, s)) {
            printf("We should not clean here ...\n");
            ASSERT(0);

            // if we're cleaning in the background, this should
            // not get executed
            if (s->params.cleaning_in_background) {
                exit(1);
            }

            cleaning_invoked = 1;
            cost += ssd_clean_element_no_copyback(elem_num, s);
        }

        // if we had invoked the cleaning, we must again check if we
        // need an active block before allocating one. this check is
        // needed because the above cleaning procedure might have
        // allocated new active blocks during the process of cleaning,
        // which might still have free pages for writing.
        if (!cleaning_invoked ||
            ssd_last_page_in_block(metadata->active_block, s)) {
            _ssd_alloc_active_block(-1, elem_num, s);
        }
    }

    // issue the write to the current active page
    cost += _ssd_write_page_osr(s, metadata, lbn, power_stat, blkno);
    cost += ssd_data_transfer_cost(s, count);
    ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER,
                              ssd_data_transfer_cost(s, s->params.page_size),
                              power_stat, s);
    return cost;
}
static double ssd_write_one_active_page(int blkno, int count, int elem_num, ssd_t *s)
{
    double cost = 0;
    ssd_element_metadata *metadata;
    ssd_power_element_stat *power_stat;
    int lbn;
    int offset;
    int tmp_block;
    int apn;

    metadata = &(s->elements[elem_num].metadata);
    power_stat = &(s->elements[elem_num].power_stat);

    // get the logical block number corresponding to this blkno
    lbn = ssd_logical_blockno(blkno, s);
    apn = blkno / s->params.page_size;
    offset = (apn / s->params.nelements) % s->params.pages_per_block;

    // check the lbn table
    if (metadata->lba_table[lbn] == -1) {
        // unmapped: map the logical block to the current active block
        metadata->lba_table[lbn] = metadata->active_block;
        cost += _ssd_write_page_osr(s, metadata, lbn, offset, power_stat);
        _ssd_alloc_active_block(-1, elem_num, s);
    } else {
        // already mapped: check the log block
        tmp_block = metadata->lba_table[lbn];
        if (metadata->block_usage[tmp_block].page[offset] == -1) {
            // the target page is still free inside the data block
            cost += _ssd_write_page_osr(s, metadata, lbn, offset, power_stat);
        } else {
            if (metadata->block_usage[tmp_block].log_index == -1) {
                // no log block yet: allocate one and redirect the write to it
                metadata->block_usage[tmp_block].log_index =
                    _ssd_alloc_log_block(-1, elem_num, s, tmp_block);
                cost += _ssd_write_log_block_osr(s, metadata, lbn, offset, power_stat);
            } else {
                if (_last_page_in_log_block(metadata, s, tmp_block)) {
                    // log block is full: merge it back and allocate a fresh one
                    cost += ssd_invoke_logblock_cleaning(elem_num, s, lbn);
                    metadata->block_usage[tmp_block].log_index =
                        _ssd_alloc_log_block(-1, elem_num, s, tmp_block);
                }
                cost += _ssd_write_log_block_osr(s, metadata, lbn, offset, power_stat);
            }
        }
    }

    // add the cost of moving the data across the serial pins
    cost += ssd_data_transfer_cost(s, count);
    ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER,
                              ssd_data_transfer_cost(s, s->params.page_size),
                              power_stat, s);
    return cost;
}
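/*
 * Editor's sketch: the rewritten write path above assumes a block-mapped FTL
 * with per-block log blocks, rather than the page-mapped lba_table of the
 * original OSR code. The shapes below are a minimal illustration of what the
 * code implies; only lba_table, page[], log_index, num_valid, and
 * active_block appear in the source -- the type names and
 * MAX_PAGES_PER_BLOCK are hypothetical.
 */
#define MAX_PAGES_PER_BLOCK 64              /* illustrative only */

typedef struct {
    int page[MAX_PAGES_PER_BLOCK];          /* lpn stored at each offset, -1 if free */
    int log_index;                          /* attached log block, -1 if none */
    int num_valid;                          /* number of valid pages */
    unsigned int plane_num;                 /* plane holding this block */
} block_usage_sketch_t;

typedef struct {
    int *lba_table;                         /* logical block -> physical block, -1 if unmapped */
    block_usage_sketch_t *block_usage;      /* per physical block bookkeeping */
    unsigned int active_block;              /* block taking fresh writes */
} ftl_metadata_sketch_t;

/*
 * The dispatch rule the write path follows: unmapped lbn -> map to the active
 * block and write; mapped with a free offset -> in-place page write; offset
 * already used -> redirect to the log block, allocating one (or merging a
 * full one) first.
 */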
double ssd_read_policy_simple(int count, ssd_t *s)
{
    double cost = s->params.page_read_latency;
    cost += ssd_data_transfer_cost(s, count);
    return cost;
}
double ssd_read_policy_simple(int count, ssd_t *s, ssd_power_element_stat *power_stat)
{
    double cost = 0;
    double cost2 = 0;

    cost = s->params.page_read_latency;
    ssd_power_flash_calculate(SSD_POWER_FLASH_READ, cost, power_stat, s);

    cost2 = ssd_data_transfer_cost(s, count);
    ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER, cost2, power_stat, s);

    return cost + cost2;
}
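/*
 * Editor's sketch: the modified read policy charges power separately for the
 * flash read and the bus transfer. A plausible reading of what a call like
 * ssd_power_flash_calculate(op, duration, power_stat, s) does is to
 * accumulate energy = power * time per operation class; the rating table and
 * all names below are hypothetical, not the simulator's actual API.
 */
#include <assert.h>

enum { PWR_FLASH_READ, PWR_BUS_XFER, PWR_OP_COUNT };

typedef struct {
    double energy_joules[PWR_OP_COUNT];     /* accumulated energy per op class */
} power_stat_sketch_t;

static void power_accumulate_sketch(int op, double duration_ms,
                                    const double watts[PWR_OP_COUNT],
                                    power_stat_sketch_t *stat)
{
    assert(op >= 0 && op < PWR_OP_COUNT);
    stat->energy_joules[op] += watts[op] * (duration_ms / 1000.0);
}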
/*
 * calculates the cost of reading and writing a block of data across planes.
 */
static double ssd_crossover_cost(ssd_t *s, ssd_element_metadata *metadata,
                                 int from_blk, int to_blk)
{
    if (ssd_same_plane_blocks(s, metadata, from_blk, to_blk)) {
        return 0;
    } else {
        double xfer_cost;

        // we need to read and write back across the pins
        xfer_cost = ssd_data_transfer_cost(s, s->params.page_size);
        return (2 * xfer_cost);
    }
}
/*
 * calculates the cost of reading and writing a block of data across planes.
 */
static double ssd_crossover_cost(ssd_t *s, ssd_element_metadata *metadata,
                                 ssd_power_element_stat *power_stat,
                                 int from_blk, int to_blk)
{
    if (ssd_same_plane_blocks(s, metadata, from_blk, to_blk)) {
        return 0;
    } else {
        double xfer_cost;

        // we need to read and write back across the pins
        xfer_cost = ssd_data_transfer_cost(s, s->params.page_size);
        ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER,
                                  2*xfer_cost, power_stat, s);
        return (2 * xfer_cost);
    }
}
static double ssd_issue_overlapped_ios(ssd_req **reqs, int total, int elem_num, ssd_t *s)
{
    double max_cost = 0;
    double parunit_op_cost[SSD_MAX_PARUNITS_PER_ELEM];
    double parunit_tot_cost[SSD_MAX_PARUNITS_PER_ELEM];
    ssd_element_metadata *metadata;
    ssd_power_element_stat *power_stat;
    int lbn;
    int offset;
    int i;
    int read_cycle = 0;
    listnode **parunits;

    // all the requests must be of the same type
    for (i = 1; i < total; i++) {
        ASSERT(reqs[i]->is_read == reqs[0]->is_read);
    }

    // is this a set of read requests?
    if (reqs[0]->is_read) {
        read_cycle = 1;
    }

    memset(parunit_tot_cost, 0, sizeof(double)*SSD_MAX_PARUNITS_PER_ELEM);

    // find the planes to which the reqs are to be issued
    metadata = &(s->elements[elem_num].metadata);
    power_stat = &(s->elements[elem_num].power_stat);
    parunits = ssd_pick_parunits(reqs, total, elem_num, metadata, s);

    // repeat until we've served all the requests
    while (1) {
        double read_xfer_cost = 0.0;
        double write_xfer_cost = 0.0;
        int active_parunits = 0;
        int op_count = 0;

        // do we still have any request to service?
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i++) {
            if (ll_get_size(parunits[i]) > 0) {
                active_parunits++;
            }
        }

        // no more requests -- get out
        if (active_parunits == 0) {
            break;
        }

        // clear these arrays for storing costs
        memset(parunit_op_cost, 0, sizeof(double)*SSD_MAX_PARUNITS_PER_ELEM);

        // begin a round of serving. we serve one request per
        // parallel unit. if a unit has more than one request
        // in the list, they have to be serialized.
        max_cost = 0;
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i++) {
            int size = ll_get_size(parunits[i]);
            if (size > 0) {
                int apn;

                // this parallel unit has a request to serve
                ssd_req *r;
                listnode *n = ll_get_nth_node(parunits[i], 0);

                op_count++;
                ASSERT(op_count <= active_parunits);

                // get the request
                r = (ssd_req *)n->data;
                lbn = ssd_logical_blockno(r->blk, s);
                apn = r->blk/s->params.page_size;
                offset = (apn/s->params.nelements)%(s->params.pages_per_block-1);
                parunit_op_cost[i] = 0;

                if (r->is_read) {
                    int block = metadata->lba_table[lbn];

                    // every read pays at least one page read
                    parunit_op_cost[i] = s->params.page_read_latency;
                    //Micky
                    ssd_power_flash_calculate(SSD_POWER_FLASH_READ, s->params.page_read_latency, power_stat, s);

                    // if the block is mapped and has a log block, the latest
                    // copy may sit there, so charge a second page read
                    if (block != -1 && metadata->block_usage[block].log_index != -1) {
                        parunit_op_cost[i] += s->params.page_read_latency;
                        ssd_power_flash_calculate(SSD_POWER_FLASH_READ, s->params.page_read_latency, power_stat, s);
                        s->spare_read++;
                    }

                    //tiel xfer cost
                    read_xfer_cost += ssd_data_transfer_cost(s, r->count);
                } else {
                    // for write
                    int plane_num = r->plane_num;

                    // issue the write to the current active page.
                    // we need to transfer the data across the serial pins for write.
                    metadata->active_block = metadata->plane_meta[plane_num].active_block;

                    // check the lbn table
                    if (metadata->lba_table[lbn] == -1) {
                        metadata->lba_table[lbn] = metadata->active_block;
                        parunit_op_cost[i] = _ssd_write_page_osr(s, metadata, lbn, offset, power_stat);
                        _ssd_alloc_active_block(plane_num, elem_num, s);
                    } else {
                        // if already mapped, check the log block
                        int tmp_block = metadata->lba_table[lbn];
                        if (metadata->block_usage[tmp_block].page[offset] == -1) {
                            parunit_op_cost[i] = _ssd_write_page_osr(s, metadata, lbn, offset, power_stat);
                        } else {
                            if (metadata->block_usage[tmp_block].log_index == -1) {
                                metadata->block_usage[tmp_block].log_index = _ssd_alloc_log_block(plane_num, elem_num, s, tmp_block);
                                parunit_op_cost[i] = _ssd_write_log_block_osr(s, metadata, lbn, offset, power_stat);
                            } else {
                                if (_last_page_in_log_block(metadata, s, tmp_block)) {
                                    int new_block;
                                    parunit_op_cost[i] += ssd_invoke_logblock_cleaning(elem_num, s, lbn);

                                    // the cleaning may have remapped the logical
                                    // block, so look it up again
                                    new_block = metadata->lba_table[lbn];
                                    if (metadata->block_usage[new_block].log_index == -1) {
                                        metadata->block_usage[new_block].log_index = _ssd_alloc_log_block(plane_num, elem_num, s, new_block);
                                    }
                                } else {
                                    parunit_op_cost[i] += _ssd_write_log_block_osr(s, metadata, lbn, offset, power_stat);
                                }
                            }
                        }
                    }
                    write_xfer_cost += ssd_data_transfer_cost(s, r->count);
                }

                ASSERT(r->count <= s->params.page_size);

                // calc the cost: the access time should be something like this
                // for read
                if (read_cycle) {
                    if (SSD_PARUNITS_PER_ELEM(s) > 4) {
                        printf("modify acc time here ...\n");
                        ASSERT(0);
                    }
                    if (op_count == 1) {
                        r->acctime = parunit_op_cost[i] + read_xfer_cost;
                        r->schtime = parunit_tot_cost[i] + r->acctime;
                    } else {
                        r->acctime = ssd_data_transfer_cost(s, r->count);
                        r->schtime = parunit_tot_cost[i] + read_xfer_cost + parunit_op_cost[i];
                    }
                } else {
                    // for write
                    r->acctime = parunit_op_cost[i];
                    r->schtime = parunit_tot_cost[i] + write_xfer_cost + r->acctime;
                }

                // find the maximum cost for this round of operations
                if (max_cost < r->schtime) {
                    max_cost = r->schtime;
                }

                // release the node from the linked list
                ll_release_node(parunits[i], n);
            }
        }

        // account the power of this round's bus transfers
        ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER, read_xfer_cost, power_stat, s);
        ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER, write_xfer_cost, power_stat, s);

        // we can start the next round of operations only after all
        // the operations in the first round are over because we're
        // limited by the one set of pins to all the parunits
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i++) {
            parunit_tot_cost[i] = max_cost;
        }
    }

    for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i++) {
        ll_release(parunits[i]);
    }
    free(parunits);

    power_stat->acc_time += max_cost;
    return max_cost;
}
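/*
 * Editor's sketch: the scheduling loop above serves one request per parallel
 * unit per round, then releases the next round only at the maximum finish
 * time of the current one, because all parunits share one set of pins. The
 * toy replay below shows the barrier with made-up costs.
 */
#include <stdio.h>

int main(void)
{
    double round1[4] = {0.20, 0.20, 0.35, 0.20};  /* per-unit op cost, round 1 */
    double round2[4] = {0.20, 0.00, 0.00, 0.00};  /* only unit 0 has a 2nd request */
    double barrier = 0, max_cost = 0;
    int i;

    /* round 1: every unit runs in parallel; the round ends at the slowest */
    for (i = 0; i < 4; i++)
        if (round1[i] > barrier) barrier = round1[i];

    /* round 2: each remaining request starts at the barrier
     * (parunit_tot_cost[i] = max_cost for all i) */
    for (i = 0; i < 4; i++)
        if (round2[i] > 0 && barrier + round2[i] > max_cost)
            max_cost = barrier + round2[i];

    printf("second request on unit 0 finishes at %.2f (0.35 + 0.20)\n", max_cost);
    return 0;
}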
static double ssd_issue_overlapped_ios(ssd_req **reqs, int total, int elem_num, ssd_t *s)
{
    double max_cost = 0;
    double parunit_op_cost[SSD_MAX_PARUNITS_PER_ELEM];
    double parunit_tot_cost[SSD_MAX_PARUNITS_PER_ELEM];
    ssd_element_metadata *metadata;
    int lpn;
    int i;
    int read_cycle = 0;
    listnode **parunits;

    // all the requests must be of the same type
    for (i = 1; i < total; i++) {
        ASSERT(reqs[i]->is_read == reqs[0]->is_read);
    }

    // is this a set of read requests?
    if (reqs[0]->is_read) {
        read_cycle = 1;
    }

    memset(parunit_tot_cost, 0, sizeof(double)*SSD_MAX_PARUNITS_PER_ELEM);

    // find the planes to which the reqs are to be issued
    metadata = &(s->elements[elem_num].metadata);
    parunits = ssd_pick_parunits(reqs, total, elem_num, metadata, s);

    // repeat until we've served all the requests
    while (1) {
        int active_parunits = 0;
        int op_count = 0;

        // do we still have any request to service?
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i++) {
            if (ll_get_size(parunits[i]) > 0) {
                active_parunits++;
            }
        }

        // no more requests -- get out
        if (active_parunits == 0) {
            break;
        }

        // clear these arrays for storing costs
        memset(parunit_op_cost, 0, sizeof(double)*SSD_MAX_PARUNITS_PER_ELEM);

        // begin a round of serving. we serve one request per
        // parallel unit. if a unit has more than one request
        // in the list, they have to be serialized.
        max_cost = 0;
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i++) {
            int size = ll_get_size(parunits[i]);
            if (size > 0) {
                // this parallel unit has a request to serve
                ssd_req *r;
                listnode *n = ll_get_nth_node(parunits[i], 0);

                op_count++;
                ASSERT(op_count <= active_parunits);

                // get the request
                r = (ssd_req *)n->data;
                lpn = ssd_logical_pageno(r->blk, s);

                if (r->is_read) {
                    parunit_op_cost[i] = s->params.page_read_latency;
                } else {
                    int plane_num = r->plane_num;

                    // if this is the last page on the block, allocate a new block
                    if (ssd_last_page_in_block(metadata->plane_meta[plane_num].active_page, s)) {
                        _ssd_alloc_active_block(plane_num, elem_num, s);
                    }

                    // issue the write to the current active page.
                    // we need to transfer the data across the serial pins for write.
                    metadata->active_page = metadata->plane_meta[plane_num].active_page;
                    //printf("elem %d plane %d ", elem_num, plane_num);
                    parunit_op_cost[i] = _ssd_write_page_osr(s, metadata, lpn);
                }

                ASSERT(r->count <= s->params.page_size);

                // calc the cost: the access time should be something like this
                // for read
                if (read_cycle) {
                    if (SSD_PARUNITS_PER_ELEM(s) > 4) {
                        printf("modify acc time here ...\n");
                        ASSERT(0);
                    }
                    if (op_count == 1) {
                        r->acctime = parunit_op_cost[i] + ssd_data_transfer_cost(s, s->params.page_size);
                        r->schtime = parunit_tot_cost[i] + (op_count-1)*ssd_data_transfer_cost(s, s->params.page_size) + r->acctime;
                    } else {
                        r->acctime = ssd_data_transfer_cost(s, s->params.page_size);
                        r->schtime = parunit_tot_cost[i] + op_count*ssd_data_transfer_cost(s, s->params.page_size) + parunit_op_cost[i];
                    }
                } else {
                    // for write
                    r->acctime = parunit_op_cost[i] + ssd_data_transfer_cost(s, s->params.page_size);
                    r->schtime = parunit_tot_cost[i] + (op_count-1)*ssd_data_transfer_cost(s, s->params.page_size) + r->acctime;
                }

                // find the maximum cost for this round of operations
                if (max_cost < r->schtime) {
                    max_cost = r->schtime;
                }

                // release the node from the linked list
                ll_release_node(parunits[i], n);
            }
        }

        // we can start the next round of operations only after all
        // the operations in the first round are over because we're
        // limited by the one set of pins to all the parunits
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i++) {
            parunit_tot_cost[i] = max_cost;
        }
    }

    for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i++) {
        ll_release(parunits[i]);
    }
    free(parunits);

    return max_cost;
}
/*
 * implements the simple write policy written by ted.
 */
static double ssd_write_policy_simple(int blkno, int count, int elem_num, ssd_t *s)
{
    double cost;
    int pages_moved;        // for statistics
    struct my_timing_t *tt = (struct my_timing_t *)(s->timing_t);

    // the page position on-chip is calculated as follows
    // (N is the number of chips per SSD; integer division is assumed):
    //
    //   absolute page number (APN) = block number / page_size
    //   PN = ((APN - (APN % (Stride * N))) / N) + (APN % Stride)

    int blockpos, lastpos;
    int ppb = tt->params->pages_per_block;
    int last_pn = tt->next_write_page[elem_num];
    int apn = blkno/tt->params->page_size;
    int pn = ((apn - (apn % (tt->params->element_stride_pages*tt->params->nelements)))/
              tt->params->nelements) + (apn % tt->params->element_stride_pages);

    blockpos = pn % ppb;
    lastpos = last_pn % ppb;

    if (last_pn >= 0 && pn >= last_pn && (pn/ppb == last_pn/ppb)) {
        // the cost of one page write
        cost = tt->params->page_write_latency;

        // plus the cost of copying the intermediate pages
        cost += (pn-last_pn) * (tt->params->page_read_latency + tt->params->page_write_latency);

        // stat - pages moved other than the page we wrote
        pages_moved = pn - last_pn;
    } else {
        // the cost of one page write
        cost = tt->params->page_write_latency;

        // plus the cost of an erase
        cost += tt->params->block_erase_latency;

        // stat
        s->elements[elem_num].stat.num_clean++;

        // plus the cost of copying the blocks prior to this pn
        cost += blockpos * (tt->params->page_read_latency + tt->params->page_write_latency);

        // stat
        pages_moved = blockpos;

        if (last_pn >= 0) {
            // plus the cost of copying remaining pages in last written block
            cost += (ppb-lastpos) * (tt->params->page_read_latency + tt->params->page_write_latency);

            // stat
            pages_moved += ppb - lastpos;
        }
    }

    // set the next page to write; if this write filled the block,
    // there is no next free page in it
    pn = pn + 1;
    if ((pn % ppb) == 0)
        tt->next_write_page[elem_num] = -1;
    else
        tt->next_write_page[elem_num] = pn;

    // stat
    s->elements[elem_num].stat.pages_moved += pages_moved;

    // plus the cost of moving the required sectors on/off chip
    cost += ssd_data_transfer_cost(s, count);

    return cost;
}
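/*
 * Editor's sketch: the on-chip page-position formula in the comment above is
 * easiest to check with concrete numbers. The parameters below (4 elements,
 * stride of 2 pages, 8-sector pages) are illustrative only.
 */
#include <stdio.h>

int main(void)
{
    int N = 4, stride = 2, page_size = 8;
    int blkno;

    for (blkno = 0; blkno < 16 * page_size; blkno += page_size) {
        int apn = blkno / page_size;
        int pn = ((apn - (apn % (stride * N))) / N) + (apn % stride);
        printf("apn %2d -> on-chip pn %d\n", apn, pn);
    }
    /* apn 0,1 map to pn 0,1 on chip 0; apn 2,3 to pn 0,1 on chip 1; ...
     * apn 8,9 wrap back to chip 0 at pn 2,3: stride-sized runs rotate
     * across the N chips, advancing each chip by `stride` pages per round. */
    return 0;
}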
/*
 * writes a single page into the active block of a ssd element.
 * this code assumes that there is a valid active page where the write can go
 * without invoking new cleaning.
 */
double _ssd_write_page_osr(ssd_t *s, ssd_element_metadata *metadata, int lpn)
{
    double cost;
    unsigned int active_page = metadata->active_page;
    unsigned int active_block = SSD_PAGE_TO_BLOCK(active_page, s);
    unsigned int pagepos_in_block = active_page % s->params.pages_per_block;
    unsigned int active_plane = metadata->block_usage[active_block].plane_num;

    // see if this logical page no is already mapped.
    if (metadata->lba_table[lpn] != -1) {
        // since the lpn is going to be written to a new location,
        // its previous copy is invalid now. therefore reduce the block
        // usage of the previous copy's block.
        unsigned int prev_page = metadata->lba_table[lpn];
        unsigned int prev_block = SSD_PAGE_TO_BLOCK(prev_page, s);
        unsigned int pagepos_in_prev_block = prev_page % s->params.pages_per_block;
        unsigned int prev_plane = metadata->block_usage[prev_block].plane_num;

        // make sure the version numbers are correct
        ssd_assert_page_version(prev_page, active_page, metadata, s);

        if (metadata->block_usage[prev_block].page[pagepos_in_prev_block] != lpn) {
            fprintf(stderr, "Error: lpn %d not found in prev block %d pos %d\n",
                    lpn, prev_block, pagepos_in_prev_block);
            ASSERT(0);
        } else {
            metadata->block_usage[prev_block].page[pagepos_in_prev_block] = -1;
            metadata->block_usage[prev_block].num_valid--;
            metadata->plane_meta[prev_plane].valid_pages--;
            ssd_assert_valid_pages(prev_plane, metadata, s);
        }
    } else {
        fprintf(stderr, "Error: This case should not be executed\n");
    }

    // add the entry to the lba table
    metadata->lba_table[lpn] = active_page;

    // increment the usage count on the active block
    metadata->block_usage[active_block].page[pagepos_in_block] = lpn;
    metadata->block_usage[active_block].num_valid++;
    metadata->plane_meta[active_plane].valid_pages++;
    ssd_assert_valid_pages(active_plane, metadata, s);

    // some sanity checking
    if (metadata->block_usage[active_block].num_valid >= s->params.pages_per_block) {
        fprintf(stderr, "Error: len %d of block %d is greater than or equal to pages per block %d\n",
                metadata->block_usage[active_block].num_valid, active_block, s->params.pages_per_block);
        exit(1);
    }

    // add the cost of the write
    cost = s->params.page_write_latency;
    //printf("lpn %d active pg %d\n", lpn, active_page);

    // go to the next free page
    metadata->active_page = active_page + 1;
    metadata->plane_meta[active_plane].active_page = metadata->active_page;

    // if this is the last data page on the block, let us write the
    // summary page also
    if (ssd_last_page_in_block(metadata->active_page, s)) {
        // cost of transferring the summary page data
        cost += ssd_data_transfer_cost(s, SSD_SECTORS_PER_SUMMARY_PAGE);

        // cost of writing the summary page data
        cost += s->params.page_write_latency;

        // seal the last summary page. since we use the summary page
        // as a metadata, we don't count it as a valid data page.
        metadata->block_usage[active_block].page[s->params.pages_per_block - 1] = -1;
        metadata->block_usage[active_block].state = SSD_BLOCK_SEALED;
        //printf("SUMMARY: lpn %d active pg %d\n", lpn, active_page);
    }

    return cost;
}
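/*
 * Editor's sketch: the OSR write path addresses flash with a flat physical
 * page number and derives block and offset from it; SSD_PAGE_TO_BLOCK is
 * presumably the plain division shown here. PAGES_PER_BLOCK is illustrative;
 * the simulator reads it from its parameter file.
 */
#include <assert.h>

#define PAGES_PER_BLOCK 64

static unsigned int page_to_block(unsigned int page)     { return page / PAGES_PER_BLOCK; }
static unsigned int page_pos_in_block(unsigned int page) { return page % PAGES_PER_BLOCK; }

/* the final page of each block is the summary page, so a write pointer
 * sitting there means the block has no data pages left */
static int last_page_in_block_sketch(unsigned int page)
{
    return page_pos_in_block(page) == PAGES_PER_BLOCK - 1;
}

int main(void)
{
    assert(page_to_block(130) == 2 && page_pos_in_block(130) == 2);
    assert(last_page_in_block_sketch(191));   /* page 191 = block 2, offset 63 */
    return 0;
}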
static void ssd_media_access_request_element(ioreq_event *curr)
{
    ssd_t *currdisk = getssd(curr->devno);
    int blkno = curr->blkno;
    int count = curr->bcount;
    //added by tiel
    int i = 0;
    double max_threshold = currdisk->params.nelements * currdisk->params.page_size;

    /* **** CAREFUL ... HIJACKING tempint2 and tempptr2 fields here **** */
    curr->tempint2 = count;

    while (count > 0) {
        // find the element (package) to direct the request
        int elem_num = ssd_choose_element(currdisk->user_params, blkno);
        ssd_element *elem = &currdisk->elements[elem_num];

        // create a new sub-request for the element
        ioreq_event *tmp = (ioreq_event *)getfromextraq();
        tmp->devno = curr->devno;
        tmp->busno = curr->busno;
        tmp->flags = curr->flags;
        tmp->blkno = blkno;
        tmp->bcount = ssd_choose_aligned_count(currdisk->params.page_size, blkno, count);
        /*if(curr->bcount > max_threshold)
            tmp->tempint1 = 1;*/
        //ASSERT(tmp->bcount == currdisk->params.page_size);
        tmp->tempptr2 = curr;
        blkno += tmp->bcount;
        count -= tmp->bcount;

        elem->metadata.reqs_waiting++;

        // add the request to the corresponding element's queue
        ioqueue_add_new_request(elem->queue, (ioreq_event *)tmp);

        // added by tiel
        // activate request create simtime, type, elem_num
        {
            int ch_num;
            double wtime, ctime;
            ioreq_event *temp = (ioreq_event *)getfromextraq();
            temp->type = SSD_ACTIVATE_ELEM;

            // insert channel/way delay
            // channel number = chip number % number of channels
            ch_num = elem_num % currdisk->params.nchannel;
            wtime = currdisk->CH[ch_num].arrival_time + ssd_data_transfer_cost(currdisk, currdisk->params.page_size);
            ctime = simtime + (i * currdisk->params.channel_switch_delay);

            if (currdisk->params.nchannel == currdisk->params.nelements) {
                temp->time = ctime;
                currdisk->CH[ch_num].ccount++;
            } else if (simtime > wtime || currdisk->CH[ch_num].flag == -1) {
                // channel data setting
                currdisk->CH[ch_num].arrival_time = ctime;
                currdisk->CH[ch_num].flag = curr->flags;
                temp->time = ctime;
                currdisk->CH[ch_num].ccount++;
            } else if (currdisk->CH[ch_num].flag == READ) {
                if (wtime > ctime) {
                    if (curr->flags == READ) {
                        temp->time = wtime;
                    } else {
                        temp->time = wtime + currdisk->params.page_read_latency;
                    }
                    currdisk->CH[ch_num].wcount++;
                } else {
                    temp->time = ctime;
                    currdisk->CH[ch_num].ccount++;
                }
                currdisk->CH[ch_num].arrival_time = temp->time;
                currdisk->CH[ch_num].flag = curr->flags;
            } else if (currdisk->CH[ch_num].flag == WRITE) {
                if (wtime > ctime) {
                    temp->time = wtime;
                    currdisk->CH[ch_num].wcount++;
                } else {
                    temp->time = ctime;
                    currdisk->CH[ch_num].ccount++;
                }
                currdisk->CH[ch_num].arrival_time = temp->time;
                currdisk->CH[ch_num].flag = curr->flags;
            }

            temp->ssd_elem_num = elem_num;
            addtointq((event *)temp);
            i++;
        }
    }
}
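/*
 * Editor's sketch: the channel gating above lets a sub-request start at its
 * nominal time ctime unless the channel is still draining a prior transfer
 * until wtime = arrival_time + one page transfer; a WRITE queued behind a
 * pending READ additionally waits out the read latency. All numbers below
 * are illustrative.
 */
#include <stdio.h>

int main(void)
{
    double xfer = 0.050, read_lat = 0.025, switch_delay = 0.001;
    double simtime = 10.000, arrival = 10.040;    /* channel busy until 10.090 */
    double wtime = arrival + xfer;
    double ctime = simtime + 0 * switch_delay;    /* first sub-request, i = 0 */

    /* READ following a READ: wait only for the bus to drain */
    double t_read  = (wtime > ctime) ? wtime : ctime;
    /* WRITE following a READ: also wait out the pending read latency */
    double t_write = (wtime > ctime) ? wtime + read_lat : ctime;

    printf("read starts at %.3f, write starts at %.3f\n", t_read, t_write);
    return 0;
}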
/*
 * writes a single page into the active block of a ssd element.
 * this code assumes that there is a valid active page where the write can go
 * without invoking new cleaning.
 */
double _ssd_write_page_osr(ssd_t *s, ssd_element_metadata *metadata, int lbn,
                           ssd_power_element_stat *power_stat, int blkno)
{
    double cost;
    unsigned int active_block = metadata->active_block;
    unsigned int active_plane = metadata->block_usage[active_block].plane_num;
    unsigned int p_index = (blkno/s->params.page_size) % s->params.pages_per_block;
    int prev_num_valid = 0;
    int i;

    // see if this logical block no is already mapped.
    if (metadata->lba_table[lbn] != -1) {
        // since the logical block is going to be written to a new location,
        // its previous copy is invalid now. migrate the page map of the
        // previous block to the new active block and reduce the block
        // usage of the previous copy's block.
        unsigned int prev_block = metadata->lba_table[lbn];
        unsigned int prev_plane = metadata->block_usage[prev_block].plane_num;

        for (i = 0; i < (s->params.pages_per_block-1); i++) {
            metadata->block_usage[active_block].page[i] = metadata->block_usage[prev_block].page[i];
            metadata->block_usage[prev_block].page[i] = -1;
        }

        prev_num_valid = metadata->block_usage[prev_block].num_valid;
        metadata->block_usage[prev_block].num_valid = 0;
        metadata->plane_meta[prev_plane].valid_pages -= prev_num_valid;
        ssd_assert_valid_pages(prev_plane, metadata, s);
    } else {
        fprintf(stderr, "Error: This case should not be executed\n");
    }

    // add the entry to the lba table
    metadata->lba_table[lbn] = active_block;

    // update the usage count on the active block
    if (metadata->block_usage[active_block].page[p_index] != -1) {
        // the target offset was already valid: the write replaces it,
        // so the number of valid pages stays at prev_num_valid
        int temp = 0;
        metadata->block_usage[active_block].num_valid = prev_num_valid;
        metadata->plane_meta[active_plane].valid_pages += prev_num_valid;

        // add the cost: one program per merged page, plus the bus
        // transfers for the pages copied in (counts are in sectors)
        cost = s->params.page_write_latency * prev_num_valid;
        ssd_power_flash_calculate(SSD_POWER_FLASH_WRITE, cost, power_stat, s);
        temp = prev_num_valid - 1;
        cost += ssd_data_transfer_cost(s, temp * 8);
        ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER,
                                  ssd_data_transfer_cost(s, temp * 8), power_stat, s);
        //ssd_assert_valid_pages(active_plane, metadata, s);
    } else {
        // the target offset was free: the write adds one more valid page
        metadata->block_usage[active_block].num_valid = prev_num_valid + 1;
        metadata->plane_meta[active_plane].valid_pages += prev_num_valid + 1;

        // add the cost of the write
        cost = s->params.page_write_latency * (prev_num_valid + 1);
        //@20090831-Micky:add the power consumption of the write
        ssd_power_flash_calculate(SSD_POWER_FLASH_WRITE, cost, power_stat, s);
        cost += ssd_data_transfer_cost(s, prev_num_valid * 8);
        ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER,
                                  ssd_data_transfer_cost(s, prev_num_valid * 8), power_stat, s);
    }

    // some sanity checking
    if (metadata->block_usage[active_block].num_valid >= s->params.pages_per_block) {
        fprintf(stderr, "Error: len %d of block %d is greater than or equal to pages per block %d\n",
                metadata->block_usage[active_block].num_valid, active_block, s->params.pages_per_block);
        exit(1);
    }

    // seal the last summary page. since we use the summary page
    // as a metadata, we don't count it as a valid data page.
    metadata->block_usage[active_block].page[s->params.pages_per_block - 1] = -1;
    metadata->block_usage[active_block].state = SSD_BLOCK_SEALED;

    // cost of transferring the summary page data
    cost += ssd_data_transfer_cost(s, SSD_SECTORS_PER_SUMMARY_PAGE);
    ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER,
                              ssd_data_transfer_cost(s, SSD_SECTORS_PER_SUMMARY_PAGE), power_stat, s);

    // cost of writing the summary page data
    cost += s->params.page_write_latency;
    //@20090831-Micky:add the power consumption of the write
    ssd_power_flash_calculate(SSD_POWER_FLASH_WRITE, s->params.page_write_latency, power_stat, s);

    return cost;
}
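/*
 * Editor's sketch: the merge-write cost composed above is easier to follow
 * with numbers. The latencies below are illustrative placeholders, not the
 * simulator's parameters; prev_num_valid = 5 assumes five pages were valid
 * in the previous copy of the block.
 */
#include <stdio.h>

int main(void)
{
    double page_write   = 0.200;   /* ms per page program */
    double page_xfer    = 0.100;   /* ms to move one page across the pins */
    double summary_xfer = 0.025;   /* ms for the smaller summary page data */
    int prev_num_valid  = 5;

    /* overwrite case: one program per merged valid page, plus bus transfers
     * for the pages copied in (all but the newly written one) */
    double cost = page_write * prev_num_valid
                + page_xfer * (prev_num_valid - 1);

    /* sealing the block: summary page transfer plus one more program */
    cost += summary_xfer + page_write;

    printf("merge-write cost: %.3f ms\n", cost);    /* 1.625 ms */
    return 0;
}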