/*
 * Fully clean one block in the given plane of the given element.
 * Picks a victim block (ssd_pick_block_to_clean may also perform a
 * cold-data migration, whose cost comes back through mcost), then
 * cleans it completely.
 *
 * Returns the cleaning cost plus any migration cost.
 */
static double ssd_clean_block_fully(int plane_num, int elem_num, ssd_t *s)
{
    int blk;
    double cost = 0;
    double mcost = 0;
    ssd_element_metadata *metadata = &s->elements[elem_num].metadata;
    plane_metadata *pm = &metadata->plane_meta[plane_num];

    // BUG FIX: the original assertion wrote '=' instead of '==':
    //     ASSERT(... && (pm->clean_in_block = -1));
    // which assigned -1 to clean_in_block as a side effect and made the
    // second conjunct always true (-1 is nonzero). The intent is clearly
    // a precondition check: no cleaning may be in progress on this plane.
    ASSERT((pm->clean_in_progress == 0) && (pm->clean_in_block == -1));

    blk = ssd_pick_block_to_clean(plane_num, elem_num, &mcost, metadata, s);
    ASSERT(metadata->block_usage[blk].plane_num == plane_num);

    // NOTE(review): this call passes 5 args while ssd_refresh_block()
    // passes 6 (an extra is_refresh flag) -- verify against the prototype
    // of _ssd_clean_block_fully; one of the call sites is likely stale.
    cost = _ssd_clean_block_fully(blk, plane_num, elem_num, metadata, s);

    return (cost + mcost);
}
/*
 * pick a random block with at least 1 empty page slot and clean it
 */
static double ssd_clean_blocks_random(int plane_num, int elem_num, ssd_t *s)
{
    double cost = 0;

#if 1
    // NOTE(review): this cleaning policy is currently disabled; selecting
    // it aborts the whole simulation. The #else branch below is stale --
    // e.g. it calls _ssd_clean_block_fully() with 4 arguments while the
    // live call sites pass 5 -- and must be fixed before re-enabling.
    printf("ssd_clean_blocks_random: not yet fixed\n");
    exit(1);
#else
    long blk = 0;
    ssd_element_metadata *metadata = &(s->elements[elem_num].metadata);

    // Keep picking random blocks and cleaning the eligible ones until the
    // free-block high watermark is reached (loop exits via the break).
    do {
        // get a random number to select a block
        blk = DISKSIM_lrand48() % s->params.blocks_per_element;

        // if this is plane specific cleaning, then skip all the
        // blocks that don't belong to this plane.
        if ((plane_num != -1) &&
            (metadata->block_usage[blk].plane_num != plane_num)) {
            continue;
        }

        // clean only those blocks that are used.
        if (ssd_can_clean_block(s, metadata, blk)) {
            int valid_pages = metadata->block_usage[blk].num_valid;

            // if all the pages in the block are valid, continue to
            // select another random block
            if (valid_pages == s->params.pages_per_block) {
                continue;
            } else {
                // invoke cleaning until we reach the high watermark
                cost += _ssd_clean_block_fully(blk, elem_num, metadata, s);

                if (ssd_stop_cleaning(plane_num, elem_num, s)) {
                    // we're done with creating enough free blocks. so quit.
                    break;
                }
            }
        } else {
            // block is already free. so continue.
            continue;
        }
    } while (1);
#endif

    return cost;
}
/*
 * Refresh a single block: fully clean it in place, driven by the
 * refresh path rather than by garbage collection (is_refresh = 1 is
 * forwarded to _ssd_clean_block_fully).
 *
 * Preconditions (asserted): the owning element is not busy on the
 * media and no cleaning is in progress on the block's plane.
 *
 * Returns the cost of the cleaning operation.
 *
 * Change from original: removed the local 'refresh_invoked', which was
 * declared and initialized but never read.
 */
double ssd_refresh_block(block_metadata *block_metadata, ssd_t *s)
{
    double cost = 0;
    int is_refresh = 1;
    int elem_num = block_metadata->elem_num;
    int plane_num = block_metadata->plane_num;
    ssd_element *elem = &s->elements[elem_num];
    ssd_element_metadata *metadata = &(s->elements[elem_num].metadata);
    plane_metadata *pm = &metadata->plane_meta[plane_num];

    // element must be free
    ASSERT(elem->media_busy == FALSE);

    // calculate cost of refresh operation.
    // Check if the plane is already being cleaned or refreshed. If so, skip this block.
    ASSERT(pm->clean_in_progress == FALSE);

    // point the element-level active page at this plane's active page
    // before cleaning starts
    metadata->active_page = metadata->plane_meta[plane_num].active_page;

    // NOTE(review): this call passes 6 args (with is_refresh) while the
    // cleaning paths pass 5 -- verify against _ssd_clean_block_fully's
    // prototype; one set of call sites is likely stale.
    cost = _ssd_clean_block_fully(block_metadata->block_num, plane_num, elem_num, metadata, s, is_refresh);

    // cleaning must have completed synchronously
    ASSERT(pm->clean_in_progress == FALSE);

    return cost;
}
/*
 * first we create a hash table of blocks according to their
 * usage. then we select blocks with the least usage and clean
 * them.
 *
 * Greedy cleaning: buckets every block of the element by its count of
 * valid pages, then walks the buckets from 0 valid pages upward (the
 * cheapest victims first), cleaning eligible blocks until
 * ssd_stop_cleaning() reports the free-block watermark has been reached.
 * Under the wear-aware variant, blocks whose remaining lifetime is well
 * below the element average are probabilistically skipped to rate-limit
 * their erasure.
 *
 * Returns the accumulated cleaning cost.
 */
static double ssd_clean_blocks_greedy(int plane_num, int elem_num, ssd_t *s)
{
    double cost = 0;
    double avg_lifetime;
    int i;
    usage_table *table;
    ssd_element_metadata *metadata = &(s->elements[elem_num].metadata);

    /////////////////////////////////////////////////////////////////////////////
    // build the histogram
    table = ssd_build_usage_table(elem_num, s);

    //////////////////////////////////////////////////////////////////////////////
    // find the average life time of all the blocks in this element
    avg_lifetime = ssd_compute_avg_lifetime(plane_num, elem_num, s);

    /////////////////////////////////////////////////////////////////////////////
    // we now have a hash table of blocks, where the key of each
    // bucket is the usage count and each bucket has all the blocks with
    // the same usage count (i.e., the same num of valid pages).
    // NB: bound is inclusive -- a block can have 0..pages_per_block valid pages.
    for (i = 0; i <= s->params.pages_per_block; i ++) {
        int j;
        usage_table *entry;

        // get the bucket of blocks with 'i' valid pages
        entry = &(table[i]);

        // free all the blocks with 'i' valid pages
        for (j = 0; j < entry->len; j ++) {
            int blk = entry->block[j];
            int block_life = metadata->block_usage[blk].rem_lifetime;

            // if this is plane specific cleaning, then skip all the
            // blocks that don't belong to this plane.
            if ((plane_num != -1) && (metadata->block_usage[blk].plane_num != plane_num)) {
                continue;
            }

            // if the block is already dead, skip it
            if (block_life == 0) {
                continue;
            }

            // clean only those blocks that are sealed.
            if (ssd_can_clean_block(s, metadata, blk)) {

                // if we care about wear-leveling, then we must rate limit overly cleaned blocks
                if (s->params.cleaning_policy == DISKSIM_SSD_CLEANING_POLICY_GREEDY_WEAR_AWARE) {

                    // see if this block's remaining lifetime is within
                    // a certain threshold of the average remaining lifetime
                    // of all blocks in this element
                    if (block_life < (SSD_LIFETIME_THRESHOLD_X * avg_lifetime)) {

                        // we have to rate limit this block as it has exceeded
                        // its cleaning limits
                        printf("Rate limiting block %d (block life %d avg life %f\n",
                            blk, block_life, avg_lifetime);

                        if (ssd_rate_limit(block_life, avg_lifetime)) {
                            // skip this block and go to the next one
                            continue;
                        }
                    }
                }

                // okies, finally here we're with the block to be cleaned.
                // invoke cleaning until we reach the high watermark.
                cost += _ssd_clean_block_fully(blk, metadata->block_usage[blk].plane_num, elem_num, metadata, s);

                if (ssd_stop_cleaning(plane_num, elem_num, s)) {
                    // no more cleaning is required -- so quit.
                    // (this break only leaves the inner loop; the outer
                    // loop re-checks the same condition below and quits.)
                    break;
                }
            }
        }

        if (ssd_stop_cleaning(plane_num, elem_num, s)) {
            // no more cleaning is required -- so quit.
            break;
        }
    }

    // release the table
    ssd_release_usage_table(table, s);

    // see if we were able to generate enough free blocks
    if (!ssd_stop_cleaning(plane_num, elem_num, s)) {
        printf("Yuck! we couldn't generate enough free pages in plane %d elem %d ssd %d\n",
            plane_num, elem_num, s->devno);
    }

    return cost;
}
/*
 * migrate data from a cold block to "to_blk"
 *
 * Selects the "coldest" victim block -- the in-use block with the oldest
 * erasure timestamp, searched element-wide when plane_num == -1 or within
 * the given plane otherwise -- then cleans to_blk and moves the victim's
 * valid pages (and their LBA mappings, bsn, and state) into it. The cost
 * of the reads, writes, and any cross-plane transfer is returned to the
 * caller through *mcost; the erase cost of the vacated cold block is
 * accounted for later by the caller's cleaning path.
 *
 * Returns the block number the cold data was migrated FROM.
 */
int ssd_migrate_cold_data(int to_blk, double *mcost, int plane_num, int elem_num, ssd_t *s)
{
    int i;
    int from_blk = -1;
    double oldest_erase_time = simtime;
    double cost = 0;
    int bitpos;

#if SSD_ASSERT_ALL
    int f1;
    int f2;
#endif

    ssd_element_metadata *metadata = &(s->elements[elem_num].metadata);

    // first select the coldest of all blocks.
    // one way to select is to find the one that has the oldest
    // erasure time.
    if (plane_num == -1) {
        // element-wide search over every in-use block
        for (i = 0; i < s->params.blocks_per_element; i ++) {
            if (metadata->block_usage[i].num_valid > 0) {
                if (metadata->block_usage[i].time_of_last_erasure < oldest_erase_time) {
                    oldest_erase_time = metadata->block_usage[i].time_of_last_erasure;
                    from_blk = i;
                }
            }
        }
    } else {
#if SSD_ASSERT_ALL
        f1 = ssd_free_bits(plane_num, elem_num, metadata, s);
        ASSERT(f1 == metadata->plane_meta[metadata->block_usage[to_blk].plane_num].free_blocks);
#endif

        // plane-restricted search: walk only this plane's bit positions
        bitpos = plane_num * s->params.blocks_per_plane;
        for (i = bitpos; i < bitpos + (int)s->params.blocks_per_plane; i ++) {
            int block = ssd_bitpos_to_block(i, s);
            ASSERT(metadata->block_usage[block].plane_num == plane_num);

            if (metadata->block_usage[block].num_valid > 0) {
                if (metadata->block_usage[block].time_of_last_erasure < oldest_erase_time) {
                    oldest_erase_time = metadata->block_usage[block].time_of_last_erasure;
                    from_blk = block;
                }
            }
        }
    }

    // a victim must exist (there is at least one in-use block)
    ASSERT(from_blk != -1);
    if (plane_num != -1) {
        ASSERT(metadata->block_usage[from_blk].plane_num == metadata->block_usage[to_blk].plane_num);
    }

    // next, clean the block to which we'll transfer the
    // cold data
    cost += _ssd_clean_block_fully(to_blk, metadata->block_usage[to_blk].plane_num, elem_num, metadata, s);

#if SSD_ASSERT_ALL
    if (plane_num != -1) {
        f2 = ssd_free_bits(plane_num, elem_num, metadata, s);
        ASSERT(f2 == metadata->plane_meta[metadata->block_usage[to_blk].plane_num].free_blocks);
    }
#endif

    // then, migrate the cold data to the worn out block.
    // for which, we first read all the valid data
    cost += metadata->block_usage[from_blk].num_valid * s->params.page_read_latency;

    // include the write cost
    cost += metadata->block_usage[from_blk].num_valid * s->params.page_write_latency;

    // if the src and dest blocks are on different planes
    // include the transfer cost also
    cost += ssd_crossover_cost(s, metadata, from_blk, to_blk);

    // the cost of erasing the cold block (represented by from_blk)
    // will be added later ...

    // finally, update the metadata: to_blk takes over from_blk's
    // sequence number, valid-page count, page table, and state
    metadata->block_usage[to_blk].bsn = metadata->block_usage[from_blk].bsn;
    metadata->block_usage[to_blk].num_valid = metadata->block_usage[from_blk].num_valid;
    metadata->block_usage[from_blk].num_valid = 0;

    // re-point every migrated logical page's LBA mapping at to_blk
    for (i = 0; i < s->params.pages_per_block; i ++) {
        int lpn = metadata->block_usage[from_blk].page[i];
        if (lpn != -1) {
            ASSERT(metadata->lba_table[lpn] == (from_blk * s->params.pages_per_block + i));
            metadata->lba_table[lpn] = to_blk * s->params.pages_per_block + i;
        }
        metadata->block_usage[to_blk].page[i] = metadata->block_usage[from_blk].page[i];
    }
    metadata->block_usage[to_blk].state = metadata->block_usage[from_blk].state;

    // mark to_blk as allocated again in the free-block bitmap and
    // adjust the element- and plane-level free counters
    bitpos = ssd_block_to_bitpos(s, to_blk);
    ssd_set_bit(metadata->free_blocks, bitpos);
    metadata->tot_free_blocks --;
    metadata->plane_meta[metadata->block_usage[to_blk].plane_num].free_blocks --;

#if SSD_ASSERT_ALL
    if (plane_num != -1) {
        f2 = ssd_free_bits(plane_num, elem_num, metadata, s);
        ASSERT(f2 == metadata->plane_meta[metadata->block_usage[to_blk].plane_num].free_blocks);
    }
#endif

    ssd_assert_free_blocks(s, metadata);
    ASSERT(metadata->block_usage[from_blk].num_valid == 0);

#if ASSERT_FREEBITS
    // NOTE(review): f1/f2 are declared only under SSD_ASSERT_ALL, and f1
    // is assigned only on the plane-specific path -- this block fails to
    // compile (or asserts on garbage) unless SSD_ASSERT_ALL is also set.
    // Verify the intended relationship between these two macros.
    if (plane_num != -1) {
        f2 = ssd_free_bits(plane_num, elem_num, metadata, s);
        ASSERT(f1 == f2);
    }
#endif

    *mcost = cost;

    // stat
    // (to_blk.num_valid now holds the count of pages just migrated)
    metadata->tot_migrations ++;
    metadata->tot_pgs_migrated += metadata->block_usage[to_blk].num_valid;
    metadata->mig_cost += cost;

    return from_blk;
}