int COPY_VALID_PAGES(int32_t old_pbn, int32_t new_pbn) { int i; int copy_page_nb = 0; unsigned int old_flash_nb = CALC_FLASH(old_pbn); unsigned int old_block_nb = CALC_BLOCK(old_pbn); unsigned int new_flash_nb = CALC_FLASH(new_pbn); unsigned int new_block_nb = CALC_BLOCK(new_pbn);; block_state_entry* old_b_s_entry = GET_BLOCK_STATE_ENTRY(old_pbn); char* valid_array = old_b_s_entry->valid_array; for(i=0;i<PAGE_NB;i++){ if(valid_array[i] == 'V'){ SSD_PAGE_READ(old_flash_nb, old_block_nb, i, -1, GC_READ, -1); SSD_PAGE_WRITE(new_flash_nb, new_block_nb, i, -1, GC_WRITE, -1); UPDATE_BLOCK_STATE_ENTRY(new_pbn, i, VALID); UPDATE_BLOCK_STATE_ENTRY(old_pbn, i, INVALID); copy_page_nb++; } } return copy_page_nb; }
int UPDATE_NEW_PAGE_MAPPING(int32_t lpn, int32_t ppn, float ratio) { /* Update Page Mapping Table */ mapping_table[lpn] = ppn; LOG("Update lpn %d new ppn %d", lpn, ppn); /* Update Inverse Page Mapping Table */ UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(ppn), CALC_BLOCK(ppn), CALC_PAGE(ppn), VALID); UPDATE_INVERSE_BLOCK_MAPPING(CALC_FLASH(ppn), CALC_BLOCK(ppn), DATA_BLOCK); UPDATE_INVERSE_PAGE_MAPPING(ppn, NEW_PPN, lpn); return SUCCESS; }
int UPDATE_NEW_PAGE_MAPPING(int32_t lpn, int32_t ppn) { /* Update Page Mapping Table */ #ifdef FTL_MAP_CACHE CACHE_UPDATE_PPN(lpn, ppn); #else mapping_table[lpn] = ppn; #endif /* Update Inverse Page Mapping Table */ UPDATE_BLOCK_STATE_ENTRY(CALC_FLASH(ppn), CALC_BLOCK(ppn), CALC_PAGE(ppn), VALID); UPDATE_BLOCK_STATE(CALC_FLASH(ppn), CALC_BLOCK(ppn), DATA_BLOCK); UPDATE_INVERSE_MAPPING(ppn, lpn); return SUCCESS; }
int _FTL_OBJ_COPYBACK(int32_t source, int32_t destination) { page_node *source_p; source_p = lookup_page(source); // source_p can be NULL if the GC is working on some old pages that belonged to an object we deleted already if (source_p != NULL) { // invalidate the source page UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(source), CALC_BLOCK(source), CALC_PAGE(source), INVALID); // mark new page as valid and used UPDATE_NEW_PAGE_MAPPING_NO_LOGICAL(destination); // change the object's page mapping to the new page HASH_DEL(global_page_table, source_p); source_p->page_id = destination; HASH_ADD_INT(global_page_table, page_id, source_p); } #ifdef FTL_DEBUG else { printf("Warning[%s] %u copyback page not mapped to an object \n", __FUNCTION__, source); } #endif return SUCCESS; }
/*
 * Per-device variant: record that logical page lpn of the SSD attached to
 * IDE state `s` now resides at physical page ppn.
 *
 * Forward mapping goes through the map cache when FTL_MAP_CACHE is set,
 * otherwise straight into ssd->mapping_table; inverse metadata is always
 * refreshed.
 *
 * Returns SUCCESS.
 */
int UPDATE_NEW_PAGE_MAPPING(IDEState *s, int32_t lpn, int32_t ppn)
{
	SSDState *ssd = &(s->ssd);
	unsigned int flash_nb = CALC_FLASH(s, ppn);
	unsigned int block_nb = CALC_BLOCK(s, ppn);
	unsigned int page_nb = CALC_PAGE(s, ppn);

	/* Forward mapping: logical -> physical. */
#ifdef FTL_MAP_CACHE
	CACHE_UPDATE_PPN(s, lpn, ppn);
#else
	ssd->mapping_table[lpn] = ppn;
#endif

	/* Inverse metadata for the physical page. */
	UPDATE_BLOCK_STATE_ENTRY(s, flash_nb, block_nb, page_nb, VALID);
	UPDATE_BLOCK_STATE(s, flash_nb, block_nb, DATA_BLOCK);
	UPDATE_INVERSE_MAPPING(s, ppn, lpn);

	return SUCCESS;
}
/*
 * WOM-coded variant: map (lpn, index) to physical page ppn, tracking the
 * compression ratio and the number of rewrites of this lpn.
 *
 * The inverse ppn->lpn mapping is only recorded on the first write
 * (rewrites[lpn]==1); a rewrite reuses the page already owned by the lpn.
 *
 * Returns SUCCESS (asserts on rewrite-budget or ratio violations).
 */
int UPDATE_NEW_PAGE_MAPPING(int32_t lpn, int32_t ppn, double ratio, int index)
{
	/* Update Page Mapping Table */
	mapping_table[lpn][index] = ppn;

	/* update rewrites */
	assert(rewrites[lpn]<MAX_WOM_REWRITES);
	rewrites[lpn]++;
	/* Second write must fit the WOM budget of the first write's ratio.
	   NOTE(review): the literal 10e-7 is 1e-6 — confirm 1e-7 was not
	   intended. Also the printf reports ratio_table[lpn][index] while the
	   condition tests ratio_table[lpn][0] — verify which was meant. */
	if(rewrites[lpn]==2 && ratio_table[lpn][0] * WOM_FACTOR -1.0 >= 10e-7){
		printf("ERROR: rewriting too much. ratio old %lf new %lf. old * WOM_FACTOR %lf\n",ratio_table[lpn][index], ratio, ratio_table[lpn][0] * WOM_FACTOR);
		assert(ratio * WOM_FACTOR -1.0 < 10e-7);
	}
	ratio_table[lpn][index] = ratio;
	//LOG("Update lpn %d index %d new ppn %d", lpn, index, ppn);

	/* Update Inverse Page Mapping Table */
	UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(ppn), CALC_BLOCK(ppn), CALC_PAGE(ppn), VALID, ratio);
	UPDATE_INVERSE_BLOCK_MAPPING(CALC_FLASH(ppn), CALC_BLOCK(ppn), DATA_BLOCK);
	/* Only the first write establishes the ppn->lpn inverse entry. */
	if(rewrites[lpn]==1)
		UPDATE_INVERSE_PAGE_MAPPING(ppn, NEW_PPN, lpn, index);
	return SUCCESS;
}
/*
 * Tear down a stored object: unhash it, invalidate and free every page
 * node it owns, then free the object itself.
 *
 * Returns SUCCESS.
 */
int remove_object(stored_object *object)
{
	page_node *node;
	page_node *next;

	/* The object may never have been hashed (cleanup path after a failed
	   create_object); HASH_DEL on an unhashed entry would wreck the whole
	   table, so check hh.tbl first. */
	if (object->hh.tbl != NULL)
		HASH_DEL(objects_table, object);

	for (node = object->pages; node != NULL; node = next)
	{
		next = node->next;	/* grab successor before freeing */

		/* Invalidate the physical page backing this node. */
		UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(node->page_id), CALC_BLOCK(node->page_id), CALC_PAGE(node->page_id), INVALID);
#ifdef GC_ON
		// should we really perform GC for every page? we know we are invalidating a lot of them now...
		GC_CHECK(CALC_FLASH(node->page_id), CALC_BLOCK(node->page_id), true);
#endif
		/* Same unhashed-entry guard as above, then release the node. */
		if (node->hh.tbl != NULL)
			HASH_DEL(global_page_table, node);
		free(node);
	}

	free(object);
	return SUCCESS;
}
int UPDATE_OLD_PAGE_MAPPING(int32_t lpn) { int32_t old_ppn; old_ppn = GET_MAPPING_INFO(lpn); if(old_ppn == -1){ #ifdef FTL_DEBUG //printf("[%s] New page \n",__FUNCTION__); #endif //LOG("Wrote new page for lpn %d (old_ppn %d)", lpn, old_ppn); return SUCCESS; } else{ LOG("Rewrote lpn %d (old_ppn %d)", lpn, old_ppn); UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(old_ppn), CALC_BLOCK(old_ppn), CALC_PAGE(old_ppn), INVALID); //LOG("invalidated old_ppn=%d", old_ppn); UPDATE_INVERSE_PAGE_MAPPING(old_ppn, INVALIDATE, lpn); } return SUCCESS; }
int UPDATE_OLD_PAGE_MAPPING(int32_t lpn, int index) { int32_t *old_ppn; old_ppn = GET_MAPPING_INFO(lpn); if(old_ppn[index] == -1) { #ifdef FTL_DEBUG printf("[%s] New page \n",__FUNCTION__); #endif LOG("Wrote new page for lpn %d index %d (old_ppn %d)", lpn, index, old_ppn[index]); return SUCCESS; } else { LOG("Rewrote lpn %d index %d (old_ppn %d)", lpn, index, old_ppn[index]); UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(old_ppn[index]), CALC_BLOCK(old_ppn[index]), CALC_PAGE(old_ppn[index]), INVALID, ratio_table[lpn][index]); UPDATE_INVERSE_PAGE_MAPPING(old_ppn[index], INVALIDATE, lpn, index); old_ppn[index] = -1; // this portion of the lpn is now empty } return SUCCESS; }
int UPDATE_OLD_PAGE_MAPPING(int32_t lpn) { int32_t old_ppn; #ifdef FTL_MAP_CACHE old_ppn = CACHE_GET_PPN(lpn); #else old_ppn = GET_MAPPING_INFO(lpn); #endif if(old_ppn == -1){ #ifdef FTL_DEBUG printf("[%s] New page \n", __FUNCTION__); #endif return SUCCESS; } else{ UPDATE_BLOCK_STATE_ENTRY(CALC_FLASH(old_ppn), CALC_BLOCK(old_ppn), CALC_PAGE(old_ppn), INVALID); UPDATE_INVERSE_MAPPING(old_ppn, -1); } return SUCCESS; }
/*
 * Block-mapping garbage collection of victim_pbn.
 *
 * If the victim is a replacement block, GC is redirected to its root data
 * block (v_pbn). Three cases are handled:
 *   1. No replacement blocks: copy valid pages to a fresh block, erase
 *      the victim, remap the logical block.
 *   2. The last replacement block is fully valid (or has no invalid
 *      pages): promote it to be the new data block, folding any remaining
 *      valid pages of the root (and first rp block) into it first.
 *   3. Otherwise: merge root + replacement blocks into a fresh block via
 *      MERGE_RP_BLOCKS.
 *
 * Returns the number of pages copied, or -1 on inconsistent metadata.
 * NOTE(review): only rp_count values 0..2 are handled explicitly;
 * presumably the chain never grows longer — confirm.
 */
int BM_GARBAGE_COLLECTION(int32_t victim_pbn)
{
#ifdef FTL_DEBUG
	printf("[%s] Start\n", __FUNCTION__);
#endif
	int32_t new_pbn;
	int32_t v_pbn;
	int valid_page_nb;
	int copy_page_nb=0;

	block_state_entry* temp_b_s_entry = GET_BLOCK_STATE_ENTRY(victim_pbn);

	/* If the victim block is not a replacement block */
	if(temp_b_s_entry->rp_root_pbn == -1){
		v_pbn = victim_pbn;
	}
	/* If the victim block is a replacement block */
	else{
		v_pbn = temp_b_s_entry->rp_root_pbn;
	}

	block_state_entry* rp_b_s_entry;
	block_state_entry* root_b_s_entry = GET_BLOCK_STATE_ENTRY(v_pbn);
	rp_block_entry* rp_b_entry = root_b_s_entry->rp_head;
	int n_rp_blocks = root_b_s_entry->rp_count;

	/* Get logical block number of the victim Block */
	int32_t lbn = GET_INVERSE_MAPPING_INFO(v_pbn);
	if(lbn == -1){
		printf("ERROR[%s] It is a replacement block !\n", __FUNCTION__);
		return -1;
	}

	/* If the victim block has no replacement block, */
	if(n_rp_blocks == 0){
		/* Get new empty block */
		GET_NEW_BLOCK(VICTIM_OVERALL, EMPTY_TABLE_ENTRY_NB, &new_pbn);

		/* Copy the valid pages in the victim block to the new empty block */
		copy_page_nb = COPY_VALID_PAGES(v_pbn, new_pbn);
		SSD_BLOCK_ERASE(CALC_FLASH(v_pbn), CALC_BLOCK(v_pbn));

		/* Update block mapping information */
		UPDATE_OLD_BLOCK_MAPPING(lbn);
		UPDATE_NEW_BLOCK_MAPPING(lbn, new_pbn);
		UPDATE_BLOCK_STATE(v_pbn, EMPTY_BLOCK);
		INSERT_EMPTY_BLOCK(v_pbn);
	}
	else{
		/* Get the last replacement block */
		if(n_rp_blocks == 1){
			rp_b_entry = root_b_s_entry->rp_head;
		}
		else if(n_rp_blocks == 2){
			rp_b_entry = root_b_s_entry->rp_head->next;
		}

		/* Get the valid page number of last(tail) replacement block */
		rp_b_s_entry = GET_BLOCK_STATE_ENTRY(rp_b_entry->pbn);
		valid_page_nb = rp_b_s_entry->valid_page_nb;
		int n_invalid_pages = COUNT_INVALID_PAGES(rp_b_s_entry);

		/* If all pages in the block is valid, then */
		if(valid_page_nb == PAGE_NB || n_invalid_pages == 0){
			/* Tail rp block not completely full: fold remaining valid
			   pages of the root (and first rp block, if any) into it
			   before promoting it. */
			if(valid_page_nb != PAGE_NB){
				int temp_copy_page_nb;
				rp_block_entry* target_rp_b_entry;
				rp_block_entry* first_rp_b_entry;
				if(n_rp_blocks == 1){
					target_rp_b_entry = root_b_s_entry->rp_head;
					copy_page_nb = COPY_VALID_PAGES(v_pbn, target_rp_b_entry->pbn);
				}
				else if(n_rp_blocks == 2){
					target_rp_b_entry = root_b_s_entry->rp_head->next;
					first_rp_b_entry = root_b_s_entry->rp_head;
					temp_copy_page_nb = COPY_VALID_PAGES(v_pbn, target_rp_b_entry->pbn);
					copy_page_nb = COPY_VALID_PAGES(first_rp_b_entry->pbn, target_rp_b_entry->pbn);
					copy_page_nb += temp_copy_page_nb;
				}
			}

			/* Mapping Information Update Start */
			/* The tail rp block becomes the new data block for lbn. */
			new_pbn = rp_b_entry->pbn;

			/* Update the last rp entry of the last replacement block */
			rp_b_s_entry->rp_root_pbn = -1;

			/* free the last rp entry entry */
			free(rp_b_entry);

			if(n_rp_blocks == 2){
				/* Update the root entry of the first replacement block */
				temp_b_s_entry = GET_BLOCK_STATE_ENTRY(root_b_s_entry->rp_head->pbn);
				temp_b_s_entry->rp_root_pbn = -1;

				/* Get rid of first rp block */
				SSD_BLOCK_ERASE(CALC_FLASH(root_b_s_entry->rp_head->pbn), CALC_BLOCK(root_b_s_entry->rp_head->pbn));
				UPDATE_BLOCK_STATE(root_b_s_entry->rp_head->pbn, EMPTY_BLOCK);
				INSERT_EMPTY_BLOCK(root_b_s_entry->rp_head->pbn);

				/* free first rp block entry */
				free(root_b_s_entry->rp_head);
			}

			/* Update rp block table */
			root_b_s_entry->rp_count = 0;
			root_b_s_entry->rp_head = NULL;

			/* Get rid of original block */
			SSD_BLOCK_ERASE(CALC_FLASH(v_pbn), CALC_BLOCK(v_pbn));
			UPDATE_INVERSE_MAPPING(v_pbn, -1);
			UPDATE_BLOCK_STATE(v_pbn, EMPTY_BLOCK);
			INSERT_EMPTY_BLOCK(v_pbn);

			/* Update mapping metadata */
			UPDATE_NEW_BLOCK_MAPPING(lbn, new_pbn);
		}
		else{
			/* Get new empty block */
			GET_NEW_BLOCK(VICTIM_OVERALL, EMPTY_TABLE_ENTRY_NB, &new_pbn);

			/* Copy the valid pages to new empty block */
			copy_page_nb = MERGE_RP_BLOCKS(v_pbn, new_pbn);

			/* Update Block Mapping Information */
			UPDATE_OLD_BLOCK_MAPPING(lbn);
			UPDATE_NEW_BLOCK_MAPPING(lbn, new_pbn);
		}
	}

#ifdef FTL_DEBUG
	printf("[%s] End\n", __FUNCTION__);
#endif

	return copy_page_nb;
}
/*
 * Object-based write: write `length` bytes at `offset` into the object
 * identified by object_id.
 *
 * Appends pages while offset is beyond the object's current size, then
 * for each I/O page either appends a new page (write past end) or
 * replaces the page-node's physical page (overwrite), invalidating the
 * old physical page.
 *
 * Returns SSD_PAGE_WRITE's last status (SUCCESS/FAIL), or FAIL on lookup
 * or allocation errors.
 *
 * NOTE(review): page_id is uint32_t here but GET_NEW_PAGE's out-param is
 * int32_t elsewhere in this file — confirm the prototype matches.
 */
int _FTL_OBJ_WRITE(object_id_t object_id, unsigned int offset, unsigned int length)
{
	stored_object *object;
	page_node *current_page = NULL,*temp_page;
	uint32_t page_id;
	int io_page_nb;
	int curr_io_page_nb;
	unsigned int ret = FAIL;

	object = lookup_object(object_id);

	// file not found
	if (object == NULL)
		return FAIL;

	// calculate the overhead of allocating the request. io_page_nb will be the total number of pages we're gonna write
	io_alloc_overhead = ALLOC_IO_REQUEST(offset, length, WRITE, &io_page_nb);

	// if the offset is past the current size of the stored_object we need to append new pages until we can start writing
	while (offset > object->size)
	{
		if (GET_NEW_PAGE(VICTIM_OVERALL, EMPTY_TABLE_ENTRY_NB, &page_id) == FAIL)
		{
			// not enough memory presumably
			printf("ERROR[FTL_WRITE] Get new page fail \n");
			return FAIL;
		}
		if(!add_page(object, page_id))
			return FAIL;
		// mark new page as valid and used
		UPDATE_NEW_PAGE_MAPPING_NO_LOGICAL(page_id);
	}

	for (curr_io_page_nb = 0; curr_io_page_nb < io_page_nb; curr_io_page_nb++)
	{
		// if this is the first iteration we need to find the page by offset, otherwise we can go with the page chain
		if (current_page == NULL)
			current_page = page_by_offset(object, offset);
		else
			current_page = current_page->next;

		// get the pge we'll be writing to
		if (GET_NEW_PAGE(VICTIM_OVERALL, EMPTY_TABLE_ENTRY_NB, &page_id) == FAIL)
		{
			printf("ERROR[FTL_WRITE] Get new page fail \n");
			return FAIL;
		}
		/* A freshly allocated physical page must not already belong to an
		   object; if it does, the page tables are inconsistent. */
		if((temp_page=lookup_page(page_id)))
		{
			printf("ERROR[FTL_WRITE] Object %lu already contains page %d\n",temp_page->object_id,page_id);
			return FAIL;
		}

		// mark new page as valid and used
		UPDATE_NEW_PAGE_MAPPING_NO_LOGICAL(page_id);

		if (current_page == NULL) // writing at the end of the object and need to allocate more space for it
		{
			current_page = add_page(object, page_id);
			if(!current_page)
				return FAIL;
		}
		else // writing over parts of the object
		{
			// invalidate the old physical page and replace the page_node's page
			UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(current_page->page_id), CALC_BLOCK(current_page->page_id), CALC_PAGE(current_page->page_id), INVALID);
			UPDATE_INVERSE_PAGE_MAPPING(current_page->page_id, -1);
			/* Re-key the node: uthash keys cannot change while hashed. */
			HASH_DEL(global_page_table, current_page);
			current_page->page_id = page_id;
			HASH_ADD_INT(global_page_table, page_id, current_page);
		}

#ifdef GC_ON
		// must improve this because it is very possible that we will do multiple GCs on the same flash chip and block
		// probably gonna add an array to hold the unique ones and in the end GC all of them
		GC_CHECK(CALC_FLASH(current_page->page_id), CALC_BLOCK(current_page->page_id), false);
#endif

		ret = SSD_PAGE_WRITE(CALC_FLASH(page_id), CALC_BLOCK(page_id), CALC_PAGE(page_id), curr_io_page_nb, WRITE, io_page_nb);

		// send a physical write action being done to the statistics gathering
		if (ret == SUCCESS)
		{
			FTL_STATISTICS_GATHERING(page_id , PHYSICAL_WRITE);
		}
#ifdef FTL_DEBUG
		if (ret == FAIL)
		{
			printf("Error[FTL_WRITE] %d page write fail \n", page_id);
		}
#endif
	}

	INCREASE_IO_REQUEST_SEQ_NB();

#ifdef MONITOR_ON
	char szTemp[1024];
	sprintf(szTemp, "WRITE PAGE %d ", length);
	WRITE_LOG(szTemp);
	sprintf(szTemp, "WB CORRECT %d", curr_io_page_nb);
	WRITE_LOG(szTemp);
#endif

#ifdef FTL_DEBUG
	printf("[%s] Complete\n",__FUNCTION__);
#endif

	return ret;
}
/* add ratio of newly written page, one by one. whenever we dont have enough
   space to add to bin, split lpn to two parts, dump page, and add overflow
   to the newly emptied bin */
/*
 * Buffer a compressed logical page (lpn, ratio) into the packing bins.
 * If the target bin overflows, the bin is dumped to a freshly allocated
 * physical page, the mappings of every lpn packed in it are updated, and
 * the overflow portion of the current lpn is re-inserted.
 *
 * is_gc suppresses I/O-request accounting and GC checks when called from
 * the GC path.
 *
 * Returns 0 when buffered without a dump, 1 after a dump, -1 on failure.
 *
 * NOTE(review): the `dump_bin:` label has no matching goto in this block —
 * presumably a leftover from a looped version; confirm. `max_lpns` is
 * assigned but never read.
 */
int _FTL_BUF(int lpn, double ratio, int is_gc)
{
	int i, bpn_idx, b_lpn, temp_bpn_idx = -1;
	int ret;
	int32_t new_ppn;
	int io_page_nb;
	int length = SECTORS_PER_PAGE; /* write 1 page at most*/
	int bin_idx = 0;
	int max_lpns = 0;
	double overflow_ratio;

	total_compression_ratio += ratio;
	total_lpn_count++;

	LOG("lpn %d ratio %f", lpn, ratio);

	/* check if already buffered*/
	for(bpn_idx=0; bpn_idx<MAX_COMPRESSED_LPN_PER_PPN*BIN_COUNT; bpn_idx++){
		/* already buffered */
		if(bpns[bpn_idx].lpn == lpn){
			LOG("lpn %d already buffered in bpn %d (index %d ratio %f)", lpn, bpn_idx, bpns[bpn_idx].index, bpns[bpn_idx].ratio);
			remove_from_bin(bpn_idx);// remove from bin
			init_bpn(&bpns[bpn_idx]);
		}
	}

	/* find empty bpn. must find one!*/
	bpn_idx = find_empty_bpn();
	assert(bpn_idx != MAX_COMPRESSED_LPN_PER_PPN*BIN_COUNT);

	/* put in bin, and invalidate old */
	overflow_ratio = update_bin_bpn(bin_idx, bpn_idx, lpn, ratio, 0);
	LOG("overflow_ratio %f", overflow_ratio);
	LOG("lpn %d invalidate both indexes", lpn);
	/* Both WOM slots of the lpn are invalidated; the buffered copy is now
	   the authoritative one. */
	UPDATE_OLD_PAGE_MAPPING(lpn, 0);
	UPDATE_OLD_PAGE_MAPPING(lpn, 1);

	/* bin not full */
	if (overflow_ratio == 0.0000)
		return 0;

dump_bin:
	/* bin too full, caused overflow. save ratio of overflow*/
	/* need to dump!*/
	if(!is_gc)
		io_alloc_overhead = ALLOC_IO_REQUEST(0, length, WRITE, &io_page_nb);

	ret = GET_NEW_PAGE(VICTIM_OVERALL, EMPTY_TABLE_ENTRY_NB, &new_ppn);
	if(ret == FAIL){
		printf("ERROR[FTL_WRITE] Get new page fail \n");
		assert(0);
		return -1;
	}

	/* Physically write the packed bin as one page. */
	ret = SSD_PAGE_WRITE(CALC_FLASH(new_ppn), CALC_BLOCK(new_ppn), CALC_PAGE(new_ppn), 0, (is_gc)?GC_WRITE:WRITE, io_page_nb);
	LOG("dumped bin to new_ppn %d", new_ppn);
#ifdef FTL_DEBUG
	if(ret == FAIL){
		printf("Error[_FTL_BIN_PACK] %d page write fail \n", new_ppn);
		assert(0);
		return -1;
	}
#endif

	/* choose bin to dump*/
	bin_idx = 0;
	max_lpns = bins[0].count;
	LOG("dump bin %d (has %d lpns)\n", bin_idx, bins[bin_idx].count);

	/* Update mappings for all moved compressed LPNs */
	for(i=0;i<MAX_COMPRESSED_LPN_PER_PPN;i++){
		temp_bpn_idx = bins[bin_idx].bpns[i];
		/* Negative slot terminates the bin; the first slot must be used. */
		if(temp_bpn_idx <0){
			assert(i>0);
			break;
		}
		b_lpn = bpns[temp_bpn_idx].lpn;
		LOG("b_lpn %d update new index %d", b_lpn, bpns[temp_bpn_idx].index);
		UPDATE_NEW_PAGE_MAPPING(b_lpn, new_ppn, bpns[temp_bpn_idx].ratio,// updates relevant valid_array entry as well
					bpns[temp_bpn_idx].index); // index
		/* dont forget to update reelvant bpn*/
		init_bpn(&bpns[temp_bpn_idx]);
	}

	/* init evicted bin, insert new bpn, and increment bin_idx */
	init_bin(&bins[bin_idx]);

	/* insert to bin, this time with the leftover overflow_ratio*/
	bpn_idx = find_empty_bpn();
	assert(bpn_idx < MAX_COMPRESSED_LPN_PER_PPN*BIN_COUNT);
	LOG("update overflow index 1");
	update_bin_bpn(bin_idx, bpn_idx, lpn, overflow_ratio, (overflow_ratio < ratio)?1:0 /*overflow -> index 1*/);

	/* finish writing */
	if(!is_gc)
		INCREASE_IO_REQUEST_SEQ_NB();

#ifdef GC_ON
	if(!is_gc)
		GC_CHECK(CALC_FLASH(new_ppn), CALC_BLOCK(new_ppn));
#endif

#ifdef MONITOR_ON
	char szTemp[1024];
	sprintf(szTemp, "WRITE PAGE %d ", length);
	WRITE_LOG(szTemp);
	sprintf(szTemp, "WB CORRECT %d", 1);
	WRITE_LOG(szTemp);
#endif

	return 1;
}
/*
 * Read `length` sectors starting at sector_nb through the WOM-coded
 * page mapping (two physical pages may back one logical page).
 *
 * Two passes: the first validates mappings and sums the real number of
 * sectors to read (both indexes of each lpn); the second issues the
 * SSD_PAGE_READ operations.
 *
 * Returns the status of the last page read, or FAIL on range/mapping
 * errors.
 */
int _FTL_READ(int32_t sector_nb, unsigned int length)
{
#ifdef FTL_DEBUG
	printf("[%s] Start: sector_nb %d length %u\n",__FUNCTION__, sector_nb, length);
#endif

	if(sector_nb + length > SECTOR_NB){
		printf("Error[FTL_READ] Exceed Sector number\n");
		return FAIL;
	}

	int32_t lpn;
	int32_t *ppn;
	int32_t lba = sector_nb;
	unsigned int remain = length;
	unsigned int left_skip = sector_nb % SECTORS_PER_PAGE;
	unsigned int right_skip;
	unsigned int read_sects;
	unsigned int i, real_read_length = 0;
	unsigned int ret = FAIL;
	int read_page_nb = 0;
	int io_page_nb;

	/* Pass 1: validate mappings and compute real_read_length (each
	   mapped index adds a full page's worth of sectors). */
	while(remain > 0){
		if(remain > SECTORS_PER_PAGE - left_skip){
			right_skip = 0;
		}
		else{
			right_skip = SECTORS_PER_PAGE - left_skip - remain;
		}
		read_sects = SECTORS_PER_PAGE - left_skip - right_skip;

		lpn = lba / (int32_t)SECTORS_PER_PAGE;
		ppn = GET_MAPPING_INFO(lpn);
		/* Primary index must be mapped; reading unwritten data fails. */
		if(ppn[0] == -1){
			return FAIL;
		}
		real_read_length += SECTORS_PER_PAGE;
		if(ppn[1] != -1){
			real_read_length += SECTORS_PER_PAGE;
		}

		lba += read_sects;
		remain -= read_sects;
		left_skip = 0;
	}

	io_alloc_overhead = ALLOC_IO_REQUEST(sector_nb, real_read_length, READ, &io_page_nb);

	/* Pass 2: issue the reads. */
	remain = length;
	lba = sector_nb;
	left_skip = sector_nb % SECTORS_PER_PAGE;

	while(remain > 0){
		if(remain > SECTORS_PER_PAGE - left_skip){
			right_skip = 0;
		}
		else{
			right_skip = SECTORS_PER_PAGE - left_skip - remain;
		}
		read_sects = SECTORS_PER_PAGE - left_skip - right_skip;

		lpn = lba / (int32_t)SECTORS_PER_PAGE;
		ppn = GET_MAPPING_INFO(lpn);

		for(i=0; i<2; i++){
			/* NOTE(review): when i==0 and ppn[0]==-1 this falls through
			   to SSD_PAGE_READ with -1; pass 1 should make that
			   unreachable — confirm. */
			if(ppn[i] == -1){
#ifdef FTL_DEBUG
				printf("Error[%s] No Mapping info\n",__FUNCTION__);
#endif
				/* its legal for 2nd index to be empty*/
				if(i==1)
					break;
			}
			ret = SSD_PAGE_READ(CALC_FLASH(ppn[i]), CALC_BLOCK(ppn[i]), CALC_PAGE(ppn[i]), read_page_nb, READ, io_page_nb);
			if(ret == FAIL){
				printf("Error[%s] %u page read fail \n", __FUNCTION__, ppn[i]);
			}
			read_page_nb++;
		}

		lba += read_sects;
		remain -= read_sects;
		left_skip = 0;
	}

	INCREASE_IO_REQUEST_SEQ_NB();

#ifdef MONITOR_ON
	char szTemp[1024];
	sprintf(szTemp, "READ PAGE %d ", length);
	WRITE_LOG(szTemp);
#endif

#ifdef FTL_DEBUG
	printf("[%s] Complete\n",__FUNCTION__);
#endif

	return ret;
}
/*
 * Merge a data block and its replacement blocks: copy all valid pages of
 * every replacement block, then of the original block, into new_pbn;
 * erase and recycle the source blocks and reset the rp chain.
 *
 * Returns the total number of pages copied, or -1 if the rp chain is
 * shorter than rp_count claims.
 *
 * NOTE(review): local `b_s_entry` is declared but never used.
 */
int MERGE_RP_BLOCKS(int32_t old_pbn, int32_t new_pbn)
{
	int i;
	int32_t rp_pbn;
	block_state_entry* b_s_entry;
	int copy_page_nb = 0;
	int total_copy_page_nb = 0;
	block_state_entry* temp_b_s_entry;
	block_state_entry* root_b_s_entry = GET_BLOCK_STATE_ENTRY(old_pbn);
	rp_block_entry* rp_b_entry = root_b_s_entry->rp_head;
	int n_rp_blocks = root_b_s_entry->rp_count;

	/* Copy The Valid pages of the replacement blocks to new block */
	for(i=0;i<n_rp_blocks;i++){
		if(rp_b_entry==NULL){
			printf("ERROR[%s] rp_b_entry has NULL pointer. \n", __FUNCTION__);
			return -1;
		}
		rp_pbn = rp_b_entry->pbn;
		copy_page_nb = COPY_VALID_PAGES(rp_pbn, new_pbn);
		total_copy_page_nb += copy_page_nb;

		/* Update the rp block table */
		temp_b_s_entry = GET_BLOCK_STATE_ENTRY(rp_pbn);
		temp_b_s_entry->rp_root_pbn = -1;

		/* Update Metadata */
		SSD_BLOCK_ERASE(CALC_FLASH(rp_pbn), CALC_BLOCK(rp_pbn));
		temp_b_s_entry->erase_count++;
		UPDATE_BLOCK_STATE(rp_pbn, EMPTY_BLOCK);
		INSERT_EMPTY_BLOCK(rp_pbn);

		rp_b_entry = rp_b_entry->next;
	}

	/* Update replacement block table (chains of length 1 or 2 only;
	   free the tail before the head so no freed memory is read) */
	if(n_rp_blocks == 1){
		free(root_b_s_entry->rp_head);
	}
	if(n_rp_blocks == 2){
		free(root_b_s_entry->rp_head->next);
		free(root_b_s_entry->rp_head);
	}
	root_b_s_entry->rp_head = NULL;
	root_b_s_entry->rp_count = 0;

	/* Copy The Valid pages of the original block to new block */
	copy_page_nb = COPY_VALID_PAGES(old_pbn, new_pbn);
	SSD_BLOCK_ERASE(CALC_FLASH(old_pbn), CALC_BLOCK(old_pbn));
	total_copy_page_nb += copy_page_nb;

	/* Update the original block Metadata */
	root_b_s_entry->erase_count++;
	UPDATE_BLOCK_STATE(old_pbn, EMPTY_BLOCK);
	INSERT_EMPTY_BLOCK(old_pbn);

	return total_copy_page_nb;
}
int _FTL_WRITE(int32_t sector_nb, unsigned int length) { #ifdef FTL_DEBUG printf("[%s] Start: sector_nb %d length %u\n",__FUNCTION__, sector_nb, length); #endif int io_page_nb; if(sector_nb + length > SECTOR_NB){ printf("Error[FTL_WRITE] Exceed Sector number\n"); return FAIL; } else{ io_alloc_overhead = ALLOC_IO_REQUEST(sector_nb, length, WRITE, &io_page_nb); } int32_t lba = sector_nb; int32_t lpn; int32_t new_ppn; unsigned int remain = length; unsigned int left_skip = sector_nb % SECTORS_PER_PAGE; unsigned int right_skip; unsigned int write_sects; unsigned int ret = FAIL; int write_page_nb=0; total_lpn_count += length / SECTORS_PER_PAGE; while(remain > 0){ if(remain > SECTORS_PER_PAGE - left_skip){ right_skip = 0; } else{ right_skip = SECTORS_PER_PAGE - left_skip - remain; } write_sects = SECTORS_PER_PAGE - left_skip - right_skip; ret = GET_NEW_PAGE(VICTIM_OVERALL, EMPTY_TABLE_ENTRY_NB, &new_ppn); if(ret == FAIL){ printf("ERROR[FTL_WRITE] Get new page fail \n"); return FAIL; } ret = SSD_PAGE_WRITE(CALC_FLASH(new_ppn), CALC_BLOCK(new_ppn), CALC_PAGE(new_ppn), write_page_nb, WRITE, io_page_nb); write_page_nb++; lpn = lba / (int32_t)SECTORS_PER_PAGE; UPDATE_OLD_PAGE_MAPPING(lpn); UPDATE_NEW_PAGE_MAPPING(lpn, new_ppn); #ifdef FTL_DEBUG if(ret == FAIL){ printf("Error[FTL_WRITE] %d page write fail \n", new_ppn); } #endif lba += write_sects; remain -= write_sects; left_skip = 0; } INCREASE_IO_REQUEST_SEQ_NB(); #ifdef GC_ON GC_CHECK(CALC_FLASH(new_ppn), CALC_BLOCK(new_ppn)); #endif #ifdef MONITOR_ON char szTemp[1024]; sprintf(szTemp, "WRITE PAGE %d ", length); WRITE_LOG(szTemp); sprintf(szTemp, "WB CORRECT %d", write_page_nb); WRITE_LOG(szTemp); #endif #ifdef FTL_DEBUG printf("[%s] Complete\n",__FUNCTION__); #endif return ret; }
/*
 * Object-based read: read `length` bytes at `offset` from the object
 * identified by object_id, simulating one SSD page read per I/O page.
 *
 * Returns the status of the last page read (SUCCESS/FAIL), or FAIL on
 * lookup/range errors.
 *
 * Fix: the page_by_offset failure path dereferenced current_page while it
 * was NULL (printed current_page->page_id) — a guaranteed NULL-pointer
 * dereference. The error message now reports the requested offset.
 */
int _FTL_OBJ_READ(object_id_t object_id, unsigned int offset, unsigned int length)
{
	stored_object *object;
	page_node *current_page;
	int io_page_nb;
	int curr_io_page_nb;
	unsigned int ret = FAIL;

	object = lookup_object(object_id);

	// file not found
	if (object == NULL)
		return FAIL;

	// object not big enough
	if (object->size < (offset + length))
		return FAIL;

	if(!(current_page = page_by_offset(object, offset)))
	{
		/* current_page is NULL here — report the offset, not a field of
		   the missing page. */
		printf("Error[%s] %u lookup page by offset failed \n", __FUNCTION__, offset);
		return FAIL;
	}

	// just calculate the overhead of allocating the request. io_page_nb will be the total number of pages we're gonna read
	io_alloc_overhead = ALLOC_IO_REQUEST(current_page->page_id * SECTORS_PER_PAGE, length, READ, &io_page_nb);

	for (curr_io_page_nb = 0; curr_io_page_nb < io_page_nb; curr_io_page_nb++)
	{
		// simulate the page read
		ret = SSD_PAGE_READ(CALC_FLASH(current_page->page_id), CALC_BLOCK(current_page->page_id), CALC_PAGE(current_page->page_id), curr_io_page_nb, READ, io_page_nb);

		// send a physical read action being done to the statistics gathering
		if (ret == SUCCESS)
		{
			FTL_STATISTICS_GATHERING(current_page->page_id, PHYSICAL_READ);
		}
#ifdef FTL_DEBUG
		if (ret == FAIL)
		{
			printf("Error[%s] %u page read fail \n", __FUNCTION__, current_page->page_id);
		}
#endif
		// get the next page
		current_page = current_page->next;
	}

	INCREASE_IO_REQUEST_SEQ_NB();

#ifdef MONITOR_ON
	char szTemp[1024];
	sprintf(szTemp, "READ PAGE %d ", length);
	WRITE_LOG(szTemp);
#endif

#ifdef FTL_DEBUG
	printf("[%s] Complete\n",__FUNCTION__);
#endif

	return ret;
}