int _FTL_OBJ_COPYBACK(int32_t source, int32_t destination) { page_node *source_p; source_p = lookup_page(source); // source_p can be NULL if the GC is working on some old pages that belonged to an object we deleted already if (source_p != NULL) { // invalidate the source page UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(source), CALC_BLOCK(source), CALC_PAGE(source), INVALID); // mark new page as valid and used UPDATE_NEW_PAGE_MAPPING_NO_LOGICAL(destination); // change the object's page mapping to the new page HASH_DEL(global_page_table, source_p); source_p->page_id = destination; HASH_ADD_INT(global_page_table, page_id, source_p); } #ifdef FTL_DEBUG else { printf("Warning[%s] %u copyback page not mapped to an object \n", __FUNCTION__, source); } #endif return SUCCESS; }
int UPDATE_NEW_PAGE_MAPPING(int32_t lpn, int32_t ppn, float ratio) { /* Update Page Mapping Table */ mapping_table[lpn] = ppn; LOG("Update lpn %d new ppn %d", lpn, ppn); /* Update Inverse Page Mapping Table */ UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(ppn), CALC_BLOCK(ppn), CALC_PAGE(ppn), VALID); UPDATE_INVERSE_BLOCK_MAPPING(CALC_FLASH(ppn), CALC_BLOCK(ppn), DATA_BLOCK); UPDATE_INVERSE_PAGE_MAPPING(ppn, NEW_PPN, lpn); return SUCCESS; }
/* Set the type of a physical block in the inverse block mapping table.
 * Reclassifying a block as EMPTY_BLOCK also clears the validity flag of
 * every page it contains, since an empty block holds no valid data. */
int UPDATE_INVERSE_BLOCK_MAPPING(unsigned int phy_flash_nb, unsigned int phy_block_nb, int type)
{
    inverse_block_mapping_entry *entry;

    entry = GET_INVERSE_BLOCK_MAPPING_ENTRY(phy_flash_nb, phy_block_nb);
    entry->type = type;

    if (type == EMPTY_BLOCK) {
        /* Reset per-page validity across the whole block. */
        int page;
        for (page = 0; page < PAGE_NB; page++) {
            UPDATE_INVERSE_BLOCK_VALIDITY(phy_flash_nb, phy_block_nb, page, 0);
        }
    }

    return SUCCESS;
}
int UPDATE_OLD_PAGE_MAPPING(int32_t lpn) { int32_t old_ppn; old_ppn = GET_MAPPING_INFO(lpn); if(old_ppn == -1){ #ifdef FTL_DEBUG //printf("[%s] New page \n",__FUNCTION__); #endif //LOG("Wrote new page for lpn %d (old_ppn %d)", lpn, old_ppn); return SUCCESS; } else{ LOG("Rewrote lpn %d (old_ppn %d)", lpn, old_ppn); UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(old_ppn), CALC_BLOCK(old_ppn), CALC_PAGE(old_ppn), INVALID); //LOG("invalidated old_ppn=%d", old_ppn); UPDATE_INVERSE_PAGE_MAPPING(old_ppn, INVALIDATE, lpn); } return SUCCESS; }
int UPDATE_OLD_PAGE_MAPPING(int32_t lpn, int index) { int32_t *old_ppn; old_ppn = GET_MAPPING_INFO(lpn); if(old_ppn[index] == -1) { #ifdef FTL_DEBUG printf("[%s] New page \n",__FUNCTION__); #endif LOG("Wrote new page for lpn %d index %d (old_ppn %d)", lpn, index, old_ppn[index]); return SUCCESS; } else { LOG("Rewrote lpn %d index %d (old_ppn %d)", lpn, index, old_ppn[index]); UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(old_ppn[index]), CALC_BLOCK(old_ppn[index]), CALC_PAGE(old_ppn[index]), INVALID, ratio_table[lpn][index]); UPDATE_INVERSE_PAGE_MAPPING(old_ppn[index], INVALIDATE, lpn, index); old_ppn[index] = -1; // this portion of the lpn is now empty } return SUCCESS; }
/* WOM-coded variant: map portion `index` of logical page `lpn` to physical
 * page `ppn`, written with capacity ratio `ratio`.
 * Tracks how many times the lpn has been (re)written and asserts the WOM
 * rewrite budget (at most MAX_WOM_REWRITES) is respected.
 * Returns SUCCESS. */
int UPDATE_NEW_PAGE_MAPPING(int32_t lpn, int32_t ppn, double ratio, int index) {
    /* Update Page Mapping Table */
    mapping_table[lpn][index] = ppn;
    /* update rewrites */
    assert(rewrites[lpn]<MAX_WOM_REWRITES);
    rewrites[lpn]++;
    // On the second write, the first write's ratio must leave room for a WOM
    // rewrite: ratio * WOM_FACTOR must not exceed 1 (within 10e-7 = 1e-6 slack).
    // NOTE(review): the guard tests ratio_table[lpn][0], the message prints
    // ratio_table[lpn][index] as "ratio old", and the assert tests `ratio` —
    // these three look inconsistent; confirm which value is intended.
    if(rewrites[lpn]==2 && ratio_table[lpn][0] * WOM_FACTOR -1.0 >= 10e-7){
        printf("ERROR: rewriting too much. ratio old %lf new %lf. old * WOM_FACTOR %lf\n",ratio_table[lpn][index], ratio, ratio_table[lpn][0] * WOM_FACTOR);
        assert(ratio * WOM_FACTOR -1.0 < 10e-7);
    }
    ratio_table[lpn][index] = ratio;
    //LOG("Update lpn %d index %d new ppn %d", lpn, index, ppn);
    /* Update Inverse Page Mapping Table */
    UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(ppn), CALC_BLOCK(ppn), CALC_PAGE(ppn), VALID, ratio);
    UPDATE_INVERSE_BLOCK_MAPPING(CALC_FLASH(ppn), CALC_BLOCK(ppn), DATA_BLOCK);
    // The inverse page mapping is only recorded on the first write; a WOM
    // rewrite keeps the original association.
    if(rewrites[lpn]==1)
        UPDATE_INVERSE_PAGE_MAPPING(ppn, NEW_PPN, lpn, index);
    return SUCCESS;
}
/* Tear down a stored object: unlink it from the objects hashtable,
 * invalidate every physical page it owns, and release all memory.
 *
 * The object may not be in the hashtable yet — this path also runs as
 * cleanup when create_object fails. Calling HASH_DEL on a node that was
 * never added would corrupt the whole table, so membership is checked via
 * hh.tbl before each deletion. */
int remove_object(stored_object *object)
{
    page_node *page;
    page_node *done;

    if (object->hh.tbl != NULL)
        HASH_DEL(objects_table, object);

    page = object->pages;
    while (page != NULL) {
        /* Invalidate the physical page backing this node and update its mapping. */
        UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(page->page_id), CALC_BLOCK(page->page_id), CALC_PAGE(page->page_id), INVALID);
#ifdef GC_ON
        // should we really perform GC for every page? we know we are invalidating a lot of them now...
        GC_CHECK(CALC_FLASH(page->page_id), CALC_BLOCK(page->page_id), true);
#endif
        /* Advance before freeing the current node. */
        done = page;
        page = page->next;
        if (done->hh.tbl != NULL)
            HASH_DEL(global_page_table, done);
        free(done);
    }

    /* Finally release the object itself. */
    free(object);
    return SUCCESS;
}
/* Write `length` bytes at byte `offset` into the object `object_id`.
 * Each affected logical page gets a fresh physical page: existing pages are
 * re-keyed to the new physical location (old page invalidated), and writes
 * past the end of the object append new page nodes.
 * Returns FAIL if the object does not exist, page allocation fails, or a
 * freshly allocated page is unexpectedly already mapped; otherwise returns
 * the result of the last SSD_PAGE_WRITE. */
int _FTL_OBJ_WRITE(object_id_t object_id, unsigned int offset, unsigned int length)
{
    stored_object *object;
    page_node *current_page = NULL,*temp_page;
    uint32_t page_id;
    int io_page_nb;
    int curr_io_page_nb;
    unsigned int ret = FAIL;

    object = lookup_object(object_id);

    // file not found
    if (object == NULL)
        return FAIL;

    // calculate the overhead of allocating the request. io_page_nb will be the total number of pages we're gonna write
    io_alloc_overhead = ALLOC_IO_REQUEST(offset, length, WRITE, &io_page_nb);

    // if the offset is past the current size of the stored_object we need to append new pages until we can start writing
    // NOTE(review): loop termination assumes add_page() grows object->size — confirm.
    while (offset > object->size) {
        if (GET_NEW_PAGE(VICTIM_OVERALL, EMPTY_TABLE_ENTRY_NB, &page_id) == FAIL) {
            // not enough memory presumably
            printf("ERROR[FTL_WRITE] Get new page fail \n");
            return FAIL;
        }
        if(!add_page(object, page_id))
            return FAIL;
        // mark new page as valid and used
        UPDATE_NEW_PAGE_MAPPING_NO_LOGICAL(page_id);
    }

    for (curr_io_page_nb = 0; curr_io_page_nb < io_page_nb; curr_io_page_nb++) {
        // if this is the first iteration we need to find the page by offset, otherwise we can go with the page chain
        if (current_page == NULL)
            current_page = page_by_offset(object, offset);
        else
            current_page = current_page->next;

        // get the pge we'll be writing to
        if (GET_NEW_PAGE(VICTIM_OVERALL, EMPTY_TABLE_ENTRY_NB, &page_id) == FAIL) {
            printf("ERROR[FTL_WRITE] Get new page fail \n");
            return FAIL;
        }
        // a freshly allocated physical page must not already be mapped to any object
        if((temp_page=lookup_page(page_id))) {
            printf("ERROR[FTL_WRITE] Object %lu already contains page %d\n",temp_page->object_id,page_id);
            return FAIL;
        }

        // mark new page as valid and used
        UPDATE_NEW_PAGE_MAPPING_NO_LOGICAL(page_id);

        if (current_page == NULL) // writing at the end of the object and need to allocate more space for it
        {
            current_page = add_page(object, page_id);
            if(!current_page)
                return FAIL;
        }
        else // writing over parts of the object
        {
            // invalidate the old physical page and replace the page_node's page.
            // page_id is the hash key, so the node is removed and re-added.
            UPDATE_INVERSE_BLOCK_VALIDITY(CALC_FLASH(current_page->page_id), CALC_BLOCK(current_page->page_id), CALC_PAGE(current_page->page_id), INVALID);
            UPDATE_INVERSE_PAGE_MAPPING(current_page->page_id, -1);
            HASH_DEL(global_page_table, current_page);
            current_page->page_id = page_id;
            HASH_ADD_INT(global_page_table, page_id, current_page);
        }

#ifdef GC_ON
        // must improve this because it is very possible that we will do multiple GCs on the same flash chip and block
        // probably gonna add an array to hold the unique ones and in the end GC all of them
        GC_CHECK(CALC_FLASH(current_page->page_id), CALC_BLOCK(current_page->page_id), false);
#endif

        ret = SSD_PAGE_WRITE(CALC_FLASH(page_id), CALC_BLOCK(page_id), CALC_PAGE(page_id), curr_io_page_nb, WRITE, io_page_nb);

        // send a physical write action being done to the statistics gathering
        if (ret == SUCCESS) {
            FTL_STATISTICS_GATHERING(page_id , PHYSICAL_WRITE);
        }
#ifdef FTL_DEBUG
        if (ret == FAIL) {
            printf("Error[FTL_WRITE] %d page write fail \n", page_id);
        }
#endif
        // page_node *page;
        // printf("Object page map:{");
        // for(page=object->pages; page; page=page->next)
        //     printf("%d->",page->page_id);
        // printf("}\n");
    }

    INCREASE_IO_REQUEST_SEQ_NB();

#ifdef MONITOR_ON
    char szTemp[1024];
    sprintf(szTemp, "WRITE PAGE %d ", length);
    WRITE_LOG(szTemp);
    sprintf(szTemp, "WB CORRECT %d", curr_io_page_nb);
    WRITE_LOG(szTemp);
#endif

#ifdef FTL_DEBUG
    printf("[%s] Complete\n",__FUNCTION__);
#endif

    return ret;
}