// Return the NAND row address for the next write operation on this bank and
// advance the per-bank write pointer (g_misc_meta[bank].g_target_row) by one.
//
// The write pointer walks the bank strictly sequentially, page by page.
// Whenever it crosses into a new vblock (page_offset == 0), factory-marked
// bad vblocks are skipped. This example FTL performs no garbage collection,
// so once the last vblock is consumed the drive halts permanently.
static UINT32 get_free_page(UINT32 const bank)
{
    UINT32 row;
    UINT32 vblk_offset, page_offset;

    row = g_misc_meta[bank].g_target_row;
    vblk_offset = row / PAGES_PER_VBLK;
    page_offset = row % PAGES_PER_VBLK;

    if (page_offset == 0) // We are going to write to a new vblock.
    {
        // Skip bad vblocks. BUGFIX: the bounds check must come first so that
        // is_bad_block() is never called with an out-of-range vblk offset
        // (the original evaluated is_bad_block() before checking the bound).
        while (vblk_offset < VBLKS_PER_BANK && is_bad_block(bank, vblk_offset))
        {
            vblk_offset++;
        }
    }

    if (vblk_offset >= VBLKS_PER_BANK)
    {
        // Free vblocks are exhausted. Since this example FTL does not do
        // garbage collection, no more data can be written to this SSD.
        // The SSD stops working now (LED on, spin forever).
        led(1);
        while (1);
    }

    row = vblk_offset * PAGES_PER_VBLK + page_offset;

    g_misc_meta[bank].g_target_row = row + 1;

    return row;
}
static void format(void) { // This function is called upon the very first power-up of the SSD. // This function does the low-level format (i.e. FTL level format) of SSD. // A typical FTL would create its mapping table and the list of free blocks. // However, this example does nothing more than erasing all the free blocks. // // This function may take a long time to complete. For example, erasing all the flash blocks can // take more than ten seconds depending on the total density. // In that case, the host will declare time-out error. (no response from SSD for a long time) // A suggested solution to this problem is: // When you power-up the SSD for the first time, connect the power cable but not the SATA cable. // At the end of this function, you can put a call to led(1) to indicate that the low level format // has been completed. When the LED is on, turn off the power, connect the SATA cable, and turn on // the power again. UINT32 vblk_offset, bank; for (vblk_offset = 1; vblk_offset < VBLKS_PER_BANK; vblk_offset++) { for (bank = 0; bank < NUM_BANKS; bank++) { if (is_bad_block(bank, vblk_offset)) continue; // You do not need to set the values of FCP_DMA_ADDR, FCP_DMA_CNT and FCP_COL for FC_ERASE. SETREG(FCP_CMD, FC_ERASE); SETREG(FCP_BANK, REAL_BANK(bank)); SETREG(FCP_OPTION, FO_P); SETREG(FCP_ROW_L(bank), vblk_offset * PAGES_PER_VBLK); SETREG(FCP_ROW_H(bank), vblk_offset * PAGES_PER_VBLK); // You should not issue a new command when Waiting Room is not empty. while ((GETREG(WR_STAT) & 0x00000001) != 0); // By writing any value to FCP_ISSUE, you put FC_ERASE into Waiting Room. // The value written to FCP_ISSUE does not have any meaning. SETREG(FCP_ISSUE, NULL); } } // In general, write_format_mark() should be called upon completion of low level format in order to prevent // format() from being called again. // However, since the tutorial FTL does not support power off recovery, // format() should be called every time. 
init_meta_data(); ftl_flush(); write_format_mark(); led(1); }
static void format(void) { UINT32 bank, vblock, vcount_val; ASSERT(NUM_MISC_META_SECT > 0); ASSERT(NUM_VCOUNT_SECT > 0); uart_printf("Total FTL DRAM metadata size: %d KB", DRAM_BYTES_OTHER / 1024); uart_printf("VBLKS_PER_BANK: %d", VBLKS_PER_BANK); uart_printf("LBLKS_PER_BANK: %d", NUM_LPAGES / PAGES_PER_BLK / NUM_BANKS); uart_printf("META_BLKS_PER_BANK: %d", META_BLKS_PER_BANK); //---------------------------------------- // initialize DRAM metadata //---------------------------------------- mem_set_dram(PAGE_MAP_ADDR, NULL, PAGE_MAP_BYTES); mem_set_dram(VCOUNT_ADDR, NULL, VCOUNT_BYTES); //---------------------------------------- // erase all blocks except vblock #0 //---------------------------------------- for (vblock = MISCBLK_VBN; vblock < VBLKS_PER_BANK; vblock++) { for (bank = 0; bank < NUM_BANKS; bank++) { vcount_val = VC_MAX; if (is_bad_block(bank, vblock) == FALSE) { nand_block_erase(bank, vblock); vcount_val = 0; } write_dram_16(VCOUNT_ADDR + ((bank * VBLKS_PER_BANK) + vblock) * sizeof(UINT16), vcount_val); } } //---------------------------------------- // initialize SRAM metadata //---------------------------------------- init_metadata_sram(); // flush metadata to NAND logging_pmap_table(); logging_misc_metadata(); write_format_mark(); led(1); uart_print("format complete"); }
//------------------------------------------------------------- // Victim selection policy: Greedy // // Select the block which contain minumum valid pages //------------------------------------------------------------- static UINT32 get_vt_vblock(UINT32 const bank) { ASSERT(bank < NUM_BANKS); UINT32 vblock; // search the block which has mininum valid pages vblock = mem_search_min_max(VCOUNT_ADDR + (bank * VBLKS_PER_BANK * sizeof(UINT16)), sizeof(UINT16), VBLKS_PER_BANK, MU_CMD_SEARCH_MIN_DRAM); ASSERT(is_bad_block(bank, vblock) == FALSE); ASSERT(vblock >= META_BLKS_PER_BANK && vblock < VBLKS_PER_BANK); ASSERT(get_vcount(bank, vblock) < (PAGES_PER_BLK - 1)); return vblock; }
void ftl_open(void) { sanity_check(); // STEP 1 - read scan lists from NAND flash scan_list_t* scan_list = (scan_list_t*) SCAN_LIST_ADDR; UINT32 bank; UINT32 bad_block, i , j ; // Since we are going to check the flash interrupt flags within this function, ftl_isr() should not be called. disable_irq(); flash_clear_irq(); // clear any flash interrupt flags that might have been set for (bank = 0; bank < NUM_BANKS; bank++) { //g_misc_meta[bank].g_merge_buff_sect = 0; SETREG(FCP_CMD, FC_COL_ROW_READ_OUT); // FC_COL_ROW_READ_OUT = sensing and data output SETREG(FCP_OPTION, FO_E); // scan list was written in 1-plane mode by install.exe, so there is no FO_P SETREG(FCP_DMA_ADDR, scan_list + bank); // target address should be DRAM or SRAM (see flash.h for rules) SETREG(FCP_DMA_CNT, SCAN_LIST_SIZE); // number of bytes for data output SETREG(FCP_COL, 0); SETREG(FCP_ROW_L(bank), SCAN_LIST_PAGE_OFFSET); // scan list was written to this position by install.exe SETREG(FCP_ROW_H(bank), SCAN_LIST_PAGE_OFFSET); // Tutorial FTL always uses the same row addresses for high chip and low chip flash_issue_cmd(bank, RETURN_ON_ISSUE); // Take a look at the source code of flash_issue_cmd() now. } // This while() statement waits the last issued command to be accepted. // If bit #0 of WR_STAT is one, a flash command is in the Waiting Room, because the target bank has not accepted it yet. while ((GETREG(WR_STAT) & 0x00000001) != 0); // Now, FC_COL_ROW_READ_OUT commands are accepted by all the banks. // Before checking whether scan lists are corrupted or not, we have to wait the completion of read operations. // This code shows how to wait for ALL the banks to become idle. while (GETREG(MON_CHABANKIDLE) != 0); // Now we can check the flash interrupt flags. for (bank = 0; bank < NUM_BANKS; bank++) { UINT32 num_entries = NULL; UINT32 result = OK; if (BSP_INTR(bank) & FIRQ_DATA_CORRUPT) { // Too many bits are corrupted so that they cannot be corrected by ECC. 
result = FAIL; } else { // Even though the scan list is not corrupt, we have to check whether its contents make sense. UINT32 i; num_entries = read_dram_16(&(scan_list[bank].num_entries)); if (num_entries > SCAN_LIST_ITEMS) { result = FAIL; // We cannot trust this scan list. Perhaps a software bug. } else { for (i = 0; i < num_entries; i++) { UINT16 entry = read_dram_16(&(scan_list[bank].list[i])); UINT16 pblk_offset = entry & 0x7FFF; if (pblk_offset == 0 || pblk_offset >= PBLKS_PER_BANK) { #if OPTION_REDUCED_CAPACITY == FALSE result = FAIL; // We cannot trust this scan list. Perhaps a software bug. #endif } else { // Bit position 15 of scan list entry is high-chip/low-chip flag. // Remove the flag in order to make is_bad_block() simple. write_dram_16(&(scan_list[bank].list[i]), pblk_offset); } } } } if (result == FAIL) { mem_set_dram(scan_list + bank, 0, SCAN_LIST_SIZE); g_misc_meta[bank].g_scan_list_entries = 0; } else { write_dram_16(&(scan_list[bank].num_entries), 0); g_misc_meta[bank].g_scan_list_entries = num_entries; } } // STEP 2 - If necessary, do low-level format // format() should be called after loading scan lists, because format() calls is_bad_block(). init_meta_data(); // save non bad block list for metadata block // block#0 : list, block#1 : misc meta // block#2 ~ map table meta and data for(i = 0 ;i < NUM_BANKS;i++){ bad_block = 2; for(j = 0 ;j < NUM_BANKS_MAX;j++){ while(is_bad_block(i, bad_block) && j < VBLKS_PER_BANK) { bad_block++; } g_bad_list[i][j] = bad_block++; } g_free_start[i] = g_bad_list[i][NUM_BANKS_MAX-1] + 1; } //if (check_format_mark() == FALSE) if( TRUE) { // When ftl_open() is called for the first time (i.e. the SSD is powered up the first time) // format() is called. format(); } else{ loading_misc_meta(); } //*Red// // STEP 3 - initialize sector mapping table pieces // The page mapping table is too large to fit in SRAM and DRAM. 
// gyuhwa // init_metadata(); // STEP 4 - initialize global variables that belong to FTL g_ftl_read_buf_id = 0; g_ftl_write_buf_id = 0; for (bank = 0; bank < NUM_BANKS; bank++) { g_misc_meta[bank].g_target_row = PAGES_PER_VBLK * (g_free_start[bank]); } flash_clear_irq(); // This example FTL can handle runtime bad block interrupts and read fail (uncorrectable bit errors) interrupts SETREG(INTR_MASK, FIRQ_DATA_CORRUPT | FIRQ_BADBLK_L | FIRQ_BADBLK_H); SETREG(FCONF_PAUSE, FIRQ_DATA_CORRUPT | FIRQ_BADBLK_L | FIRQ_BADBLK_H); enable_irq(); }
// Build the per-bank SRAM metadata after a format: free-block counts, the
// fixed misc-metadata block, the map blocks, one reserved GC block, and the
// first write pointer. Block assignment is strictly sequential from
// MISCBLK_VBN upward, skipping bad blocks, so the order of the sections
// below must not change.
static void init_metadata_sram(void)
{
    UINT32 bank;
    UINT32 vblock;
    UINT32 mapblk_lbn;

    //----------------------------------------
    // initialize misc. metadata
    //----------------------------------------
    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        // Usable blocks = total minus metadata blocks minus this bank's bad blocks.
        g_misc_meta[bank].free_blk_cnt = VBLKS_PER_BANK - META_BLKS_PER_BANK;
        g_misc_meta[bank].free_blk_cnt -= get_bad_blk_cnt(bank);
        // NOTE: vblock #0,1 don't use for user space.
        // VC_MAX pins a block so the greedy victim search never selects it.
        write_dram_16(VCOUNT_ADDR + ((bank * VBLKS_PER_BANK) + 0) * sizeof(UINT16), VC_MAX);
        write_dram_16(VCOUNT_ADDR + ((bank * VBLKS_PER_BANK) + 1) * sizeof(UINT16), VC_MAX);

        //----------------------------------------
        // assign misc. block
        //----------------------------------------
        // assumption: vblock #1 = fixed location.
        // Thus if vblock #1 is a bad block, it should be allocate another block.
        set_miscblk_vpn(bank, MISCBLK_VBN * PAGES_PER_BLK - 1);

        ASSERT(is_bad_block(bank, MISCBLK_VBN) == FALSE);

        vblock = MISCBLK_VBN;

        //----------------------------------------
        // assign map block
        //----------------------------------------
        // Take the next MAPBLKS_PER_BANK good blocks after the misc block.
        mapblk_lbn = 0;
        while (mapblk_lbn < MAPBLKS_PER_BANK)
        {
            vblock++;
            ASSERT(vblock < VBLKS_PER_BANK);
            if (is_bad_block(bank, vblock) == FALSE)
            {
                set_mapblk_vpn(bank, mapblk_lbn, vblock * PAGES_PER_BLK);
                write_dram_16(VCOUNT_ADDR + ((bank * VBLKS_PER_BANK) + vblock) * sizeof(UINT16), VC_MAX);
                mapblk_lbn++;
            }
        }
        //----------------------------------------
        // assign free block for gc
        //----------------------------------------
        // Loop until a good block is found; the final iteration leaves the
        // chosen block registered and pinned at VC_MAX.
        do
        {
            vblock++;
            // NOTE: free block should not be selected as a victim @ first GC
            write_dram_16(VCOUNT_ADDR + ((bank * VBLKS_PER_BANK) + vblock) * sizeof(UINT16), VC_MAX);
            // set free block
            set_gc_vblock(bank, vblock);

            ASSERT(vblock < VBLKS_PER_BANK);
        }while(is_bad_block(bank, vblock) == TRUE);
        //----------------------------------------
        // assign free vpn for first new write
        //----------------------------------------
        do
        {
            vblock++;
            // start saving new data from this (next) vblock onward
            set_new_write_vpn(bank, vblock * PAGES_PER_BLK);
            ASSERT(vblock < VBLKS_PER_BANK);
        }while(is_bad_block(bank, vblock) == TRUE);
    }
}
//------------------------------------------------------------ // if all blocks except one free block are full, // do garbage collection for making at least one free page //------------------------------------------------------------- static void garbage_collection(UINT32 const bank) { ASSERT(bank < NUM_BANKS); g_ftl_statistics[bank].gc_cnt++; UINT32 src_lpn; UINT32 vt_vblock; UINT32 free_vpn; UINT32 vcount; // valid page count in victim block UINT32 src_page; UINT32 gc_vblock; g_ftl_statistics[bank].gc_cnt++; vt_vblock = get_vt_vblock(bank); // get victim block vcount = get_vcount(bank, vt_vblock); gc_vblock = get_gc_vblock(bank); free_vpn = gc_vblock * PAGES_PER_BLK; /* uart_printf("garbage_collection bank %d, vblock %d",bank, vt_vblock); */ ASSERT(vt_vblock != gc_vblock); ASSERT(vt_vblock >= META_BLKS_PER_BANK && vt_vblock < VBLKS_PER_BANK); ASSERT(vcount < (PAGES_PER_BLK - 1)); ASSERT(get_vcount(bank, gc_vblock) == VC_MAX); ASSERT(!is_bad_block(bank, gc_vblock)); // 1. load p2l list from last page offset of victim block (4B x PAGES_PER_BLK) // fix minor bug nand_page_ptread(bank, vt_vblock, PAGES_PER_BLK - 1, 0, ((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR), FTL_BUF(bank), RETURN_WHEN_DONE); mem_copy(g_misc_meta[bank].lpn_list_of_cur_vblock, FTL_BUF(bank), sizeof(UINT32) * PAGES_PER_BLK); // 2. copy-back all valid pages to free space for (src_page = 0; src_page < (PAGES_PER_BLK - 1); src_page++) { // get lpn of victim block from a read lpn list src_lpn = get_lpn(bank, src_page); CHECK_VPAGE(get_vpn(src_lpn)); // determine whether the page is valid or not if (get_vpn(src_lpn) != ((vt_vblock * PAGES_PER_BLK) + src_page)) { // invalid page continue; } ASSERT(get_lpn(bank, src_page) != INVALID); CHECK_LPAGE(src_lpn); // if the page is valid, // then do copy-back op. 
to free space nand_page_copyback(bank, vt_vblock, src_page, free_vpn / PAGES_PER_BLK, free_vpn % PAGES_PER_BLK); ASSERT((free_vpn / PAGES_PER_BLK) == gc_vblock); // update metadata set_vpn(src_lpn, free_vpn); set_lpn(bank, (free_vpn % PAGES_PER_BLK), src_lpn); free_vpn++; } #if OPTION_ENABLE_ASSERT if (vcount == 0) { ASSERT(free_vpn == (gc_vblock * PAGES_PER_BLK)); } #endif // 3. erase victim block nand_block_erase(bank, vt_vblock); ASSERT((free_vpn % PAGES_PER_BLK) < (PAGES_PER_BLK - 2)); ASSERT((free_vpn % PAGES_PER_BLK == vcount)); /* uart_printf("gc page count : %d", vcount); */ // 4. update metadata set_vcount(bank, vt_vblock, VC_MAX); set_vcount(bank, gc_vblock, vcount); set_new_write_vpn(bank, free_vpn); // set a free page for new write set_gc_vblock(bank, vt_vblock); // next free block (reserve for GC) dec_full_blk_cnt(bank); // decrease full block count /* uart_print("garbage_collection end"); */ }
// Copy one 256-byte sector from the disk image into buf.
// If the image carries an error table and the sector's entry marks a bad
// block, set the global bad_relfile flag.
static void read_sector(di_t *img, int track, int sector, uint8_t *buf)
{
    // Hoist the LBA computation: the original recomputed
    // img->di.LBA(track, sector) for every use.
    int lba = img->di.LBA(track, sector);

    memmove(buf, img->image + lba * 256, 256);

    if (img->error_table && is_bad_block(img->error_table[lba])) {
        bad_relfile = true;
    }
}
// Build the per-bank SRAM metadata for the demand-mapping FTL: invalidate
// the cached mapping table (CMT) and global translation directory (GTD),
// then sequentially assign map blocks, one reserved GC block, and the first
// write pointer per bank, skipping bad blocks. Assignment order is
// significant and must not change.
static void init_metadata_sram(void)
{
    UINT32 bank;
    UINT32 vblock;
    UINT32 mapblk_lbn;
    UINT32 index;

    // Invalidate every cached-mapping-table entry.
    for(index = 0; index < CMT_SIZE; index++)
    {
        cmt[index].lpn = INVALID;
        cmt[index].sc = FALSE;
    }
    //----------------------------------------
    // initialize misc. metadata
    //----------------------------------------
    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        // Invalidate this bank's global translation directory.
        for(index = 0; index < GTD_SIZE_PER_BANK; index++)
        {
            gtd[bank][index] = INVALID;
        }
        uart_printf("bank %d bad blk cnt %d", bank, get_bad_blk_cnt(bank));

        // Usable blocks = total minus metadata blocks minus this bank's bad blocks.
        g_misc_meta[bank].free_blk_cnt = VBLKS_PER_BANK - META_BLKS_PER_BANK;
        g_misc_meta[bank].free_blk_cnt -= get_bad_blk_cnt(bank);
        // NOTE: vblock #0,1 don't use for user space.
        // VC_MAX pins a block so the greedy victim search never selects it.
        write_dram_16(VCOUNT_ADDR + ((bank * VBLKS_PER_BANK) + 0) * sizeof(UINT16), VC_MAX);
        write_dram_16(VCOUNT_ADDR + ((bank * VBLKS_PER_BANK) + 1) * sizeof(UINT16), VC_MAX);
        write_dram_16(VCOUNT_ADDR + ((bank * VBLKS_PER_BANK) + 2) * sizeof(UINT16), VC_MAX);

        vblock = 0;
        //----------------------------------------
        // assign map block
        //----------------------------------------
        // Take the first MAPBLKS_PER_BANK good blocks starting from vblock #1.
        mapblk_lbn = 0;
        while (mapblk_lbn < MAPBLKS_PER_BANK)
        {
            vblock++;
            ASSERT(vblock < VBLKS_PER_BANK);
            if (is_bad_block(bank, vblock) == FALSE)
            {
                map_blk[bank][mapblk_lbn] = vblock;
                write_dram_16(VCOUNT_ADDR + ((bank * VBLKS_PER_BANK) + vblock) * sizeof(UINT16), VC_MAX);
                mapblk_lbn++;
            }
        }
        // Map writes start at the first page of the first map block.
        set_new_map_write_vpn(bank, map_blk[bank][0] * PAGES_PER_BLK);
        //----------------------------------------
        // assign free block for gc
        //----------------------------------------
        // Loop until a good block is found; the final iteration leaves the
        // chosen block registered and pinned at VC_MAX.
        do
        {
            vblock++;
            // NOTE: free block should not be selected as a victim @ first GC
            write_dram_16(VCOUNT_ADDR + ((bank * VBLKS_PER_BANK) + vblock) * sizeof(UINT16), VC_MAX);
            // set free block
            set_gc_vblock(bank, vblock);

            ASSERT(vblock < VBLKS_PER_BANK);
        }while(is_bad_block(bank, vblock) == TRUE);
        //----------------------------------------
        // assign free vpn for first new write
        //----------------------------------------
        do
        {
            vblock++;
            // start saving new data from this (next) vblock onward
            set_new_write_vpn(bank, vblock * PAGES_PER_BLK);
            ASSERT(vblock < VBLKS_PER_BANK);
        }while(is_bad_block(bank, vblock) == TRUE);
    }
}