void nand_page_program(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const buf_addr, UINT32 const issue_flag)
{
    #if PrintStats
    uart_print_level_1("FP "); uart_print_level_1_int(SECTORS_PER_PAGE); uart_print_level_1("\r\n");
    #endif
    totSecWrites += SECTORS_PER_PAGE;

    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    row = (vblock * PAGES_PER_BLK) + page_num;

    uart_print("nand_page_program bank="); uart_print_int(bank);
    uart_print(", vblock="); uart_print_int(vblock);
    uart_print(", page="); uart_print_int(page_num); uart_print("\r\n");
    uart_print("Writing row="); uart_print_int(row); uart_print("\r\n");

    SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
    SETREG(FCP_DMA_ADDR, buf_addr);
    SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
    SETREG(FCP_COL, 0);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    //flash_issue_cmd(bank, RETURN_WHEN_DONE);
    //flash_issue_cmd(bank, RETURN_ON_ACCEPT);
    //flash_issue_cmd(bank, RETURN_ON_ISSUE);
    flash_issue_cmd(bank, issue_flag);
}
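// Usage sketch (not part of the original source): because issue_flag is passed straight
// through to flash_issue_cmd(), a caller can pipeline one full-page program per bank with
// RETURN_ON_ISSUE and wait once at the end, using the same Waiting Room / MON_CHABANKIDLE
// polling pattern that ftl_open() and format() use. The per-bank buffer layout
// (buf_addr + bank * BYTES_PER_PAGE) mirrors the MERGE_BUFFER_ADDR layout and is an assumption.
static void program_one_page_per_bank(UINT32 const vblock, UINT32 const page_num, UINT32 const buf_addr)
{
    UINT32 bank;
    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        // issue asynchronously; the bank keeps programming while we move to the next one
        nand_page_program(bank, vblock, page_num, buf_addr + bank * BYTES_PER_PAGE, RETURN_ON_ISSUE);
    }
    while ((GETREG(WR_STAT) & 0x00000001) != 0);  // last command has left the Waiting Room
    while (GETREG(MON_CHABANKIDLE) != 0);         // all banks have finished programming
}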
void flush_smt_piece(UINT32 idx)
{
    UINT32 bank, row, block;
    UINT32 dest;

    bank  = smt_dram_map[idx] / NUM_BANKS_MAX;
    block = smt_dram_map[idx] % NUM_BANKS_MAX;

    if ((smt_bit_map[bank] & (1 << block)) != 0)
    {
        // smt piece data
        if (g_misc_meta[bank].smt_pieces[block] >= SMT_LIMIT - 1)
        {
            // erase
            nand_block_erase(bank, g_bad_list[bank][block]);
        }
        // update and flash
        g_misc_meta[bank].smt_pieces[block] = (g_misc_meta[bank].smt_pieces[block] + 1) % SMT_LIMIT;
        row = g_misc_meta[bank].smt_pieces[block] * SMT_INC_SIZE + (PAGES_PER_VBLK * g_bad_list[bank][block]);

        // flash map data to nand
        SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
        SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
        SETREG(FCP_COL, 0);
        SETREG(FCP_ROW_L(bank), row);
        SETREG(FCP_ROW_H(bank), row);
        dest = SMT_ADDR + (idx * SMT_PIECE_BYTES);
        SETREG(FCP_DMA_ADDR, dest);
        SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);

        // busy-wait until the target bank is idle before issuing the program
        while (_BSP_FSM(bank) != BANK_IDLE)
        {
            bank = bank;
        }
        flash_issue_cmd(bank, RETURN_WHEN_DONE);
    }
    smt_piece_map[smt_dram_map[idx]] = (UINT32)-1;
}
// synchronous one full page read
void nand_page_read(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const buf_addr)
{
    #if PrintStats
    uart_print_level_1("FR "); uart_print_level_1_int(SECTORS_PER_PAGE); uart_print_level_1("\r\n");
    #endif

    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    row = (vblock * PAGES_PER_BLK) + page_num;  // row means ppn

    uart_print("nand_page_read bank="); uart_print_int(bank);
    uart_print(", vblock="); uart_print_int(vblock);
    uart_print(", page="); uart_print_int(page_num);
    uart_print(", dst_addr="); uart_print_int(buf_addr); uart_print("\r\n");
    uart_print("Reading row="); uart_print_int(row); uart_print("\r\n");

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_OPTION, FO_P | FO_E);
    SETREG(FCP_DMA_ADDR, buf_addr);
    SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
    SETREG(FCP_COL, 0);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    flash_issue_cmd(bank, RETURN_WHEN_DONE);
}
// General purpose page read function
// synchronous page read (for reading metadata)
// asynchronous page read (left/right hole async read user data)
void nand_page_ptread(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const sect_offset, UINT32 const num_sectors, UINT32 const buf_addr, UINT32 const issue_flag)
{
    UINT32 row;

    /*
    uart_printf("--ptread: bank %d vblock %d page_num %d sect_offset %d, num_sectors %d",
                bank, vblock, page_num, sect_offset, num_sectors);
    */

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    // row means ppn
    row = (vblock * PAGES_PER_BLK) + page_num;

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_OPTION, FO_P | FO_E);
    SETREG(FCP_DMA_ADDR, buf_addr);
    SETREG(FCP_DMA_CNT, num_sectors * BYTES_PER_SECTOR);
    SETREG(FCP_COL, sect_offset);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    // issue_flag: RETURN_ON_ISSUE, RETURN_WHEN_DONE, RETURN_ON_ACCEPT
    flash_issue_cmd(bank, issue_flag);
}
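// Usage sketch (not part of the original source): a synchronous whole-page metadata read
// into FTL_BUF_ADDR, assuming sect_offset = 0 and num_sectors = SECTORS_PER_PAGE.
// Passing RETURN_WHEN_DONE makes the call blocking, so the buffer is valid as soon as
// this wrapper returns; an asynchronous caller would pass RETURN_ON_ISSUE instead and
// wait for the bank itself before touching the buffer.
static void read_meta_page(UINT32 const bank, UINT32 const vblock, UINT32 const page_num)
{
    nand_page_ptread(bank, vblock, page_num, 0, SECTORS_PER_PAGE, FTL_BUF_ADDR, RETURN_WHEN_DONE);
}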
void flush_smt_piece(UINT32 idx)
{
    UINT32 bank, row, block;

    bank  = smt_dram_map[idx] / NUM_BANKS_MAX;
    block = smt_dram_map[idx] % NUM_BANKS_MAX;

    if ((smt_bit_map[bank] & (1 << block)) != 0)
    {
        // smt piece data
        if (g_misc_meta[bank].smt_pieces[block] >= SMT_LIMIT - 1)
        {
            // erase
            nand_block_erase(bank, g_bad_list[bank][block]);
        }
        // update and flash
        g_misc_meta[bank].smt_pieces[block] = (g_misc_meta[bank].smt_pieces[block] + SMT_INC_SIZE) % SMT_LIMIT;
        row = (g_misc_meta[bank].smt_pieces[block] * SMT_PIECE_BYTES);
        row = ((row + BYTES_PER_PAGE - 1) / BYTES_PER_PAGE) + (PAGES_PER_VBLK * g_bad_list[bank][block]);

        // flash map data to nand
        SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
        SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
        SETREG(FCP_DMA_ADDR, SMT_ADDR + (g_smt_victim * SMT_PIECE_BYTES));
        SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);
        SETREG(FCP_COL, 0);
        SETREG(FCP_ROW_L(bank), row);
        SETREG(FCP_ROW_H(bank), row);

        flash_issue_cmd(bank, RETURN_ON_ISSUE);
    }
    smt_dram_bit[bank] ^= (1 << block);
}
void flush_merge_buffer()
{
    UINT32 new_row, new_psn;
    UINT32 new_bank = g_target_bank;
    int i;

    if (g_target_sect != 0)
    {
        // get free page from target bank
        new_row = get_free_page(new_bank);

        // set registers to write the data to nand flash memory
        SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
        SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
        // Address is the merge buffer address which contains the actual data
        SETREG(FCP_DMA_ADDR, MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE);
        SETREG(FCP_DMA_CNT, BYTES_PER_SECTOR * g_target_sect);
        SETREG(FCP_COL, 0);
        SETREG(FCP_ROW_L(new_bank), new_row);
        SETREG(FCP_ROW_H(new_bank), new_row);

        flash_issue_cmd(new_bank, RETURN_ON_ISSUE);

        // for lba -> psn mapping information
        new_psn = new_bank * SECTORS_PER_BANK + new_row * SECTORS_PER_PAGE;

        // Update mapping information
        for (i = 0; i < g_target_sect; i++)
        {
            set_psn(g_merge_buffer_lsn[i], new_psn + i);
        }
    }
}
void nand_page_ptprogram_from_host(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const sect_offset, UINT32 const num_sectors)
{
    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    row = (vblock * PAGES_PER_BLK) + page_num;

    SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
    #if OPTION_FTL_TEST == TRUE
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
    #else
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_SATA_W);
    #endif
    SETREG(FCP_DMA_ADDR, WR_BUF_PTR(g_ftl_write_buf_id));
    SETREG(FCP_DMA_CNT, num_sectors * BYTES_PER_SECTOR);
    SETREG(FCP_COL, sect_offset);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    flash_issue_cmd(bank, RETURN_ON_ISSUE);

    g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
}
/* g_smt_target, g_smt_victim */
void load_smt_piece(UINT32 idx)
{
    UINT32 bank, row, block;
    UINT32 dest;

    bank  = idx / NUM_BANKS_MAX;
    block = idx % NUM_BANKS_MAX;
    row = g_misc_meta[bank].smt_pieces[block] * SMT_INC_SIZE + (PAGES_PER_VBLK * g_bad_list[bank][block]);

    if (g_smt_target == NUM_BANKS_MAX || g_smt_full == 1)
    {
        g_smt_full = 1;
        g_smt_victim = (g_smt_victim + 1) % NUM_BANKS_MAX;
        flush_smt_piece(g_smt_victim);
        g_smt_target = (g_smt_target + 1) % NUM_BANKS_MAX;
    }

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);
    SETREG(FCP_COL, 0);
    dest = SMT_ADDR + (g_smt_target * SMT_PIECE_BYTES);
    SETREG(FCP_DMA_ADDR, dest);
    SETREG(FCP_OPTION, FO_P | FO_E);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    flash_issue_cmd(bank, RETURN_WHEN_DONE);

    smt_dram_map[g_smt_target] = idx;
    smt_piece_map[idx] = g_smt_target;
    smt_bit_map[bank] &= ~(1 << block);

    if ((g_misc_meta[bank].smt_init & (1 << block)) == 0)
    {
        mem_set_dram(dest, 0x00, SMT_PIECE_BYTES);
        g_misc_meta[bank].smt_init |= (1 << block);
    }
    g_smt_target++;
}
void nand_page_read_to_host(UINT32 const bank, UINT32 const vblock, UINT32 const page_num)
{
    #if PrintStats
    uart_print_level_1("FR "); uart_print_level_1_int(SECTORS_PER_PAGE); uart_print_level_1("\r\n");
    #endif

    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    row = (vblock * PAGES_PER_BLK) + page_num;

    uart_print("nand_page_read_to_host bank="); uart_print_int(bank);
    uart_print(", vblock="); uart_print_int(vblock);
    uart_print(", page="); uart_print_int(page_num); uart_print("\r\n");
    uart_print("Reading row="); uart_print_int(row); uart_print("\r\n");
    uart_print("read flash: bank="); uart_print_int(bank);
    uart_print(", page="); uart_print_int(row); uart_print("\r\n");

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_DMA_ADDR, RD_BUF_PTR(g_ftl_read_buf_id));
    SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
    SETREG(FCP_COL, 0);
    #if OPTION_FTL_TEST == TRUE
    SETREG(FCP_OPTION, FO_P | FO_E);
    #else
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_SATA_R);
    #endif
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    g_ftl_read_buf_id = (g_ftl_read_buf_id + 1) % NUM_RD_BUFFERS;

    #if OPTION_FTL_TEST == FALSE
    {
        int count = 0;
        while (1)
        {
            count++;
            if (count > 100000)
            {
                uart_print_level_1("Warning1 in nand_page_read_to_host\r\n");
                count = 0;
            }
            UINT32 sata_id = GETREG(SATA_RBUF_PTR);
            if (g_ftl_read_buf_id != sata_id)
                break;
        }
    }
    #endif

    flash_issue_cmd(bank, RETURN_ON_ISSUE);
}
static void format(void)
{
    // This function is called upon the very first power-up of the SSD.
    // It does the low-level (i.e. FTL-level) format of the SSD.
    // A typical FTL would create its mapping table and the list of free blocks.
    // However, this example does nothing more than erasing all the free blocks.
    //
    // This function may take a long time to complete. For example, erasing all the flash blocks can
    // take more than ten seconds depending on the total density.
    // In that case, the host will declare a time-out error (no response from the SSD for a long time).
    // A suggested workaround for this problem:
    // When you power up the SSD for the first time, connect the power cable but not the SATA cable.
    // At the end of this function, you can put a call to led(1) to indicate that the low-level format
    // has been completed. When the LED is on, turn off the power, connect the SATA cable, and turn on
    // the power again.

    UINT32 vblk_offset, bank;

    for (vblk_offset = 1; vblk_offset < VBLKS_PER_BANK; vblk_offset++)
    {
        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            if (is_bad_block(bank, vblk_offset))
                continue;

            // You do not need to set the values of FCP_DMA_ADDR, FCP_DMA_CNT and FCP_COL for FC_ERASE.
            SETREG(FCP_CMD, FC_ERASE);
            SETREG(FCP_BANK, REAL_BANK(bank));
            SETREG(FCP_OPTION, FO_P);
            SETREG(FCP_ROW_L(bank), vblk_offset * PAGES_PER_VBLK);
            SETREG(FCP_ROW_H(bank), vblk_offset * PAGES_PER_VBLK);

            // You should not issue a new command when the Waiting Room is not empty.
            while ((GETREG(WR_STAT) & 0x00000001) != 0);

            // By writing any value to FCP_ISSUE, you put FC_ERASE into the Waiting Room.
            // The value written to FCP_ISSUE does not have any meaning.
            SETREG(FCP_ISSUE, NULL);
        }
    }

    // In general, write_format_mark() should be called upon completion of the low-level format in order
    // to prevent format() from being called again.
    // However, since the tutorial FTL does not support power-off recovery,
    // format() should be called every time.
    init_meta_data();
    ftl_flush();
    write_format_mark();
    led(1);
}
static BOOL32 check_format_mark(void)
{
    // This function reads a flash page from (bank #0, block #0) in order to check whether the SSD is formatted or not.

    #ifdef __GNUC__
    extern UINT32 size_of_firmware_image;
    UINT32 firmware_image_pages = (((UINT32) (&size_of_firmware_image)) + BYTES_PER_FW_PAGE - 1) / BYTES_PER_FW_PAGE;
    #else
    extern UINT32 Image$$ER_CODE$$RO$$Length;
    extern UINT32 Image$$ER_RW$$RW$$Length;
    UINT32 firmware_image_bytes = ((UINT32) &Image$$ER_CODE$$RO$$Length) + ((UINT32) &Image$$ER_RW$$RW$$Length);
    UINT32 firmware_image_pages = (firmware_image_bytes + BYTES_PER_FW_PAGE - 1) / BYTES_PER_FW_PAGE;
    #endif

    UINT32 format_mark_page_offset = FW_PAGE_OFFSET + firmware_image_pages;
    UINT32 temp;

    flash_clear_irq();  // clear any flash interrupt flags that might have been set

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_BANK, REAL_BANK(0));
    SETREG(FCP_OPTION, FO_E);
    SETREG(FCP_DMA_ADDR, FTL_BUF_ADDR);  // flash -> DRAM
    SETREG(FCP_DMA_CNT, BYTES_PER_SECTOR);
    SETREG(FCP_COL, 0);
    SETREG(FCP_ROW_L(0), format_mark_page_offset);
    SETREG(FCP_ROW_H(0), format_mark_page_offset);

    // At this point, we do not have to check Waiting Room status before issuing a command,
    // because scan list loading has been completed just before this function is called.
    SETREG(FCP_ISSUE, NULL);

    // wait for the FC_COL_ROW_READ_OUT command to be accepted by bank #0
    while ((GETREG(WR_STAT) & 0x00000001) != 0);

    // wait until bank #0 finishes the read operation
    while (BSP_FSM(0) != BANK_IDLE);

    // Now that the read operation is complete, we can check the interrupt flags.
    temp = BSP_INTR(0) & FIRQ_ALL_FF;

    // clear interrupt flags
    CLR_BSP_INTR(0, 0xFF);

    if (temp != 0)
    {
        return FALSE;  // the page contains all-0xFF (the format mark does not exist)
    }
    else
    {
        return TRUE;   // the page contains something other than 0xFF (it must be the format mark)
    }
}
void nand_block_erase_sync(UINT32 const bank, UINT32 const vblock)
{
    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);

    SETREG(FCP_CMD, FC_ERASE);
    SETREG(FCP_BANK, REAL_BANK(bank));
    SETREG(FCP_OPTION, FO_P);  // if OPTION_2_PLANE == 0, FO_P will be zero.
    SETREG(FCP_ROW_H(bank), vblock * PAGES_PER_VBLK);
    SETREG(FCP_ROW_L(bank), vblock * PAGES_PER_VBLK);

    flash_issue_cmd(bank, RETURN_WHEN_DONE);
}
void flush_smt_piece(UINT32 idx)
{
    UINT32 bank, row, block;
    UINT32 dest;
    UINT32 pblock, i;
    UINT32 new_row, new_block;

    bank   = smt_dram_map[idx] / SMT_BANK_NUM;
    block  = smt_dram_map[idx] % SMT_BANK_NUM;
    pblock = block / SMT_BLOCK;

    if ((smt_bit_map[bank][block / NUM_BANKS_MAX] & (1 << (block % NUM_BANKS_MAX))) != 0)
    {
        // update and flash
        if (g_misc_meta[bank].smt_row[pblock] >= SMT_LIMIT)
        {
            // erase: compact the live pieces into the spare block, then swap blocks
            for (i = 0; i < (SMT_BANK_NUM + SMT_BLOCK - 1) / SMT_BLOCK; i++)
            {
                dest = bank * SMT_BANK_NUM + SMT_BLOCK * pblock + i;
                new_row = smt_pos[dest];
                nand_page_copyback(bank, g_bad_list[bank][pblock], new_row * SMT_INC_SIZE,
                                   g_bad_list[bank][SMT_BLOCK], i * SMT_INC_SIZE);
                smt_pos[dest] = i;
            }
            g_misc_meta[bank].smt_row[pblock] = i;
            row = i;
            nand_block_erase(bank, g_bad_list[bank][pblock]);
            new_block = g_bad_list[bank][pblock];
            g_bad_list[bank][pblock] = g_bad_list[bank][SMT_BLOCK];
            g_bad_list[bank][SMT_BLOCK] = new_block;
        }
        else
        {
            row = g_misc_meta[bank].smt_row[pblock]++;
        }
        smt_pos[smt_dram_map[idx]] = row;
        row = row * SMT_INC_SIZE + (PAGES_PER_VBLK * g_bad_list[bank][pblock]);

        // flash map data to nand
        SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
        SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
        SETREG(FCP_COL, 0);
        SETREG(FCP_ROW_L(bank), row);
        SETREG(FCP_ROW_H(bank), row);
        dest = SMT_ADDR + (idx * SMT_PIECE_BYTES);
        SETREG(FCP_DMA_ADDR, dest);
        SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);

        //flash_issue_cmd(bank, RETURN_WHEN_DONE);
        flash_issue_cmd(bank, RETURN_ON_ISSUE);
        g_bank_to_wait = bank;
    }
    smt_piece_map[smt_dram_map[idx]] = (UINT32)-1;
}
void nand_block_erase(UINT32 const bank, UINT32 const vblock)
{
    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);

    SETREG(FCP_CMD, FC_ERASE);
    SETREG(FCP_BANK, REAL_BANK(bank));
    SETREG(FCP_OPTION, FO_P);  // if OPTION_2_PLANE == 0, FO_P will be zero.
    SETREG(FCP_ROW_H(bank), vblock * PAGES_PER_VBLK);
    SETREG(FCP_ROW_L(bank), vblock * PAGES_PER_VBLK);

    flash_issue_cmd(bank, RETURN_ON_ISSUE);

    /*
    uart_printf("erase block #: %d, %d", bank, vblock);
    */
}
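// Usage sketch (not part of the original source): because nand_block_erase() returns on
// issue, erases of the same vblock can be overlapped across all banks and completion
// waited for once, reusing the Waiting Room and MON_CHABANKIDLE polling shown in
// ftl_open() and format(). Whether this matches the caller's intended error handling is
// an assumption; nand_block_erase_sync() is the blocking alternative.
static void erase_vblock_on_all_banks(UINT32 const vblock)
{
    UINT32 bank;
    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        if (is_bad_block(bank, vblock))
            continue;
        nand_block_erase(bank, vblock);           // RETURN_ON_ISSUE inside
    }
    while ((GETREG(WR_STAT) & 0x00000001) != 0);  // last erase accepted by its bank
    while (GETREG(MON_CHABANKIDLE) != 0);         // all banks idle again
}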
void flush_smt_piece(UINT32 idx)
{
    UINT32 bank, row, block;
    UINT32 dest;
    UINT32 pblock;
    UINT32 i, old_block;

    bank   = smt_dram_map[idx] / SMT_NUM;
    block  = smt_dram_map[idx] % SMT_NUM;
    pblock = block / (NUM_BANKS_MAX * 2);

    if ((smt_bit_map[bank][block / NUM_BANKS_MAX] & (1 << (block % NUM_BANKS_MAX))) != 0)
    {
        // smt piece data
        if (g_misc_meta[bank].smt_next_page[pblock] >= SMT_LIMIT - 1)
        {
            // erase: copy the live pieces into the free block, then swap blocks
            for (i = 0; i < 16; i++)
            {
                nand_page_copyback(bank, g_bad_list[bank][pblock],
                                   g_misc_meta[bank].smt_pieces[i * NUM_BANKS_MAX * 2 + pblock],
                                   g_smt_free[bank], i);
                g_misc_meta[bank].smt_pieces[i * NUM_BANKS_MAX * 2 + pblock] = i;
            }
            nand_block_erase(bank, g_bad_list[bank][pblock]);
            g_misc_meta[bank].smt_next_page[pblock] = 16;
            old_block = g_bad_list[bank][pblock];
            g_bad_list[bank][pblock] = g_smt_free[bank];
            g_smt_free[bank] = old_block;
        }
        // update and flash
        g_misc_meta[bank].smt_pieces[block] = g_misc_meta[bank].smt_next_page[pblock];
        row = g_misc_meta[bank].smt_pieces[block] * SMT_INC_SIZE + (PAGES_PER_VBLK * g_bad_list[bank][pblock]);

        // flash map data to nand
        SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
        SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
        SETREG(FCP_COL, 0);
        SETREG(FCP_ROW_L(bank), row);
        SETREG(FCP_ROW_H(bank), row);
        dest = SMT_ADDR + (idx * SMT_PIECE_BYTES);
        SETREG(FCP_DMA_ADDR, dest);
        //SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);
        SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);

        flash_issue_cmd(bank, RETURN_WHEN_DONE);
        g_misc_meta[bank].smt_next_page[pblock]++;
    }
    smt_piece_map[smt_dram_map[idx]] = (UINT32)-1;
}
void NANDController::getCommandInfo()
{
    ////////////////////////////////////////////////////////////////////
    // Get command information
    ////////////////////////////////////////////////////////////////////
    curFCPCommand.cmd      = getRegister(FCP_CMD);
    curFCPCommand.bank     = getRegister(FCP_BANK);
    curFCPCommand.option   = getRegister(FCP_OPTION);
    curFCPCommand.dma_addr = getRegister(FCP_DMA_ADDR);
    curFCPCommand.dma_cnt  = getRegister(FCP_DMA_CNT);
    curFCPCommand.col      = getRegister(FCP_COL);
    curFCPCommand.row[curFCPCommand.bank][0] = getRegister(FCP_ROW_L(curFCPCommand.bank));
    curFCPCommand.row[curFCPCommand.bank][1] = getRegister(FCP_ROW_H(curFCPCommand.bank));

    if (curFCPCommand.row[curFCPCommand.bank][0] != curFCPCommand.row[curFCPCommand.bank][1])
    {
        printf("Different row, ROW_L:%d, ROW_H:%d",
               curFCPCommand.row[curFCPCommand.bank][0],
               curFCPCommand.row[curFCPCommand.bank][1]);
    }

    curFCPCommand.dst_col    = getRegister(FCP_DST_COL);
    curFCPCommand.dst_row[0] = getRegister(FCP_DST_ROW_L);
    curFCPCommand.dst_row[1] = getRegister(FCP_DST_ROW_H);
    curFCPCommand.cmd_id     = 0;  // don't know
    curFCPCommand.issue      = ISSUE_ENABLE;

    ////////////////////////////////////////////////////////////////////
    // Print log
    ////////////////////////////////////////////////////////////////////
    printf(">> FCP Command Receive------------------\n");
    printf("cmd:\t0x%x\n", curFCPCommand.cmd);
    printf("bank:\t0x%x\n", curFCPCommand.bank);
    printf("option:\t0x%x\n", curFCPCommand.option);
    printf("dma_addr:\t0x%x\n", curFCPCommand.dma_addr);
    printf("dma_cnt:\t0x%x\n", curFCPCommand.dma_cnt);
    printf("col:\t0x%x\n", curFCPCommand.col);
    printf("row_l:\t0x%x\n", curFCPCommand.row[curFCPCommand.bank][0]);
    printf("row_h:\t0x%x\n", curFCPCommand.row[curFCPCommand.bank][1]);
    printf("dst_col:\t0x%x\n", curFCPCommand.dst_col);
    printf("dst_row_l:\t0x%x\n", curFCPCommand.dst_row[0]);
    printf("dst_row_h:\t0x%x\n", curFCPCommand.dst_row[1]);
    printf("cmd_id:\t0x%x\n", curFCPCommand.cmd_id);
    printf("issue:\t0x%x\n", curFCPCommand.issue);
    printf("---------------------------------------\n");
}
void ftl_read_sector(UINT32 const lba, UINT32 const sect_offset)  // added by GYUHWA
{
    UINT32 psn, bank, row, buf_offset, nand_offset;
    UINT32 t1;
    UINT32 src, dst;

    psn = get_psn(lba);  // physical sector number
    //bank = lba % NUM_BANKS;
    bank        = psn / SECTORS_PER_BANK;
    t1          = psn % SECTORS_PER_BANK;
    row         = t1 / SECTORS_PER_PAGE;
    nand_offset = t1 % SECTORS_PER_PAGE;  // physical nand offset

    if ((psn & (UINT32)BIT31) != 0)  // data is in the merge buffer
    {
        buf_offset = (psn ^ (UINT32)BIT31);
        //bank = g_target_bank;
        bank       = buf_offset / SECTORS_PER_PAGE;
        buf_offset = buf_offset % SECTORS_PER_PAGE;
        dst = RD_BUF_PTR(g_ftl_read_buf_id) + sect_offset * BYTES_PER_SECTOR;
        src = MERGE_BUFFER_ADDR + bank * BYTES_PER_PAGE + BYTES_PER_SECTOR * buf_offset;
        // copy the up-to-date sector from the merge buffer into the SATA read buffer
        mem_copy(dst, src, BYTES_PER_SECTOR);
    }
    else if (psn != NULL)  // data is in nand flash
    {
        SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);  // FCP command to read one sector
        SETREG(FCP_DMA_CNT, BYTES_PER_SECTOR);
        SETREG(FCP_COL, nand_offset);
        // nand_offset is the NAND column and sect_offset is the read-buffer column,
        // so shift DMA_ADDR to land the sector at the right offset in the read buffer.
        SETREG(FCP_DMA_ADDR, RD_BUF_PTR(g_ftl_read_buf_id) + (BYTES_PER_SECTOR * (sect_offset - nand_offset)));
        SETREG(FCP_OPTION, FO_P | FO_E);
        SETREG(FCP_ROW_L(bank), row);
        SETREG(FCP_ROW_H(bank), row);  // change bm_read_limit
        // RETURN_ON_ISSUE because we do not increase BM_STACK_RD_LIMIT while collecting sectors
        flash_issue_cmd(bank, RETURN_ON_ISSUE);
    }
    else  // data that was never written
    {
        // fill the requested sector with 0xFF for data that was never written
        mem_set_dram(RD_BUF_PTR(g_ftl_read_buf_id) + sect_offset * BYTES_PER_SECTOR, 0xFFFFFFFF, BYTES_PER_SECTOR);
    }
}
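// Illustration only (not part of the original source): small helpers spelling out the PSN
// layout that ftl_read_sector() and ftl_write_sector() assume. A PSN with BIT31 set is a
// merge-buffer slot; otherwise it encodes bank, page row and in-page sector offset. The
// helper names are hypothetical; only the arithmetic is taken from the functions above.
static BOOL32 psn_in_merge_buffer(UINT32 const psn) { return (psn & (UINT32)BIT31) != 0; }
static UINT32 psn_bank(UINT32 const psn)            { return psn / SECTORS_PER_BANK; }
static UINT32 psn_row(UINT32 const psn)             { return (psn % SECTORS_PER_BANK) / SECTORS_PER_PAGE; }
static UINT32 psn_sect_offset(UINT32 const psn)     { return (psn % SECTORS_PER_BANK) % SECTORS_PER_PAGE; }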
/* g_smt_target, g_smt_victim */
void load_smt_piece(UINT32 idx)
{
    UINT32 bank, row, block;
    UINT32 dest;
    UINT32 pblock;  // physical block which holds the target mapping table

    bank   = idx / SMT_BANK_NUM;
    block  = idx % SMT_BANK_NUM;
    pblock = block / SMT_BLOCK;
    row = smt_pos[idx] * SMT_INC_SIZE + (PAGES_PER_VBLK * g_bad_list[bank][pblock]);

    if (g_smt_full == 1)
    {
        flush_smt_piece(g_smt_victim);
        g_smt_victim = (g_smt_victim + 1) % SMT_DRAM;
    }

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);
    SETREG(FCP_COL, 0);
    dest = SMT_ADDR + (g_smt_target * SMT_PIECE_BYTES);
    SETREG(FCP_DMA_ADDR, dest);
    SETREG(FCP_OPTION, FO_P | FO_E);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    // fully guarantee that the previously issued command has finished
    //flash_issue_cmd(bank, RETURN_WHEN_DONE);
    while (_BSP_FSM(g_bank_to_wait) != BANK_IDLE);
    flash_issue_cmd(bank, RETURN_ON_ISSUE);
    g_bank_to_wait = bank;

    smt_dram_map[g_smt_target] = idx;
    smt_piece_map[idx] = g_smt_target;
    smt_bit_map[bank][block / NUM_BANKS_MAX] &= ~(1 << (block % NUM_BANKS_MAX));

    /* init or not */
    if ((g_misc_meta[bank].smt_init[block / NUM_BANKS_MAX] & (1 << (block % NUM_BANKS_MAX))) == 0)
    {
        mem_set_dram(dest, 0x00, SMT_PIECE_BYTES);
        g_misc_meta[bank].smt_init[block / NUM_BANKS_MAX] |= (1 << (block % NUM_BANKS_MAX));
    }

    g_smt_target = (g_smt_target + 1) % SMT_DRAM;
    if (g_smt_target == 0)
    {
        g_smt_full = 1;
    }
}
void nand_page_ptprogram_from_host(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const sect_offset, UINT32 const num_sectors)
{
    #if PrintStats
    uart_print_level_1("FP "); uart_print_level_1_int(num_sectors); uart_print_level_1("\r\n");
    #endif
    totSecWrites += num_sectors;

    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    row = (vblock * PAGES_PER_BLK) + page_num;

    uart_print("nand_page_ptprogram_from_host bank="); uart_print_int(bank);
    uart_print(", vblock="); uart_print_int(vblock);
    uart_print(", page="); uart_print_int(page_num);
    uart_print(", sect_offset="); uart_print_int(sect_offset);
    uart_print(", num_sectors="); uart_print_int(num_sectors); uart_print("\r\n");
    uart_print("Writing row="); uart_print_int(row); uart_print("\r\n");

    SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
    #if OPTION_FTL_TEST == TRUE
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
    #else
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_SATA_W);
    #endif
    SETREG(FCP_DMA_ADDR, WR_BUF_PTR(g_ftl_write_buf_id));
    SETREG(FCP_DMA_CNT, num_sectors * BYTES_PER_SECTOR);
    SETREG(FCP_COL, sect_offset);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    //flash_issue_cmd(bank, RETURN_WHEN_DONE);
    flash_issue_cmd(bank, RETURN_ON_ISSUE);

    g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
}
void nand_page_program(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const buf_addr)
{
    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    row = (vblock * PAGES_PER_BLK) + page_num;

    SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
    SETREG(FCP_DMA_ADDR, buf_addr);
    SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
    SETREG(FCP_COL, 0);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    flash_issue_cmd(bank, RETURN_ON_ISSUE);
}
// synchronous one full page read
void nand_page_read(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const buf_addr)
{
    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    // row means ppn
    row = (vblock * PAGES_PER_BLK) + page_num;

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_OPTION, FO_P | FO_E);
    SETREG(FCP_DMA_ADDR, buf_addr);
    SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
    SETREG(FCP_COL, 0);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    flash_issue_cmd(bank, RETURN_WHEN_DONE);
}
void nand_page_ptprogram(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const sect_offset, UINT32 const num_sectors, UINT32 const buf_addr)
{
    UINT32 row;

    /*
    uart_printf("ptprogram: bank %d vblock %d page_num %d sect_offset %d, num_sectors %d",
                bank, vblock, page_num, sect_offset, num_sectors);
    */

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    row = (vblock * PAGES_PER_BLK) + page_num;

    SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
    SETREG(FCP_DMA_ADDR, buf_addr);
    SETREG(FCP_DMA_CNT, num_sectors * BYTES_PER_SECTOR);
    SETREG(FCP_COL, sect_offset);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    flash_issue_cmd(bank, RETURN_ON_ISSUE);
}
void flush_merge_buffer()
{
    UINT32 new_row, new_psn;
    UINT32 new_bank;
    int i, j;

    for (i = 0; i < NUM_BANKS; i++)
    {
        // sectors remaining in the DRAM merge buffer for this bank
        if (g_target_sect[i] != 0)
        {
            new_bank = i;

            // set registers to write the data to nand flash memory
            SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
            SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
            // Address is the merge buffer address which contains the actual data
            SETREG(FCP_DMA_ADDR, MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE);
            SETREG(FCP_DMA_CNT, BYTES_PER_SECTOR * g_target_sect[i]);
            SETREG(FCP_COL, 0);

            flash_issue_cmd(AUTO_SEL, RETURN_ON_ACCEPT);
            g_prev_bank[i] = GETREG(WR_BANK);
            new_row = get_free_page(g_prev_bank[i]);
            SETREG(FCP_ROW_L(g_prev_bank[i]), new_row);
            SETREG(FCP_ROW_H(g_prev_bank[i]), new_row);

            // for lba -> psn mapping information
            new_psn = g_prev_bank[i] * SECTORS_PER_BANK + g_target_row[g_prev_bank[i]] * SECTORS_PER_PAGE;

            // Update mapping information
            for (j = 0; j < g_target_sect[i]; j++)
            {
                set_psn(g_merge_buffer_lsn[i][j], new_psn + j);
            }
            g_target_row[g_prev_bank[i]] = new_row;
        }
    }
}
void nand_page_read_to_host(UINT32 const bank, UINT32 const vblock, UINT32 const page_num)
{
    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    row = (vblock * PAGES_PER_BLK) + page_num;

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_DMA_ADDR, RD_BUF_PTR(g_ftl_read_buf_id));
    SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
    SETREG(FCP_COL, 0);
    #if OPTION_FTL_TEST == TRUE
    SETREG(FCP_OPTION, FO_P | FO_E);
    #else
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_SATA_R);
    #endif
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    g_ftl_read_buf_id = (g_ftl_read_buf_id + 1) % NUM_RD_BUFFERS;

    #if OPTION_FTL_TEST == FALSE
    {
        while (1)
        {
            UINT32 sata_id = GETREG(SATA_RBUF_PTR);
            if (g_ftl_read_buf_id != sata_id)
                break;
        }
    }
    #endif

    flash_issue_cmd(bank, RETURN_ON_ISSUE);
}
/* g_smt_target, g_smt_victim */
void load_smt_piece(UINT32 idx)
{
    UINT32 bank, row, block;
    UINT32 dest;
    UINT32 pblock;

    bank   = idx / SMT_NUM;
    block  = idx % SMT_NUM;
    pblock = block / (NUM_BANKS_MAX * 2);
    row = g_misc_meta[bank].smt_pieces[block] * SMT_INC_SIZE + (PAGES_PER_VBLK * g_bad_list[bank][pblock]);

    if (g_smt_full == 1)
    {
        flush_smt_piece(g_smt_target);
        g_smt_victim = (g_smt_victim + 1) % SMT_NUM;
    }

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);
    SETREG(FCP_COL, 0);
    dest = SMT_ADDR + (g_smt_target * SMT_PIECE_BYTES);
    SETREG(FCP_DMA_ADDR, dest);
    SETREG(FCP_OPTION, FO_P | FO_E);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    // fully guarantee completion before using the buffer
    flash_issue_cmd(bank, RETURN_WHEN_DONE);

    smt_dram_map[g_smt_target] = idx;
    smt_piece_map[idx] = g_smt_target;
    smt_bit_map[bank][block / NUM_BANKS_MAX] &= ~(1 << (block % NUM_BANKS_MAX));

    if ((g_misc_meta[bank].smt_init[block / NUM_BANKS_MAX] & (1 << (block % NUM_BANKS_MAX))) == 0)
    {
        mem_set_dram(dest, 0x00, SMT_PIECE_BYTES);
        g_misc_meta[bank].smt_init[block / NUM_BANKS_MAX] |= (1 << (block % NUM_BANKS_MAX));
    }

    g_smt_target++;
    if (g_smt_target == SMT_NUM)
    {
        g_smt_target = 0;
        g_smt_full = 1;
    }
}
// General purpose page read function
// synchronous page read (for reading metadata)
// asynchronous page read (left/right hole async read user data)
void nand_page_ptread(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const sect_offset, UINT32 const num_sectors, UINT32 const buf_addr, UINT32 const issue_flag)
{
    #if PrintStats
    uart_print_level_1("FR "); uart_print_level_1_int(num_sectors); uart_print_level_1("\r\n");
    #endif

    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    // row means ppn
    row = (vblock * PAGES_PER_BLK) + page_num;

    uart_print("nand_page_ptread bank="); uart_print_int(bank);
    uart_print(", vblock="); uart_print_int(vblock);
    uart_print(", page="); uart_print_int(page_num);
    uart_print(", sect_offset="); uart_print_int(sect_offset);
    uart_print(", num_sectors="); uart_print_int(num_sectors);
    uart_print(", dst_addr="); uart_print_int(buf_addr); uart_print("\r\n");
    uart_print("Reading row="); uart_print_int(row); uart_print("\r\n");

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_OPTION, FO_P | FO_E);
    SETREG(FCP_DMA_ADDR, buf_addr);
    SETREG(FCP_DMA_CNT, num_sectors * BYTES_PER_SECTOR);
    SETREG(FCP_COL, sect_offset);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    // issue_flag: RETURN_ON_ISSUE, RETURN_WHEN_DONE, RETURN_ON_ACCEPT
    flash_issue_cmd(bank, issue_flag);
}
static void write_format_mark(void)
{
    // This function writes a format mark to a page at (bank #0, block #0).

    #ifdef __GNUC__
    extern UINT32 size_of_firmware_image;
    UINT32 firmware_image_pages = (((UINT32) (&size_of_firmware_image)) + BYTES_PER_FW_PAGE - 1) / BYTES_PER_FW_PAGE;
    #else
    extern UINT32 Image$$ER_CODE$$RO$$Length;
    extern UINT32 Image$$ER_RW$$RW$$Length;
    UINT32 firmware_image_bytes = ((UINT32) &Image$$ER_CODE$$RO$$Length) + ((UINT32) &Image$$ER_RW$$RW$$Length);
    UINT32 firmware_image_pages = (firmware_image_bytes + BYTES_PER_FW_PAGE - 1) / BYTES_PER_FW_PAGE;
    #endif

    UINT32 format_mark_page_offset = FW_PAGE_OFFSET + firmware_image_pages;

    mem_set_dram(FTL_BUF_ADDR, 0, BYTES_PER_SECTOR);

    SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
    SETREG(FCP_BANK, REAL_BANK(0));
    SETREG(FCP_OPTION, FO_E | FO_B_W_DRDY);
    SETREG(FCP_DMA_ADDR, FTL_BUF_ADDR);  // DRAM -> flash
    SETREG(FCP_DMA_CNT, BYTES_PER_SECTOR);
    SETREG(FCP_COL, 0);
    SETREG(FCP_ROW_L(0), format_mark_page_offset);
    SETREG(FCP_ROW_H(0), format_mark_page_offset);

    // At this point, we do not have to check Waiting Room status before issuing a command,
    // because we have waited for all the banks to become idle before returning from format().
    SETREG(FCP_ISSUE, NULL);

    // wait for the FC_COL_ROW_IN_PROG command to be accepted by bank #0
    while ((GETREG(WR_STAT) & 0x00000001) != 0);

    // wait until bank #0 finishes the write operation
    while (BSP_FSM(0) != BANK_IDLE);
}
/* g_smt_target, g_smt_victim */
void load_smt_piece(UINT32 idx)
{
    UINT32 bank, row, block;

    bank  = idx / NUM_BANKS_MAX;
    block = idx % NUM_BANKS_MAX;
    row = g_misc_meta[bank].smt_pieces[block] + (PAGES_PER_VBLK * g_bad_list[bank][block]);

    if (g_smt_target == NUM_BANKS_MAX)
    {
        g_smt_target = 0;
        flush_smt_piece(smt_dram_map[g_smt_victim]);
        g_smt_victim = (g_smt_victim + 1) % NUM_BANKS_MAX;
    }

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);
    SETREG(FCP_COL, 0);
    SETREG(FCP_DMA_ADDR, SMT_ADDR + (g_smt_target * SMT_PIECE_BYTES));
    SETREG(FCP_OPTION, FO_P | FO_E);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    flash_issue_cmd(bank, RETURN_ON_ISSUE);

    smt_dram_bit[bank] |= (1 << block);
    smt_dram_map[g_smt_target] = idx;
    smt_piece_map[idx] = g_smt_target;
    smt_bit_map[bank] &= ~(1 << block);
    g_smt_target++;
}
void ftl_write_sector(UINT32 const lba)
{
    UINT32 new_bank, vsect_num, new_row;
    UINT32 new_psn;
    UINT32 temp;
    UINT32 dst, src;
    UINT32 index = lba % SECTORS_PER_PAGE;
    int i;

    //new_bank = lba % NUM_BANKS;  // get bank number of sector
    new_bank = g_target_bank;
    temp = get_psn(lba);

    if ((temp & (UINT32)BIT31) != 0)
    {
        // Data for this lba is already in DRAM:
        // copy the SATA host data over the same merge-buffer sector.
        vsect_num = (temp ^ (UINT32)BIT31);
        dst = MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE + vsect_num * BYTES_PER_SECTOR;
        src = WR_BUF_PTR(g_ftl_write_buf_id) + index * BYTES_PER_SECTOR;
        mem_copy(dst, src, BYTES_PER_SECTOR);
    }
    else
    {
        // copy the SATA host data into the DRAM merge-buffer page
        //vsect_num = g_misc_meta[new_bank].g_merge_buff_sect;
        vsect_num = g_target_sect;
        dst = MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE + vsect_num * BYTES_PER_SECTOR;
        src = WR_BUF_PTR(g_ftl_write_buf_id) + index * BYTES_PER_SECTOR;
        mem_copy(dst, src, BYTES_PER_SECTOR);

        // set the top bit of the psn; it means the data is in DRAM
        set_psn(lba, ((UINT32)BIT31 | vsect_num));

        // remember the lba so the psn can be updated when the page is flushed
        g_merge_buffer_lsn[vsect_num] = lba;
        vsect_num++;

        // If the merge buffer of the bank is full,
        // then flush the merge-buffer page to nand flash
        // and set the psn of all its sectors.
        if (vsect_num >= SECTORS_PER_PAGE)
        {
            /* get free page */
            new_row = get_free_page(new_bank);

            SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
            SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
            SETREG(FCP_DMA_ADDR, MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE);
            SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
            SETREG(FCP_COL, 0);
            SETREG(FCP_ROW_L(new_bank), new_row);
            SETREG(FCP_ROW_H(new_bank), new_row);

            flash_issue_cmd(new_bank, RETURN_ON_ISSUE);

            /* reset the merge-buffer page's sector pointer */
            // g_misc_meta[new_bank].g_merge_buff_sect = 0;
            g_target_sect = 0;
            g_target_bank = (g_target_bank + 1) % NUM_BANKS;

            // allocate new psn
            //new_psn = new_row * SECTORS_PER_PAGE;
            new_psn = new_bank * SECTORS_PER_BANK + new_row * SECTORS_PER_PAGE;

            // vsn -> psn mapping
            for (i = 0; i < SECTORS_PER_PAGE; i++)
            {
                set_psn(g_merge_buffer_lsn[i], new_psn + i);
            }
        }
        else
        {
            //g_misc_meta[new_bank].g_merge_buff_sect++;
            g_target_sect++;
        }
    }
}
void ftl_open(void)
{
    sanity_check();

    // STEP 1 - read scan lists from NAND flash

    scan_list_t* scan_list = (scan_list_t*) SCAN_LIST_ADDR;
    UINT32 bank;
    UINT32 bad_block, i, j;

    // Since we are going to check the flash interrupt flags within this function, ftl_isr() should not be called.
    disable_irq();

    flash_clear_irq();  // clear any flash interrupt flags that might have been set

    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        //g_misc_meta[bank].g_merge_buff_sect = 0;

        SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);           // FC_COL_ROW_READ_OUT = sensing and data output
        SETREG(FCP_OPTION, FO_E);                       // scan list was written in 1-plane mode by install.exe, so there is no FO_P
        SETREG(FCP_DMA_ADDR, scan_list + bank);         // target address should be DRAM or SRAM (see flash.h for rules)
        SETREG(FCP_DMA_CNT, SCAN_LIST_SIZE);            // number of bytes for data output
        SETREG(FCP_COL, 0);
        SETREG(FCP_ROW_L(bank), SCAN_LIST_PAGE_OFFSET); // scan list was written to this position by install.exe
        SETREG(FCP_ROW_H(bank), SCAN_LIST_PAGE_OFFSET); // Tutorial FTL always uses the same row addresses for high chip and low chip

        flash_issue_cmd(bank, RETURN_ON_ISSUE);         // Take a look at the source code of flash_issue_cmd() now.
    }

    // This while() statement waits for the last issued command to be accepted.
    // If bit #0 of WR_STAT is one, a flash command is still in the Waiting Room, because the target bank has not accepted it yet.
    while ((GETREG(WR_STAT) & 0x00000001) != 0);

    // Now the FC_COL_ROW_READ_OUT commands have been accepted by all the banks.
    // Before checking whether the scan lists are corrupted or not, we have to wait for the read operations to complete.
    // This code shows how to wait for ALL the banks to become idle.
    while (GETREG(MON_CHABANKIDLE) != 0);

    // Now we can check the flash interrupt flags.
    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        UINT32 num_entries = NULL;
        UINT32 result = OK;

        if (BSP_INTR(bank) & FIRQ_DATA_CORRUPT)
        {
            // Too many bits are corrupted, so they cannot be corrected by ECC.
            result = FAIL;
        }
        else
        {
            // Even though the scan list is not corrupt, we have to check whether its contents make sense.
            UINT32 i;
            num_entries = read_dram_16(&(scan_list[bank].num_entries));

            if (num_entries > SCAN_LIST_ITEMS)
            {
                result = FAIL;  // We cannot trust this scan list. Perhaps a software bug.
            }
            else
            {
                for (i = 0; i < num_entries; i++)
                {
                    UINT16 entry = read_dram_16(&(scan_list[bank].list[i]));
                    UINT16 pblk_offset = entry & 0x7FFF;

                    if (pblk_offset == 0 || pblk_offset >= PBLKS_PER_BANK)
                    {
                        #if OPTION_REDUCED_CAPACITY == FALSE
                        result = FAIL;  // We cannot trust this scan list. Perhaps a software bug.
                        #endif
                    }
                    else
                    {
                        // Bit position 15 of a scan list entry is the high-chip/low-chip flag.
                        // Remove the flag in order to keep is_bad_block() simple.
                        write_dram_16(&(scan_list[bank].list[i]), pblk_offset);
                    }
                }
            }
        }

        if (result == FAIL)
        {
            mem_set_dram(scan_list + bank, 0, SCAN_LIST_SIZE);
            g_misc_meta[bank].g_scan_list_entries = 0;
        }
        else
        {
            write_dram_16(&(scan_list[bank].num_entries), 0);
            g_misc_meta[bank].g_scan_list_entries = num_entries;
        }
    }

    // STEP 2 - If necessary, do the low-level format.
    // format() should be called after loading the scan lists, because format() calls is_bad_block().
    init_meta_data();

    // save the non-bad block list for the metadata blocks
    // block#0 : list, block#1 : misc meta
    // block#2 ~ : map table meta and data
    for (i = 0; i < NUM_BANKS; i++)
    {
        bad_block = 2;
        for (j = 0; j < NUM_BANKS_MAX; j++)
        {
            while (is_bad_block(i, bad_block) && j < VBLKS_PER_BANK)
            {
                bad_block++;
            }
            g_bad_list[i][j] = bad_block++;
        }
        g_free_start[i] = g_bad_list[i][NUM_BANKS_MAX - 1] + 1;
    }

    //if (check_format_mark() == FALSE)
    if (TRUE)
    {
        // When ftl_open() is called for the first time (i.e. the SSD is powered up for the first time),
        // format() is called.
        format();
    }
    else
    {
        loading_misc_meta();
    }

    // STEP 3 - initialize sector mapping table pieces
    // The page mapping table is too large to fit in SRAM and DRAM.
    // gyuhwa
    // init_metadata();

    // STEP 4 - initialize global variables that belong to the FTL
    g_ftl_read_buf_id = 0;
    g_ftl_write_buf_id = 0;

    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        g_misc_meta[bank].g_target_row = PAGES_PER_VBLK * (g_free_start[bank]);
    }

    flash_clear_irq();

    // This example FTL can handle runtime bad block interrupts and read fail (uncorrectable bit error) interrupts.
    SETREG(INTR_MASK, FIRQ_DATA_CORRUPT | FIRQ_BADBLK_L | FIRQ_BADBLK_H);
    SETREG(FCONF_PAUSE, FIRQ_DATA_CORRUPT | FIRQ_BADBLK_L | FIRQ_BADBLK_H);

    enable_irq();
}