void cmd_boot_linux(void){ //1.从nand flash 0x600000读取内核到内存0x20008000,读取大小0xC00000 //特殊情况:板子的nand flash有坏块 //nand_page_read(test_buf,0); uart0_puts("\nloading kernel..."); unsigned int *ptr = (unsigned int *)0x20008000; unsigned int page = 0x600000/NF_PAGE_SIZE; int i ; for(i = 0;i < 6000;i++){ uart0_putc('.'); uart0_puts("^_^"); nand_page_read(ptr,page); ptr += NF_PAGE_SIZE/4;//偏移NF_PAGE_SIZE page++;//加1页 } uart0_puts("\nrun linux ..."); //2.调用0x20008000处的程序(函数指针) func_t kernel_start; kernel_start = (func_t)0x20008000; kernel_start(0,2456,0); //第一个参数0,内核已经不使用了 //第二个参数,当编译内核时为开发板指定的一个号, //内核启动时会判断引导程序传给内核的参数和编译内核时指定的值是否一样; //如果一样内核继续执行,如果不一样,内核停止执行; //第三个参数,引导程序和内核传递参数 的 内存地址;内核根据给定地址找参数;该内存地址是引导和内核共享的内存; //给0是因为Image是使用默认参数的,不需要引导程序传递; //uart0_puts("\nkernel run end"); }
// get vpn from PAGE_MAP static UINT32 get_vpn(UINT32 const lpn) { if(IS_WRITE) { get_v_w++; } else { get_v++; } UINT32 index; for(index = 0; index < CMT_SIZE; index++) { if(cmt[index].lpn == lpn) { if(IS_WRITE) get_v_w_hit++; else cmt_r_hit++; return SET_CLEAN(cmt[index].vpn); } else if(cmt[index].lpn == INVALID) return 0; } /* * not in CMT * now select an victim */ evict_mapping(); /* * now, cmt[cmt_hand] is a victim */ UINT32 gtd_index; UINT32 mapping_bank = get_num_bank(lpn); UINT32 offset_in_bank = lpn / NUM_BANKS; UINT32 offset_in_page = offset_in_bank % MAPPINGS_PER_PAGE; gtd_index = offset_in_bank / MAPPINGS_PER_PAGE; UINT32 mapping_vpn = gtd[mapping_bank][gtd_index]; if(mapping_vpn == INVALID) { return NULL; } map_read++; nand_page_read(mapping_bank, mapping_vpn / PAGES_PER_BLK, mapping_vpn % PAGES_PER_BLK, TRANS_BUF(mapping_bank)); cmt[cmt_hand].lpn = lpn; cmt[cmt_hand].vpn = read_dram_32(TRANS_BUF(mapping_bank) + sizeof(UINT32) * offset_in_page); cmt[cmt_hand].sc = TRUE; UINT32 ret = SET_CLEAN(cmt[cmt_hand].vpn); cmt_hand = (cmt_hand + 1) % CMT_SIZE; return ret; }
/*
 * Debug command: read NAND page 0 into test_buf and dump its first 64
 * entries over UART0, four values per row.
 */
void cmd_nread(void)
{
    char num_text[16];   /* scratch buffer for itoa() */
    int idx;

    uart0_puts("\nnand read ...");
    nand_page_read(test_buf, 0);
    uart0_puts("\ndata:");

    for (idx = 0; idx < 64; idx++) {
        /* start a new row every four values */
        if (idx % 4 == 0) {
            uart0_puts("\n");
        }
        itoa(num_text, test_buf[idx]);
        uart0_puts(num_text);
        uart0_puts(" ");
    }
}
/*
 * Write one logical page from the SATA write buffer to flash.
 *
 * lpn         : logical page number (bank chosen by page striping)
 * sect_offset : first valid sector within the page
 * num_sectors : number of valid sectors (1..SECTORS_PER_PAGE)
 *
 * Pages are written out-of-place: a new vpn is allocated, any partial-page
 * holes are filled from the old copy, the full page is programmed, and the
 * old page's block valid-count is decremented.
 */
static void write_page(UINT32 const lpn, UINT32 const sect_offset, UINT32 const num_sectors)
{
    CHECK_LPAGE(lpn);
    ASSERT(sect_offset < SECTORS_PER_PAGE);
    ASSERT(num_sectors > 0 && num_sectors <= SECTORS_PER_PAGE);

    UINT32 bank, old_vpn, new_vpn;
    UINT32 vblock, page_num, page_offset, column_cnt;

    bank        = get_num_bank(lpn); // page striping
    page_offset = sect_offset;
    column_cnt  = num_sectors;

    new_vpn = assign_new_write_vpn(bank);
    old_vpn = get_vpn(lpn);

    CHECK_VPAGE (old_vpn);
    CHECK_VPAGE (new_vpn);
    ASSERT(old_vpn != new_vpn);

    g_ftl_statistics[bank].page_wcount++;

    // if old data already exist,
    if (old_vpn != NULL)
    {
        vblock   = old_vpn / PAGES_PER_BLK;
        page_num = old_vpn % PAGES_PER_BLK;

        //--------------------------------------------------------------------------------------
        // `Partial programming'
        // we could not determine whether the new data is loaded in the SATA write buffer.
        // Thus, read the left/right hole sectors of a valid page and copy into the write buffer.
        // And then, program whole valid data
        //--------------------------------------------------------------------------------------
        if (num_sectors != SECTORS_PER_PAGE)
        {
            // Performance optimization (but, not proved)
            // To reduce flash memory access, valid hole copy into SATA write buffer after reading whole page
            // Thus, in this case, we need just one full page read + one or two mem_copy
            if ((num_sectors <= 8) && (page_offset != 0))
            {
                // one page async read
                nand_page_read(bank, vblock, page_num, FTL_BUF(bank));

                // copy `left hole sectors' into SATA write buffer
                if (page_offset != 0)
                {
                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id), FTL_BUF(bank), page_offset * BYTES_PER_SECTOR);
                }
                // copy `right hole sectors' into SATA write buffer
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    UINT32 const rhole_base = (page_offset + column_cnt) * BYTES_PER_SECTOR;
                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base, FTL_BUF(bank) + rhole_base, BYTES_PER_PAGE - rhole_base);
                }
            }
            // left/right hole async read operation (two partial page read)
            else
            {
                // read `left hole sectors'
                if (page_offset != 0)
                {
                    nand_page_ptread(bank, vblock, page_num, 0, page_offset, WR_BUF_PTR(g_ftl_write_buf_id), RETURN_ON_ISSUE);
                }
                // read `right hole sectors'
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    nand_page_ptread(bank, vblock, page_num, page_offset + column_cnt, SECTORS_PER_PAGE - (page_offset + column_cnt), WR_BUF_PTR(g_ftl_write_buf_id), RETURN_ON_ISSUE);
                }
            }
        }
        // full page write: after hole filling the whole page is valid
        page_offset = 0;
        column_cnt  = SECTORS_PER_PAGE;
        // invalid old page (decrease vcount)
        set_vcount(bank, vblock, get_vcount(bank, vblock) - 1);
    }
    vblock   = new_vpn / PAGES_PER_BLK;
    page_num = new_vpn % PAGES_PER_BLK;
    ASSERT(get_vcount(bank,vblock) < (PAGES_PER_BLK - 1));

    // write new data (make sure that the new data is ready in the write buffer frame)
    // (c.f FO_B_SATA_W flag in flash.h)
    nand_page_ptprogram_from_host(bank, vblock, page_num, page_offset, column_cnt);

    // update metadata
    set_lpn(bank, page_num, lpn);
    set_vpn(lpn, new_vpn);
    set_vcount(bank, vblock, get_vcount(bank, vblock) + 1);
}
/********************* READ **********************************/
/*
 * Read the page containing logical sector `lsn`.
 *
 * Looks up the data block through BMT; if the page there is marked valid
 * (OOB flag == 1) it is read directly, otherwise (flag == -1) the valid
 * copy is searched in the log blocks (PMT). Any other flag only bumps the
 * read statistics.
 *
 * Returns the sector count of one page (hard-coded 4 — presumably equals
 * SECT_NUM_PER_PAGE in this configuration; confirm).
 * NOTE(review): `mapdir_flag`, `ppn`, `pbn` and `size_page` are unused here.
 */
size_t lm_read(sect_t lsn, sect_t size, int mapdir_flag)
{
    int i, k, m, h;
    int read_flag;
    int lpn = lsn/SECT_NUM_PER_PAGE;      /* logical page number */
    int lbn = lsn/SECT_NUM_PER_BLK;       /* logical block number */
    int ppn;
    int pbn;
    int size_page = size/SECT_NUM_PER_PAGE;
    int offset = lpn%PAGE_NUM_PER_BLK;    /* page offset inside the block */
    int valid_flag;
    int sect_num;
    sect_t s_lsn;                         /* first logical sector of the page */
    sect_t s_psn;                         /* first physical sector of the page */
    sect_t copy[SECT_NUM_PER_PAGE];

    memset (copy, 0xFF, sizeof (copy));

    if(BMT[lbn] == -1){
        /* no data block mapped for this lbn -> nothing was ever written */
        ASSERT(0);
    }

    sect_num = 4;

    s_psn = ((BMT[lbn] * PAGE_NUM_PER_BLK + offset) * SECT_NUM_PER_PAGE);
    s_lsn = lpn * SECT_NUM_PER_PAGE;

    for (h = 0; h < SECT_NUM_PER_PAGE; h++) {
        copy[h] = s_lsn + h;
    }

    /* 1: valid in data block, -1: superseded by a log-block copy */
    valid_flag = nand_oob_read(s_psn);

    if(valid_flag == 1){
        size = nand_page_read(s_psn, copy, 0);
    }
    else if(valid_flag == -1){
        /* scan every log block for the valid copy of this lpn */
        read_flag = 0;
        for( k = 0; (k < total_log_blk_num) && (read_flag != 1); k++){
            for( m = 0; m < PAGE_NUM_PER_BLK; m++){
                if((PMT[k].lpn[m] == lpn) && (PMT[k].lpn_status[m] == 1))
                {
                    s_psn = ((PMT[k].pbn * PAGE_NUM_PER_BLK + m) * SECT_NUM_PER_PAGE);
                    s_lsn = lpn * SECT_NUM_PER_PAGE;
                    for (i = 0; i < SECT_NUM_PER_PAGE; i++) {
                        copy[i] = s_lsn + i;
                    }
                    size = nand_page_read(s_psn, copy, 0);
                    read_flag = 1;
                    break;
                }
            }
        }
    }
    else{
        /* page never written: account for the access and return page size */
        stat_read_num++;
        flash_read_num++;
        return 4;
    }

    ASSERT(size == SECT_NUM_PER_PAGE);
    return sect_num;
}
/*
 * Full merge of the random-write log block PMT[pmt_index] with the data
 * block(s) its valid pages belong to.
 *
 * For every valid page in the log block: allocate a fresh data block,
 * then for each page slot of the old data block copy the newest copy
 * (from the data block or from whichever log block holds it) into the
 * new data block, invalidating the source. Finally the old data block
 * and the log block are erased.
 *
 * NOTE(review): pmt_index 0 is reserved for the sequential-write log block
 * (see the fpc guard at the top); this routine must not be called on it.
 */
void merge_full(int pmt_index)
{
    int i,j,k,m,h;
    int size;
    int old_pbn;
    int lbn,lpn,new_pbn,pbn,offset, invalid_flag;
    int s_lsn, s_psn;
    sect_t lsns[SECT_NUM_PER_PAGE];

    /* slot 0 belongs to the SW log block; merging it here is a logic error */
    if(PMT[pmt_index].fpc != 0 && pmt_index == 0)
    {
        printf("something sucks");
        ASSERT(0);
    }

    // Check with all page in a log block
    for(i = 0; i<PAGE_NUM_PER_BLK; i++)
    {
        if(PMT[pmt_index].lpn_status[i] != 1){ // -1: invalid, 0: free, 1: valid
            continue;
        }
        else{
            offset = PMT[pmt_index].lpn[i] % PAGE_NUM_PER_BLK;
            lbn = PMT[pmt_index].lpn[i] / PAGE_NUM_PER_BLK;
            old_pbn = BMT[lbn];

            /* if the target data block is the SW block's data block, fold the
             * SW log block in first (partial merge) and re-arm the SW block */
            if(old_pbn == global_SW_blk.data_blk)
            {
                merge_partial(global_SW_blk.logblk.pbn, global_SW_blk.data_blk, global_SW_blk.logblk.fpc,-1);
                merge_partial_num++;

                global_SW_blk.logblk.pbn = nand_get_free_blk(0,1);
                global_SW_blk.logblk.fpc = PAGE_NUM_PER_BLK;
                global_SW_blk.data_blk = -1;
                for( h = 0; h < PAGE_NUM_PER_BLK; h++)
                {
                    global_SW_blk.logblk.lpn[h] = -1;        // -1: no data written
                    global_SW_blk.logblk.lpn_status[h] = 0;  // 0: free
                }
                PMT[0] = global_SW_blk.logblk; // insert new SW_blk info into PMT
                continue;
            }

            /* allocate the replacement data block for this lbn */
            new_pbn = nand_get_free_blk(0,1);
            BMT[lbn] = new_pbn;
            merge_full_num++;

            /* copy each page slot of the old data block into the new one */
            for(j =0 ; j < PAGE_NUM_PER_BLK ; j++)
            {
                lpn = (lbn * PAGE_NUM_PER_BLK) + j;

                /* for nand_oob_read */
                s_psn = SECTOR(old_pbn, j*SECT_NUM_PER_PAGE); // chk if correct
                s_lsn = lpn * SECT_NUM_PER_PAGE;
                memset (lsns, 0xFF, sizeof (lsns));
                for (h = 0; h < SECT_NUM_PER_PAGE; h++) {
                    lsns[h] = s_lsn + h;
                }
                size = nand_oob_read(s_psn);

                if(size == 1) // valid -> invalidate page in the data block
                {
                    // invalidate page in data block
                    s_psn = SECTOR(old_pbn, j*SECT_NUM_PER_PAGE); // chk if correct
                    s_lsn = lpn * SECT_NUM_PER_PAGE;

                    // read from data block - youkim
                    memset (lsns, 0xFF, sizeof (lsns));
                    for (h = 0; h< SECT_NUM_PER_PAGE; h++) {
                        lsns[h] = s_lsn + h;
                    }
                    nand_page_read(s_psn, lsns, 1);

                    // invalidate page in data block
                    for(h = 0; h<SECT_NUM_PER_PAGE; h++){
                        nand_invalidate(s_psn + h, s_lsn + h);
                    }
                    nand_stat(OOB_WRITE);

                    // write into new pbn
                    s_psn = SECTOR(new_pbn,j* SECT_NUM_PER_PAGE);
                    s_lsn = lpn * SECT_NUM_PER_PAGE;
                    memset (lsns, 0xFF, sizeof (lsns));
                    for (h = 0; h< SECT_NUM_PER_PAGE; h++) {
                        lsns[h] = s_lsn + h;
                    }
                    nand_page_write(s_psn, lsns, 1, 1);
                }
                else if(size == -1)
                {
                    /* data-block copy superseded: find the live copy in a log
                     * block (k starts at 1: slot 0 is the SW block) */
                    invalid_flag = 0;
                    for( k = 1; (k < total_log_blk_num) && (invalid_flag != 1); k++){
                        for( m = 0; m < PAGE_NUM_PER_BLK; m++){
                            if((PMT[k].lpn[m] == lpn) && (PMT[k].lpn_status[m] == 1))
                            {
                                // invalidate page in log block
                                PMT[k].lpn_status[m] = -1; // -1: invalid
                                s_psn = ((PMT[k].pbn * PAGE_NUM_PER_BLK + m) * SECT_NUM_PER_PAGE);
                                s_lsn = lpn * SECT_NUM_PER_PAGE;

                                // read from data block - youkim
                                memset (lsns, 0xFF, sizeof (lsns));
                                for (h = 0; h< SECT_NUM_PER_PAGE; h++) {
                                    lsns[h] = s_lsn + h;
                                }
                                nand_page_read(s_psn, lsns, 1);

                                // invalidate
                                for(h = 0; h<SECT_NUM_PER_PAGE; h++){
                                    nand_invalidate(s_psn + h, s_lsn + h);
                                }
                                nand_stat(OOB_WRITE);
                                invalid_flag = 1;
                                break;
                            }
                        }
                    }
                    // write into new pbn
                    s_psn = SECTOR(new_pbn,j* SECT_NUM_PER_PAGE);
                    s_lsn = lpn * SECT_NUM_PER_PAGE;
                    memset (lsns, 0xFF, sizeof (lsns));
                    for (h = 0; h< SECT_NUM_PER_PAGE; h++) {
                        lsns[h] = s_lsn + h;
                    }
                    nand_page_write(s_psn, lsns, 1, 1);
                }
                else{
                    /* page slot never written anywhere: nothing to copy */
                }
            }
            // erase the data block
            if(old_pbn == PMT[0].pbn){
                printf("1. something sucks");
                ASSERT(0);
            }
            nand_erase(old_pbn);
        }
    }
    // erase the log block
    nand_erase(PMT[pmt_index].pbn);
    free_RW_blk_num++;
}
void merge_full_SW(int req_lsn) { int i,h; int s_lsn, s_psn, s_psn1, lpn, valid_flag = 0; int new_pbn,pbn,lbn = -1; sect_t lsns[SECT_NUM_PER_PAGE]; merge_full_num++; PMT[0] = global_SW_blk.logblk; pbn = global_SW_blk.data_blk; new_pbn = nand_get_free_blk(0,1); for(i = 0; i<total_blk_num; i++){ if( BMT[i] == pbn){ lbn = i; break; } } BMT[lbn] = new_pbn; ASSERT( lbn != -1); for( i =0 ; i < PAGE_NUM_PER_BLK; i++) { if(PMT[0].lpn_status[i] == -1){ // -1: invalid, 0: free, 1: valid ASSERT(0); continue; } else if(PMT[0].lpn_status[i] == 1) { lpn = (lbn * PAGE_NUM_PER_BLK) + i; s_lsn = lpn * SECT_NUM_PER_PAGE; s_psn = SECTOR(new_pbn,i* SECT_NUM_PER_PAGE); memset (lsns, 0xFF, sizeof (lsns)); for (h = 0; h < SECT_NUM_PER_PAGE; h++) { lsns[h] = s_lsn + h; } if(req_lsn == s_lsn) { nand_page_write(s_psn, lsns, 0, 1); } else{ s_psn1 = (global_SW_blk.logblk.pbn * PAGE_NUM_PER_BLK + i) * SECT_NUM_PER_PAGE; nand_page_read(s_psn1, lsns, 1); // read from log block nand_page_write(s_psn, lsns, 1, 1); } } else { lpn = (lbn * PAGE_NUM_PER_BLK) + i; s_lsn = lpn * SECT_NUM_PER_PAGE; s_psn = SECTOR(new_pbn,i* SECT_NUM_PER_PAGE); s_psn1 = (pbn * PAGE_NUM_PER_BLK + i) * SECT_NUM_PER_PAGE; for (h = 0; h < SECT_NUM_PER_PAGE; h++) { lsns[h] = s_lsn + h; } valid_flag = nand_oob_read(s_psn1); if( valid_flag == 1){ // read from data block nand_page_read(s_psn1,lsns,1); nand_page_write(s_psn,lsns,1,1); } } } nand_erase(pbn); nand_erase(global_SW_blk.logblk.pbn); }
void merge_partial(int log_pbn, int data_pbn, int fpc, int req_lsn) { //1. copy valid pages from data_pbn to log_pbn int i,j,k,h,m; int lpn; int s_psn, s_lsn; int sect_index = 0; int valid_sect_num; int start = PAGE_NUM_PER_BLK - fpc; int invalid_flag,valid_flag; int copy[SECT_NUM_PER_PAGE]; memset(copy, 0xFF, sizeof copy); for(j=0; j < total_blk_num; j++) { if(BMT[j] == data_pbn) { break; } } ASSERT(j != total_blk_num); lpn = j*PAGE_NUM_PER_BLK; for (i = start; i < PAGE_NUM_PER_BLK; i++) { s_lsn = (lpn+i) * SECT_NUM_PER_PAGE; for (m = 0; m < SECT_NUM_PER_PAGE; m++) { copy[m] = s_lsn + m; } valid_flag = nand_oob_read( SECTOR(data_pbn, i * SECT_NUM_PER_PAGE)); if(valid_flag == 1) { if(s_lsn != req_lsn){ valid_sect_num = nand_page_read( SECTOR(data_pbn, i * SECT_NUM_PER_PAGE), copy, 1); nand_page_write(SECTOR(log_pbn, i*SECT_NUM_PER_PAGE), copy, 1, 1); } else{ nand_page_write(SECTOR(log_pbn, i*SECT_NUM_PER_PAGE), copy, 0, 1); } } else if( valid_flag == -1) { invalid_flag = 0; for( j = 0; j < total_log_blk_num && invalid_flag != 1; j++) { for( k = 0; k < PAGE_NUM_PER_BLK;k++) { if(PMT[j].lpn[k] == (lpn+i)) { //invalidate in log block ASSERT(PMT[j].lpn_status[k] = 1); PMT[j].lpn_status[k] = -1; // -1: invalid s_psn = ((PMT[j].pbn * PAGE_NUM_PER_BLK + k) * SECT_NUM_PER_PAGE); s_lsn = (lpn+i) * SECT_NUM_PER_PAGE; // copy the page in log block into new data block valid_sect_num = nand_page_read(s_psn, copy, 1); if(s_lsn != req_lsn){ nand_page_write(SECTOR(log_pbn, i*SECT_NUM_PER_PAGE), copy, 1, 1); } else{ nand_page_write(SECTOR(log_pbn, i*SECT_NUM_PER_PAGE), copy, 0, 1); } // invalidate the page in log block for(h = 0; h<SECT_NUM_PER_PAGE; h++){ nand_invalidate(s_psn + h, s_lsn + h); } nand_stat(OOB_WRITE); invalid_flag = 1; break; } } } } else { if(s_lsn == req_lsn){ nand_page_write(SECTOR(log_pbn, i*SECT_NUM_PER_PAGE), copy, 0, 1); } } } //2. 
update BMT for(i = 0; i<total_blk_num; i++){ if( BMT[i] == data_pbn ){ BMT[i] = log_pbn; break; } } ASSERT(i != total_blk_num); //3. erase (data_pbn) nand_erase(data_pbn); }
/*
 * Select a CMT victim with the clock (second-chance) algorithm and, if the
 * victim entry is dirty, flush its translation page back to flash — batching
 * every other dirty entry that lives in the same translation page.
 * On return, cmt[cmt_hand] may be reused by the caller.
 */
static void evict_mapping(void)
{
    /* empty slot at the hand: nothing cached yet, nothing to evict */
    if(cmt[cmt_hand].lpn == INVALID)
        return;

    /* clock sweep: clear second-chance bits until an unreferenced slot is found */
    while(1)
    {
        if(cmt[cmt_hand].sc == TRUE)
        {
            cmt[cmt_hand].sc = FALSE;
            cmt_hand = (cmt_hand + 1) % CMT_SIZE;
        }
        else
            break;
    }

    UINT32 gtd_index;
    UINT32 victim_lpn, victim_vpn;
    UINT32 mapping_vpn;
    UINT32 mapping_bank;

    victim_vpn = cmt[cmt_hand].vpn;

    /*
     * VICTIM : cmt_hand
     * dirty  : dirty entries belonging to the same translation page are
     *          written back together
     * clean  : simply discarded
     */
    if(IS_CLEAN(victim_vpn))
    {
        return;
    }

    //Dirty
    victim_lpn = cmt[cmt_hand].lpn;
    gtd_index = victim_lpn / (MAPPINGS_PER_PAGE*NUM_BANKS);   /* which translation page */
    mapping_bank = get_num_bank(victim_lpn);
    mapping_vpn = gtd[mapping_bank][gtd_index];

    if(mapping_vpn != INVALID)
    {
        map_read++;
        /* load the existing translation page so untouched entries survive */
        nand_page_read(mapping_bank,
                mapping_vpn / PAGES_PER_BLK,
                mapping_vpn % PAGES_PER_BLK,
                TRANS_BUF(mapping_bank));
    }
    else
    {
        /* first flush for this region: start from an all-zero page */
        mem_set_dram(TRANS_BUF(mapping_bank), 0, BYTES_PER_PAGE);
    }

    /* fold every dirty CMT entry that maps into this translation page */
    int index;
    for(index = 0; index < CMT_SIZE; index++)
    {
        if(get_num_bank(cmt[index].lpn) == mapping_bank)
        {
            if((!IS_CLEAN(cmt[index].vpn)) && \
                    ((cmt[index].lpn / (MAPPINGS_PER_PAGE*NUM_BANKS)) == gtd_index))
            {
                cmt[index].vpn = SET_CLEAN(cmt[index].vpn);   /* entry is clean after flush */
                write_dram_32(TRANS_BUF(mapping_bank) + \
                        sizeof(UINT32 ) * ((cmt[index].lpn/NUM_BANKS) % MAPPINGS_PER_PAGE),
                        cmt[index].vpn);
            }
        }
    }

    /* translation pages are written out-of-place too: new vpn, update GTD */
    mapping_vpn = assign_new_map_write_vpn(mapping_bank);
    gtd[mapping_bank][gtd_index] = mapping_vpn;
    map_prog++;
    nand_page_program(mapping_bank,
            mapping_vpn / PAGES_PER_BLK,
            mapping_vpn % PAGES_PER_BLK,
            TRANS_BUF(mapping_bank));
}
/*
 * Write one logical page from the SATA write buffer to flash (DFTL variant).
 *
 * lpn         : logical page number (bank chosen by page striping)
 * sect_offset : first valid sector within the page
 * num_sectors : number of valid sectors
 *
 * Out-of-place write: allocate a new vpn; if an old copy exists, fill the
 * left/right sector holes from it and decrement the old block's valid count.
 * If no old copy exists, the holes are zero-filled instead. Increments the
 * write_p / data_read / data_prog statistics counters.
 */
static void write_page(UINT32 const lpn, UINT32 const sect_offset, UINT32 const num_sectors)
{
    write_p++;

    UINT32 bank, old_vpn, new_vpn;
    UINT32 vblock, page_num, page_offset, column_cnt;

    bank        = get_num_bank(lpn); // page striping
    page_offset = sect_offset;
    column_cnt  = num_sectors;

    new_vpn = assign_new_write_vpn(bank);
    old_vpn = get_vpn(lpn);

    if (old_vpn != NULL)
    {
        vblock   = old_vpn / PAGES_PER_BLK;
        page_num = old_vpn % PAGES_PER_BLK;

        if (num_sectors != SECTORS_PER_PAGE)
        {
            /* small write with a left hole: one full-page read + mem_copy
             * is cheaper than two partial flash reads */
            if ((num_sectors <= 8) && (page_offset != 0))
            {
                // one page async read
                data_read++;
                nand_page_read(bank, vblock, page_num, FTL_BUF(bank));

                // copy `left hole sectors' into SATA write buffer
                if (page_offset != 0)
                {
                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id), FTL_BUF(bank), page_offset * BYTES_PER_SECTOR);
                }
                // copy `right hole sectors' into SATA write buffer
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    UINT32 const rhole_base = (page_offset + column_cnt) * BYTES_PER_SECTOR;
                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base, FTL_BUF(bank) + rhole_base, BYTES_PER_PAGE - rhole_base);
                }
            }
            // left/right hole async read operation (two partial page read)
            else
            {
                // read `left hole sectors'
                if (page_offset != 0)
                {
                    data_read++;
                    nand_page_ptread(bank, vblock, page_num, 0, page_offset, WR_BUF_PTR(g_ftl_write_buf_id), RETURN_WHEN_DONE);
                }
                // read `right hole sectors'
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    data_read++;
                    nand_page_ptread(bank, vblock, page_num, page_offset + column_cnt, SECTORS_PER_PAGE - (page_offset + column_cnt), WR_BUF_PTR(g_ftl_write_buf_id), RETURN_WHEN_DONE);
                }
            }
        }
        /* old copy is superseded: drop its block's valid-page count */
        set_vcount(bank, vblock, get_vcount(bank, vblock) - 1);
    }
    else if (num_sectors != SECTORS_PER_PAGE)
    {
        /* first write of this lpn: zero-fill the holes in the write buffer */
        if(page_offset != 0)
            mem_set_dram(WR_BUF_PTR(g_ftl_write_buf_id), 0, page_offset * BYTES_PER_SECTOR);

        if((page_offset + num_sectors) < SECTORS_PER_PAGE)
        {
            UINT32 const rhole_base = (page_offset + num_sectors) * BYTES_PER_SECTOR;
            mem_set_dram(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base, 0, BYTES_PER_PAGE - rhole_base);
        }
    }
    vblock   = new_vpn / PAGES_PER_BLK;
    page_num = new_vpn % PAGES_PER_BLK;

    // write new data (make sure that the new data is ready in the write buffer frame)
    // (c.f FO_B_SATA_W flag in flash.h)
    data_prog++;
    nand_page_program_from_host(bank, vblock, page_num);

    // update metadata
    set_lpn(bank, page_num, lpn);
    set_vpn(lpn, new_vpn);
    set_vcount(bank, vblock, get_vcount(bank, vblock) + 1);
}