// Return the next virtual page number (vpn) to program for this bank and
// advance the per-bank write pointer.  When the current block is about to
// fill up, its page-to-lpn list is persisted into the block's last page,
// and if no free block remains a garbage collection pass is triggered.
static UINT32 assign_new_write_vpn(UINT32 const bank)
{
    UINT32 write_vpn;
    UINT32 vblock;

    write_vpn = get_cur_write_vpn(bank);
    vblock = write_vpn / PAGES_PER_BLK;

    // NOTE: if the next new write page's offset would be the last page
    // offset of vblock (i.e. PAGES_PER_BLK - 1), then finalize this block:
    if ((write_vpn % PAGES_PER_BLK) == (PAGES_PER_BLK - 2))
    {
        // Because of a flash controller limitation (accessing the spare
        // area / OOB is prohibited), the lpn list is persistently written
        // into the LAST page of vblock instead of the OOB.
        mem_copy(TEMP_BUF(bank), g_misc_meta[bank].lpn_list_of_cur_vblock, sizeof(UINT32) * PAGES_PER_BLK);
        misc_w++; // statistics: one extra metadata (misc) page program
        nand_page_ptprogram(bank, vblock, PAGES_PER_BLK - 1, 0,
                            ((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR),
                            TEMP_BUF(bank));
        // the in-SRAM lpn list now starts over for the next block
        mem_set_sram(g_misc_meta[bank].lpn_list_of_cur_vblock, 0x00000000, sizeof(UINT32) * PAGES_PER_BLK);
        inc_full_blk_cnt(bank);

        // do garbage collection if necessary
        if (is_full_all_blks(bank))
        {
GC:
            garbage_collection(bank);
            // GC installed a fresh write pointer via set_new_write_vpn();
            // just return it.
            return get_cur_write_vpn(bank);
        }
        // otherwise scan forward for the next usable block
        do
        {
            vblock++;
            if (vblock == VBLKS_PER_BANK)
            {
                // ran off the end of the bank without finding a usable
                // block; fall back to garbage collection
                uart_printf(" vblock == VBLKS_PER_BANK");
                goto GC;
            }
        } while (get_vcount(bank, vblock) == VC_MAX); // NOTE(review): VC_MAX appears to mark blocks unavailable for new writes — confirm
    }
    // write page -> next block
    if (vblock != (write_vpn / PAGES_PER_BLK))
    {
        // moved to a different block: start at its first page
        write_vpn = vblock * PAGES_PER_BLK;
    }
    else
    {
        write_vpn++;
    }
    set_new_write_vpn(bank, write_vpn);

    return write_vpn;
}
//------------------------------------------------------------- // Victim selection policy: Greedy // // Select the block which contain minumum valid pages //------------------------------------------------------------- static UINT32 get_vt_vblock(UINT32 const bank) { ASSERT(bank < NUM_BANKS); UINT32 vblock; // search the block which has mininum valid pages vblock = mem_search_min_max(VCOUNT_ADDR + (bank * VBLKS_PER_BANK * sizeof(UINT16)), sizeof(UINT16), VBLKS_PER_BANK, MU_CMD_SEARCH_MIN_DRAM); ASSERT(is_bad_block(bank, vblock) == FALSE); ASSERT(vblock >= META_BLKS_PER_BANK && vblock < VBLKS_PER_BANK); ASSERT(get_vcount(bank, vblock) < (PAGES_PER_BLK - 1)); return vblock; }
//------------------------------------------------------------ // if all blocks except one free block are full, // do garbage collection for making at least one free page //------------------------------------------------------------- static void garbage_collection(UINT32 const bank) { ASSERT(bank < NUM_BANKS); g_ftl_statistics[bank].gc_cnt++; UINT32 src_lpn; UINT32 vt_vblock; UINT32 free_vpn; UINT32 vcount; // valid page count in victim block UINT32 src_page; UINT32 gc_vblock; g_ftl_statistics[bank].gc_cnt++; vt_vblock = get_vt_vblock(bank); // get victim block vcount = get_vcount(bank, vt_vblock); gc_vblock = get_gc_vblock(bank); free_vpn = gc_vblock * PAGES_PER_BLK; /* uart_printf("garbage_collection bank %d, vblock %d",bank, vt_vblock); */ ASSERT(vt_vblock != gc_vblock); ASSERT(vt_vblock >= META_BLKS_PER_BANK && vt_vblock < VBLKS_PER_BANK); ASSERT(vcount < (PAGES_PER_BLK - 1)); ASSERT(get_vcount(bank, gc_vblock) == VC_MAX); ASSERT(!is_bad_block(bank, gc_vblock)); // 1. load p2l list from last page offset of victim block (4B x PAGES_PER_BLK) // fix minor bug nand_page_ptread(bank, vt_vblock, PAGES_PER_BLK - 1, 0, ((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR), FTL_BUF(bank), RETURN_WHEN_DONE); mem_copy(g_misc_meta[bank].lpn_list_of_cur_vblock, FTL_BUF(bank), sizeof(UINT32) * PAGES_PER_BLK); // 2. copy-back all valid pages to free space for (src_page = 0; src_page < (PAGES_PER_BLK - 1); src_page++) { // get lpn of victim block from a read lpn list src_lpn = get_lpn(bank, src_page); CHECK_VPAGE(get_vpn(src_lpn)); // determine whether the page is valid or not if (get_vpn(src_lpn) != ((vt_vblock * PAGES_PER_BLK) + src_page)) { // invalid page continue; } ASSERT(get_lpn(bank, src_page) != INVALID); CHECK_LPAGE(src_lpn); // if the page is valid, // then do copy-back op. 
to free space nand_page_copyback(bank, vt_vblock, src_page, free_vpn / PAGES_PER_BLK, free_vpn % PAGES_PER_BLK); ASSERT((free_vpn / PAGES_PER_BLK) == gc_vblock); // update metadata set_vpn(src_lpn, free_vpn); set_lpn(bank, (free_vpn % PAGES_PER_BLK), src_lpn); free_vpn++; } #if OPTION_ENABLE_ASSERT if (vcount == 0) { ASSERT(free_vpn == (gc_vblock * PAGES_PER_BLK)); } #endif // 3. erase victim block nand_block_erase(bank, vt_vblock); ASSERT((free_vpn % PAGES_PER_BLK) < (PAGES_PER_BLK - 2)); ASSERT((free_vpn % PAGES_PER_BLK == vcount)); /* uart_printf("gc page count : %d", vcount); */ // 4. update metadata set_vcount(bank, vt_vblock, VC_MAX); set_vcount(bank, gc_vblock, vcount); set_new_write_vpn(bank, free_vpn); // set a free page for new write set_gc_vblock(bank, vt_vblock); // next free block (reserve for GC) dec_full_blk_cnt(bank); // decrease full block count /* uart_print("garbage_collection end"); */ }
// Program one logical page (lpn) of host data from the SATA write buffer.
// sect_offset/num_sectors describe which sectors of the page the host sent;
// holes are filled by reading the old copy so a full page can be programmed.
static void write_page(UINT32 const lpn, UINT32 const sect_offset, UINT32 const num_sectors)
{
    CHECK_LPAGE(lpn);
    ASSERT(sect_offset < SECTORS_PER_PAGE);
    ASSERT(num_sectors > 0 && num_sectors <= SECTORS_PER_PAGE);

    UINT32 bank, old_vpn, new_vpn;
    UINT32 vblock, page_num, page_offset, column_cnt;

    bank = get_num_bank(lpn); // page striping
    page_offset = sect_offset;
    column_cnt = num_sectors;

    new_vpn = assign_new_write_vpn(bank);
    old_vpn = get_vpn(lpn);

    CHECK_VPAGE (old_vpn);
    CHECK_VPAGE (new_vpn);
    ASSERT(old_vpn != new_vpn);

    g_ftl_statistics[bank].page_wcount++;

    // if old data already exist,
    if (old_vpn != NULL)
    {
        vblock = old_vpn / PAGES_PER_BLK;
        page_num = old_vpn % PAGES_PER_BLK;

        //--------------------------------------------------------------------------------------
        // `Partial programming'
        // we could not determine whether the new data is loaded in the SATA write buffer.
        // Thus, read the left/right hole sectors of a valid page and copy into the write buffer.
        // And then, program whole valid data
        //--------------------------------------------------------------------------------------
        if (num_sectors != SECTORS_PER_PAGE)
        {
            // Performance optimization (but, not proved)
            // To reduce flash memory access, valid hole copy into SATA write buffer after reading whole page
            // Thus, in this case, we need just one full page read + one or two mem_copy
            if ((num_sectors <= 8) && (page_offset != 0))
            {
                // one page async read
                nand_page_read(bank, vblock, page_num, FTL_BUF(bank));
                // copy `left hole sectors' into SATA write buffer
                if (page_offset != 0)
                {
                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id), FTL_BUF(bank), page_offset * BYTES_PER_SECTOR);
                }
                // copy `right hole sectors' into SATA write buffer
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    UINT32 const rhole_base = (page_offset + column_cnt) * BYTES_PER_SECTOR;
                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base, FTL_BUF(bank) + rhole_base, BYTES_PER_PAGE - rhole_base);
                }
            }
            // left/right hole async read operation (two partial page read)
            else
            {
                // read `left hole sectors' directly into the write buffer
                if (page_offset != 0)
                {
                    nand_page_ptread(bank, vblock, page_num, 0, page_offset, WR_BUF_PTR(g_ftl_write_buf_id), RETURN_ON_ISSUE);
                }
                // read `right hole sectors' directly into the write buffer
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    nand_page_ptread(bank, vblock, page_num, page_offset + column_cnt, SECTORS_PER_PAGE - (page_offset + column_cnt), WR_BUF_PTR(g_ftl_write_buf_id), RETURN_ON_ISSUE);
                }
            }
        }
        // after hole filling the whole buffer frame is valid: full page write
        page_offset = 0;
        column_cnt = SECTORS_PER_PAGE;
        // invalidate old page (decrease vcount of its block)
        set_vcount(bank, vblock, get_vcount(bank, vblock) - 1);
    }
    vblock = new_vpn / PAGES_PER_BLK;
    page_num = new_vpn % PAGES_PER_BLK;
    ASSERT(get_vcount(bank,vblock) < (PAGES_PER_BLK - 1));

    // write new data (make sure that the new data is ready in the write buffer frame)
    // (c.f FO_B_SATA_W flag in flash.h)
    nand_page_ptprogram_from_host(bank, vblock, page_num, page_offset, column_cnt);

    // update metadata
    set_lpn(bank, page_num, lpn);
    set_vpn(lpn, new_vpn);
    set_vcount(bank, vblock, get_vcount(bank, vblock) + 1);
}
// Greedy garbage collection (instrumented variant): moves all valid pages
// of the minimum-vcount victim block into the reserved GC block, erases the
// victim, then swaps the roles of the two blocks. Global counters
// (gc, misc_w, gc_prog, erase) track flash operations for profiling.
static void garbage_collection(UINT32 const bank)
{
    SET_GC; // mark GC in progress (instrumentation flag)
    gc++;   // global GC invocation counter
    // g_ftl_statistics[bank].gc_cnt++;

    UINT32 src_lpn;
    UINT32 vt_vblock;
    UINT32 free_vpn;
    UINT32 vcount; // valid page count in victim block
    UINT32 src_page;
    UINT32 gc_vblock;

    vt_vblock = get_vt_vblock(bank);   // get victim block
    vcount = get_vcount(bank, vt_vblock);
    gc_vblock = get_gc_vblock(bank);   // reserved free block to copy into
    free_vpn = gc_vblock * PAGES_PER_BLK;

    // 1. load p2l list from last page offset of victim block (4B x PAGES_PER_BLK)
    misc_w++; // statistics: one metadata page read
    nand_page_ptread(bank, vt_vblock, PAGES_PER_BLK - 1, 0,
                     ((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR),
                     GC_BUF(bank), RETURN_WHEN_DONE);
    mem_copy(g_misc_meta[bank].lpn_list_of_cur_vblock, GC_BUF(bank), sizeof(UINT32) * PAGES_PER_BLK);

    // 2. copy-back all valid pages to free space
    for (src_page = 0; src_page < (PAGES_PER_BLK - 1); src_page++)
    {
        // get lpn of victim block from the read lpn list
        src_lpn = get_lpn(bank, src_page);

        // a page is valid only if the mapping still points at it
        if (get_vpn(src_lpn) != ((vt_vblock * PAGES_PER_BLK) + src_page))
        {
            // invalid page
            continue;
        }
        // valid page: copy-back into the reserved GC block
        gc_prog++; // statistics: one copy-back program
        nand_page_copyback(bank, vt_vblock, src_page,
                           free_vpn / PAGES_PER_BLK, free_vpn % PAGES_PER_BLK);
        // update metadata to point at the new physical location
        set_vpn(src_lpn, free_vpn);
        set_lpn(bank, (free_vpn % PAGES_PER_BLK), src_lpn);
        free_vpn++;
    }
    // 3. erase victim block
    erase++; // statistics: one block erase
    nand_block_erase(bank, vt_vblock);

    // 4. update metadata: victim becomes the next reserved GC block,
    // the old GC block becomes the active write block.
    set_vcount(bank, vt_vblock, VC_MAX);
    set_vcount(bank, gc_vblock, vcount);
    set_new_write_vpn(bank, free_vpn); // set a free page for new write
    set_gc_vblock(bank, vt_vblock);    // next free block (reserve for GC)
    dec_full_blk_cnt(bank);            // decrease full block count
    CLEAR_GC; // GC finished
}
// Program one logical page (lpn) of host data (instrumented variant: counts
// data reads/programs). Unlike the sibling variant, this one always issues a
// full-page program, so holes must be filled first: from the old copy when
// one exists, otherwise zeroed out.
static void write_page(UINT32 const lpn, UINT32 const sect_offset, UINT32 const num_sectors)
{
    write_p++; // global write-page counter
    UINT32 bank, old_vpn, new_vpn;
    UINT32 vblock, page_num, page_offset, column_cnt;

    bank = get_num_bank(lpn); // page striping
    page_offset = sect_offset;
    column_cnt = num_sectors;

    new_vpn = assign_new_write_vpn(bank);
    old_vpn = get_vpn(lpn);

    // if old data already exist, fill the buffer holes from the old page
    if (old_vpn != NULL)
    {
        vblock = old_vpn / PAGES_PER_BLK;
        page_num = old_vpn % PAGES_PER_BLK;

        if (num_sectors != SECTORS_PER_PAGE)
        {
            // small write with a left hole: one full page read + mem_copy
            // is cheaper than two partial reads
            if ((num_sectors <= 8) && (page_offset != 0))
            {
                // one page async read
                data_read++;
                nand_page_read(bank, vblock, page_num, FTL_BUF(bank));
                // copy `left hole sectors' into SATA write buffer
                if (page_offset != 0)
                {
                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id), FTL_BUF(bank), page_offset * BYTES_PER_SECTOR);
                }
                // copy `right hole sectors' into SATA write buffer
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    UINT32 const rhole_base = (page_offset + column_cnt) * BYTES_PER_SECTOR;
                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base, FTL_BUF(bank) + rhole_base, BYTES_PER_PAGE - rhole_base);
                }
            }
            // left/right hole async read operation (two partial page read)
            else
            {
                // read `left hole sectors' directly into the write buffer
                if (page_offset != 0)
                {
                    data_read++;
                    nand_page_ptread(bank, vblock, page_num, 0, page_offset, WR_BUF_PTR(g_ftl_write_buf_id), RETURN_WHEN_DONE);
                }
                // read `right hole sectors' directly into the write buffer
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    data_read++;
                    nand_page_ptread(bank, vblock, page_num, page_offset + column_cnt, SECTORS_PER_PAGE - (page_offset + column_cnt), WR_BUF_PTR(g_ftl_write_buf_id), RETURN_WHEN_DONE);
                }
            }
        }
        // invalidate old page (decrease vcount of its block)
        set_vcount(bank, vblock, get_vcount(bank, vblock) - 1);
    }
    // no old data: zero-fill the hole sectors so the full-page program
    // writes deterministic data
    else if (num_sectors != SECTORS_PER_PAGE)
    {
        if(page_offset != 0)
            mem_set_dram(WR_BUF_PTR(g_ftl_write_buf_id), 0, page_offset * BYTES_PER_SECTOR);
        if((page_offset + num_sectors) < SECTORS_PER_PAGE)
        {
            UINT32 const rhole_base = (page_offset + num_sectors) * BYTES_PER_SECTOR;
            mem_set_dram(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base, 0, BYTES_PER_PAGE - rhole_base);
        }
    }
    vblock = new_vpn / PAGES_PER_BLK;
    page_num = new_vpn % PAGES_PER_BLK;

    // write new data (make sure that the new data is ready in the write buffer frame)
    // (c.f FO_B_SATA_W flag in flash.h)
    data_prog++;
    nand_page_program_from_host(bank, vblock, page_num);

    // update metadata
    set_lpn(bank, page_num, lpn);
    set_vpn(lpn, new_vpn);
    set_vcount(bank, vblock, get_vcount(bank, vblock) + 1);
}
void main() { int x = 0; int y = 0; int i, j; int ply_x = 120; int ply_y = 92; int ply_tile = 'H'; int ply2_x = 120; int ply2_y = 100; int ply2_tile = 'H'; int raster[10], speeds[10], *p, *p2; char *c; for (y = 2; y < 22; y += 2) { for (x = 0; x != 32; x += 2) { set_bkg_map(road_pattern, x, y, 2, 2); } for (x = 0; x != 32; x += 4) { if (y < 12) { set_bkg_map(car1_map_r, x, y+1, 2, 1); } else { set_bkg_map(car1_map_l, x, y+1, 2, 1); } } } for (x = 0; x != 32; x ++) { set_bkg_map(top_sidewalk_pattern, x, 0, 1, 3); set_bkg_map(bottom_sidewalk_pattern, x, 22, 1, 3); set_bkg_map(central_strip_pattern, x, 12, 1, 1); } for (i = 0, p2 = speeds; i != 5; i++, p2++) { (*p2) = rand_speed(); } for (i = 0; i != 5; i++, p2++) { (*p2) = -rand_speed(); } set_vdp_reg(VDP_REG_FLAGS1, VDP_REG_FLAGS1_SCREEN); load_tiles(chicken_graphics, 0, 255, 4); load_palette(pal1, 0, 16); load_palette(pal2, 16, 16); // add_pause_int(pause_handler); for (;;) { p = raster; p2 = speeds; j = 0; x = 1; y = 16; while (get_vcount() != 0) { } x = (*p) >> 4; while (y < 176) { while (get_vcount() < y) { } if (j & 0x01) { load_palette(pal1, 0, 16); } else { load_palette(pal2, 0, 16); } scroll_bkg(x, y); (*p) += (*p2); p++; p2++; x = (*p) >> 4; y += 14; j++; while (get_vcount() < y) { } scroll_bkg(0, 0); y += 2; } load_palette(pal1, 0, 16); scroll_bkg(0, 0); wait_vblank_noint(); } }