Example no. 1
static UINT32 assign_new_map_write_vpn(UINT32 const bank)
{
	ASSERT(bank < NUM_BANKS);

	UINT32 write_vpn;
	UINT32 vblock;
	UINT32 new_vblock;

	write_vpn = get_cur_map_write_vpn(bank);
	vblock    = write_vpn / PAGES_PER_BLK;

	if ((write_vpn % PAGES_PER_BLK) == (PAGES_PER_BLK - 1))
	{
		if(vblock == map_blk[bank][0])
		{
			new_vblock = map_blk[bank][1];
		}
		else
		{
			new_vblock = map_blk[bank][0];
		}
		/*
		 * copy the valid GTD pages to the new block
		 */
		UINT32 free_offset = 0;
		UINT32 index;
		for(index = 0; index<GTD_SIZE_PER_BANK; index++)
		{
			if(gtd[bank][index] != INVALID)
			{
				nand_page_copyback(bank,
						vblock,
						gtd[bank][index] % PAGES_PER_BLK,
						new_vblock,
						free_offset);
				gtd[bank][index] = new_vblock * PAGES_PER_BLK + free_offset;
				free_offset++;
			}
		}
		/*
		 * erase the old map block and switch writes to the new one
		 */
		erase++; // statistics counter
		nand_block_erase(bank, vblock);
		write_vpn = new_vblock * PAGES_PER_BLK + free_offset;
	}
	else
	{
		write_vpn++;
	}
	set_new_map_write_vpn(bank, write_vpn);
	return write_vpn;
}
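
For context, here is a minimal sketch of how the GTD maintained above might be consumed on the read path. The helper name read_map_page, the MAP_BUF(bank) destination buffer, and the nand_page_read(bank, vblock, page, buf_addr) signature are assumptions for illustration, not part of the original source.

static void read_map_page(UINT32 const bank, UINT32 const index)
{
	ASSERT(bank < NUM_BANKS && index < GTD_SIZE_PER_BANK);

	UINT32 const vpn = gtd[bank][index]; // current NAND location of this map page
	ASSERT(vpn != INVALID);

	nand_page_read(bank,
			vpn / PAGES_PER_BLK,  // virtual block holding the page
			vpn % PAGES_PER_BLK,  // page offset within that block
			MAP_BUF(bank));       // hypothetical DRAM buffer
}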
Example no. 2
void flush_smt_piece(UINT32 idx)
{
	UINT32 bank,row,block;
	UINT32 dest;
	UINT32 pblock, i ;
	UINT32 new_row, old_block;

	bank = smt_dram_map[idx] / SMT_BANK_NUM;
	block = smt_dram_map[idx] % SMT_BANK_NUM;
	pblock = block / SMT_BLOCK;
	if((smt_bit_map[bank][block/NUM_BANKS_MAX]  & (1<<(block%NUM_BANKS_MAX))) != 0){
		// the piece is dirty: update its position and flush it to NAND
		if( g_misc_meta[bank].smt_row[pblock] >= SMT_LIMIT ){
			// the map block is full: compact the valid pieces into the spare block, then erase it
			for(i = 0; i <  (SMT_BANK_NUM + SMT_BLOCK -1) / SMT_BLOCK; i++)
			{
				dest = bank * SMT_BANK_NUM + SMT_BLOCK * pblock + i;
				new_row = smt_pos[dest];
				nand_page_copyback(bank,
						g_bad_list[bank][pblock],
						new_row * SMT_INC_SIZE,
						g_bad_list[bank][SMT_BLOCK],
						i * SMT_INC_SIZE);
				smt_pos[dest] = i;
			}
			g_misc_meta[bank].smt_row[pblock] = i + 1; // row i is consumed below; the next free row is i + 1
			row = i;
			nand_block_erase(bank,g_bad_list[bank][pblock]);
			// swap the erased block with the spare block
			old_block = g_bad_list[bank][pblock];
			g_bad_list[bank][pblock] = g_bad_list[bank][SMT_BLOCK];
			g_bad_list[bank][SMT_BLOCK] = old_block;
		}
		else{
			row = g_misc_meta[bank].smt_row[pblock]++;
		}
		smt_pos[smt_dram_map[idx]] = row;
		row = row * SMT_INC_SIZE + ( PAGES_PER_VBLK * g_bad_list[bank][pblock]); // absolute NAND row address

		// flash map data to nand
		SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
		SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
		SETREG(FCP_COL,0);
		SETREG(FCP_ROW_L(bank),row);
		SETREG(FCP_ROW_H(bank),row);
		dest = SMT_ADDR + (idx * SMT_PIECE_BYTES);
		SETREG(FCP_DMA_ADDR,dest);
		SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);
		// issue asynchronously (rather than RETURN_WHEN_DONE); the caller must
		// wait on g_bank_to_wait before reusing the DRAM buffer (see the sketch
		// after this example)
		flash_issue_cmd(bank,RETURN_ON_ISSUE);
		g_bank_to_wait = bank;
	}
	smt_piece_map[smt_dram_map[idx]] = (UINT32)-1; // the piece is no longer cached in DRAM
}
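
Because this version issues the program with RETURN_ON_ISSUE and only records g_bank_to_wait, the caller must wait for that bank to go idle before reusing the SMT DRAM buffer. A minimal sketch of such a wait, assuming a Jasmine-style BSP_FSM(bank) status macro and BANK_IDLE state (assumptions, not shown in the source):

static void wait_smt_flush_done(void)
{
	if (g_bank_to_wait != (UINT32)-1)
	{
		// spin until the outstanding page program on that bank completes
		while (BSP_FSM(g_bank_to_wait) != BANK_IDLE)
			;
		g_bank_to_wait = (UINT32)-1; // no flush outstanding any more
	}
}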
Example no. 3
void flush_smt_piece(UINT32 idx)
{
	UINT32 bank,row,block;
	UINT32 dest;
	UINT32 pblock;
	UINT32 i, old_block;
	bank = smt_dram_map[idx] / SMT_NUM;
	block = smt_dram_map[idx] % SMT_NUM;

	pblock = block / ( NUM_BANKS_MAX *2 );

	if((smt_bit_map[bank][block / NUM_BANKS_MAX] & (1<< (block % NUM_BANKS_MAX))) != 0){
		// the cached smt piece is dirty: update its position and flush it
		if( g_misc_meta[bank].smt_next_page[pblock] >= SMT_LIMIT - 1){
			// the map block is full: compact the 16 live pieces into the free block, then erase it
			for (i = 0; i < 16; i++) {
				nand_page_copyback(bank,
						g_bad_list[bank][pblock],
						g_misc_meta[bank].smt_pieces[i * NUM_BANKS_MAX * 2 + pblock],
						g_smt_free[bank],
						i);
				g_misc_meta[bank].smt_pieces[i * NUM_BANKS_MAX * 2 + pblock] = i;
			}
			nand_block_erase(bank,g_bad_list[bank][pblock]);

			g_misc_meta[bank].smt_next_page[pblock] = 16; // pages 0..15 now hold the compacted pieces
			// swap the erased block with the free block
			old_block = g_bad_list[bank][pblock];
			g_bad_list[bank][pblock] = g_smt_free[bank];
			g_smt_free[bank] = old_block;
		}
		// record the new page for this piece and flush it to NAND
		g_misc_meta[bank].smt_pieces[block] = g_misc_meta[bank].smt_next_page[pblock];
		row = g_misc_meta[bank].smt_pieces[block] * SMT_INC_SIZE + ( PAGES_PER_VBLK * g_bad_list[bank][pblock]); // absolute NAND row address
		// flash map data to nand
		SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
		SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
		SETREG(FCP_COL,0);
		SETREG(FCP_ROW_L(bank),row);
		SETREG(FCP_ROW_H(bank),row);
		dest = SMT_ADDR + (idx * SMT_PIECE_BYTES);
		SETREG(FCP_DMA_ADDR,dest);
		SETREG(FCP_DMA_CNT, BYTES_PER_PAGE); // DMA a full page rather than just SMT_PIECE_BYTES
		flash_issue_cmd(bank,RETURN_WHEN_DONE);
		g_misc_meta[bank].smt_next_page[pblock]++;
	}
	smt_piece_map[smt_dram_map[idx]] = (UINT32)-1; // the piece is no longer cached in DRAM
}
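
Both versions of flush_smt_piece test a per-piece dirty bit in smt_bit_map before writing anything. A minimal sketch of the matching set/clear helpers, derived from the bit layout used in the tests above (the helper names are hypothetical):

static void set_smt_dirty(UINT32 const bank, UINT32 const block)
{
	smt_bit_map[bank][block / NUM_BANKS_MAX] |= (1 << (block % NUM_BANKS_MAX));
}

static void clear_smt_dirty(UINT32 const bank, UINT32 const block)
{
	smt_bit_map[bank][block / NUM_BANKS_MAX] &= ~(1 << (block % NUM_BANKS_MAX));
}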
Example no. 4
//------------------------------------------------------------
// if all blocks except one free block are full,
// do garbage collection to make at least one free page
//-------------------------------------------------------------
static void garbage_collection(UINT32 const bank)
{
    ASSERT(bank < NUM_BANKS);
    g_ftl_statistics[bank].gc_cnt++;

    UINT32 src_lpn;
    UINT32 vt_vblock;
    UINT32 free_vpn;
    UINT32 vcount; // valid page count in victim block
    UINT32 src_page;
    UINT32 gc_vblock;

    vt_vblock = get_vt_vblock(bank);   // get victim block
    vcount    = get_vcount(bank, vt_vblock);
    gc_vblock = get_gc_vblock(bank);
    free_vpn  = gc_vblock * PAGES_PER_BLK;

/*     uart_printf("garbage_collection bank %d, vblock %d",bank, vt_vblock); */

    ASSERT(vt_vblock != gc_vblock);
    ASSERT(vt_vblock >= META_BLKS_PER_BANK && vt_vblock < VBLKS_PER_BANK);
    ASSERT(vcount < (PAGES_PER_BLK - 1));
    ASSERT(get_vcount(bank, gc_vblock) == VC_MAX);
    ASSERT(!is_bad_block(bank, gc_vblock));

    // 1. load p2l list from last page offset of victim block (4B x PAGES_PER_BLK)
    // round the list size up to a whole number of sectors
    nand_page_ptread(bank, vt_vblock, PAGES_PER_BLK - 1, 0,
                     ((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR), FTL_BUF(bank), RETURN_WHEN_DONE);
    mem_copy(g_misc_meta[bank].lpn_list_of_cur_vblock, FTL_BUF(bank), sizeof(UINT32) * PAGES_PER_BLK);
    // 2. copy-back all valid pages to free space
    for (src_page = 0; src_page < (PAGES_PER_BLK - 1); src_page++)
    {
        // get lpn of victim block from a read lpn list
        src_lpn = get_lpn(bank, src_page);
        CHECK_VPAGE(get_vpn(src_lpn));

        // determine whether the page is valid or not
        if (get_vpn(src_lpn) !=
            ((vt_vblock * PAGES_PER_BLK) + src_page))
        {
            // invalid page
            continue;
        }
        ASSERT(get_lpn(bank, src_page) != INVALID);
        CHECK_LPAGE(src_lpn);
        // if the page is valid,
        // then do copy-back op. to free space
        nand_page_copyback(bank,
                           vt_vblock,
                           src_page,
                           free_vpn / PAGES_PER_BLK,
                           free_vpn % PAGES_PER_BLK);
        ASSERT((free_vpn / PAGES_PER_BLK) == gc_vblock);
        // update metadata
        set_vpn(src_lpn, free_vpn);
        set_lpn(bank, (free_vpn % PAGES_PER_BLK), src_lpn);

        free_vpn++;
    }
#if OPTION_ENABLE_ASSERT
    if (vcount == 0)
    {
        ASSERT(free_vpn == (gc_vblock * PAGES_PER_BLK));
    }
#endif
    // 3. erase victim block
    nand_block_erase(bank, vt_vblock);
    ASSERT((free_vpn % PAGES_PER_BLK) < (PAGES_PER_BLK - 2));
    ASSERT((free_vpn % PAGES_PER_BLK == vcount));

/*     uart_printf("gc page count : %d", vcount); */

    // 4. update metadata
    set_vcount(bank, vt_vblock, VC_MAX);
    set_vcount(bank, gc_vblock, vcount);
    set_new_write_vpn(bank, free_vpn); // set a free page for new write
    set_gc_vblock(bank, vt_vblock); // next free block (reserve for GC)
    dec_full_blk_cnt(bank); // decrease full block count
    /* uart_print("garbage_collection end"); */
}
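
The routine above relies on get_vt_vblock() to return a data block with few valid pages. A minimal greedy sketch of such a selection, assuming get_vcount() returns VC_MAX for free and reserved blocks so they are skipped (the name pick_victim_block is hypothetical):

static UINT32 pick_victim_block(UINT32 const bank)
{
    UINT32 vblock;
    UINT32 vt_vblock  = META_BLKS_PER_BANK;
    UINT32 min_vcount = VC_MAX;

    // scan the data blocks and remember the one with the fewest valid pages
    for (vblock = META_BLKS_PER_BANK; vblock < VBLKS_PER_BANK; vblock++)
    {
        UINT32 const vcount = get_vcount(bank, vblock);
        if (vcount < min_vcount) // blocks marked VC_MAX never win
        {
            vt_vblock  = vblock;
            min_vcount = vcount;
        }
    }
    return vt_vblock;
}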
Example no. 5
static void garbage_collection(UINT32 const bank)
{
	SET_GC; // mark that garbage collection is in progress

	gc++; // count GC invocations
	//    g_ftl_statistics[bank].gc_cnt++;

	UINT32 src_lpn;
	UINT32 vt_vblock;
	UINT32 free_vpn;
	UINT32 vcount; // valid page count in victim block
	UINT32 src_page;
	UINT32 gc_vblock;

	vt_vblock = get_vt_vblock(bank);   // get victim block
	vcount    = get_vcount(bank, vt_vblock);
	gc_vblock = get_gc_vblock(bank);
	free_vpn  = gc_vblock * PAGES_PER_BLK;

	// 1. load p2l list from last page offset of victim block (4B x PAGES_PER_BLK)
	// round the list size up to a whole number of sectors
	misc_w++;
	nand_page_ptread(bank, vt_vblock, PAGES_PER_BLK - 1, 0,
			((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR), GC_BUF(bank), RETURN_WHEN_DONE);
	mem_copy(g_misc_meta[bank].lpn_list_of_cur_vblock,
			GC_BUF(bank), sizeof(UINT32) * PAGES_PER_BLK);
	// 2. copy-back all valid pages to free space
	for (src_page = 0; src_page < (PAGES_PER_BLK - 1); src_page++)
	{
		// get lpn of victim block from a read lpn list
		src_lpn = get_lpn(bank, src_page);

		// determine whether the page is valid or not
		if (get_vpn(src_lpn) !=
				((vt_vblock * PAGES_PER_BLK) + src_page))
		{
			// invalid page
			continue;
		}
		// if the page is valid,
		// then do copy-back op. to free space
		gc_prog++; // count copy-back programs issued during GC
		nand_page_copyback(bank,
				vt_vblock,
				src_page,
				free_vpn / PAGES_PER_BLK,
				free_vpn % PAGES_PER_BLK);
		// update metadata
		set_vpn(src_lpn, free_vpn);
		set_lpn(bank, (free_vpn % PAGES_PER_BLK), src_lpn);

		free_vpn++;
	}
	// 3. erase victim block
	erase++; // count block erases
	nand_block_erase(bank, vt_vblock);

	// 4. update metadata
	set_vcount(bank, vt_vblock, VC_MAX);
	set_vcount(bank, gc_vblock, vcount);
	set_new_write_vpn(bank, free_vpn); // set a free page for new write
	set_gc_vblock(bank, vt_vblock); // next free block (reserve for GC)
	dec_full_blk_cnt(bank); // decrease full block count
	CLEAR_GC; // garbage collection finished
}
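
This variant replaces the per-bank statistics with global event counters (gc, gc_prog, erase, misc_w) and a GC-in-progress flag behind SET_GC / CLEAR_GC. A minimal sketch of declarations that could back them; the g_in_gc flag and the macro definitions are assumptions for illustration:

UINT32 gc;      // garbage collection invocations
UINT32 gc_prog; // page programs issued by copy-back during GC
UINT32 erase;   // block erase operations
UINT32 misc_w;  // metadata page operations

volatile UINT32 g_in_gc;        // hypothetical flag behind the macros
#define SET_GC   (g_in_gc = 1)
#define CLEAR_GC (g_in_gc = 0)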