Example #1
0
void flush_smt_piece(UINT32 idx)
{
	UINT32 bank,row,block;

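	// decode the bank and block of this SMT piece from its DRAM map entry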
	bank = smt_dram_map[idx] / NUM_BANKS_MAX;
	block = smt_dram_map[idx] % NUM_BANKS_MAX;
	if((smt_bit_map[bank] & (1<<block)) != 0){
		// this SMT piece has data to flush
		if (g_misc_meta[bank].smt_pieces[block] >= SMT_LIMIT - 1){
			// block exhausted: erase it before reuse
			nand_block_erase(bank,g_bad_list[bank][block]);
		}
		// update the piece's slot and flush it to NAND
		g_misc_meta[bank].smt_pieces[block] = (g_misc_meta[bank].smt_pieces[block] + SMT_INC_SIZE) % SMT_LIMIT;
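		// byte offset of the piece, rounded up to a page index, plus the block's base row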
		row = (g_misc_meta[bank].smt_pieces[block] * SMT_PIECE_BYTES);
		row = ((row + BYTES_PER_PAGE -1 ) / BYTES_PER_PAGE) + (PAGES_PER_VBLK * g_bad_list[bank][block]);
		// flash map data to nand
		SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
		SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
		SETREG(FCP_DMA_ADDR,SMT_ADDR + (g_smt_victim * SMT_PIECE_BYTES));
		SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);
		SETREG(FCP_COL,0);
		SETREG(FCP_ROW_L(bank),row);
		SETREG(FCP_ROW_H(bank),row);
		flash_issue_cmd(bank,RETURN_ON_ISSUE);
	}
	smt_dram_bit[bank] ^= (1 << block); // toggle the piece's DRAM-resident bit
}
Example #2
0
void flush_smt_piece(UINT32 idx)
{
	UINT32 bank,row,block;
	UINT32 dest;
	bank = smt_dram_map[idx] / NUM_BANKS_MAX;
	block = smt_dram_map[idx] % NUM_BANKS_MAX;
	if((smt_bit_map[bank] & (1<<block)) != 0){
		// this SMT piece has data to flush
		if (g_misc_meta[bank].smt_pieces[block] >= SMT_LIMIT - 1){
			// block exhausted: erase it before reuse
			nand_block_erase(bank,g_bad_list[bank][block]);
		}
		// update the piece's slot and flush it to NAND
		g_misc_meta[bank].smt_pieces[block] = (g_misc_meta[bank].smt_pieces[block] + 1) % SMT_LIMIT;
		row = g_misc_meta[bank].smt_pieces[block] * SMT_INC_SIZE + ( PAGES_PER_VBLK * g_bad_list[bank][block]);
		// flash map data to nand
		SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
		SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
		SETREG(FCP_COL,0);
		SETREG(FCP_ROW_L(bank),row);
		SETREG(FCP_ROW_H(bank),row);
		dest = SMT_ADDR + (idx * SMT_PIECE_BYTES);
		SETREG(FCP_DMA_ADDR,dest);
		SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);
		// busy-wait until this bank's flash state machine is idle
		while (_BSP_FSM(bank) != BANK_IDLE)
		{
			// spin
		}
		flash_issue_cmd(bank,RETURN_WHEN_DONE);
	}
	smt_piece_map[smt_dram_map[idx]] = (UINT32)-1;
}
Example #3
0
static UINT32 assign_new_map_write_vpn(UINT32 const bank)
{
	ASSERT(bank < NUM_BANKS);

	UINT32 write_vpn;
	UINT32 vblock;
	UINT32 new_vblock;

	write_vpn = get_cur_map_write_vpn(bank);
	vblock    = write_vpn / PAGES_PER_BLK;

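	// the current map block is full once the write pointer reaches its last page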
	if ((write_vpn % PAGES_PER_BLK) == (PAGES_PER_BLK - 1))
	{
		if(vblock == map_blk[bank][0])
		{
			new_vblock = map_blk[bank][1];
		}
		else
		{
			new_vblock = map_blk[bank][0];
		}
		/*
		 * copy the valid GTD pages into the new block
		 */
		UINT32 free_offset = 0;
		UINT32 index;
		for(index = 0; index<GTD_SIZE_PER_BANK; index++)
		{
			if(gtd[bank][index] != INVALID)
			{
				nand_page_copyback(bank,
						vblock,
						gtd[bank][index] % PAGES_PER_BLK,
						new_vblock,
						free_offset);
				gtd[bank][index] = new_vblock * PAGES_PER_BLK + free_offset;
				free_offset++;
			}
		}
		/*
		 * erase
		 */
		erase++;
		nand_block_erase(bank, vblock);
		write_vpn = new_vblock*PAGES_PER_BLK + free_offset;
	}
	else
	{
		write_vpn++;
	}
	set_new_map_write_vpn(bank, write_vpn);
	return write_vpn;
}
Example #4
0
void flush_smt_piece(UINT32 idx)
{
	UINT32 bank,row,block;
	UINT32 dest;
	UINT32 pblock, i ;
	UINT32 new_row, new_block;

	bank = smt_dram_map[idx] / SMT_BANK_NUM;
	block = smt_dram_map[idx] % SMT_BANK_NUM;
	pblock = block / SMT_BLOCK;
	if((smt_bit_map[bank][block/NUM_BANKS_MAX]  & (1<<(block%NUM_BANKS_MAX))) != 0){
		// update and flush
		if( g_misc_meta[bank].smt_row[pblock] >= SMT_LIMIT ){
			// block full: compact live pieces into the spare block, then erase and swap
			for(i = 0; i <  (SMT_BANK_NUM + SMT_BLOCK -1) / SMT_BLOCK; i++)
			{
				dest = bank * SMT_BANK_NUM + SMT_BLOCK * pblock + i;
				new_row = smt_pos[dest];
				nand_page_copyback(bank, g_bad_list[bank][pblock], new_row * SMT_INC_SIZE, g_bad_list[bank][SMT_BLOCK], i * SMT_INC_SIZE);
				smt_pos[dest] = i;
			}
			g_misc_meta[bank].smt_row[pblock] = i;
			row = i;
			nand_block_erase(bank,g_bad_list[bank][pblock]);
			new_block = g_bad_list[bank][pblock];
			g_bad_list[bank][pblock] = g_bad_list[bank][SMT_BLOCK];
			g_bad_list[bank][SMT_BLOCK] = new_block;
		}
		else{
			row = g_misc_meta[bank].smt_row[pblock]++;
		}
		smt_pos[smt_dram_map[idx]] = row;
		row = row * SMT_INC_SIZE + ( PAGES_PER_VBLK * g_bad_list[bank][pblock]);

		// flash map data to nand
		SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
		SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
		SETREG(FCP_COL,0);
		SETREG(FCP_ROW_L(bank),row);
		SETREG(FCP_ROW_H(bank),row);
		dest = SMT_ADDR + (idx * SMT_PIECE_BYTES);
		SETREG(FCP_DMA_ADDR,dest);
		SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);
		//flash_issue_cmd(bank,RETURN_WHEN_DONE);
		flash_issue_cmd(bank,RETURN_ON_ISSUE);
		g_bank_to_wait = bank;
	}
	smt_piece_map[smt_dram_map[idx]] = (UINT32)-1;
}
Example #5
0
static void format(void)
{
    UINT32 bank, vblock, vcount_val;

    ASSERT(NUM_MISC_META_SECT > 0);
    ASSERT(NUM_VCOUNT_SECT > 0);

    uart_printf("Total FTL DRAM metadata size: %d KB", DRAM_BYTES_OTHER / 1024);

    uart_printf("VBLKS_PER_BANK: %d", VBLKS_PER_BANK);
    uart_printf("LBLKS_PER_BANK: %d", NUM_LPAGES / PAGES_PER_BLK / NUM_BANKS);
    uart_printf("META_BLKS_PER_BANK: %d", META_BLKS_PER_BANK);

    //----------------------------------------
    // initialize DRAM metadata
    //----------------------------------------
    mem_set_dram(PAGE_MAP_ADDR, NULL, PAGE_MAP_BYTES);
    mem_set_dram(VCOUNT_ADDR, NULL, VCOUNT_BYTES);

    //----------------------------------------
    // erase all blocks except vblock #0
    //----------------------------------------
    for (vblock = MISCBLK_VBN; vblock < VBLKS_PER_BANK; vblock++)
    {
        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            vcount_val = VC_MAX;
            if (is_bad_block(bank, vblock) == FALSE)
            {
                nand_block_erase(bank, vblock);
                vcount_val = 0;
            }
            write_dram_16(VCOUNT_ADDR + ((bank * VBLKS_PER_BANK) + vblock) * sizeof(UINT16),
                          vcount_val);
        }
    }
    //----------------------------------------
    // initialize SRAM metadata
    //----------------------------------------
    init_metadata_sram();

    // flush metadata to NAND
    logging_pmap_table();
    logging_misc_metadata();

    write_format_mark();
    led(1);
    uart_print("format complete");
}
Example #6
0
void flush_smt_piece(UINT32 idx)
{
	UINT32 bank,row,block;
	UINT32 dest;
	UINT32 pblock;
	UINT32 i, old_block;
	bank = smt_dram_map[idx] / SMT_NUM;
	block = smt_dram_map[idx] % SMT_NUM;

	pblock = block / ( NUM_BANKS_MAX *2 );

	if((smt_bit_map[bank][block / NUM_BANKS_MAX] & (1<< (block % NUM_BANKS_MAX))) != 0){
		// this SMT piece has data to flush
		if( g_misc_meta[bank].smt_next_page[pblock] >= SMT_LIMIT - 1){
			// block full: compact live pieces into the free block, then erase and swap
			for(i = 0; i < 16; i++){
				nand_page_copyback(bank,
					g_bad_list[bank][pblock],
					g_misc_meta[bank].smt_pieces[i * NUM_BANKS_MAX * 2 + pblock],
					g_smt_free[bank],
					i);
				g_misc_meta[bank].smt_pieces[i * NUM_BANKS_MAX * 2 + pblock] = i;
			}
			nand_block_erase(bank,g_bad_list[bank][pblock]);

			g_misc_meta[bank].smt_next_page[pblock] = 16;
			old_block = g_bad_list[bank][pblock];
			g_bad_list[bank][pblock] = g_smt_free[bank];
			g_smt_free[bank] = old_block;
		}
		// update the piece's slot and flush it to NAND
		g_misc_meta[bank].smt_pieces[block] = g_misc_meta[bank].smt_next_page[pblock];
		row = g_misc_meta[bank].smt_pieces[block] * SMT_INC_SIZE + ( PAGES_PER_VBLK * g_bad_list[bank][pblock]);
		// flash map data to nand
		SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
		SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
		SETREG(FCP_COL,0);
		SETREG(FCP_ROW_L(bank),row);
		SETREG(FCP_ROW_H(bank),row);
		dest = SMT_ADDR + (idx * SMT_PIECE_BYTES);
		SETREG(FCP_DMA_ADDR,dest);
		//SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);
		SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
		flash_issue_cmd(bank,RETURN_WHEN_DONE);
		g_misc_meta[bank].smt_next_page[pblock]++;
	}
	smt_piece_map[smt_dram_map[idx]] = (UINT32)-1;
}
Example #7
0
// logging misc + vcount metadata
static void logging_misc_metadata(void)
{
    UINT32 misc_meta_bytes = NUM_MISC_META_SECT * BYTES_PER_SECTOR; // per bank
    UINT32 vcount_addr     = VCOUNT_ADDR;
    UINT32 vcount_bytes    = NUM_VCOUNT_SECT * BYTES_PER_SECTOR; // per bank
    UINT32 vcount_boundary = VCOUNT_ADDR + VCOUNT_BYTES; // entire vcount data
    UINT32 bank;

    flash_finish();

    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        inc_miscblk_vpn(bank);

        // note: if misc. meta block is full, just erase old block & write offset #0
        if ((get_miscblk_vpn(bank) / PAGES_PER_BLK) != MISCBLK_VBN)
        {
            nand_block_erase(bank, MISCBLK_VBN);
            set_miscblk_vpn(bank, MISCBLK_VBN * PAGES_PER_BLK); // vpn = 128
        }
        // copy misc. metadata to FTL buffer
        mem_copy(FTL_BUF(bank), &g_misc_meta[bank], misc_meta_bytes);

        // copy vcount metadata to FTL buffer
        if (vcount_addr <= vcount_boundary)
        {
            mem_copy(FTL_BUF(bank) + misc_meta_bytes, vcount_addr, vcount_bytes);
            vcount_addr += vcount_bytes;
        }
    }
    // logging the misc. metadata to nand flash
    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        nand_page_ptprogram(bank,
                            get_miscblk_vpn(bank) / PAGES_PER_BLK,
                            get_miscblk_vpn(bank) % PAGES_PER_BLK,
                            0,
                            NUM_MISC_META_SECT + NUM_VCOUNT_SECT,
                            FTL_BUF(bank));
    }
    flash_finish();
}
Example #8
0
void logging_misc_meta()
{
	UINT32 bank;
	flash_finish();
	for(bank = 0; bank < NUM_BANKS; bank++)
	{
		g_misc_meta[bank].cur_miscblk_vpn++;
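		// misc. block (block 1) is full: erase it and restart at its first page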
		if(g_misc_meta[bank].cur_miscblk_vpn / PAGES_PER_VBLK != 1 )
		{
			nand_block_erase(bank,1);
			g_misc_meta[bank].cur_miscblk_vpn = PAGES_PER_VBLK;
		}
		mem_copy(FTL_BUF_ADDR, &(g_misc_meta[bank]), sizeof(misc_metadata));
		nand_page_ptprogram(bank, 1,
			g_misc_meta[bank].cur_miscblk_vpn % PAGES_PER_VBLK,
			0,
			((sizeof(misc_metadata) + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR),
			FTL_BUF_ADDR);
	}
	flash_finish();
}
Example #9
0
static void logging_pmap_table(void)
{
    UINT32 pmap_addr  = PAGE_MAP_ADDR;
    UINT32 pmap_bytes = BYTES_PER_PAGE; // per bank
    UINT32 mapblk_vpn;
    UINT32 bank;
    UINT32 pmap_boundary = PAGE_MAP_ADDR + PAGE_MAP_BYTES;
    BOOL32 finished = FALSE;

    for (UINT32 mapblk_lbn = 0; mapblk_lbn < MAPBLKS_PER_BANK; mapblk_lbn++)
    {
        flash_finish();

        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            if (finished)
            {
                break;
            }
            else if (pmap_addr >= pmap_boundary)
            {
                finished = TRUE;
                break;
            }
            else if (pmap_addr + BYTES_PER_PAGE >= pmap_boundary)
            {
                finished = TRUE;
                // last chunk: round the remaining bytes up to whole sectors
                pmap_bytes = (pmap_boundary - pmap_addr + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR * BYTES_PER_SECTOR;
            }
            inc_mapblk_vpn(bank, mapblk_lbn);

            mapblk_vpn = get_mapblk_vpn(bank, mapblk_lbn);

            // note: if there is no free page, then erase old map block first.
            if ((mapblk_vpn % PAGES_PER_BLK) == 0)
            {
                // erase full map block
                nand_block_erase(bank, (mapblk_vpn - 1) / PAGES_PER_BLK);

                // next vpn of mapblk is offset #0
                set_mapblk_vpn(bank, mapblk_lbn, ((mapblk_vpn - 1) / PAGES_PER_BLK) * PAGES_PER_BLK);
                mapblk_vpn = get_mapblk_vpn(bank, mapblk_lbn);
            }
            // copy the page mapping table to FTL buffer
            mem_copy(FTL_BUF(bank), pmap_addr, pmap_bytes);

            // logging update page mapping table into map_block
            nand_page_ptprogram(bank,
                                mapblk_vpn / PAGES_PER_BLK,
                                mapblk_vpn % PAGES_PER_BLK,
                                0,
                                pmap_bytes / BYTES_PER_SECTOR,
                                FTL_BUF(bank));
            pmap_addr += pmap_bytes;
        }
        if (finished)
        {
            break;
        }
    }
    flash_finish();
}
Example #10
0
//------------------------------------------------------------
// if all blocks except one free block are full,
// do garbage collection for making at least one free page
//-------------------------------------------------------------
static void garbage_collection(UINT32 const bank)
{
    ASSERT(bank < NUM_BANKS);
    g_ftl_statistics[bank].gc_cnt++;

    UINT32 src_lpn;
    UINT32 vt_vblock;
    UINT32 free_vpn;
    UINT32 vcount; // valid page count in victim block
    UINT32 src_page;
    UINT32 gc_vblock;

    vt_vblock = get_vt_vblock(bank);   // get victim block
    vcount    = get_vcount(bank, vt_vblock);
    gc_vblock = get_gc_vblock(bank);
    free_vpn  = gc_vblock * PAGES_PER_BLK;

/*     uart_printf("garbage_collection bank %d, vblock %d",bank, vt_vblock); */

    ASSERT(vt_vblock != gc_vblock);
    ASSERT(vt_vblock >= META_BLKS_PER_BANK && vt_vblock < VBLKS_PER_BANK);
    ASSERT(vcount < (PAGES_PER_BLK - 1));
    ASSERT(get_vcount(bank, gc_vblock) == VC_MAX);
    ASSERT(!is_bad_block(bank, gc_vblock));

    // 1. load p2l list from last page offset of victim block (4B x PAGES_PER_BLK)
    // fix minor bug
    nand_page_ptread(bank, vt_vblock, PAGES_PER_BLK - 1, 0,
                     ((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR), FTL_BUF(bank), RETURN_WHEN_DONE);
    mem_copy(g_misc_meta[bank].lpn_list_of_cur_vblock, FTL_BUF(bank), sizeof(UINT32) * PAGES_PER_BLK);
    // 2. copy-back all valid pages to free space
    for (src_page = 0; src_page < (PAGES_PER_BLK - 1); src_page++)
    {
        // get lpn of victim block from a read lpn list
        src_lpn = get_lpn(bank, src_page);
        CHECK_VPAGE(get_vpn(src_lpn));

        // determine whether the page is valid or not
        if (get_vpn(src_lpn) !=
            ((vt_vblock * PAGES_PER_BLK) + src_page))
        {
            // invalid page
            continue;
        }
        ASSERT(get_lpn(bank, src_page) != INVALID);
        CHECK_LPAGE(src_lpn);
        // if the page is valid,
        // then do copy-back op. to free space
        nand_page_copyback(bank,
                           vt_vblock,
                           src_page,
                           free_vpn / PAGES_PER_BLK,
                           free_vpn % PAGES_PER_BLK);
        ASSERT((free_vpn / PAGES_PER_BLK) == gc_vblock);
        // update metadata
        set_vpn(src_lpn, free_vpn);
        set_lpn(bank, (free_vpn % PAGES_PER_BLK), src_lpn);

        free_vpn++;
    }
#if OPTION_ENABLE_ASSERT
    if (vcount == 0)
    {
        ASSERT(free_vpn == (gc_vblock * PAGES_PER_BLK));
    }
#endif
    // 3. erase victim block
    nand_block_erase(bank, vt_vblock);
    ASSERT((free_vpn % PAGES_PER_BLK) < (PAGES_PER_BLK - 2));
    ASSERT((free_vpn % PAGES_PER_BLK == vcount));

/*     uart_printf("gc page count : %d", vcount); */

    // 4. update metadata
    set_vcount(bank, vt_vblock, VC_MAX);
    set_vcount(bank, gc_vblock, vcount);
    set_new_write_vpn(bank, free_vpn); // set a free page for new write
    set_gc_vblock(bank, vt_vblock); // next free block (reserve for GC)
    dec_full_blk_cnt(bank); // decrease full block count
    /* uart_print("garbage_collection end"); */
}
Example #11
0
void readPageSingleStep(UINT32 bank)
{
    uart_print("readPageSingleStep: bank="); uart_print_int(bank); uart_print(" ");
    uart_print("pageOffset[bank]="); uart_print_int(pageOffset[bank]); uart_print("\r\n");

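    // all pages of the victim block have been scanned: verify the valid-chunk count, erase the block, and recycle it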
    if (pageOffset[bank] == UsedPagesPerLogBlk)
    {
        if (nValidChunksInBlk[bank] != nValidChunksFromHeap[bank])
        {
            uart_print_level_1("ERROR: found different number of valid chunks than expected at the end of readPageSingleStep on normal block. GC on bank "); uart_print_level_1_int(bank);
            uart_print_level_1(" victimLbn "); uart_print_level_1_int(victimLbn[bank]); uart_print_level_1("\r\n");
            uart_print_level_1("Found "); uart_print_level_1_int(nValidChunksInBlk[bank]);
            uart_print_level_1(" instead of expected "); uart_print_level_1_int(nValidChunksFromHeap[bank]); uart_print_level_1("\r\n");
            uart_print_level_1("pageOffset: "); uart_print_level_1_int(pageOffset[bank]);
            while(1);
        }
        else
        {
            uart_print("readPageSingleStep: successful GC on normal block in bank "); uart_print_int(bank); uart_print("\r\n");
            //checkNoChunksAreValid(bank, victimLbn[bank]);
        }

        resetValidChunksAndRemove(&heapDataFirstUsage, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_FIRST_USAGE);
        resetValidChunksAndRemove(&heapDataSecondUsage, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_SECOND_USAGE);
        resetValidChunksAndRemove(&heapDataCold, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_SECOND_USAGE);
        nand_block_erase(bank, victimVbn[bank]);
        cleanListPush(&cleanListDataWrite, bank, victimLbn[bank]);
#if MeasureGc
        uart_print_level_2("GCW "); uart_print_level_2_int(bank);
        uart_print_level_2(" "); uart_print_level_2_int(0);
        uart_print_level_2(" "); uart_print_level_2_int(nValidChunksFromHeap[bank]);
        uart_print_level_2("\r\n");
#endif
        gcState[bank]=GcIdle;
        return;
    }

    uart_print("\r\npageOffset[bank]="); uart_print_int(pageOffset[bank]); uart_print("\r\n");

    nValidChunksInPage[bank]=0;
    for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; chunkOffset++) validChunks[bank][chunkOffset]=FALSE;

    UINT32 victimLpns[CHUNKS_PER_PAGE];
    mem_copy(victimLpns, VICTIM_LPN_LIST(bank)+(pageOffset[bank]*CHUNKS_PER_PAGE)*CHUNK_ADDR_BYTES, CHUNKS_PER_PAGE * sizeof(UINT32));

    gcOnRecycledPage[bank] = FALSE;

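    // detect a recycled page: its valid lpns carry the cold-log-buffer flag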
    for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_RECYCLED_PAGE; ++chunkOffset)
    {
        if (victimLpns[chunkOffset] != INVALID && victimLpns[chunkOffset] & ColdLogBufBitFlag)
        {
            gcOnRecycledPage[bank] = TRUE;
        }
        else
        {
            if (gcOnRecycledPage[bank])
            {
                uart_print_level_1("ERROR in readSinglePage: inconsistent lpns in recycled page\r\n");
                while(1);
            }
        }
    }

    if (gcOnRecycledPage[bank])
    {

        UINT32 logChunkAddr = ( (bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK) + (victimLbn[bank]*CHUNKS_PER_BLK) + (pageOffset[bank]*CHUNKS_PER_PAGE) ) | ColdLogBufBitFlag;

        for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_RECYCLED_PAGE; ++chunkOffset)
        {   // This loop finds valid chunks in the page. Note that chunks in the GC buffer won't be considered, as they temporarily don't occupy space in the log
            UINT32 victimLpn = victimLpns[chunkOffset];
            if (victimLpn != INVALID)
            {
                UINT32 i = mem_search_equ_dram_4_bytes(ChunksMapTable(victimLpn, 0), CHUNKS_PER_PAGE, logChunkAddr);

                if(i<CHUNKS_PER_PAGE)
                {
                    dataChunkOffsets[bank][chunkOffset]=i;
                    dataLpns[bank][chunkOffset]=victimLpn & ~(ColdLogBufBitFlag);
                    validChunks[bank][chunkOffset]=TRUE;
                    nValidChunksInPage[bank]++;
                    nValidChunksInBlk[bank]++;
                }
            }
            logChunkAddr++;
        }
    }
    else
    {
        UINT32 logChunkAddr = (bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK) + (victimLbn[bank]*CHUNKS_PER_BLK) + (pageOffset[bank]*CHUNKS_PER_PAGE);

        for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; chunkOffset++)
        {   // This loop finds valid chunks in the page. Note that chunks in the GC buffer won't be considered, as they temporarily don't occupy space in the log
            UINT32 victimLpn = victimLpns[chunkOffset];
            if (victimLpn != INVALID)
            {
                UINT32 i = mem_search_equ_dram_4_bytes(ChunksMapTable(victimLpn, 0), CHUNKS_PER_PAGE, logChunkAddr);

                if(i<CHUNKS_PER_PAGE)
                {
                    dataChunkOffsets[bank][chunkOffset]=i;
                    dataLpns[bank][chunkOffset]=victimLpn;
                    validChunks[bank][chunkOffset]=TRUE;
                    nValidChunksInPage[bank]++;
                    nValidChunksInBlk[bank]++;
                }
            }
            logChunkAddr++;
        }

    }

    if(nValidChunksInPage[bank] > 0)
    {
        uart_print("Current bank is full, copy page to another one\r\n");

        nand_page_ptread(bank, victimVbn[bank], pageOffset[bank], 0, SECTORS_PER_PAGE, GC_BUF(bank), RETURN_ON_ISSUE);

        gcState[bank] = GcWrite;
    }
    else
    {
        pageOffset[bank]++;
    }

}
Example #12
0
void initGC(UINT32 bank)
{

#if PrintStats
    uart_print_level_1("CNT ");
    uart_print_level_1_int(bank);
    uart_print_level_1(" ");
    uart_print_level_1_int(cleanListSize(&cleanListDataWrite, bank));
    uart_print_level_1(" ");
    uart_print_level_1_int(heapDataFirstUsage.nElInHeap[bank]);
    uart_print_level_1(" ");
    uart_print_level_1_int(heapDataSecondUsage.nElInHeap[bank]);
    uart_print_level_1(" ");
    uart_print_level_1_int(heapDataCold.nElInHeap[bank]);
    uart_print_level_1("\r\n");
#endif

    nValidChunksInBlk[bank] = 0;

    // note(fabio): this version of the GC cleans only completely used blocks (from heapDataSecondUsage).

    UINT32 validCold = getVictimValidPagesNumber(&heapDataCold, bank);
    UINT32 validSecond = getVictimValidPagesNumber(&heapDataSecondUsage, bank);

    uart_print("Valid cold ");
    uart_print_int(validCold);
    uart_print(" valid second ");
    uart_print_int(validSecond);
    uart_print("\r\n");

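    // choose the victim heap: prefer the cold block when it has proportionally fewer valid chunks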
    if (validCold < ((validSecond*secondHotFactorNum)/secondHotFactorDen))
    {
        uart_print("GC on cold block\r\n");
        nValidChunksFromHeap[bank] = validCold;
        victimLbn[bank] = getVictim(&heapDataCold, bank);

#if PrintStats
#if MeasureGc
        uart_print_level_1("COLD "); uart_print_level_1_int(bank); uart_print_level_1(" ");
        uart_print_level_1_int(validCold); uart_print_level_1("\r\n");
#endif
#endif
    }
    else
    {
        uart_print("GC on second hot block\r\n");
        nValidChunksFromHeap[bank] = validSecond;
        victimLbn[bank] = getVictim(&heapDataSecondUsage, bank);

#if PrintStats
#if MeasureGc
        uart_print_level_1("SECOND "); uart_print_level_1_int(bank); uart_print_level_1(" ");
        uart_print_level_1_int(validSecond); uart_print_level_1("\r\n");
#endif
#endif
    }

    victimVbn[bank] = get_log_vbn(bank, victimLbn[bank]);

    uart_print("initGC, bank "); uart_print_int(bank);
    uart_print(" victimLbn "); uart_print_int(victimLbn[bank]);
    uart_print(" valid chunks "); uart_print_int(nValidChunksFromHeap[bank]); uart_print("\r\n");

#if PrintStats
    { // print the Hot First Accumulated parameters
        uart_print_level_1("HFMAX ");
        for (int i=0; i<NUM_BANKS; ++i)
        {
            uart_print_level_1_int(hotFirstAccumulated[i]);
            uart_print_level_1(" ");
        }
        uart_print_level_1("\r\n");
    }
#endif

    { // Insert new value at position 0 in adaptive window and shift all others
        for (int i=adaptiveWindowSize-1; i>0; --i)
        {
            adaptiveWindow[bank][i] = adaptiveWindow[bank][i-1];
        }
        adaptiveWindow[bank][0] = nValidChunksFromHeap[bank];
    }

    if (nValidChunksFromHeap[bank] > 0)
    {
        nand_page_ptread(bank, victimVbn[bank], PAGES_PER_BLK - 1, 0, (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR, VICTIM_LPN_LIST(bank), RETURN_WHEN_DONE); // read twice the lpns list size because there might be the recycled lpns list appended
        gcOnRecycledPage[bank]=FALSE;
        pageOffset[bank]=0;
        gcState[bank]=GcRead;
    }

    else
    {
        resetValidChunksAndRemove(&heapDataFirstUsage, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_FIRST_USAGE);
        resetValidChunksAndRemove(&heapDataSecondUsage, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_SECOND_USAGE);
        resetValidChunksAndRemove(&heapDataCold, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_SECOND_USAGE);
        nand_block_erase(bank, victimVbn[bank]);
        cleanListPush(&cleanListDataWrite, bank, victimLbn[bank]);

#if MeasureGc
        uart_print_level_2("GCW "); uart_print_level_2_int(bank);
        uart_print_level_2(" "); uart_print_level_2_int(0);
        uart_print_level_2(" "); uart_print_level_2_int(nValidChunksFromHeap[bank]);
        uart_print_level_2("\r\n");
#endif

        gcState[bank]=GcIdle;
    }

}
Example #13
0
static void garbage_collection(UINT32 const bank)
{
	SET_GC;

	gc++;
	//    g_ftl_statistics[bank].gc_cnt++;

	UINT32 src_lpn;
	UINT32 vt_vblock;
	UINT32 free_vpn;
	UINT32 vcount; // valid page count in victim block
	UINT32 src_page;
	UINT32 gc_vblock;

	vt_vblock = get_vt_vblock(bank);   // get victim block
	vcount    = get_vcount(bank, vt_vblock);
	gc_vblock = get_gc_vblock(bank);
	free_vpn  = gc_vblock * PAGES_PER_BLK;

	// 1. load p2l list from last page offset of victim block (4B x PAGES_PER_BLK)
	// fix minor bug
	misc_w++;
	nand_page_ptread(bank, vt_vblock, PAGES_PER_BLK - 1, 0,
			((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR), GC_BUF(bank), RETURN_WHEN_DONE);
	mem_copy(g_misc_meta[bank].lpn_list_of_cur_vblock,
			GC_BUF(bank), sizeof(UINT32) * PAGES_PER_BLK);
	// 2. copy-back all valid pages to free space
	for (src_page = 0; src_page < (PAGES_PER_BLK - 1); src_page++)
	{
		// get lpn of victim block from a read lpn list
		src_lpn = get_lpn(bank, src_page);

		// determine whether the page is valid or not
		if (get_vpn(src_lpn) !=
				((vt_vblock * PAGES_PER_BLK) + src_page))
		{
			// invalid page
			continue;
		}
		// if the page is valid,
		// then do copy-back op. to free space
		gc_prog++;
		nand_page_copyback(bank,
				vt_vblock,
				src_page,
				free_vpn / PAGES_PER_BLK,
				free_vpn % PAGES_PER_BLK);
		// update metadata
		set_vpn(src_lpn, free_vpn);
		set_lpn(bank, (free_vpn % PAGES_PER_BLK), src_lpn);

		free_vpn++;
	}
	// 3. erase victim block
	erase++;
	nand_block_erase(bank, vt_vblock);

	// 4. update metadata
	set_vcount(bank, vt_vblock, VC_MAX);
	set_vcount(bank, gc_vblock, vcount);
	set_new_write_vpn(bank, free_vpn); // set a free page for new write
	set_gc_vblock(bank, vt_vblock); // next free block (reserve for GC)
	dec_full_blk_cnt(bank); // decrease full block count
	CLEAR_GC;
}