// One "read" step of the per-bank GC state machine.
// If every used page of the victim log block has been scanned, it validates the
// valid-chunk count against the heap bookkeeping, erases the victim block,
// recycles it onto the clean list, and returns the bank to GcIdle.
// Otherwise it scans one page of the victim block for valid chunks; if any are
// found the page is read into GC_BUF and the state advances to GcWrite, else
// the scan simply moves to the next page.
// Globals touched (all indexed by bank): pageOffset, nValidChunksInBlk,
// nValidChunksFromHeap, victimLbn, victimVbn, gcState, nValidChunksInPage,
// validChunks, dataChunkOffsets, dataLpns, gcOnRecycledPage.
void readPageSingleStep(UINT32 bank)
{
    uart_print("readPageSingleStep: bank="); uart_print_int(bank); uart_print(" ");
    uart_print("pageOffset[bank]="); uart_print_int(pageOffset[bank]); uart_print("\r\n");

    // All used pages of the victim block have been processed: finish this GC run.
    if (pageOffset[bank] == UsedPagesPerLogBlk)
    {
        // Sanity check: the number of valid chunks actually found must match
        // what the heap metadata predicted; a mismatch means corrupted
        // bookkeeping, so halt the firmware for inspection.
        if (nValidChunksInBlk[bank] != nValidChunksFromHeap[bank])
        {
            uart_print_level_1("ERROR: found different number of valid chunks than expected at the end of readPageSingleStep on normal block. GC on bank "); uart_print_level_1_int(bank);
            uart_print_level_1(" victimLbn "); uart_print_level_1_int(victimLbn[bank]); uart_print_level_1("\r\n");
            uart_print_level_1("Found "); uart_print_level_1_int(nValidChunksInBlk[bank]);
            uart_print_level_1(" instead of expected "); uart_print_level_1_int(nValidChunksFromHeap[bank]); uart_print_level_1("\r\n");
            uart_print_level_1("pageOffset: "); uart_print_level_1_int(pageOffset[bank]);
            while(1);   // fatal: spin forever so the error is visible on UART
        }
        else
        {
            uart_print("readPageSingleStep: successful GC on normal block in bank "); uart_print_int(bank); uart_print("\r\n");
            //checkNoChunksAreValid(bank, victimLbn[bank]);
        }

        // Clear the victim block's valid-chunk counters in all three heaps
        // (the block may be tracked in any of them) and remove it from each.
        resetValidChunksAndRemove(&heapDataFirstUsage, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_FIRST_USAGE);
        resetValidChunksAndRemove(&heapDataSecondUsage, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_SECOND_USAGE);
        resetValidChunksAndRemove(&heapDataCold, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_SECOND_USAGE);
        nand_block_erase(bank, victimVbn[bank]);
        cleanListPush(&cleanListDataWrite, bank, victimLbn[bank]);
#if MeasureGc
        uart_print_level_2("GCW "); uart_print_level_2_int(bank);
        uart_print_level_2(" "); uart_print_level_2_int(0);
        uart_print_level_2(" "); uart_print_level_2_int(nValidChunksFromHeap[bank]);
        uart_print_level_2("\r\n");
#endif
        gcState[bank]=GcIdle;
        return;
    }

    uart_print("\r\npageOffset[bank]="); uart_print_int(pageOffset[bank]); uart_print("\r\n");

    // Start scanning a fresh page: reset the per-page valid-chunk bookkeeping.
    nValidChunksInPage[bank]=0;
    for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; chunkOffset++) validChunks[bank][chunkOffset]=FALSE;

    // Copy the logical page numbers stored for this physical page of the victim.
    UINT32 victimLpns[CHUNKS_PER_PAGE];
    mem_copy(victimLpns, VICTIM_LPN_LIST(bank)+(pageOffset[bank]*CHUNKS_PER_PAGE)*CHUNK_ADDR_BYTES, CHUNKS_PER_PAGE * sizeof(UINT32));

    gcOnRecycledPage[bank] = FALSE;

    // Detect whether this page belongs to a recycled (cold-log) block: its lpns
    // carry ColdLogBufBitFlag. A mix of flagged and unflagged lpns within the
    // first CHUNKS_PER_RECYCLED_PAGE entries is inconsistent and fatal.
    // NOTE(review): only a non-flagged lpn appearing AFTER a flagged one is
    // detected here; the reverse order would pass — presumably impossible by
    // construction, but worth confirming.
    for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_RECYCLED_PAGE; ++chunkOffset)
    {
        if (victimLpns[chunkOffset] != INVALID && victimLpns[chunkOffset] & ColdLogBufBitFlag)
        {
            gcOnRecycledPage[bank] = TRUE;
        }
        else
        {
            if (gcOnRecycledPage[bank])
            {
                uart_print_level_1("ERROR in readSinglePage: inconsistent lpns in recycled page\r\n");
                while(1);
            }
        }
    }

    if (gcOnRecycledPage[bank])
    {
        // Recycled page: compare mapping entries against the flagged chunk
        // address, and scan only CHUNKS_PER_RECYCLED_PAGE chunks.
        UINT32 logChunkAddr = ( (bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK) + (victimLbn[bank]*CHUNKS_PER_BLK) + (pageOffset[bank]*CHUNKS_PER_PAGE) ) | ColdLogBufBitFlag;

        for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_RECYCLED_PAGE; ++chunkOffset)
        {   // This loop finds valid chunks in the page. Note that chunks in GC Buf won't be considered as they temporarily don't occupy space in Log
            UINT32 victimLpn = victimLpns[chunkOffset];
            if (victimLpn != INVALID)
            {
                // A chunk is still valid iff the map for its lpn still points here.
                UINT32 i = mem_search_equ_dram_4_bytes(ChunksMapTable(victimLpn, 0), CHUNKS_PER_PAGE, logChunkAddr);

                if(i<CHUNKS_PER_PAGE)
                {
                    dataChunkOffsets[bank][chunkOffset]=i;
                    dataLpns[bank][chunkOffset]=victimLpn & ~(ColdLogBufBitFlag);   // store the lpn without the cold flag
                    validChunks[bank][chunkOffset]=TRUE;
                    nValidChunksInPage[bank]++;
                    nValidChunksInBlk[bank]++;
                }
            }
            logChunkAddr++;
        }
    }
    else
    {
        // Normal page: same scan, unflagged chunk addresses, all CHUNKS_PER_PAGE chunks.
        UINT32 logChunkAddr = (bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK) + (victimLbn[bank]*CHUNKS_PER_BLK) + (pageOffset[bank]*CHUNKS_PER_PAGE);

        for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; chunkOffset++)
        {   // This loop finds valid chunks in the page. Note that chunks in GC Buf won't be considered as they temporarily don't occupy space in Log
            UINT32 victimLpn = victimLpns[chunkOffset];
            if (victimLpn != INVALID)
            {
                UINT32 i = mem_search_equ_dram_4_bytes(ChunksMapTable(victimLpn, 0), CHUNKS_PER_PAGE, logChunkAddr);

                if(i<CHUNKS_PER_PAGE)
                {
                    dataChunkOffsets[bank][chunkOffset]=i;
                    dataLpns[bank][chunkOffset]=victimLpn;
                    validChunks[bank][chunkOffset]=TRUE;
                    nValidChunksInPage[bank]++;
                    nValidChunksInBlk[bank]++;
                }
            }
            logChunkAddr++;
        }

    }

    if(nValidChunksInPage[bank] > 0)
    {
        uart_print("Current bank is full, copy page to another one\r\n");

        // Kick off the page read into the GC buffer; writePage() will run in
        // the GcWrite state once the transfer is issued.
        nand_page_ptread(bank, victimVbn[bank], pageOffset[bank], 0, SECTORS_PER_PAGE, GC_BUF(bank), RETURN_ON_ISSUE);

        gcState[bank] = GcWrite;
    }
    else
    {
        // Nothing valid in this page: skip it and stay in the read state.
        pageOffset[bank]++;
    }

}
// "Write" step of the per-bank GC state machine: relocate the valid chunks
// that readPageSingleStep() collected into GC_BUF for the current victim page.
// Fast path: if the whole page is still valid (and none of its chunks were
// remapped by a concurrent write), program the entire buffer to the cold log
// in one NAND operation and update the chunk map for every chunk.
// Slow path (WritePartialPage): copy surviving chunks one by one, dropping any
// chunk whose map entry no longer points into the victim page.
// On completion of a page the state returns to GcRead and pageOffset advances.
void writePage(UINT32 bank)
{
    uart_print("writePage: bank="); uart_print_int(bank);
    uart_print(" victimLbn "); uart_print_int(victimLbn[bank]);
    uart_print(" pageOffset "); uart_print_int(pageOffset[bank]); uart_print(" ");

    // Full page of valid chunks: try the whole-page fast path.
    // (Was a magic "8"; CHUNKS_PER_PAGE is what the scan loops below iterate
    // over, and a page is "full" when every one of its chunks is valid.)
    if(nValidChunksInPage[bank] == CHUNKS_PER_PAGE)
    {

        // Chunk address of the first chunk of the victim page inside the log.
        UINT32 logChunkBase = ((bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK) + (victimLbn[bank]*CHUNKS_PER_BLK) + (pageOffset[bank]*CHUNKS_PER_PAGE));
        if (gcOnRecycledPage[bank])
        {
            logChunkBase = logChunkBase | ColdLogBufBitFlag;
        }

        // Re-validate every chunk: a concurrent write may have remapped one
        // since the read step ran.
        for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; chunkOffset++)
        {
            UINT32 chunkAddr = read_dram_32(ChunksMapTable(dataLpns[bank][chunkOffset], dataChunkOffsets[bank][chunkOffset]));

            // note (fabio): here we check against the normal chunkAddr (not recycled) because if there are CHUNKS_PER_PAGE valid chunks the blk cannot be a recycled one
            if(chunkAddr != logChunkBase + chunkOffset)
            {
                // note(fabio): here invalidate only the first chunk that was moved by another write. If other chunks were also moved they'll be found by the code after the goto
                validChunks[bank][chunkOffset]=FALSE;
                nValidChunksInPage[bank]--;
                nValidChunksInBlk[bank]--;
                goto WritePartialPage;
            }

        }

        // Destination: next page of the cold read/write log.
        UINT32 dstLpn = getRWLpn(bank, coldLogCtrl);
        UINT32 dstVbn = get_log_vbn(bank, LogPageToLogBlk(dstLpn));
        UINT32 dstPageOffset = LogPageToOffset(dstLpn);

        uart_print(" dstLpn="); uart_print_int(dstLpn);
        uart_print(" dstVbn="); uart_print_int(dstVbn); uart_print(" dstPageOffset="); uart_print_int(dstPageOffset); uart_print("\r\n");

#if PrintStats
        uart_print_level_1("^\r\n");
#endif

        nand_page_program(bank, dstVbn, dstPageOffset, GC_BUF(bank), RETURN_ON_ISSUE);

        // Record the lpns of the relocated chunks in the destination block's
        // lpn list, then point every chunk-map entry at the new location.
        mem_copy(chunkInLpnsList(coldLogCtrl[bank].lpnsListAddr, dstPageOffset, 0), dataLpns[bank], CHUNKS_PER_PAGE * sizeof(UINT32));

        for (UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; ++chunkOffset)
        {
            write_dram_32(ChunksMapTable(dataLpns[bank][chunkOffset], dataChunkOffsets[bank][chunkOffset]), (bank * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (dstLpn * CHUNKS_PER_PAGE) + chunkOffset);
        }

        nValidChunksInPage[bank] = 0;
        gcOnRecycledPage[bank]=FALSE;

        gcState[bank] = GcRead;

        pageOffset[bank]++;

        coldLogCtrl[bank].increaseLpn(bank, coldLogCtrl);

    }
    else
WritePartialPage:   // reached directly, or via goto when the fast path finds a remapped chunk
    {
        uart_print("write partial ");
        UINT32 chunkOffset=0;
        UINT32 logChunkBase=((bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK) + (victimLbn[bank]*CHUNKS_PER_BLK) + (pageOffset[bank]*CHUNKS_PER_PAGE));
        if (gcOnRecycledPage[bank])
        {
            logChunkBase = logChunkBase | ColdLogBufBitFlag;
            // note(fabio): Here we should decode
        }
        // Drain the remaining valid chunks one at a time.
        while(nValidChunksInPage[bank] > 0)
        {

            if(validChunks[bank][chunkOffset])
            {
                validChunks[bank][chunkOffset] = FALSE;
                nValidChunksInPage[bank]--;

                UINT32 chunkAddr = read_dram_32(ChunksMapTable(dataLpns[bank][chunkOffset], dataChunkOffsets[bank][chunkOffset]));

                // Copy the chunk only if its map entry still points into the
                // victim page; otherwise another write already relocated it.
                if(chunkAddr == logChunkBase+chunkOffset)
                {

                    writeChunkOnLogBlockDuringGC(bank,
                                                 dataLpns[bank][chunkOffset],
                                                 dataChunkOffsets[bank][chunkOffset],
                                                 chunkOffset,
                                                 GC_BUF(bank));
                }
                else
                {
                    uart_print(" one chunk was moved during GC ");
                    nValidChunksInBlk[bank]--;
                }
            }
            chunkOffset++;
        }
        uart_print(" current nValidChunksInBlk="); uart_print_int(nValidChunksInBlk[bank]); uart_print("\r\n");

        // Only advance the state machine if we actually came from GcWrite
        // (i.e. this call processed a page read by readPageSingleStep).
        if (gcState[bank] == GcWrite)
        {
            gcState[bank] = GcRead;
            gcOnRecycledPage[bank]=FALSE;
            pageOffset[bank]++;
        }
    }
}
/* Example #3 (0) */
/*
 * Reclaim the victim block of the given bank: copy each still-valid page
 * into the bank's spare GC block, erase the victim, then swap the two
 * blocks' roles in the metadata (the old victim becomes the next GC block).
 */
static void garbage_collection(UINT32 const bank)
{
	SET_GC;

	gc++;
	//    g_ftl_statistics[bank].gc_cnt++;

	UINT32 victim_vbn = get_vt_vblock(bank);            // block being reclaimed
	UINT32 valid_cnt  = get_vcount(bank, victim_vbn);   // valid pages it holds
	UINT32 spare_vbn  = get_gc_vblock(bank);            // destination (spare) block
	UINT32 dst_vpn    = spare_vbn * PAGES_PER_BLK;      // next free page in spare

	// 1. load p2l list from last page offset of victim block (4B x PAGES_PER_BLK)
	// fix minor bug
	misc_w++;
	nand_page_ptread(bank, victim_vbn, PAGES_PER_BLK - 1, 0,
			((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR), GC_BUF(bank), RETURN_WHEN_DONE);
	mem_copy(g_misc_meta[bank].lpn_list_of_cur_vblock,
			GC_BUF(bank), sizeof(UINT32) * PAGES_PER_BLK);

	// 2. copy-back all valid pages to free space
	for (UINT32 page = 0; page < (PAGES_PER_BLK - 1); page++)
	{
		// lpn recorded for this page of the victim block
		UINT32 lpn = get_lpn(bank, page);

		// the page is valid iff the mapping still points at it
		if (get_vpn(lpn) == ((victim_vbn * PAGES_PER_BLK) + page))
		{
			// valid page: copy-back into the spare block's next free page
			gc_prog++;
			nand_page_copyback(bank,
					victim_vbn,
					page,
					dst_vpn / PAGES_PER_BLK,
					dst_vpn % PAGES_PER_BLK);
			// update metadata to the new location
			set_vpn(lpn, dst_vpn);
			set_lpn(bank, (dst_vpn % PAGES_PER_BLK), lpn);

			dst_vpn++;
		}
	}

	// 3. erase victim block
	erase++;
	nand_block_erase(bank, victim_vbn);

	// 4. update metadata
	//set_vcount(bank, vt_vblock, VC_MAX);
	set_vcount(bank, victim_vbn, VC_MAX);
	set_vcount(bank, spare_vbn, valid_cnt);
	set_new_write_vpn(bank, dst_vpn);   // set a free page for new write
	set_gc_vblock(bank, victim_vbn);    // next free block (reserve for GC)
	dec_full_blk_cnt(bank);             // decrease full block count
	CLEAR_GC;
}