Example #1
static void readCompletePage(UINT32 *chunksInPage, UINT32 *srcChunkByteOffsets, UINT32 *chunkIdxs, UINT8 *decreaseInOW)
{
    uart_print("readCompletePage\r\n");
    UINT32 oldLogBank = ChunkToBank(oldChunkAddr_);
    UINT32 oldLogVbn = get_log_vbn(oldLogBank, ChunkToLbn(oldChunkAddr_));
    UINT32 oldLogPageOffset = ChunkToPageOffset(oldChunkAddr_);
    nand_page_ptread(oldLogBank, oldLogVbn, oldLogPageOffset, 0, SECTORS_PER_PAGE, TEMP_BUF_ADDR, RETURN_WHEN_DONE);
    for (int i=0; i<*chunksInPage; i++)
    {
        UINT32 src = TEMP_BUF_ADDR + srcChunkByteOffsets[i];
        UINT32 dst = FTL_BUF(0) + chunkIdxs[i] * BYTES_PER_CHUNK;
        mem_copy(dst, src, BYTES_PER_CHUNK);
        chunksDone_[chunkIdxs[i]]=1;
        if(mode_ == GcMode)
        {
#if Overwrite
            if (decreaseInOW[i])
            {
                decrementValidChunks(&heapDataOverwrite, oldLogBank, ChunkToLbn(oldChunkAddr_));
            }
            else
#endif
            {
                decrementValidChunks(&heapDataWrite, oldLogBank, ChunkToLbn(oldChunkAddr_));
            }
        }
    }
}
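The ChunkTo* helpers above unpack a linear chunk address. Their definitions are not part of these examples; the decoders below are a reconstruction based on how readPageSingleStep (Example #11) builds addresses (bank-major, then block, then page, then chunk), so every macro body here is an assumption.

/* Hypothetical decoders, consistent with
 *   addr = bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK
 *        + lbn*CHUNKS_PER_BLK + pageOffset*CHUNKS_PER_PAGE + chunkOffset
 * as used in readPageSingleStep. Assumes any ColdLogBufBitFlag has been
 * masked off first. Not the original definitions. */
#define ChunkToBank(addr)        ((addr) / (LOG_BLK_PER_BANK * CHUNKS_PER_BLK))
#define ChunkToLbn(addr)         (((addr) / CHUNKS_PER_BLK) % LOG_BLK_PER_BANK)
#define ChunkToPageOffset(addr)  (((addr) % CHUNKS_PER_BLK) / CHUNKS_PER_PAGE)
#define ChunkToChunkOffset(addr) ((addr) % CHUNKS_PER_PAGE)
#define ChunkToSectOffset(addr)  (ChunkToChunkOffset(addr) * SECTORS_PER_CHUNK)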
Example #2
static void writePartialChunkWhenOldChunkIsInFlashLog(UINT32 nSectsToWrite, UINT32 oldChunkAddr) {
    uart_print("writePartialChunkWhenOldChunkIsInFlashLog\r\n");
    UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id)+((sectOffset_ / SECTORS_PER_CHUNK)*BYTES_PER_CHUNK);
    UINT32 dstWBufChunkStart = LOG_BUF(bank_) + (chunkPtr[bank_] * BYTES_PER_CHUNK); // base address of the destination chunk
    UINT32 startOffsetWrite = (sectOffset_ % SECTORS_PER_CHUNK) * BYTES_PER_SECTOR;
    // Old Chunk Location
    UINT32 oldBank = ChunkToBank(oldChunkAddr);
    UINT32 oldVbn = get_log_vbn(oldBank, ChunkToLbn(oldChunkAddr));
    UINT32 oldPageOffset = ChunkToPageOffset(oldChunkAddr);
    UINT32 oldSectOffset = ChunkToSectOffset(oldChunkAddr);
    // Offsets
    UINT32 dstByteOffset = chunkPtr[bank_] * BYTES_PER_CHUNK;
    UINT32 srcByteOffset = ChunkToChunkOffset(oldChunkAddr) * BYTES_PER_CHUNK;
    UINT32 alignedWBufAddr = LOG_BUF(bank_) + dstByteOffset - srcByteOffset;
    waitBusyBank(bank_);
    nand_page_ptread(oldBank, oldVbn, oldPageOffset, oldSectOffset, SECTORS_PER_CHUNK, alignedWBufAddr, RETURN_WHEN_DONE);
    mem_copy(dstWBufChunkStart + startOffsetWrite, src + startOffsetWrite, nSectsToWrite*BYTES_PER_SECTOR);
}
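A note on alignedWBufAddr: assuming nand_page_ptread's DMA deposits the requested sectors at dst + sectOffset*BYTES_PER_SECTOR, biasing the base downward by srcByteOffset makes the old chunk land exactly on dstWBufChunkStart:

/* Sketch of the address arithmetic (the DMA placement is an assumption):
 *   landing = alignedWBufAddr + srcByteOffset
 *           = LOG_BUF(bank_) + dstByteOffset - srcByteOffset + srcByteOffset
 *           = LOG_BUF(bank_) + chunkPtr[bank_] * BYTES_PER_CHUNK
 *           = dstWBufChunkStart
 */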
Example #3
void precacheLowPage(const UINT32 bank, LogCtrlBlock * ctrlBlock)
{

    //uart_print_level_1("PRECACHE ");
    //uart_print_level_1_int(bank);
    //uart_print_level_1(" ");
    //uart_print_level_1_int(ctrlBlock[bank].nextLowPageOffset);
    //uart_print_level_1("\r\n");


    UINT32 lbn = LogPageToLogBlk(ctrlBlock[bank].logLpn);
    UINT32 vbn = get_log_vbn(bank, lbn);
    UINT32 pageOffset = ctrlBlock[bank].nextLowPageOffset;

    uart_print("precacheLowPage: pageOffset ");
    uart_print_int(pageOffset);
    uart_print("\r\n");

    nand_page_ptread(bank, vbn, pageOffset, 0, SECTORS_PER_PAGE, PrecacheForEncoding(bank), RETURN_ON_ISSUE);
    ctrlBlock[bank].precacheDone = TRUE;
}
Example #4
static void readOneChunk(UINT32 *chunksInPage, UINT32 *srcChunkByteOffsets, UINT32 *chunkIdxs, UINT8 *decreaseInOW)
{
    uart_print("readOneChunk\r\n");
    UINT32 bank = ChunkToBank(oldChunkAddr_);
    UINT32 vbn = get_log_vbn(bank, ChunkToLbn(oldChunkAddr_));
    UINT32 pageOffset = ChunkToPageOffset(oldChunkAddr_);
    UINT32 dst = FTL_BUF(0) + (chunkIdxs[0]*BYTES_PER_CHUNK) - srcChunkByteOffsets[0]; // bias the base so the DMA (which writes at dst + source byte offset) lands the chunk at FTL_BUF(0) + chunkIdxs[0]*BYTES_PER_CHUNK
    nand_page_ptread(bank, vbn, pageOffset, srcChunkByteOffsets[0]/BYTES_PER_SECTOR, SECTORS_PER_CHUNK, dst, RETURN_WHEN_DONE);
    if(mode_ == GcMode)
    {
#if Overwrite
        if (decreaseInOW[0])
        {
            decrementValidChunks(&heapDataOverwrite, bank, ChunkToLbn(oldChunkAddr_));
        }
        else
#endif
        {
            decrementValidChunks(&heapDataWrite, bank, ChunkToLbn(oldChunkAddr_));
        }
    }
}
Example #5
void loading_misc_meta()
{
	/*int i;
	flash_finish();

	disable_irq();
	flash_clear_irq();

	for(i = 0 ;i < NUM_BANKS;i++){
		SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);	
		SETREG(FCP_DMA_CNT, sizeof(misc_metadata));
		SETREG(FCP_COL, 0);
		SETREG(FCP_DMA_ADDR, FTL_BUF_ADDR);
		//SETREG(FCP_DMA_ADDR, &(g_misc_meta[i]));
		SETREG(FCP_OPTION, FO_P | FO_E );		
		SETREG(FCP_ROW_L(i), PAGES_PER_VBLK);
		SETREG(FCP_ROW_H(i), PAGES_PER_VBLK);
		flash_issue_cmd(i, RETURN_ON_ISSUE);
		flash_finish();
		CLR_BSP_INTR(i,0xff);
		mem_copy(&(g_misc_meta[i]),FTL_BUF_ADDR,sizeof(misc_metadata));
	}

	enable_irq();*/
	UINT32 load_flag = 0;
	UINT32 bank, page_num;
	UINT32 load_cnt = 0;

	flash_finish();

	disable_irq();
	flash_clear_irq();	// clear any flash interrupt flags that might have been set

	// scan valid metadata in descending order from last page offset
	for (page_num = PAGES_PER_VBLK - 1; page_num != ((UINT32) -1); page_num--)
	{
		for (bank = 0; bank < NUM_BANKS; bank++)
		{
			if (load_flag & (0x1 << bank))
			{
				continue;
			}
			// read valid metadata from misc. metadata area
			nand_page_ptread(bank,
					1,
					page_num,
					0,
					((sizeof(misc_metadata) + BYTES_PER_SECTOR -1 ) / BYTES_PER_SECTOR),	
					FTL_BUF_ADDR,
					RETURN_ON_ISSUE);
			flash_finish();
			mem_copy(&g_misc_meta[bank], FTL_BUF_ADDR, sizeof(misc_metadata));
		}

		for (bank = 0; bank < NUM_BANKS; bank++)
		{
			if (!(load_flag & (0x1 << bank)) && !(BSP_INTR(bank) & FIRQ_ALL_FF))
			{
				load_flag = load_flag | (0x1 << bank);
				load_cnt++;
			}
			CLR_BSP_INTR(bank, 0xFF);
		}
	}
	ASSERT(load_cnt == NUM_BANKS);

	enable_irq();
}
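The scan above decides per-bank validity from the flash interrupt status: an erased page reads back as all 0xFF and raises FIRQ_ALL_FF, so the newest programmed copy is the first page, scanning downward, that does not trigger the flag. A minimal sketch of that test, assuming BSP_INTR latches the flag until CLR_BSP_INTR clears it:

/* Sketch only: erased-page detection as used by the descending scan. */
static BOOL32 page_was_programmed(UINT32 const bank)
{
    return !(BSP_INTR(bank) & FIRQ_ALL_FF); /* all-0xFF read => still erased */
}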
Example #6
void initLog()
{

    uart_print("Initializing Write Log Space...\r\n");
    uart_print("Initializing clean list...");
    //testCleanList();
    cleanListInit(&cleanListDataWrite, CleanList(0), LOG_BLK_PER_BANK);
    uart_print("done\r\n");

    //int off = __builtin_offsetof(LogCtrlBlock, increaseLpn);

    for(int bank=0; bank<NUM_BANKS; bank++)
    {
        adaptiveStepDown[bank] = initStepDown;
        adaptiveStepUp[bank] = initStepUp;
        nStepUps[bank] = 0;
        nStepDowns[bank] = 0;

        for(int lbn=0; lbn<LOG_BLK_PER_BANK; lbn++)
        {
            cleanListPush(&cleanListDataWrite, bank, lbn);
        }

        UINT32 lbn = cleanListPop(&cleanListDataWrite, bank);

        hotLogCtrl[bank] = (LogCtrlBlock)
        {
            .logLpn = lbn * PAGES_PER_BLK,
            .lpnsListAddr = LPNS_BUF_BASE_1(bank),
            .logBufferAddr = HOT_LOG_BUF(bank),
            .chunkPtr = 0,
            .increaseLpn=increaseLpnHotBlkFirstUsage,
            .updateChunkPtr=updateChunkPtr,
            .nextLowPageOffset=INVALID,
            .allChunksInLogAreValid = TRUE,
            .useRecycledPage=FALSE,
            .precacheDone=TRUE,
        };

        for(int chunk=0; chunk<CHUNKS_PER_PAGE; ++chunk)
        {
            hotLogCtrl[bank].dataLpn[chunk] = INVALID;
            hotLogCtrl[bank].chunkIdx[chunk] = INVALID;
        }

        lbn = cleanListPop(&cleanListDataWrite, bank);

        coldLogCtrl[bank] = (LogCtrlBlock)
        {
            .logLpn = lbn * PAGES_PER_BLK,
            .lpnsListAddr = LPNS_BUF_BASE_2(bank),
            .logBufferAddr = COLD_LOG_BUF(bank),
            .chunkPtr = 0,
            .increaseLpn=increaseLpnColdBlk,
            .updateChunkPtr=updateChunkPtr,
            .nextLowPageOffset=INVALID,
            .allChunksInLogAreValid = TRUE,
            .useRecycledPage=FALSE,
            .precacheDone=TRUE,
        };
        for(int chunk=0; chunk<CHUNKS_PER_PAGE; ++chunk)
        {
            coldLogCtrl[bank].dataLpn[chunk] = INVALID;
            coldLogCtrl[bank].chunkIdx[chunk] = INVALID;
        }

        nValidChunksFromHeap[bank] = INVALID;
    }
}
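initLog's designated initializers reveal most of LogCtrlBlock's shape. The struct below is a reconstruction for reference only; field types, ordering, and the function-pointer signatures are assumptions inferred from the call sites in these examples.

/* Hypothetical reconstruction of LogCtrlBlock; not the original header. */
typedef struct LogCtrlBlock LogCtrlBlock;
struct LogCtrlBlock
{
    UINT32 logLpn;                     // next log page to write
    UINT32 lpnsListAddr;               // DRAM copy of the per-block lpns list
    UINT32 logBufferAddr;              // DRAM buffer page for this log
    UINT32 chunkPtr;                   // next free chunk slot in the buffer
    void (*increaseLpn)(UINT32 const bank, LogCtrlBlock * ctrlBlock);
    void (*updateChunkPtr)(UINT32 const bank);   // signature assumed
    UINT32 nextLowPageOffset;          // low page queued for precaching
    BOOL32 allChunksInLogAreValid;
    BOOL32 useRecycledPage;
    BOOL32 precacheDone;
    UINT32 dataLpn[CHUNKS_PER_PAGE];   // data lpn per buffered chunk
    UINT32 chunkIdx[CHUNKS_PER_PAGE];  // chunk index per buffered chunk
};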

static void findNewLpnForColdLog(const UINT32 bank, LogCtrlBlock * ctrlBlock)
{
    uart_print("findNewLpnForColdLog bank "); uart_print_int(bank);

    if (cleanListSize(&cleanListDataWrite, bank) > 2)
    {
        uart_print(" use clean blk\r\n");
        uart_print("cleanList size = "); uart_print_int(cleanListSize(&cleanListDataWrite, bank)); uart_print("\r\n");

        UINT32 lbn = cleanListPop(&cleanListDataWrite, bank);
        ctrlBlock[bank].logLpn = lbn * PAGES_PER_BLK;
        ctrlBlock[bank].increaseLpn = increaseLpnColdBlk;
    }
    else
    {
        if (reuseCondition(bank))
        {
#if PrintStats
            uart_print_level_1("REUSECOLD\r\n");
#endif
            uart_print(" second usage\r\n");
            UINT32 lbn = getVictim(&heapDataFirstUsage, bank);
            UINT32 nValidChunks = getVictimValidPagesNumber(&heapDataFirstUsage, bank);
            resetValidChunksAndRemove(&heapDataFirstUsage, bank, lbn, CHUNKS_PER_LOG_BLK_FIRST_USAGE);
            resetValidChunksAndRemove(&heapDataSecondUsage, bank, lbn, CHUNKS_PER_LOG_BLK_SECOND_USAGE);
            resetValidChunksAndRemove(&heapDataCold, bank, lbn, nValidChunks);
            ctrlBlock[bank].logLpn = (lbn * PAGES_PER_BLK) + 2;
            ctrlBlock[bank].increaseLpn = increaseLpnColdBlkReused;
            nand_page_ptread(bank,
                             get_log_vbn(bank, lbn),
                             125,
                             0,
                             (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                             ctrlBlock[bank].lpnsListAddr,
                             RETURN_WHEN_DONE); // Read the lpns list from the max low page (125), where increaseLpnHotBlkFirstUsage previously wrote it

        }
        else
        {
            uart_print(" get new block\r\n");
            UINT32 lbn = cleanListPop(&cleanListDataWrite, bank);
            ctrlBlock[bank].logLpn = lbn * PAGES_PER_BLK;
            ctrlBlock[bank].increaseLpn = increaseLpnColdBlk;
            while(cleanListSize(&cleanListDataWrite, bank) < 2)
            {
#if PrintStats
                uart_print_level_1("GCCOLD\r\n");
#endif
                garbageCollectLog(bank);
            }
        }
    }
}
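reuseCondition itself does not appear in these examples. The commented-out alternatives in findNewLpnForHotLog (Example #7) hint at its shape; the version below is only a guess along those lines, not the original:

/* Hypothetical predicate, modeled on the commented-out variant
 * "heapDataFirstUsage.nElInHeap[bank] > hotFirstAccumulated[bank]"
 * from findNewLpnForHotLog. The real condition may differ. */
static BOOL32 reuseCondition(UINT32 const bank)
{
    return heapDataFirstUsage.nElInHeap[bank] > hotFirstAccumulated[bank];
}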

void increaseLpnColdBlkReused (UINT32 const bank, LogCtrlBlock * ctrlBlock)
{
    uart_print("increaseLpnColdBlkReused bank "); uart_print_int(bank); uart_print("\r\n");

    UINT32 lpn = ctrlBlock[bank].logLpn;
    UINT32 pageOffset = LogPageToOffset(lpn);

    if (pageOffset == UsedPagesPerLogBlk-1)
    {
        UINT32 lbn = get_log_lbn(lpn);
        nand_page_ptprogram(bank,
                            get_log_vbn(bank, lbn),
                            PAGES_PER_BLK - 1,
                            0,
                            (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                            ctrlBlock[bank].lpnsListAddr,
                            RETURN_WHEN_DONE);
        mem_set_dram(ctrlBlock[bank].lpnsListAddr, INVALID, (CHUNKS_PER_BLK * CHUNK_ADDR_BYTES));
        insertBlkInHeap(&heapDataCold, bank, lbn);

        findNewLpnForColdLog(bank, ctrlBlock);
    }
    else
    {
        ctrlBlock[bank].logLpn = lpn+2;
    }

    uart_print("increaseLpnColdBlkReused (bank="); uart_print_int(bank); uart_print(") new lpn "); uart_print_int(ctrlBlock[bank].logLpn); uart_print("\r\n");
}
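The stride of two in the else branch encodes the reuse scheme: on second usage only the low page of each pair is reprogrammed, so the walk starts at offset 2 and visits every other page until UsedPagesPerLogBlk - 1.

/* Page offsets visited on second usage, per increaseLpnColdBlkReused:
 *   2, 4, 6, ..., stopping at UsedPagesPerLogBlk - 1.
 * Assumption: these are the reprogrammable low pages; the exact low/high
 * pairing is NAND-specific and not shown in these examples. */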
Example #7
void findNewLpnForHotLog(const UINT32 bank, LogCtrlBlock * ctrlBlock)
{
    uart_print("findNewLpnForHotLog bank "); uart_print_int(bank);

    if (cleanListSize(&cleanListDataWrite, bank) > 2)
    {
        uart_print(" use clean blk\r\n");

        uart_print("cleanList size = "); uart_print_int(cleanListSize(&cleanListDataWrite, bank)); uart_print("\r\n");


        UINT32 lbn = cleanListPop(&cleanListDataWrite, bank);
        ctrlBlock[bank].logLpn = lbn * PAGES_PER_BLK;
        ctrlBlock[bank].increaseLpn = increaseLpnHotBlkFirstUsage; // we are not using a recycled block anymore
        ctrlBlock[bank].updateChunkPtr = updateChunkPtr; // we are not using a recycled block anymore
        ctrlBlock[bank].useRecycledPage = FALSE;

    }
    else
    {
        //if ((heapDataFirstUsage.nElInHeap[bank] > 0) && ((float)validMin > tot) )
        //if (heapDataFirstUsage.nElInHeap[bank] > hotFirstAccumulated[bank])
#if AlwaysReuse
        if(reuseConditionHot(bank))
#else
        if(reuseCondition(bank))
#endif
        {
#if PrintStats
            uart_print_level_1("REUSEHOT\r\n");
#endif

            uart_print(" second usage\r\n");

            UINT32 lbn = getVictim(&heapDataFirstUsage, bank);
            UINT32 nValidChunks = getVictimValidPagesNumber(&heapDataFirstUsage, bank);
            resetValidChunksAndRemove(&heapDataFirstUsage, bank, lbn, CHUNKS_PER_LOG_BLK_FIRST_USAGE);
            //resetValidChunksAndRemove(&heapDataSecondUsage, bank, lbn, CHUNKS_PER_LOG_BLK_SECOND_USAGE);
            resetValidChunksAndRemove(&heapDataSecondUsage, bank, lbn, nValidChunks);
            resetValidChunksAndRemove(&heapDataCold, bank, lbn, CHUNKS_PER_LOG_BLK_SECOND_USAGE);
            ctrlBlock[bank].logLpn = lbn * PAGES_PER_BLK;
            ctrlBlock[bank].increaseLpn = increaseLpnHotBlkSecondUsage;
            ctrlBlock[bank].updateChunkPtr = updateChunkPtrRecycledPage;
            nand_page_ptread(bank,
                             get_log_vbn(bank, lbn),
                             125,
                             0,
                             (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                             ctrlBlock[bank].lpnsListAddr,
                             RETURN_WHEN_DONE); // Read the lpns list from the max low page (125), where increaseLpnHotBlkFirstUsage previously wrote it

            printValidChunksInFirstUsageBlk(bank, ctrlBlock, lbn);

            if (canReuseLowPage(bank, 0, ctrlBlock))
            { // Reuse page 0 prefetching immediately
                precacheLowPage(bank, ctrlBlock);
                ctrlBlock[bank].updateChunkPtr = updateChunkPtrRecycledPage;
                ctrlBlock[bank].useRecycledPage = TRUE;
                ctrlBlock[bank].precacheDone = TRUE;
                ctrlBlock[bank].nextLowPageOffset = 0;
                return;
            }
            ctrlBlock[bank].logLpn++;

            if (canReuseLowPage(bank, 1, ctrlBlock))
            { // Reuse page 1 prefetching immediately
                precacheLowPage(bank, ctrlBlock);
                ctrlBlock[bank].updateChunkPtr = updateChunkPtrRecycledPage;
                ctrlBlock[bank].useRecycledPage = TRUE;
                ctrlBlock[bank].precacheDone = TRUE;
                ctrlBlock[bank].nextLowPageOffset = 1;
                return;
            }
            else
            {
                ctrlBlock[bank].updateChunkPtr = updateChunkPtr;
                ctrlBlock[bank].useRecycledPage = FALSE;
                ctrlBlock[bank].precacheDone = FALSE;
                ctrlBlock[bank].nextLowPageOffset = INVALID;
                increaseLpnHotBlkSecondUsage(bank, ctrlBlock);
            }

        }
        else
        {

            uart_print(" get new block\r\n");
            uart_print("No blks left for second usage\r\n");
            UINT32 lbn = cleanListPop(&cleanListDataWrite, bank);
            ctrlBlock[bank].logLpn = lbn * PAGES_PER_BLK;
            ctrlBlock[bank].increaseLpn = increaseLpnHotBlkFirstUsage; // we are not using a recycled block anymore
            ctrlBlock[bank].updateChunkPtr = updateChunkPtr; // we are not using a recycled block anymore
            ctrlBlock[bank].useRecycledPage = FALSE;

            while(cleanListSize(&cleanListDataWrite, bank) < cleanBlksAfterGcHot)
            {
#if PrintStats
                uart_print_level_1("GCHOT\r\n");
#endif
                garbageCollectLog(bank);
            }
        }
    }
}
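canReuseLowPage and precacheLowPage drive the reuse path; only the latter is shown here (Example #3). From the call sites, a plausible prototype is the following; its body, presumably a check that the low page at pageOffset holds no still-valid chunks, appears nowhere in these examples:

/* Prototype inferred from the call sites above; semantics are an assumption. */
BOOL32 canReuseLowPage(UINT32 const bank, UINT32 const pageOffset, LogCtrlBlock * ctrlBlock);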
Example #8
// misc + VCOUNT
static void load_misc_metadata(void)
{
    UINT32 misc_meta_bytes = NUM_MISC_META_SECT * BYTES_PER_SECTOR;
    UINT32 vcount_bytes    = NUM_VCOUNT_SECT * BYTES_PER_SECTOR;
    UINT32 vcount_addr     = VCOUNT_ADDR;
    UINT32 vcount_boundary = VCOUNT_ADDR + VCOUNT_BYTES;

    UINT32 load_flag = 0;
    UINT32 bank, page_num;
    UINT32 load_cnt = 0;

    flash_finish();

	disable_irq();
	flash_clear_irq();	// clear any flash interrupt flags that might have been set

    // scan valid metadata in descending order from last page offset
    for (page_num = PAGES_PER_BLK - 1; page_num != ((UINT32) -1); page_num--)
    {
        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            if (load_flag & (0x1 << bank))
            {
                continue;
            }
            // read valid metadata from misc. metadata area
            nand_page_ptread(bank,
                             MISCBLK_VBN,
                             page_num,
                             0,
                             NUM_MISC_META_SECT + NUM_VCOUNT_SECT,
                             FTL_BUF(bank),
                             RETURN_ON_ISSUE);
        }
        flash_finish();

        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            if (!(load_flag & (0x1 << bank)) && !(BSP_INTR(bank) & FIRQ_ALL_FF))
            {
                load_flag = load_flag | (0x1 << bank);
                load_cnt++;
            }
            CLR_BSP_INTR(bank, 0xFF);
        }
    }
    ASSERT(load_cnt == NUM_BANKS);

    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        // misc. metadata
        mem_copy(&g_misc_meta[bank], FTL_BUF(bank), sizeof(misc_metadata));

        // vcount metadata
        if (vcount_addr <= vcount_boundary)
        {
            mem_copy(vcount_addr, FTL_BUF(bank) + misc_meta_bytes, vcount_bytes);
            vcount_addr += vcount_bytes;

        }
    }
	enable_irq();
}
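The copy offsets in the final loop imply the on-flash layout of each bank's metadata page; the sketch below is reconstructed from those offsets, not from a header file:

/* Assumed layout of one misc-metadata page (per bank):
 *   bytes [0, misc_meta_bytes)                     misc_metadata
 *   bytes [misc_meta_bytes, + vcount_bytes)        this bank's VCOUNT slice
 * The per-bank VCOUNT slices are concatenated at VCOUNT_ADDR in bank order. */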
Example #9
//------------------------------------------------------------
// if all blocks except one free block are full,
// do garbage collection for making at least one free page
//-------------------------------------------------------------
static void garbage_collection(UINT32 const bank)
{
    ASSERT(bank < NUM_BANKS);
    g_ftl_statistics[bank].gc_cnt++;

    UINT32 src_lpn;
    UINT32 vt_vblock;
    UINT32 free_vpn;
    UINT32 vcount; // valid page count in victim block
    UINT32 src_page;
    UINT32 gc_vblock;

    vt_vblock = get_vt_vblock(bank);   // get victim block
    vcount    = get_vcount(bank, vt_vblock);
    gc_vblock = get_gc_vblock(bank);
    free_vpn  = gc_vblock * PAGES_PER_BLK;

/*     uart_printf("garbage_collection bank %d, vblock %d",bank, vt_vblock); */

    ASSERT(vt_vblock != gc_vblock);
    ASSERT(vt_vblock >= META_BLKS_PER_BANK && vt_vblock < VBLKS_PER_BANK);
    ASSERT(vcount < (PAGES_PER_BLK - 1));
    ASSERT(get_vcount(bank, gc_vblock) == VC_MAX);
    ASSERT(!is_bad_block(bank, gc_vblock));

    // 1. load p2l list from last page offset of victim block (4B x PAGES_PER_BLK)
    // fix minor bug
    nand_page_ptread(bank, vt_vblock, PAGES_PER_BLK - 1, 0,
                     ((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR), FTL_BUF(bank), RETURN_WHEN_DONE);
    mem_copy(g_misc_meta[bank].lpn_list_of_cur_vblock, FTL_BUF(bank), sizeof(UINT32) * PAGES_PER_BLK);
    // 2. copy-back all valid pages to free space
    for (src_page = 0; src_page < (PAGES_PER_BLK - 1); src_page++)
    {
        // get lpn of victim block from a read lpn list
        src_lpn = get_lpn(bank, src_page);
        CHECK_VPAGE(get_vpn(src_lpn));

        // determine whether the page is valid or not
        if (get_vpn(src_lpn) !=
            ((vt_vblock * PAGES_PER_BLK) + src_page))
        {
            // invalid page
            continue;
        }
        ASSERT(get_lpn(bank, src_page) != INVALID);
        CHECK_LPAGE(src_lpn);
        // if the page is valid,
        // then do copy-back op. to free space
        nand_page_copyback(bank,
                           vt_vblock,
                           src_page,
                           free_vpn / PAGES_PER_BLK,
                           free_vpn % PAGES_PER_BLK);
        ASSERT((free_vpn / PAGES_PER_BLK) == gc_vblock);
        // update metadata
        set_vpn(src_lpn, free_vpn);
        set_lpn(bank, (free_vpn % PAGES_PER_BLK), src_lpn);

        free_vpn++;
    }
#if OPTION_ENABLE_ASSERT
    if (vcount == 0)
    {
        ASSERT(free_vpn == (gc_vblock * PAGES_PER_BLK));
    }
#endif
    // 3. erase victim block
    nand_block_erase(bank, vt_vblock);
    ASSERT((free_vpn % PAGES_PER_BLK) < (PAGES_PER_BLK - 2));
    ASSERT((free_vpn % PAGES_PER_BLK == vcount));

/*     uart_printf("gc page count : %d", vcount); */

    // 4. update metadata
    set_vcount(bank, vt_vblock, VC_MAX);
    set_vcount(bank, gc_vblock, vcount);
    set_new_write_vpn(bank, free_vpn); // set a free page for new write
    set_gc_vblock(bank, vt_vblock); // next free block (reserve for GC)
    dec_full_blk_cnt(bank); // decrease full block count
    /* uart_print("garbage_collection end"); */
}
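The final ASSERTs encode the invariant that exactly vcount pages were relocated; a short derivation:

/* free_vpn starts at gc_vblock * PAGES_PER_BLK and advances once per valid
 * page copied, so after the loop
 *   free_vpn % PAGES_PER_BLK == number of pages copied == vcount,
 * which is precisely what ASSERT(free_vpn % PAGES_PER_BLK == vcount) checks. */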
Example #10
static void write_page(UINT32 const lpn, UINT32 const sect_offset, UINT32 const num_sectors)
{
    CHECK_LPAGE(lpn);
    ASSERT(sect_offset < SECTORS_PER_PAGE);
    ASSERT(num_sectors > 0 && num_sectors <= SECTORS_PER_PAGE);

    UINT32 bank, old_vpn, new_vpn;
    UINT32 vblock, page_num, page_offset, column_cnt;

    bank        = get_num_bank(lpn); // page striping
    page_offset = sect_offset;
    column_cnt  = num_sectors;

    new_vpn  = assign_new_write_vpn(bank);
    old_vpn  = get_vpn(lpn);

    CHECK_VPAGE (old_vpn);
    CHECK_VPAGE (new_vpn);
    ASSERT(old_vpn != new_vpn);

    g_ftl_statistics[bank].page_wcount++;

    // if old data already exist,
    if (old_vpn != NULL)
    {
        vblock   = old_vpn / PAGES_PER_BLK;
        page_num = old_vpn % PAGES_PER_BLK;

        //--------------------------------------------------------------------------------------
        // `Partial programming'
        // We cannot tell which sectors of the page already sit in the SATA write buffer,
        // so read the left/right hole sectors of the old valid page, copy them into the
        // write buffer, and then program the whole page.
        //--------------------------------------------------------------------------------------
        if (num_sectors != SECTORS_PER_PAGE)
        {
            // Performance optimization (not benchmarked): to reduce flash accesses,
            // read the whole page once and copy the hole sectors into the SATA write
            // buffer, i.e. one full page read plus one or two mem_copy calls.
            if ((num_sectors <= 8) && (page_offset != 0))
            {
                // one page async read
                nand_page_read(bank,
                               vblock,
                               page_num,
                               FTL_BUF(bank));
                // copy `left hole sectors' into SATA write buffer
                if (page_offset != 0)
                {
                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id),
                             FTL_BUF(bank),
                             page_offset * BYTES_PER_SECTOR);
                }
                // copy `right hole sectors' into SATA write buffer
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    UINT32 const rhole_base = (page_offset + column_cnt) * BYTES_PER_SECTOR;

                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base,
                             FTL_BUF(bank) + rhole_base,
                             BYTES_PER_PAGE - rhole_base);
                }
            }
            // left/right hole async read operation (two partial page read)
            else
            {
                // read `left hole sectors'
                if (page_offset != 0)
                {
                    nand_page_ptread(bank,
                                     vblock,
                                     page_num,
                                     0,
                                     page_offset,
                                     WR_BUF_PTR(g_ftl_write_buf_id),
                                     RETURN_ON_ISSUE);
                }
                // read `right hole sectors'
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    nand_page_ptread(bank,
                                     vblock,
                                     page_num,
                                     page_offset + column_cnt,
                                     SECTORS_PER_PAGE - (page_offset + column_cnt),
                                     WR_BUF_PTR(g_ftl_write_buf_id),
                                     RETURN_ON_ISSUE);
                }
            }
        }
        // full page write
        page_offset = 0;
        column_cnt  = SECTORS_PER_PAGE;
        // invalid old page (decrease vcount)
        set_vcount(bank, vblock, get_vcount(bank, vblock) - 1);
    }
    vblock   = new_vpn / PAGES_PER_BLK;
    page_num = new_vpn % PAGES_PER_BLK;
    ASSERT(get_vcount(bank,vblock) < (PAGES_PER_BLK - 1));

    // write new data (make sure that the new data is ready in the write buffer frame)
    // (c.f FO_B_SATA_W flag in flash.h)
    nand_page_ptprogram_from_host(bank,
                                  vblock,
                                  page_num,
                                  page_offset,
                                  column_cnt);
    // update metadata
    set_lpn(bank, page_num, lpn);
    set_vpn(lpn, new_vpn);
    set_vcount(bank, vblock, get_vcount(bank, vblock) + 1);
}
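The branch condition in the partial-write path encodes a simple cost model: a small write (at most 8 sectors) that does not start at sector 0 may leave holes on both sides, so one full-page read plus one or two mem_copy calls is assumed cheaper than two partial flash reads. Condensed:

/* The policy as written above; the 8-sector threshold is taken from the
 * code, and its tuning is not documented in these examples. */
static BOOL32 use_full_page_read(UINT32 const num_sectors, UINT32 const sect_offset)
{
    return (num_sectors <= 8) && (sect_offset != 0);
}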
Example #11
static void load_pmap_table(void)
{
    UINT32 pmap_addr = PAGE_MAP_ADDR;
    UINT32 temp_page_addr;
    UINT32 pmap_bytes = BYTES_PER_PAGE; // per bank
    UINT32 pmap_boundary = PAGE_MAP_ADDR + (NUM_LPAGES * sizeof(UINT32));
    UINT32 mapblk_lbn, bank;
    BOOL32 finished = FALSE;

    flash_finish();

    for (mapblk_lbn = 0; mapblk_lbn < MAPBLKS_PER_BANK; mapblk_lbn++)
    {
        temp_page_addr = pmap_addr; // backup page mapping addr

        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            if (finished)
            {
                break;
            }
            else if (pmap_addr >= pmap_boundary)
            {
                finished = TRUE;
                break;
            }
            else if (pmap_addr + BYTES_PER_PAGE >= pmap_boundary)
            {
                finished = TRUE;
                pmap_bytes = (pmap_boundary - pmap_addr + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR * BYTES_PER_SECTOR;
            }
            // read page mapping table from map_block
            nand_page_ptread(bank,
                             get_mapblk_vpn(bank, mapblk_lbn) / PAGES_PER_BLK,
                             get_mapblk_vpn(bank, mapblk_lbn) % PAGES_PER_BLK,
                             0,
                             pmap_bytes / BYTES_PER_SECTOR,
                             FTL_BUF(bank),
                             RETURN_ON_ISSUE);
            pmap_addr += pmap_bytes;
        }
        flash_finish();

        pmap_bytes = BYTES_PER_PAGE;
        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            if (temp_page_addr >= pmap_boundary)
            {
                break;
            }
            else if (temp_page_addr + BYTES_PER_PAGE >= pmap_boundary)
            {
                pmap_bytes = (pmap_boundary - temp_page_addr + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR * BYTES_PER_SECTOR;
            }
            // copy page mapping table to PMAP_ADDR from FTL buffer
            mem_copy(temp_page_addr, FTL_BUF(bank), pmap_bytes);

            temp_page_addr += pmap_bytes;
        }
        if (finished)
        {
            break;
        }
    }
}
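load_pmap_table rounds the tail of the map up to whole sectors. A worked example with hypothetical numbers:

/* Hypothetical numbers: 10000 map bytes remain, BYTES_PER_SECTOR = 512.
 *   pmap_bytes = ((10000 + 511) / 512) * 512 = 20 * 512 = 10240
 * The last read therefore covers 20 sectors; the final mem_copy may write up
 * to BYTES_PER_SECTOR - 1 padding bytes past pmap_boundary, which the DRAM
 * layout presumably tolerates. */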
void readPageSingleStep(UINT32 bank)
{
    uart_print("readPageSingleStep: bank="); uart_print_int(bank); uart_print(" ");
    uart_print("pageOffset[bank]="); uart_print_int(pageOffset[bank]); uart_print("\r\n");

    if (pageOffset[bank] == UsedPagesPerLogBlk)
    {
        if (nValidChunksInBlk[bank] != nValidChunksFromHeap[bank])
        {
            uart_print_level_1("ERROR: found different number of valid chunks than expected at the end of readPageSingleStep on normal block. GC on bank "); uart_print_level_1_int(bank);
            uart_print_level_1(" victimLbn "); uart_print_level_1_int(victimLbn[bank]); uart_print_level_1("\r\n");
            uart_print_level_1("Found "); uart_print_level_1_int(nValidChunksInBlk[bank]);
            uart_print_level_1(" instead of expected "); uart_print_level_1_int(nValidChunksFromHeap[bank]); uart_print_level_1("\r\n");
            uart_print_level_1("pageOffset: "); uart_print_level_1_int(pageOffset[bank]);
            while(1);
        }
        else
        {
            uart_print("readPageSingleStep: successful GC on normal block in bank "); uart_print_int(bank); uart_print("\r\n");
            //checkNoChunksAreValid(bank, victimLbn[bank]);
        }

        resetValidChunksAndRemove(&heapDataFirstUsage, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_FIRST_USAGE);
        resetValidChunksAndRemove(&heapDataSecondUsage, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_SECOND_USAGE);
        resetValidChunksAndRemove(&heapDataCold, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_SECOND_USAGE);
        nand_block_erase(bank, victimVbn[bank]);
        cleanListPush(&cleanListDataWrite, bank, victimLbn[bank]);
#if MeasureGc
        uart_print_level_2("GCW "); uart_print_level_2_int(bank);
        uart_print_level_2(" "); uart_print_level_2_int(0);
        uart_print_level_2(" "); uart_print_level_2_int(nValidChunksFromHeap[bank]);
        uart_print_level_2("\r\n");
#endif
        gcState[bank]=GcIdle;
        return;
    }

    uart_print("\r\npageOffset[bank]="); uart_print_int(pageOffset[bank]); uart_print("\r\n");

    nValidChunksInPage[bank]=0;
    for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; chunkOffset++) validChunks[bank][chunkOffset]=FALSE;

    UINT32 victimLpns[CHUNKS_PER_PAGE];
    mem_copy(victimLpns, VICTIM_LPN_LIST(bank)+(pageOffset[bank]*CHUNKS_PER_PAGE)*CHUNK_ADDR_BYTES, CHUNKS_PER_PAGE * sizeof(UINT32));

    gcOnRecycledPage[bank] = FALSE;

    for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_RECYCLED_PAGE; ++chunkOffset)
    {
        if ((victimLpns[chunkOffset] != INVALID) && (victimLpns[chunkOffset] & ColdLogBufBitFlag))
        {
            gcOnRecycledPage[bank] = TRUE;
        }
        else
        {
            if (gcOnRecycledPage[bank])
            {
                uart_print_level_1("ERROR in readSinglePage: inconsistent lpns in recycled page\r\n");
                while(1);
            }
        }
    }

    if (gcOnRecycledPage[bank])
    {

        UINT32 logChunkAddr = ( (bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK) + (victimLbn[bank]*CHUNKS_PER_BLK) + (pageOffset[bank]*CHUNKS_PER_PAGE) ) | ColdLogBufBitFlag;

        for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_RECYCLED_PAGE; ++chunkOffset)
        {   // This loop finds the valid chunks in the page. Note that chunks in the GC buffer are not considered, as they temporarily occupy no space in the log
            UINT32 victimLpn = victimLpns[chunkOffset];
            if (victimLpn != INVALID)
            {
                UINT32 i = mem_search_equ_dram_4_bytes(ChunksMapTable(victimLpn, 0), CHUNKS_PER_PAGE, logChunkAddr);

                if(i<CHUNKS_PER_PAGE)
                {
                    dataChunkOffsets[bank][chunkOffset]=i;
                    dataLpns[bank][chunkOffset]=victimLpn & ~(ColdLogBufBitFlag);
                    validChunks[bank][chunkOffset]=TRUE;
                    nValidChunksInPage[bank]++;
                    nValidChunksInBlk[bank]++;
                }
            }
            logChunkAddr++;
        }
    }
    else
    {
        UINT32 logChunkAddr = (bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK) + (victimLbn[bank]*CHUNKS_PER_BLK) + (pageOffset[bank]*CHUNKS_PER_PAGE);

        for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; chunkOffset++)
        {   // This loop finds the valid chunks in the page. Note that chunks in the GC buffer are not considered, as they temporarily occupy no space in the log
            UINT32 victimLpn = victimLpns[chunkOffset];
            if (victimLpn != INVALID)
            {
                UINT32 i = mem_search_equ_dram_4_bytes(ChunksMapTable(victimLpn, 0), CHUNKS_PER_PAGE, logChunkAddr);

                if(i<CHUNKS_PER_PAGE)
                {
                    dataChunkOffsets[bank][chunkOffset]=i;
                    dataLpns[bank][chunkOffset]=victimLpn;
                    validChunks[bank][chunkOffset]=TRUE;
                    nValidChunksInPage[bank]++;
                    nValidChunksInBlk[bank]++;
                }
            }
            logChunkAddr++;
        }

    }

    if(nValidChunksInPage[bank] > 0)
    {
        uart_print("Current bank is full, copy page to another one\r\n");

        nand_page_ptread(bank, victimVbn[bank], pageOffset[bank], 0, SECTORS_PER_PAGE, GC_BUF(bank), RETURN_ON_ISSUE);

        gcState[bank] = GcWrite;
    }
    else
    {
        pageOffset[bank]++;
    }

}
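mem_search_equ_dram_4_bytes serves as the word-wise search primitive above: it scans CHUNKS_PER_PAGE 32-bit entries starting at ChunksMapTable(victimLpn, 0), and the result is treated as a hit when it is below CHUNKS_PER_PAGE. A hypothetical software equivalent, for reference only:

/* Hypothetical software equivalent; the real primitive is presumably a
 * hardware-assisted DRAM search. */
static UINT32 search_equ_sw(UINT32 const addr, UINT32 const nWords, UINT32 const val)
{
    for (UINT32 i = 0; i < nWords; ++i)                  // scan nWords words
        if (*(volatile UINT32 *)(addr + (i * 4)) == val)
            return i;                                    // index of first match
    return nWords;                                       // miss: out of range
}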
void initGC(UINT32 bank)
{

#if PrintStats
    uart_print_level_1("CNT ");
    uart_print_level_1_int(bank);
    uart_print_level_1(" ");
    uart_print_level_1_int(cleanListSize(&cleanListDataWrite, bank));
    uart_print_level_1(" ");
    uart_print_level_1_int(heapDataFirstUsage.nElInHeap[bank]);
    uart_print_level_1(" ");
    uart_print_level_1_int(heapDataSecondUsage.nElInHeap[bank]);
    uart_print_level_1(" ");
    uart_print_level_1_int(heapDataCold.nElInHeap[bank]);
    uart_print_level_1("\r\n");
#endif

    nValidChunksInBlk[bank] = 0;

    // note(fabio): this version of the GC cleans only completely used blocks (from heapDataSecondUsage).

    UINT32 validCold = getVictimValidPagesNumber(&heapDataCold, bank);
    UINT32 validSecond = getVictimValidPagesNumber(&heapDataSecondUsage, bank);

    uart_print("Valid cold ");
    uart_print_int(validCold);
    uart_print(" valid second ");
    uart_print_int(validSecond);
    uart_print("\r\n");

    if (validCold < ((validSecond*secondHotFactorNum)/secondHotFactorDen))
    {
        uart_print("GC on cold block\r\n");
        nValidChunksFromHeap[bank] = validCold;
        victimLbn[bank] = getVictim(&heapDataCold, bank);

#if PrintStats
#if MeasureGc
        uart_print_level_1("COLD "); uart_print_level_1_int(bank); uart_print_level_1(" ");
        uart_print_level_1_int(validCold); uart_print_level_1("\r\n");
#endif
#endif
    }
    else
    {
        uart_print("GC on second hot block\r\n");
        nValidChunksFromHeap[bank] = validSecond;
        victimLbn[bank] = getVictim(&heapDataSecondUsage, bank);

#if PrintStats
#if MeasureGc
        uart_print_level_1("SECOND "); uart_print_level_1_int(bank); uart_print_level_1(" ");
        uart_print_level_1_int(validSecond); uart_print_level_1("\r\n");
#endif
#endif
    }

    victimVbn[bank] = get_log_vbn(bank, victimLbn[bank]);

    uart_print("initGC, bank "); uart_print_int(bank);
    uart_print(" victimLbn "); uart_print_int(victimLbn[bank]);
    uart_print(" valid chunks "); uart_print_int(nValidChunksFromHeap[bank]); uart_print("\r\n");

#if PrintStats
    { // print the Hot First Accumulated parameters
        uart_print_level_1("HFMAX ");
        for (int i=0; i<NUM_BANKS; ++i)
        {
            uart_print_level_1_int(hotFirstAccumulated[i]);
            uart_print_level_1(" ");
        }
        uart_print_level_1("\r\n");
    }
#endif

    { // Insert new value at position 0 in adaptive window and shift all others
        for (int i=adaptiveWindowSize-1; i>0; --i)
        {
            adaptiveWindow[bank][i] = adaptiveWindow[bank][i-1];
        }
        adaptiveWindow[bank][0] = nValidChunksFromHeap[bank];
    }

    if (nValidChunksFromHeap[bank] > 0)
    {
        nand_page_ptread(bank, victimVbn[bank], PAGES_PER_BLK - 1, 0, (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR, VICTIM_LPN_LIST(bank), RETURN_WHEN_DONE); // read twice the lpns list size, because the recycled lpns list may be appended after the first one
        gcOnRecycledPage[bank]=FALSE;
        pageOffset[bank]=0;
        gcState[bank]=GcRead;
    }

    else
    {
        resetValidChunksAndRemove(&heapDataFirstUsage, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_FIRST_USAGE);
        resetValidChunksAndRemove(&heapDataSecondUsage, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_SECOND_USAGE);
        resetValidChunksAndRemove(&heapDataCold, bank, victimLbn[bank], CHUNKS_PER_LOG_BLK_SECOND_USAGE);
        nand_block_erase(bank, victimVbn[bank]);
        cleanListPush(&cleanListDataWrite, bank, victimLbn[bank]);

#if MeasureGc
        uart_print_level_2("GCW "); uart_print_level_2_int(bank);
        uart_print_level_2(" "); uart_print_level_2_int(0);
        uart_print_level_2(" "); uart_print_level_2_int(nValidChunksFromHeap[bank]);
        uart_print_level_2("\r\n");
#endif

        gcState[bank]=GcIdle;
    }

}
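The cold-vs-second choice in initGC weighs valid-chunk counts with a num/den factor. A worked example with hypothetical configuration values:

/* secondHotFactorNum = 1, secondHotFactorDen = 2, validSecond = 100 (all
 * hypothetical) give threshold = (100 * 1) / 2 = 50: the cold victim is
 * chosen only if validCold < 50, i.e. when cleaning it relocates fewer
 * than half as many chunks as cleaning the second-usage victim. */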
Example #14
static void garbage_collection(UINT32 const bank)
{
	SET_GC;

	gc++;
	//    g_ftl_statistics[bank].gc_cnt++;

	UINT32 src_lpn;
	UINT32 vt_vblock;
	UINT32 free_vpn;
	UINT32 vcount; // valid page count in victim block
	UINT32 src_page;
	UINT32 gc_vblock;

	vt_vblock = get_vt_vblock(bank);   // get victim block
	vcount    = get_vcount(bank, vt_vblock);
	gc_vblock = get_gc_vblock(bank);
	free_vpn  = gc_vblock * PAGES_PER_BLK;

	// 1. load p2l list from last page offset of victim block (4B x PAGES_PER_BLK)
	// fix minor bug
	misc_w++;
	nand_page_ptread(bank, vt_vblock, PAGES_PER_BLK - 1, 0,
			((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR), GC_BUF(bank), RETURN_WHEN_DONE);
	mem_copy(g_misc_meta[bank].lpn_list_of_cur_vblock,
			GC_BUF(bank), sizeof(UINT32) * PAGES_PER_BLK);
	// 2. copy-back all valid pages to free space
	for (src_page = 0; src_page < (PAGES_PER_BLK - 1); src_page++)
	{
		// get lpn of victim block from a read lpn list
		src_lpn = get_lpn(bank, src_page);

		// determine whether the page is valid or not
		if (get_vpn(src_lpn) !=
				((vt_vblock * PAGES_PER_BLK) + src_page))
		{
			// invalid page
			continue;
		}
		// if the page is valid,
		// then do copy-back op. to free space
		gc_prog++;
		nand_page_copyback(bank,
				vt_vblock,
				src_page,
				free_vpn / PAGES_PER_BLK,
				free_vpn % PAGES_PER_BLK);
		// update metadata
		set_vpn(src_lpn, free_vpn);
		set_lpn(bank, (free_vpn % PAGES_PER_BLK), src_lpn);

		free_vpn++;
	}
	// 3. erase victim block
	erase++;
	nand_block_erase(bank, vt_vblock);

	// 4. update metadata
	set_vcount(bank, vt_vblock, VC_MAX);
	set_vcount(bank, gc_vblock, vcount);
	set_new_write_vpn(bank, free_vpn); // set a free page for new write
	set_gc_vblock(bank, vt_vblock); // next free block (reserve for GC)
	dec_full_blk_cnt(bank); // decrease full block count
	CLEAR_GC;
}
Example #15
static void write_page(UINT32 const lpn, UINT32 const sect_offset, UINT32 const num_sectors)
{
	write_p++;

	UINT32 bank, old_vpn, new_vpn;
	UINT32 vblock, page_num, page_offset, column_cnt;

	bank        = get_num_bank(lpn); // page striping
	page_offset = sect_offset;
	column_cnt  = num_sectors;

	new_vpn  = assign_new_write_vpn(bank);
	old_vpn  = get_vpn(lpn);
	if (old_vpn != NULL)
	{
		vblock   = old_vpn / PAGES_PER_BLK;
		page_num = old_vpn % PAGES_PER_BLK;
		if (num_sectors != SECTORS_PER_PAGE)
		{
			if ((num_sectors <= 8) && (page_offset != 0))
			{
				// one page async read
				data_read++;
				nand_page_read(bank,
						vblock,
						page_num,
						FTL_BUF(bank));
				// copy `left hole sectors' into SATA write buffer
				if (page_offset != 0)
				{
					mem_copy(WR_BUF_PTR(g_ftl_write_buf_id),
							FTL_BUF(bank),
							page_offset * BYTES_PER_SECTOR);
				}
				// copy `right hole sectors' into SATA write buffer
				if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
				{
					UINT32 const rhole_base = (page_offset + column_cnt) * BYTES_PER_SECTOR;

					mem_copy(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base,
							FTL_BUF(bank) + rhole_base,
							BYTES_PER_PAGE - rhole_base);
				}
			}
			// left/right hole async read operation (two partial page read)
			else
			{
				// read `left hole sectors'
				if (page_offset != 0)
				{
					data_read++;
					nand_page_ptread(bank,
							vblock,
							page_num,
							0,
							page_offset,
							WR_BUF_PTR(g_ftl_write_buf_id),
							RETURN_WHEN_DONE);
				}
				// read `right hole sectors'
				if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
				{
					data_read++;
					nand_page_ptread(bank,
							vblock,
							page_num,
							page_offset + column_cnt,
							SECTORS_PER_PAGE - (page_offset + column_cnt),
							WR_BUF_PTR(g_ftl_write_buf_id),
							RETURN_WHEN_DONE);
				}
			}
		}
		set_vcount(bank, vblock, get_vcount(bank, vblock) - 1);
	}
	else if (num_sectors != SECTORS_PER_PAGE)
	{
		if(page_offset != 0)
			mem_set_dram(WR_BUF_PTR(g_ftl_write_buf_id),
					0,
					page_offset * BYTES_PER_SECTOR);
		if((page_offset + num_sectors) < SECTORS_PER_PAGE)
		{
			UINT32 const rhole_base = (page_offset + num_sectors) * BYTES_PER_SECTOR;
			mem_set_dram(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base, 0, BYTES_PER_PAGE - rhole_base);
		}
	}
	vblock   = new_vpn / PAGES_PER_BLK;
	page_num = new_vpn % PAGES_PER_BLK;

	// write new data (make sure that the new data is ready in the write buffer frame)
	// (c.f FO_B_SATA_W flag in flash.h)
	data_prog++;
	nand_page_program_from_host(bank,
			vblock,
			page_num);
	// update metadata
	set_lpn(bank, page_num, lpn);
	set_vpn(lpn, new_vpn);
	set_vcount(bank, vblock, get_vcount(bank, vblock) + 1);
}
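For the fresh-write path above (old_vpn == NULL with a partial write), the hole sectors are zero-filled instead of read from flash, since there is no old data to preserve. A worked example of the hole bounds with hypothetical numbers:

/* sect_offset = 3, num_sectors = 4, SECTORS_PER_PAGE = 64, BYTES_PER_SECTOR = 512:
 *   left hole  : bytes [0, 1536) zero-filled
 *   right hole : rhole_base = (3 + 4) * 512 = 3584; bytes [3584, BYTES_PER_PAGE) zero-filled
 * Sectors 3..6 come from the host; the whole page is then programmed. */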