Example no. 1
0
static UINT32 assign_new_write_vpn(UINT32 const bank)
{
	UINT32 write_vpn;
	UINT32 vblock;

	write_vpn = get_cur_write_vpn(bank);
	vblock    = write_vpn / PAGES_PER_BLK;

	// NOTE: if the next new write page would be the last page of vblock
	// (offset PAGES_PER_BLK - 1), i.e. the current page sits at offset
	// PAGES_PER_BLK - 2,
	if ((write_vpn % PAGES_PER_BLK) == (PAGES_PER_BLK - 2))
	{
		// then, because the flash controller prohibits access to the spare
		// area (i.e. OOB), we persistently write the lpn list into that
		// last page instead.
		mem_copy(TEMP_BUF(bank), g_misc_meta[bank].lpn_list_of_cur_vblock, sizeof(UINT32) * PAGES_PER_BLK);
		misc_w++; // bump the misc-write counter
		nand_page_ptprogram(bank, vblock, PAGES_PER_BLK - 1, 0,
				((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR),
				TEMP_BUF(bank));

		mem_set_sram(g_misc_meta[bank].lpn_list_of_cur_vblock, 0x00000000, sizeof(UINT32) * PAGES_PER_BLK);

		inc_full_blk_cnt(bank);

		// do garbage collection if necessary
		if (is_full_all_blks(bank))
		{
			GC: // also reached via goto when the scan below finds no usable vblock
			garbage_collection(bank);
			return get_cur_write_vpn(bank);
		}
		do
		{
			vblock++;

			if(vblock == VBLKS_PER_BANK)
			{
				uart_printf(" vblock == VBLKS_PER_BANK");
				goto GC;
			}
		} while (get_vcount(bank, vblock) == VC_MAX);
	}
	// if we advanced to a different vblock, start writing from its first page
	if (vblock != (write_vpn / PAGES_PER_BLK))
	{
		write_vpn = vblock * PAGES_PER_BLK;
	}
	else
	{
		write_vpn++;
	}
	set_new_write_vpn(bank, write_vpn);

	return write_vpn;
}
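For context, the lpn list programmed into the last page of each vblock needs a matching read during recovery or GC. A minimal read-back sketch, assuming the same layout and the usual Jasmine-style nand_page_ptread() signature (the helper name is illustrative, not from the source):

static void read_lpn_list_of_vblock(UINT32 const bank, UINT32 const vblock, UINT32 const buf_addr)
{
    // one UINT32 lpn per page, rounded up to whole sectors, exactly as written above
    UINT32 const bytes = sizeof(UINT32) * PAGES_PER_BLK;
    nand_page_ptread(bank, vblock, PAGES_PER_BLK - 1, 0,
                     (bytes + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                     buf_addr, RETURN_WHEN_DONE);
}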
Example no. 2
0
static void overwriteChunkOldInOwLog(UINT32 chunkAddr)
{
    //uart_print_level_1("22 ");
    uart_print("overwriteChunkOldInOwLog\r\n");
    UINT32 nSectsToWrite = (((sectOffset_ % SECTORS_PER_CHUNK) + remainingSects_) < SECTORS_PER_CHUNK) ?
                           remainingSects_ : (SECTORS_PER_CHUNK - (sectOffset_ % SECTORS_PER_CHUNK));
    UINT32 bank = ChunkToBank(chunkAddr);
    UINT32 lbn = ChunkToLbn(chunkAddr);
    UINT32 vbn = get_log_vbn(bank, lbn);
    UINT32 pageOffset = ChunkToPageOffset(chunkAddr);
    if (readOwCounter(bank, lbn, pageOffset) < OwLimit)
    { // Can overwrite in place
        UINT32 sectOffset = ChunkToSectOffset(chunkAddr) + (sectOffset_ % SECTORS_PER_CHUNK);
        //UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id) + (sectOffset_ * BYTES_PER_SECTOR) - (sectOffset * BYTES_PER_SECTOR); // startBuf + srcOffset - dstOffset
        if (lastBankUsingFtlBuf1 != INVALID)
        {
            waitBusyBank(lastBankUsingFtlBuf1);
        }
        mem_copy(FTL_BUF(1)+(sectOffset_*BYTES_PER_SECTOR), WR_BUF_PTR(g_ftl_write_buf_id) + (sectOffset_*BYTES_PER_SECTOR), nSectsToWrite*BYTES_PER_SECTOR);
        UINT32 src = FTL_BUF(1) + (sectOffset_ * BYTES_PER_SECTOR) - (sectOffset * BYTES_PER_SECTOR); // startBuf + srcOffset - dstOffset
        lastBankUsingFtlBuf1 = bank;
        nand_page_ptprogram(bank, vbn, pageOffset, sectOffset, nSectsToWrite, src, RETURN_ON_ISSUE);
        increaseOwCounter(bank, lbn, pageOffset);
    }
    else
    { // Need a new page
        if (nSectsToWrite == SECTORS_PER_CHUNK)
        { // Write chunk in ow log and decrease valid chunks in previous ow blk
            decrementValidChunks(&heapDataOverwrite, bank, lbn);
            overwriteCompleteChunkNew();
        }
        else
        { // Must read old chunk and update in ow log
            decrementValidChunks(&heapDataOverwrite, bank, lbn);
            overwritePartialChunkWhenOldChunkIsInExhaustedOWLog(nSectsToWrite, chunkAddr);
        }
        updateOwDramBufMetadata();
        updateOwChunkPtr();
    }
    #if MeasureOwEfficiency
    write_dram_32(OwEffBuf(bank_, ChunkToLbn(chunkAddr)), read_dram_32(OwEffBuf(bank_, ChunkToLbn(chunkAddr))) + nSectsToWrite);
    #endif
    sectOffset_ += nSectsToWrite;
    remainingSects_ -= nSectsToWrite;
}
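The opening ternary clamps each write to the boundary of the current chunk. The same computation factored into a helper, as a readability sketch (this function does not exist in the source):

static UINT32 sectsInCurrentChunk(UINT32 const sectOffset, UINT32 const remainingSects)
{
    UINT32 const offInChunk = sectOffset % SECTORS_PER_CHUNK; // position inside the chunk
    UINT32 const room = SECTORS_PER_CHUNK - offInChunk;       // sectors left in this chunk
    return (remainingSects < room) ? remainingSects : room;   // never cross the chunk boundary
}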
Example no. 3
0
void increaseLpnColdBlk (UINT32 const bank, LogCtrlBlock * ctrlBlock)
{
    uart_print("increaseLpnColdBlk\r\n");

    UINT32 lpn = ctrlBlock[bank].logLpn;

    if (LogPageToOffset(lpn) == UsedPagesPerLogBlk-1)
    { // current cold log block is full
        UINT32 lbn = get_log_lbn(lpn);
        nand_page_ptprogram(bank,
                            get_log_vbn(bank, lbn),
                            PAGES_PER_BLK - 1,
                            0,
                            (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                            ctrlBlock[bank].lpnsListAddr,
                            RETURN_WHEN_DONE);
        mem_set_dram(ctrlBlock[bank].lpnsListAddr, INVALID, (CHUNKS_PER_BLK * CHUNK_ADDR_BYTES));
        insertBlkInHeap(&heapDataCold, bank, lbn);

#if CanReuseBlksForColdData == 0
        lbn = cleanListPop(&cleanListDataWrite, bank); // Now the hybrid approach can pop from the cleanList
        ctrlBlock[bank].logLpn = lbn * PAGES_PER_BLK;

        while(cleanListSize(&cleanListDataWrite, bank) < 2)
        {
#if PrintStats
            uart_print_level_1("GCCOLD\r\n");
#endif
            garbageCollectLog(bank);
        }
#else
        findNewLpnForColdLog(bank, ctrlBlock);
#endif
    }

    else
    {
        ctrlBlock[bank].logLpn = lpn+1;
    }
    uart_print("increaseLpnColdBlk new lpn "); uart_print_int(ctrlBlock[bank].logLpn); uart_print("\r\n");
}
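The function relies on a linear mapping between log pages and log blocks. A hedged sketch of the two helpers it calls, assuming PAGES_PER_BLK pages per log block (the real macros live elsewhere and may differ):

#define LogPageToOffset(lpn)  ((lpn) % PAGES_PER_BLK)  // page offset inside its log block
#define get_log_lbn(lpn)      ((lpn) / PAGES_PER_BLK)  // log block that contains the log page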
Example no. 4
0
// logging misc + vcount metadata
static void logging_misc_metadata(void)
{
    UINT32 misc_meta_bytes = NUM_MISC_META_SECT * BYTES_PER_SECTOR; // per bank
    UINT32 vcount_addr     = VCOUNT_ADDR;
    UINT32 vcount_bytes    = NUM_VCOUNT_SECT * BYTES_PER_SECTOR; // per bank
    UINT32 vcount_boundary = VCOUNT_ADDR + VCOUNT_BYTES; // entire vcount data
    UINT32 bank;

    flash_finish();

    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        inc_miscblk_vpn(bank);

        // note: if misc. meta block is full, just erase old block & write offset #0
        if ((get_miscblk_vpn(bank) / PAGES_PER_BLK) != MISCBLK_VBN)
        {
            nand_block_erase(bank, MISCBLK_VBN);
            set_miscblk_vpn(bank, MISCBLK_VBN * PAGES_PER_BLK); // restart at the first page of the misc block
        }
        // copy misc. metadata to FTL buffer
        mem_copy(FTL_BUF(bank), &g_misc_meta[bank], misc_meta_bytes);

        // copy vcount metadata to FTL buffer
        if (vcount_addr < vcount_boundary) // strictly below: nothing remains once the boundary is reached
        {
            mem_copy(FTL_BUF(bank) + misc_meta_bytes, vcount_addr, vcount_bytes);
            vcount_addr += vcount_bytes;
        }
    }
    // logging the misc. metadata to nand flash
    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        nand_page_ptprogram(bank,
                            get_miscblk_vpn(bank) / PAGES_PER_BLK,
                            get_miscblk_vpn(bank) % PAGES_PER_BLK,
                            0,
                            NUM_MISC_META_SECT + NUM_VCOUNT_SECT,
                            FTL_BUF(bank));
    }
    flash_finish();
}
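A hypothetical boot-time counterpart, mirroring the buffer layout written above (misc metadata first, then this bank's vcount slice). The function name and the use of RETURN_WHEN_DONE are assumptions:

static void load_misc_metadata_sketch(void)
{
    UINT32 const misc_meta_bytes = NUM_MISC_META_SECT * BYTES_PER_SECTOR; // per bank
    UINT32 bank;

    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        // read back the page that logging_misc_metadata() programmed last
        nand_page_ptread(bank,
                         get_miscblk_vpn(bank) / PAGES_PER_BLK,
                         get_miscblk_vpn(bank) % PAGES_PER_BLK,
                         0,
                         NUM_MISC_META_SECT + NUM_VCOUNT_SECT,
                         FTL_BUF(bank),
                         RETURN_WHEN_DONE);
        mem_copy(&g_misc_meta[bank], FTL_BUF(bank), misc_meta_bytes);
        // the trailing NUM_VCOUNT_SECT sectors would be copied back into this
        // bank's slice of the vcount region (omitted here)
    }
}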
Example no. 5
0
void logging_misc_meta()
{
	UINT32 bank;
	flash_finish();
	for(bank = 0; bank < NUM_BANKS; bank++)
	{
		g_misc_meta[bank].cur_miscblk_vpn++;
		// if the vpn ran past the misc block (vbn 1), erase it and restart at its first page
		if(g_misc_meta[bank].cur_miscblk_vpn / PAGES_PER_VBLK != 1)
		{
			nand_block_erase(bank, 1);
			g_misc_meta[bank].cur_miscblk_vpn = PAGES_PER_VBLK;
		}
		mem_copy(FTL_BUF_ADDR, &(g_misc_meta[bank]), sizeof(misc_metadata));
		nand_page_ptprogram(bank, 1,
			g_misc_meta[bank].cur_miscblk_vpn % PAGES_PER_VBLK,
			0,
			((sizeof(misc_metadata) + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR),
			FTL_BUF_ADDR);
	}
	flash_finish();
}
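Both logging variants round metadata sizes up to whole sectors before programming. The recurring idiom, captured once as a sketch macro (not defined in the source):

// ceiling division: smallest sector count that covers `bytes`
#define BYTES_TO_SECTS(bytes)  (((bytes) + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR)

With it, the program call above would read nand_page_ptprogram(bank, 1, ..., BYTES_TO_SECTS(sizeof(misc_metadata)), FTL_BUF_ADDR).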
Example no. 6
0
void increaseLpnHotBlkFirstUsage (UINT32 const bank, LogCtrlBlock * ctrlBlock)
{
    uart_print("increaseLpnHotBlkFirstUsage\r\n");

    UINT32 lpn = ctrlBlock[bank].logLpn;
    UINT32 pageOffset = LogPageToOffset(lpn);

    if (pageOffset == 123)
    { // current hot log block is full: write the lpns list to the highest low page (offset 125)
        uart_print("Blk full\r\n");
        UINT32 lbn = get_log_lbn(lpn);
        nand_page_ptprogram(bank,
                            get_log_vbn(bank, lbn),
                            125,
                            0,
                            (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                            ctrlBlock[bank].lpnsListAddr,
                            RETURN_WHEN_DONE);
        mem_set_dram(ctrlBlock[bank].lpnsListAddr, INVALID, (CHUNKS_PER_BLK * CHUNK_ADDR_BYTES));
        insertBlkInHeap(&heapDataFirstUsage, bank, lbn);

        findNewLpnForHotLog(bank, ctrlBlock);
    }

    else
    {
        if(pageOffset == 0)
        {
            ctrlBlock[bank].logLpn = lpn+1; // offsets 0 and 1 are both low pages
        }
        else
        {
            ctrlBlock[bank].logLpn = lpn+2; // skip the high page in between, landing on the next low page
        }
    }
}
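The constants 123 and 125 suggest an MLC block with PAGES_PER_BLK == 128 whose low pages sit at offsets 0, 1, 3, 5, ..., 125: first usage writes data only to the low pages up to 123 and reserves 125 for the lpns list. The stepping rule from the else-branch, isolated as a sketch:

static UINT32 nextFirstUsageOffset(UINT32 const pageOffset)
{
    // 0 -> 1, then 1 -> 3 -> 5 -> ... -> 123, skipping the high pages
    return (pageOffset == 0) ? (pageOffset + 1) : (pageOffset + 2);
}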
Example no. 7
0
void increaseLpnHotBlkSecondUsage (UINT32 const bank, LogCtrlBlock * ctrlBlock)
{
    uart_print("increaseLpnHotBlkSecondUsage\r\n");

    UINT32 lpn = ctrlBlock[bank].logLpn;
    UINT32 pageOffset = LogPageToOffset(lpn);

    if (pageOffset == UsedPagesPerLogBlk-1)
    {
        uart_print("Blk full\r\n");
        UINT32 lbn = LogPageToLogBlk(lpn);
        UINT32 vbn = get_log_vbn(bank, lbn);
        nand_page_ptprogram(bank,
                            vbn,
                            PAGES_PER_BLK - 1,
                            0,
                            (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                            ctrlBlock[bank].lpnsListAddr,
                            RETURN_WHEN_DONE); // write lpns list to the last high page
        mem_set_dram(ctrlBlock[bank].lpnsListAddr, INVALID, (CHUNKS_PER_BLK * CHUNK_ADDR_BYTES));
        insertBlkInHeap(&heapDataSecondUsage, bank, lbn);

        findNewLpnForHotLog(bank, ctrlBlock);
    }

    else
    {
        lpn++;
        ctrlBlock[bank].logLpn = lpn;
        pageOffset++;

        //uart_print_level_1("increaseLpnHotBlkSecondUsage ");
        //uart_print_level_1_int(bank);
        //uart_print_level_1(" ");
        //uart_print_level_1_int(pageOffset);
        //uart_print_level_1("\r\n");

        if (pageOffset % 2 == 1)
        { // Next page is low

            if (ctrlBlock[bank].nextLowPageOffset == pageOffset)
            { // The page passed the reuse test
                // It does not matter whether the page has already been prefetched, since prefetching can complete asynchronously
                ctrlBlock[bank].updateChunkPtr = updateChunkPtrRecycledPage;
                ctrlBlock[bank].useRecycledPage = TRUE;
            }

            else
            {

                if (pageOffset == 1)
                { // Special case: pageOffset 1 comes immediately after another low page, so there was no time for precaching
                    if(canReuseLowPage(bank, pageOffset, ctrlBlock))
                    {
                        ctrlBlock[bank].updateChunkPtr = updateChunkPtrRecycledPage;
                        ctrlBlock[bank].useRecycledPage = TRUE;
                        ctrlBlock[bank].precacheDone = FALSE;
                        ctrlBlock[bank].nextLowPageOffset = pageOffset;
                        return;
                    }
                }

                // Skip this page because it tested negatively

                // Set the next page to the next high page
                ctrlBlock[bank].updateChunkPtr = updateChunkPtr;
                ctrlBlock[bank].useRecycledPage = FALSE;
                lpn++;
                pageOffset++;
                ctrlBlock[bank].logLpn = lpn;

                // Eagerly test the next low page so the result is ready when we reach it
                pageOffset++;
                if (pageOffset < UsedPagesPerLogBlk-1)
                {
                    if(canReuseLowPage(bank, pageOffset, ctrlBlock))
                    {
                        ctrlBlock[bank].precacheDone = FALSE;
                        ctrlBlock[bank].nextLowPageOffset = pageOffset;
                    }
                    else
                    {
                        ctrlBlock[bank].precacheDone = FALSE;
                        ctrlBlock[bank].nextLowPageOffset = INVALID;
                    }
                }

            }
        }

        else
        { // Next page is high
            ctrlBlock[bank].updateChunkPtr = updateChunkPtr;
            ctrlBlock[bank].useRecycledPage = FALSE;

            // Eagerly test the next low page so the result is ready when we reach it
            pageOffset++;
            if (pageOffset < UsedPagesPerLogBlk-1)
            {
                if(canReuseLowPage(bank, pageOffset, ctrlBlock))
                {
                    ctrlBlock[bank].precacheDone = FALSE;
                    ctrlBlock[bank].nextLowPageOffset = pageOffset;
                }
                else
                {
                    ctrlBlock[bank].precacheDone = FALSE;
                    ctrlBlock[bank].nextLowPageOffset = INVALID;
                }
            }
            else
            {
                ctrlBlock[bank].precacheDone = FALSE;
                ctrlBlock[bank].nextLowPageOffset = INVALID;
            }
        }
    }

    uart_print("New logLpn "); uart_print_int(ctrlBlock[bank].logLpn);
    uart_print(" offset "); uart_print_int(LogPageToOffset(ctrlBlock[bank].logLpn)); uart_print("\r\n");
}
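The branch on pageOffset % 2 encodes the layout assumption of the second usage: odd offsets are the recycled low pages, even offsets the fresh high pages. Stated as a predicate (read off the code above, not a function from the source):

static BOOL32 isRecycledLowPage(UINT32 const pageOffset)
{
    return (pageOffset % 2) == 1; // odd offsets hold the reusable low pages
}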
Example no. 8
0
void initLog()
{

    uart_print("Initializing Write Log Space...\r\n");
    uart_print("Initializing clean list...");
    //testCleanList();
    cleanListInit(&cleanListDataWrite, CleanList(0), LOG_BLK_PER_BANK);
    uart_print("done\r\n");

    for(int bank=0; bank<NUM_BANKS; bank++)
    {
        adaptiveStepDown[bank] = initStepDown;
        adaptiveStepUp[bank] = initStepUp;
        nStepUps[bank] = 0;
        nStepDowns[bank] = 0;

        for(int lbn=0; lbn<LOG_BLK_PER_BANK; lbn++)
        {
            cleanListPush(&cleanListDataWrite, bank, lbn);
        }

        UINT32 lbn = cleanListPop(&cleanListDataWrite, bank);

        hotLogCtrl[bank] = (LogCtrlBlock)
        {
            .logLpn = lbn * PAGES_PER_BLK,
            .lpnsListAddr = LPNS_BUF_BASE_1(bank),
            .logBufferAddr = HOT_LOG_BUF(bank),
            .chunkPtr = 0,
            .increaseLpn=increaseLpnHotBlkFirstUsage,
            .updateChunkPtr=updateChunkPtr,
            .nextLowPageOffset=INVALID,
            .allChunksInLogAreValid = TRUE,
            .useRecycledPage=FALSE,
            .precacheDone=TRUE,
        };

        for(int chunk=0; chunk<CHUNKS_PER_PAGE; ++chunk)
        {
            hotLogCtrl[bank].dataLpn[chunk] = INVALID;
            hotLogCtrl[bank].chunkIdx[chunk] = INVALID;
        }

        lbn = cleanListPop(&cleanListDataWrite, bank);

        coldLogCtrl[bank] = (LogCtrlBlock)
        {
            .logLpn = lbn * PAGES_PER_BLK,
            .lpnsListAddr = LPNS_BUF_BASE_2(bank),
            .logBufferAddr = COLD_LOG_BUF(bank),
            .chunkPtr = 0,
            .increaseLpn=increaseLpnColdBlk,
            .updateChunkPtr=updateChunkPtr,
            .nextLowPageOffset=INVALID,
            .allChunksInLogAreValid = TRUE,
            .useRecycledPage=FALSE,
            .precacheDone=TRUE,
        };
        for(int chunk=0; chunk<CHUNKS_PER_PAGE; ++chunk)
        {
            coldLogCtrl[bank].dataLpn[chunk] = INVALID;
            coldLogCtrl[bank].chunkIdx[chunk] = INVALID;
        }

        nValidChunksFromHeap[bank] = INVALID;
    }
}
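The designated initializers above imply a control-block shape roughly like the following. Field types and the function-pointer signatures are assumptions; the real declaration lives in a header:

typedef struct LogCtrlBlock LogCtrlBlock;
struct LogCtrlBlock
{
    UINT32 logLpn;                    // next log page to write
    UINT32 lpnsListAddr;              // DRAM address of the per-block lpns list
    UINT32 logBufferAddr;             // DRAM buffer that accumulates chunks
    UINT32 chunkPtr;                  // next free chunk slot in the buffer
    void (*increaseLpn)(UINT32 const bank, LogCtrlBlock * ctrlBlock);
    void (*updateChunkPtr)(void);     // exact signature assumed
    UINT32 nextLowPageOffset;         // next low page that tested reusable, or INVALID
    BOOL32 allChunksInLogAreValid;
    BOOL32 useRecycledPage;
    BOOL32 precacheDone;
    UINT32 dataLpn[CHUNKS_PER_PAGE];  // data lpn backing each buffered chunk
    UINT32 chunkIdx[CHUNKS_PER_PAGE]; // chunk index within that data lpn
};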

static void findNewLpnForColdLog(const UINT32 bank, LogCtrlBlock * ctrlBlock)
{
    uart_print("findNewLpnForColdLog bank "); uart_print_int(bank);

    if (cleanListSize(&cleanListDataWrite, bank) > 2)
    {
        uart_print(" use clean blk\r\n");
        uart_print("cleanList size = "); uart_print_int(cleanListSize(&cleanListDataWrite, bank)); uart_print("\r\n");

        UINT32 lbn = cleanListPop(&cleanListDataWrite, bank);
        ctrlBlock[bank].logLpn = lbn * PAGES_PER_BLK;
        ctrlBlock[bank].increaseLpn = increaseLpnColdBlk;
    }
    else
    {
        if (reuseCondition(bank))
        {
#if PrintStats
            uart_print_level_1("REUSECOLD\r\n");
#endif
            uart_print(" second usage\r\n");
            UINT32 lbn = getVictim(&heapDataFirstUsage, bank);
            UINT32 nValidChunks = getVictimValidPagesNumber(&heapDataFirstUsage, bank);
            resetValidChunksAndRemove(&heapDataFirstUsage, bank, lbn, CHUNKS_PER_LOG_BLK_FIRST_USAGE);
            resetValidChunksAndRemove(&heapDataSecondUsage, bank, lbn, CHUNKS_PER_LOG_BLK_SECOND_USAGE);
            resetValidChunksAndRemove(&heapDataCold, bank, lbn, nValidChunks);
            ctrlBlock[bank].logLpn = (lbn * PAGES_PER_BLK) + 2;
            ctrlBlock[bank].increaseLpn = increaseLpnColdBlkReused;
            nand_page_ptread(bank,
                             get_log_vbn(bank, lbn),
                             125,
                             0,
                             (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                             ctrlBlock[bank].lpnsListAddr,
                             RETURN_WHEN_DONE); // Read the lpns list back from the highest low page (125), where increaseLpnHotBlkFirstUsage previously wrote it

        }
        else
        {
            uart_print(" get new block\r\n");
            UINT32 lbn = cleanListPop(&cleanListDataWrite, bank);
            ctrlBlock[bank].logLpn = lbn * PAGES_PER_BLK;
            ctrlBlock[bank].increaseLpn = increaseLpnColdBlk;
            while(cleanListSize(&cleanListDataWrite, bank) < 2)
            {
#if PrintStats
                uart_print_level_1("GCCOLD\r\n");
#endif
                garbageCollectLog(bank);
            }
        }
    }
}
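reuseCondition() is not shown in these excerpts. Purely for illustration, one plausible predicate consistent with how it is used here (reuse a first-usage block only when clean blocks are scarce); this is an assumption, not the source's logic:

static BOOL32 reuseConditionSketch(UINT32 const bank)
{
    // few clean blocks left, and a first-usage victim with few valid chunks
    return (cleanListSize(&cleanListDataWrite, bank) <= 2) &&
           (getVictimValidPagesNumber(&heapDataFirstUsage, bank) < (CHUNKS_PER_LOG_BLK_FIRST_USAGE / 2));
}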

void increaseLpnColdBlkReused (UINT32 const bank, LogCtrlBlock * ctrlBlock)
{
    uart_print("increaseLpnColdBlkReused bank "); uart_print_int(bank); uart_print("\r\n");

    UINT32 lpn = ctrlBlock[bank].logLpn;
    UINT32 pageOffset = LogPageToOffset(lpn);

    if (pageOffset == UsedPagesPerLogBlk-1)
    {
        UINT32 lbn = get_log_lbn(lpn);
        nand_page_ptprogram(bank,
                            get_log_vbn(bank, lbn),
                            PAGES_PER_BLK - 1,
                            0,
                            (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                            ctrlBlock[bank].lpnsListAddr,
                            RETURN_WHEN_DONE);
        mem_set_dram(ctrlBlock[bank].lpnsListAddr, INVALID, (CHUNKS_PER_BLK * CHUNK_ADDR_BYTES));
        insertBlkInHeap(&heapDataCold, bank, lbn);

        findNewLpnForColdLog(bank, ctrlBlock);
    }
    else
    {
        ctrlBlock[bank].logLpn = lpn+2;
    }

    uart_print("increaseLpnColdBlkReused (bank="); uart_print_int(bank); uart_print(") new lpn "); uart_print_int(ctrlBlock[bank].logLpn); uart_print("\r\n");
}
Example no. 9
0
static void logging_pmap_table(void)
{
    UINT32 pmap_addr  = PAGE_MAP_ADDR;
    UINT32 pmap_bytes = BYTES_PER_PAGE; // per bank
    UINT32 mapblk_vpn;
    UINT32 bank;
    UINT32 pmap_boundary = PAGE_MAP_ADDR + PAGE_MAP_BYTES;
    BOOL32 finished = FALSE;

    for (UINT32 mapblk_lbn = 0; mapblk_lbn < MAPBLKS_PER_BANK; mapblk_lbn++)
    {
        flash_finish();

        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            if (finished)
            {
                break;
            }
            else if (pmap_addr >= pmap_boundary)
            {
                finished = TRUE;
                break;
            }
            else if (pmap_addr + BYTES_PER_PAGE >= pmap_boundary)
            {
                finished = TRUE;
                pmap_bytes = (pmap_boundary - pmap_addr + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR * BYTES_PER_SECTOR;
            }
            inc_mapblk_vpn(bank, mapblk_lbn);

            mapblk_vpn = get_mapblk_vpn(bank, mapblk_lbn);

            // note: if there is no free page, then erase old map block first.
            if ((mapblk_vpn % PAGES_PER_BLK) == 0)
            {
                // erase full map block
                nand_block_erase(bank, (mapblk_vpn - 1) / PAGES_PER_BLK);

                // next vpn of mapblk is offset #0
                set_mapblk_vpn(bank, mapblk_lbn, ((mapblk_vpn - 1) / PAGES_PER_BLK) * PAGES_PER_BLK);
                mapblk_vpn = get_mapblk_vpn(bank, mapblk_lbn);
            }
            // copy the page mapping table to FTL buffer
            mem_copy(FTL_BUF(bank), pmap_addr, pmap_bytes);

            // logging update page mapping table into map_block
            nand_page_ptprogram(bank,
                                mapblk_vpn / PAGES_PER_BLK,
                                mapblk_vpn % PAGES_PER_BLK,
                                0,
                                pmap_bytes / BYTES_PER_SECTOR,
                                FTL_BUF(bank));
            pmap_addr += pmap_bytes;
        }
        if (finished)
        {
            break;
        }
    }
    flash_finish();
}
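When the map region ends mid-page, pmap_bytes shrinks to the tail and is rounded up to whole sectors so the final ptprogram stays sector-aligned. The tail computation as a standalone sketch:

static UINT32 pmap_tail_bytes(UINT32 const pmap_addr, UINT32 const pmap_boundary)
{
    UINT32 const remaining = pmap_boundary - pmap_addr;  // at most BYTES_PER_PAGE here
    // round up to a whole number of sectors
    return (remaining + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR * BYTES_PER_SECTOR;
}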