Example no. 1
static void overwriteCompletePageInOwLog() {
#if DetailedOwStats == 1
    uart_print_level_1("-\r\n");
#endif
    uart_print("overwriteCompletePageInOwLog\r\n");
    #if MeasureDetailedOverwrite
    start_interval_measurement(TIMER_CH3, TIMER_PRESCALE_0);
    #endif
    chooseNewBank_();
    manageOldCompletePage();
    UINT32 newLogLpn = getOWLpn(bank_);
    UINT32 lbn = LogPageToLogBlk(newLogLpn);
    UINT32 vBlk = get_log_vbn(bank_, lbn);
    UINT32 pageOffset = LogPageToOffset(newLogLpn);
    nand_page_ptprogram_from_host(bank_, vBlk, pageOffset, 0, SECTORS_PER_PAGE);
    increaseOwCounter(bank_, lbn, pageOffset);
    #if MeasureOwEfficiency
    write_dram_32(OwEffBuf(bank_, LogPageToLogBlk(newLogLpn)), read_dram_32(OwEffBuf(bank_, LogPageToLogBlk(newLogLpn))) + SECTORS_PER_PAGE);
    #endif
    for(UINT32 i=0; i<CHUNKS_PER_PAGE; i++)
    {
        write_dram_32(chunkInLpnsList(OWCtrl[bank_].lpnsListPtr, LogPageToOffset(newLogLpn), i), lpn_);
        write_dram_32(ChunksMapTable(lpn_, i), ( (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (newLogLpn * CHUNKS_PER_PAGE) + i ) | StartOwLogLpn);
    }
    increaseOWLpn(bank_);
    #if MeasureDetailedOverwrite
    UINT32 timerValue=GET_TIMER_VALUE(TIMER_CH3);
    UINT32 nTicks = 0xFFFFFFFF - timerValue;
    uart_print_level_2("OPN0 "); uart_print_level_2_int(nTicks); uart_print_level_2("\r\n");
    #endif
}
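The two write_dram_32 calls in the loop above pack a chunk's location into a single logical chunk address: the bank's log area first, then the page and the chunk offset within it, with StartOwLogLpn OR-ed in as a tag marking residency in the overwrite log (Example no. 15 strips it again with & 0x7FFFFFFF). A minimal sketch of the inverse decoding under that assumed layout; the codebase's actual ChunkToBank/ChunkToLbn/ChunkToPageOffset macros (used in Example no. 11) may differ in detail:

/* Sketch (assumption): inverse of the chunk-address encoding used above.
 * The tag bit is removed first; the real macros may differ. */
#define StripOwTag_(addr)         ((addr) & ~StartOwLogLpn)
#define ChunkToBank_(addr)        (StripOwTag_(addr) / (LOG_BLK_PER_BANK * CHUNKS_PER_BLK))
#define ChunkToLbn_(addr)         ((StripOwTag_(addr) % (LOG_BLK_PER_BANK * CHUNKS_PER_BLK)) / CHUNKS_PER_BLK)
#define ChunkToPageOffset_(addr)  ((StripOwTag_(addr) % CHUNKS_PER_BLK) / CHUNKS_PER_PAGE)
#define ChunkToChunkOffset_(addr) (StripOwTag_(addr) % CHUNKS_PER_PAGE)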
Example no. 2
static void flushLogBufferDuringGC(const UINT32 bank)
{
    uart_print("flushLogBufferDuringGC bank="); uart_print_int(bank); uart_print("\r\n");
    UINT32 newLogLpn = getRWLpn(bank); // TODO: completely ignoring SW Log. Should use prepare_to_new_write if we want to use it
    uart_print("FlushLog to lpn="); uart_print_int(newLogLpn); uart_print("\r\n");
    UINT32 vBlk = get_log_vbn(bank, LogPageToLogBlk(newLogLpn));
    UINT32 pageOffset = LogPageToOffset(newLogLpn);
    nand_page_program(bank, vBlk, pageOffset, LOG_BUF(bank), RETURN_ON_ISSUE);
    for(UINT32 i=0; i<CHUNKS_PER_PAGE; i++)
    {
        UINT32 lChunkAddr = (newLogLpn * CHUNKS_PER_PAGE) + i;
        if( (chunkInLpnsList(RWCtrl[bank].lpnsListPtr, LogPageToOffset(newLogLpn), i)) >= (DRAM_BASE + DRAM_SIZE))
        {
            uart_print_level_1("ERROR in write::flushLogBufferDuringGC 1: writing to "); uart_print_level_1_int(chunkInLpnsList(RWCtrl[bank].lpnsListPtr, LogPageToOffset(newLogLpn), i)); uart_print_level_1("\r\n");
        }
        write_dram_32(chunkInLpnsList(RWCtrl[bank].lpnsListPtr, LogPageToOffset(newLogLpn), i), logBufMeta[bank].dataLpn[i]);
        if (logBufMeta[bank].dataLpn[i] != INVALID)
        {
            write_dram_32(ChunksMapTable(logBufMeta[bank].dataLpn[i], logBufMeta[bank].chunkIdx[i]), (bank * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + lChunkAddr);
        }
        else
        {
            decrementValidChunks(&heapDataWrite, bank, LogPageToLogBlk(newLogLpn)); // this buffer slot holds no valid data: decrement the valid-chunk count of the block being written
        }
    }
    increaseRWLpn(bank);
}
Example no. 3
/* NOTE: This function calls rebuildPageToFtlBuf with GcMode, therefore the valid chunks counters of old blocks are already managed.
 * Do not call manageOldChunks before calling this!
 */
static void appendPageToSWBlock (const UINT32 dataLpn, const UINT32 sectOffset, const UINT32 nSects)
{
    uart_print("appendPageToSWBlock dataLpn="); uart_print_int(dataLpn);
    uart_print(", sectOffset="); uart_print_int(sectOffset);
    uart_print(", nSects="); uart_print_int(nSects); uart_print("\r\n");
    UINT32 nSectsToWrite = SECTORS_PER_PAGE - sectOffset;
    UINT32 logLpn = getSWLpn(bank_);
    UINT32 vBlk = get_log_vbn(bank_, LogPageToLogBlk(logLpn));
    UINT32 dst = FTL_BUF(0) + (sectOffset*BYTES_PER_SECTOR);
    UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id)+(sectOffset*BYTES_PER_SECTOR);
    rebuildPageToFtlBuf(dataLpn, 0, SECTORS_PER_PAGE, GcMode); // Rebuild the entire page in the FTL buffer so that every chunk is garbage collected correctly, especially chunks currently cached in DRAM
    //waitBusyBank(bank_);
    flash_finish();
    mem_copy(dst, src, nSectsToWrite * BYTES_PER_SECTOR);                                       // Fill FTL buffer with new data
    //TODO: this program shouldn't be synchronous; we need a global variable storing the last bank writing data from FTL_BUF(0)
    nand_page_program(bank_, vBlk, LogPageToOffset(logLpn), FTL_BUF(0), RETURN_WHEN_DONE);      // Write FTL buffer to the next sequential page
    UINT32 chunkIdx;
    for(chunkIdx=0; chunkIdx<sectOffset / SECTORS_PER_CHUNK; ++chunkIdx)
    { // For chunks before the start of the new data, update the mapping only if there was previously valid data (now copied into the new page); otherwise insert INVALID in the lpns list to speed up GC later
        if (ChunksMapTable(dataLpn, chunkIdx) > DRAM_BASE + DRAM_SIZE)
        {
            uart_print_level_1("ERROR in appendPageToSWBlk 1: reading above DRAM address space\r\n");
        }
        if (read_dram_32(ChunksMapTable(dataLpn, chunkIdx)) != INVALID)
        {
            UINT32 lChunkAddr = (logLpn * CHUNKS_PER_PAGE) + chunkIdx;
            if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >=(DRAM_BASE + DRAM_SIZE))
            {
                uart_print_level_1("ERROR in write::appendPageToSWBlk 1: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
            }
            write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), dataLpn);
            write_dram_32(ChunksMapTable(dataLpn, chunkIdx), (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + lChunkAddr);
        }
        else
        { //Decrement valid chunks in the blk we're going to write in because we inserted null data
            if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >=(DRAM_BASE + DRAM_SIZE))
            {
                uart_print_level_1("ERROR in write::appendPageToSWBlk 2: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
            }
            write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), INVALID);
            decrementValidChunks(&heapDataWrite, bank_, LogPageToLogBlk(logLpn));
        }
    }
    for( ; chunkIdx < CHUNKS_PER_PAGE; ++chunkIdx)
    { // The new chunks are all valid, so there is no need to check whether they were valid before
        UINT32 lChunkAddr = (logLpn * CHUNKS_PER_PAGE) + chunkIdx;
        if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >= (DRAM_BASE + DRAM_SIZE))
        {
            uart_print_level_1("ERROR in write::appendPageToSWBlk 3: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
        }
        write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), dataLpn);
        write_dram_32(ChunksMapTable(dataLpn, chunkIdx), (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + lChunkAddr);
    }
    SWCtrl[bank_].nextDataLpn=dataLpn+1;
    increaseSWLpn(bank_);
    g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
    SETREG (BM_STACK_WRSET, g_ftl_write_buf_id);
    SETREG (BM_STACK_RESET, 0x01);
}
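The boundary between the two loops above comes from integer division: chunkIdx first ranges over chunks wholly before sectOffset, then from the chunk containing sectOffset to the end of the page. A worked case, with assumed geometry (64 sectors per page, 8 per chunk):

/* Worked example (assumed: SECTORS_PER_PAGE = 64, SECTORS_PER_CHUNK = 8,
 * hence CHUNKS_PER_PAGE = 8). For sectOffset = 20:
 *   sectOffset / SECTORS_PER_CHUNK == 2, so
 *   - first loop:  chunks 0..1 (sectors  0..15): old data; re-linked only if
 *     the previous copy was valid, otherwise marked INVALID in the lpns list;
 *   - second loop: chunks 2..7 (sectors 16..63): covered by the rebuilt page
 *     plus the newly copied sectors, so re-linked unconditionally.
 * The chunk containing sectOffset (chunk 2, sectors 16..23) lands in the
 * second loop because integer division floors; its sectors 16..19 come from
 * rebuildPageToFtlBuf and 20..23 from the host write buffer. */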
Example no. 4
static void set_psn(UINT32 const lba, UINT32 const psn)			//added by RED
{
	//UINT32 src = (UINT32)g_psn_write + (sizeof(UINT32) * g_psn_write_temp);
	//UINT32 dst = SMT_ADDR + (lba * sizeof(UINT32));
	
	//UINT32 size = sizeof(UINT32) * totals;
	//int i;
	//mem_copy(dst,src,size);
	UINT32 dst, bank, block, sector;
	UINT32 sectors_per_mblk = (SECTORS_PER_BANK + SMT_BANK_NUM - 1) / SMT_BANK_NUM;

	bank = lba / SECTORS_PER_BANK;
	block = ((lba % SECTORS_PER_BANK)) / (sectors_per_mblk);
	sector = ((lba % SECTORS_PER_BANK)) % (sectors_per_mblk);

	dst = smt_piece_map[bank * SMT_BANK_NUM + block];
	if(dst == (UINT32)-1)
	{
#if OPTION_FTL_TEST == 1
		num_miss++;
#endif
		load_smt_piece( bank * SMT_BANK_NUM + block);
		dst = smt_piece_map[bank * SMT_BANK_NUM + block];
		while(_BSP_FSM(g_bank_to_wait) != BANK_IDLE);	// spin until the bank loading the piece is idle
	}
	dst = SMT_ADDR + (SMT_PIECE_BYTES * dst) + (sector * sizeof(UINT32));
	smt_bit_map[bank][block / NUM_BANKS_MAX] |= ( 1 <<( block% NUM_BANKS_MAX) );

	write_dram_32( (UINT32*)dst , psn );
}
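The three divisions above decompose a flat LBA into (bank, piece, entry) coordinates for the sector-map table: bank selects the per-bank region, block the SMT piece within that bank, and sector the UINT32 entry inside the piece. A worked case with assumed sizes:

/* Worked example (assumed: SECTORS_PER_BANK = 1 << 20, SMT_BANK_NUM = 4).
 * sectors_per_mblk = ((1 << 20) + 3) / 4 = 262144 entries per piece.
 * For lba = 1500000:
 *   bank   = 1500000 / 1048576            = 1
 *   block  = (1500000 % 1048576) / 262144 = 1   (451424 / 262144)
 *   sector = (1500000 % 1048576) % 262144 = 189280
 * smt_piece_map[bank * SMT_BANK_NUM + block] == smt_piece_map[5] then gives
 * the DRAM slot caching that piece (loaded on demand when it is -1). */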
Example no. 5
// set vpn to PAGE_MAP
static void set_vpn(UINT32 const lpn, UINT32 const vpn)
{
    CHECK_LPAGE(lpn);
    ASSERT(vpn >= (META_BLKS_PER_BANK * PAGES_PER_BLK) && vpn < (VBLKS_PER_BANK * PAGES_PER_BLK));

    write_dram_32(PAGE_MAP_ADDR + lpn * sizeof(UINT32), vpn);
}
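set_vpn's read-side counterpart is not shown in these examples; a minimal sketch under the same PAGE_MAP layout (the actual helper in the codebase may differ):

// Sketch: read-side counterpart of set_vpn, assuming the same PAGE_MAP layout.
static UINT32 get_vpn(UINT32 const lpn)
{
    CHECK_LPAGE(lpn);
    return read_dram_32(PAGE_MAP_ADDR + lpn * sizeof(UINT32));
}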
Example no. 6
static void set_psn(UINT32 const lba, UINT32 const psn)			//added by RED
{
	//UINT32 src = (UINT32)g_psn_write + (sizeof(UINT32) * g_psn_write_temp);
	//UINT32 dst = SMT_ADDR + (lba * sizeof(UINT32));
	
	//UINT32 size = sizeof(UINT32) * totals;
	//int i;
	//mem_copy(dst,src,size);
	UINT32 dst, bank, block, sector;

	UINT32 sectors_per_mblk = (SECTORS_PER_BANK) / NUM_BANKS_MAX;

	bank = lba / SECTORS_PER_BANK;
	block = ((lba % SECTORS_PER_BANK)) / (sectors_per_mblk);
	sector = ((lba % SECTORS_PER_BANK)) % (sectors_per_mblk);

	dst = smt_piece_map[bank * NUM_BANKS_MAX + block];
	if(dst == (UINT32)-1)
	{
		load_smt_piece( bank * NUM_BANKS_MAX + block);
		dst = smt_piece_map[bank * NUM_BANKS_MAX + block];
	}
	dst = SMT_ADDR + (SMT_PIECE_BYTES * dst) + (sector * sizeof(UINT32));
	smt_bit_map[bank] |= ( 1 <<block );

	write_dram_32( (UINT32*)dst , psn );
}
Example no. 7
static void updateOwDramBufMetadata() {
    uart_print("updateOwDramBufMetadata\r\n");
    UINT32 chunkIdx = sectOffset_ / SECTORS_PER_CHUNK;
    owLogBufMeta[bank_].dataLpn[owChunkPtr[bank_]]=lpn_;
    owLogBufMeta[bank_].chunkIdx[owChunkPtr[bank_]]=chunkIdx;
    //shashtblUpdate(lpn_, chunkIdx, ( (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (LogBufLpn * CHUNKS_PER_PAGE) + owChunkPtr[bank_] ) | StartOwLogLpn); // Optimize: already have node, why use hash update???
    write_dram_32(ChunksMapTable(lpn_, chunkIdx), ( (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (LogBufLpn * CHUNKS_PER_PAGE) + owChunkPtr[bank_] ) | StartOwLogLpn);
}
Example no. 8
static void writeCompletePage() {
    //uart_print_level_1("3 ");
    uart_print("writeCompletePage\r\n");
    UINT32 newLogLpn = getRWLpn(bank_);
    UINT32 vBlk = get_log_vbn(bank_, LogPageToLogBlk(newLogLpn));
    UINT32 pageOffset = LogPageToOffset(newLogLpn);
    nand_page_ptprogram_from_host(bank_, vBlk, pageOffset, 0, SECTORS_PER_PAGE); // write new data (make sure the new data is ready in the write buffer frame; cf. the FO_B_SATA_W flag in flash.h)
    for(UINT32 i=0; i<CHUNKS_PER_PAGE; i++)
    {
        if((chunkInLpnsList(RWCtrl[bank_].lpnsListPtr, LogPageToOffset(newLogLpn), i)) >=(DRAM_BASE + DRAM_SIZE))
        {
            uart_print_level_1("ERROR in write::writeCompletePage 1: writing to "); uart_print_level_1_int(chunkInLpnsList(RWCtrl[bank_].lpnsListPtr, LogPageToOffset(newLogLpn), i)); uart_print_level_1("\r\n");
        }
        write_dram_32(chunkInLpnsList(RWCtrl[bank_].lpnsListPtr, LogPageToOffset(newLogLpn), i), lpn_);
        //shashtblUpdate(lpn_, i, (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (newLogLpn * CHUNKS_PER_PAGE) + i);
        write_dram_32(ChunksMapTable(lpn_, i), (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (newLogLpn * CHUNKS_PER_PAGE) + i);
    }
    increaseRWLpn(bank_);
}
Example no. 9
static void flushOwLogBuffer()
{
    uart_print("bank "); uart_print_int(bank_);
    uart_print(" flushOwLogBuffer\r\n");
    UINT32 newLogLpn = getOWLpn(bank_);
    if (newLogLpn == INVALID)
    {
        uart_print_level_1("ERROR in flushOwLogBuffer: got INVALID lpn\r\n");
        while(1);
    }
    uart_print("new log lpn="); uart_print_int(newLogLpn); uart_print("\r\n");
    UINT32 lbn = LogPageToLogBlk(newLogLpn);
    UINT32 vBlk = get_log_vbn(bank_, lbn);
    UINT32 pageOffset = LogPageToOffset(newLogLpn);
    nand_page_program(bank_, vBlk, pageOffset, OW_LOG_BUF(bank_), RETURN_ON_ISSUE);
    increaseOwCounter(bank_, lbn, pageOffset);
#if MeasureOwEfficiency
    write_dram_32(OwEffBuf(bank_, LogPageToLogBlk(newLogLpn)), read_dram_32(OwEffBuf(bank_, LogPageToLogBlk(newLogLpn))) + SECTORS_PER_PAGE);
#endif
    for(UINT32 i=0; i<CHUNKS_PER_PAGE; i++)
    {
        uart_print("Chunk "); uart_print_int(i); uart_print(" ");
        UINT32 lChunkAddr = ( (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (newLogLpn * CHUNKS_PER_PAGE) + i );
        uart_print("lChunkAddr "); uart_print_int(lChunkAddr); uart_print(" ");
        lChunkAddr = lChunkAddr | StartOwLogLpn;
        uart_print("full "); uart_print_int(lChunkAddr); uart_print("\r\n");
        write_dram_32(chunkInLpnsList(OWCtrl[bank_].lpnsListPtr, LogPageToOffset(newLogLpn), i), owLogBufMeta[bank_].dataLpn[i]);
        if (owLogBufMeta[bank_].dataLpn[i] != INVALID)
        {
            write_dram_32(ChunksMapTable(owLogBufMeta[bank_].dataLpn[i], owLogBufMeta[bank_].chunkIdx[i]), lChunkAddr);
        }
        else
        {
            decrementValidChunks(&heapDataOverwrite, bank_, LogPageToLogBlk(newLogLpn));
        }
    }
    increaseOWLpn(bank_);
}
Example no. 10
static void updateDramBufMetadataDuringGc(const UINT32 bank, const UINT32 lpn, const UINT32 sectOffset)
{
    uart_print("updateDramBufMetadataDuringGc\r\n");
    UINT32 chunkIdx = sectOffset / SECTORS_PER_CHUNK;
    logBufMeta[bank].dataLpn[chunkPtr[bank]]=lpn;
    logBufMeta[bank].chunkIdx[chunkPtr[bank]]=chunkIdx;
    //int ret = shashtblUpdate(lpn, chunkIdx, (bank * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (LogBufLpn * CHUNKS_PER_PAGE) + chunkPtr[bank]); // Optimize: already have node, why use hash update???
    write_dram_32(ChunksMapTable(lpn, chunkIdx), (bank * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (LogBufLpn * CHUNKS_PER_PAGE) + chunkPtr[bank]);
    /*if (ret != 0)
    {
        uart_print_level_1("ERROR: in updateDramBufMetadataDuringGc failed to update node.\r\n");
        uart_print_level_1("lpn="); uart_print_level_1_int(lpn); uart_print_level_1("\r\n");
        uart_print_level_1("node lpn="); uart_print_level_1_int(read_dram_32(&node->key)); uart_print_level_1("\r\n");
        while(1);
    }
    */
}
Example no. 11
static void overwriteChunkOldInOwLog(UINT32 chunkAddr)
{
    //uart_print_level_1("22 ");
    uart_print("overwriteChunkOldInOwLog\r\n");
    UINT32 nSectsToWrite = (((sectOffset_ % SECTORS_PER_CHUNK) + remainingSects_) < SECTORS_PER_CHUNK) ?
                           remainingSects_ :
                           (SECTORS_PER_CHUNK - (sectOffset_ % SECTORS_PER_CHUNK));
    UINT32 bank = ChunkToBank(chunkAddr);
    UINT32 lbn = ChunkToLbn(chunkAddr);
    UINT32 vbn = get_log_vbn(bank, lbn);
    UINT32 pageOffset = ChunkToPageOffset(chunkAddr);
    if (readOwCounter(bank, lbn, pageOffset) < OwLimit)
    { // Can overwrite in place
        UINT32 sectOffset = ChunkToSectOffset(chunkAddr) + (sectOffset_ % SECTORS_PER_CHUNK);
        //UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id) + (sectOffset_ * BYTES_PER_SECTOR) - (sectOffset * BYTES_PER_SECTOR); // startBuf + srcOffset - dstOffset
        if (lastBankUsingFtlBuf1 != INVALID)
        {
            waitBusyBank(lastBankUsingFtlBuf1);
        }
        mem_copy(FTL_BUF(1)+(sectOffset_*BYTES_PER_SECTOR), WR_BUF_PTR(g_ftl_write_buf_id) + (sectOffset_*BYTES_PER_SECTOR), nSectsToWrite*BYTES_PER_SECTOR);
        UINT32 src = FTL_BUF(1) + (sectOffset_ * BYTES_PER_SECTOR) - (sectOffset * BYTES_PER_SECTOR); // startBuf + srcOffset - dstOffset
        lastBankUsingFtlBuf1 = bank;
        nand_page_ptprogram(bank, vbn, pageOffset, sectOffset, nSectsToWrite, src, RETURN_ON_ISSUE);
        increaseOwCounter(bank, lbn, pageOffset);
    }
    else
    { // Need a new page
        if (nSectsToWrite == SECTORS_PER_CHUNK)
        { // Write chunk in ow log and decrease valid chunks in previous ow blk
            decrementValidChunks(&heapDataOverwrite, bank, lbn);
            overwriteCompleteChunkNew();
        }
        else
        { // Must read old chunk and update in ow log
            decrementValidChunks(&heapDataOverwrite, bank, lbn);
            overwritePartialChunkWhenOldChunkIsInExhaustedOWLog(nSectsToWrite, chunkAddr);
        }
        updateOwDramBufMetadata();
        updateOwChunkPtr();
    }
    #if MeasureOwEfficiency
    write_dram_32(OwEffBuf(bank_, ChunkToLbn(chunkAddr)), read_dram_32(OwEffBuf(bank_, ChunkToLbn(chunkAddr))) + nSectsToWrite);
    #endif
    sectOffset_ += nSectsToWrite;
    remainingSects_ -= nSectsToWrite;
}
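The nSectsToWrite expression at the top of this function (and again in Example no. 15) clamps the current write to the boundary of the chunk that sectOffset_ falls in. The same computation as a standalone helper, with a worked case in the comment; a sketch only, and the sectsToWriteInChunk name is hypothetical, not from the codebase:

/* Sketch: clamp a sector run to the end of the chunk containing sectOffset.
 * Worked case (assuming SECTORS_PER_CHUNK = 8): sectOffset = 13, remaining = 10
 *   -> offset within chunk = 5, room left in chunk = 3, so write 3 sectors
 *      now and leave 7 for the next iteration. */
static UINT32 sectsToWriteInChunk(UINT32 sectOffset, UINT32 remaining)
{
    UINT32 offsetInChunk = sectOffset % SECTORS_PER_CHUNK;
    if (offsetInChunk + remaining < SECTORS_PER_CHUNK)
        return remaining;                     /* the run ends inside this chunk */
    return SECTORS_PER_CHUNK - offsetInChunk; /* fill up to the chunk boundary */
}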
Example no. 12
static void set_psn(UINT32 const lba, UINT32 const psn)			//added by RED
{
	//UINT32 src = (UINT32)g_psn_write + (sizeof(UINT32) * g_psn_write_temp);
	//UINT32 dst = SMT_ADDR + (lba * sizeof(UINT32));
	
	//UINT32 size = sizeof(UINT32) * totals;
	//int i;
	//mem_copy(dst,src,size);
	UINT32 dst, bank, block, sector;

	bank = lba / SECTORS_PER_BANK;
	block = (lba % SECTORS_PER_BANK ) / (SECTORS_PER_PAGE * PAGES_PER_VBLK * ((VBLKS_PER_BANK + NUM_BANKS_MAX -1) / NUM_BANKS_MAX));
	sector = (lba % SECTORS_PER_BANK ) % (SECTORS_PER_PAGE * PAGES_PER_VBLK * ((VBLKS_PER_BANK + NUM_BANKS_MAX -1) / NUM_BANKS_MAX));
	if(( smt_dram_bit[ bank ] & (1 << block)) == 0)
	{
		load_smt_piece( bank * NUM_BANKS_MAX + block);
	}
	dst = smt_piece_map[bank * NUM_BANKS_MAX + block];
	dst = SMT_ADDR + (SMT_PIECE_BYTES * dst) + (sector * sizeof(UINT32));
	smt_bit_map[bank] |= ( 1 <<block );

	write_dram_32( (UINT32*)dst , psn );
}
Example no. 13
BOOL8 canReuseLowPage(const UINT32 bank, const UINT32 pageOffset, LogCtrlBlock * ctrlBlock)
{
    //uart_print_level_1("canReuseLowPage ");
    //uart_print_level_1_int(bank);
    //uart_print_level_1(" ");
    //uart_print_level_1_int(pageOffset);
    //uart_print_level_1("\r\n");

    UINT32 lbn = LogPageToLogBlk(ctrlBlock[bank].logLpn);
    //UINT32 vbn = get_log_vbn(bank, lbn);
    UINT32 victimLpns[CHUNKS_PER_PAGE];
    mem_copy(victimLpns, ctrlBlock[bank].lpnsListAddr + (pageOffset * CHUNKS_PER_PAGE * sizeof(UINT32)), CHUNKS_PER_PAGE * sizeof(UINT32));

    UINT32 logChunkAddr = (bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK) + (lbn*CHUNKS_PER_BLK) + (pageOffset*CHUNKS_PER_PAGE);

    UINT32 dataChunkOffsets[CHUNKS_PER_PAGE];
    UINT32 dataLpns[CHUNKS_PER_PAGE];
    UINT32 validChunks[CHUNKS_PER_PAGE];
    UINT32 nValidChunksInPage = 0;

    for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; chunkOffset++)
    { validChunks[chunkOffset] = FALSE; }

    for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; chunkOffset++)
    {   // This loop finds the valid chunks in the page.

        uart_print("chunkOffset "); uart_print_int(chunkOffset);

        UINT32 victimLpn = victimLpns[chunkOffset];
        if (victimLpn != INVALID)
        {
            UINT32 i = mem_search_equ_dram_4_bytes(ChunksMapTable(victimLpn, 0), CHUNKS_PER_PAGE, logChunkAddr);

            if(i<CHUNKS_PER_PAGE)
            {
                dataChunkOffsets[chunkOffset]=i;
                dataLpns[chunkOffset]=victimLpn;
                validChunks[chunkOffset]=TRUE;
                nValidChunksInPage++;
                uart_print(" valid\r\n");
            }
            else
            {
                uart_print(" somewhere else\r\n");
            }
        }
        else
        {
            uart_print(" invalid\r\n");
        }
        logChunkAddr++;
    }

    if (nValidChunksInPage == 0)
    {

        // note(fabio): this will be done in precacheLowPage
        //nand_page_ptread(bank, vbn, pageOffset, 0, SECTORS_PER_PAGE, PrecacheForEncoding(bank), RETURN_ON_ISSUE);

        // note(fabio): cannot use mem_set_dram here because we want to clear only 32B while the minimum allowed is 128
        //mem_set_dram(ctrlBlock[bank].lpnsListAddr + (pageOffset * CHUNKS_PER_PAGE * sizeof(UINT32)), INVALID, (CHUNKS_PER_PAGE * sizeof(UINT32)));
        UINT32 addrToClear = (ctrlBlock[bank].lpnsListAddr + (pageOffset * CHUNKS_PER_PAGE * sizeof(UINT32)));
        for (UINT32 i=0; i<CHUNKS_PER_PAGE; ++i)
        {
            write_dram_32(addrToClear + (i * sizeof(UINT32)), INVALID);
        }
        incrementValidChunksByN(&heapDataSecondUsage, bank, lbn, CHUNKS_PER_PAGE);
        return TRUE;
    }

    if (nValidChunksInPage < nValidChunksInPageToReuseThreshold)
    {
#if PrintStats
        uart_print_level_1("k\r\n");
#endif

        // note(fabio): this will be done in precacheLowPage
        //nand_page_ptread(bank, vbn, pageOffset, 0, SECTORS_PER_PAGE, PrecacheForEncoding(bank), RETURN_WHEN_DONE);

        for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; chunkOffset++)
        {
            if(validChunks[chunkOffset])
            {
                writeChunkOnLogBlockDuringGC(bank,
                                             dataLpns[chunkOffset],
                                             dataChunkOffsets[chunkOffset],
                                             chunkOffset,
                                             PrecacheForEncoding(bank));
            }
        }
        //mem_set_dram(ctrlBlock[bank].lpnsListAddr + (pageOffset * CHUNKS_PER_PAGE * sizeof(UINT32)), INVALID, (CHUNKS_PER_PAGE * sizeof(UINT32)));
        UINT32 addrToClear = (ctrlBlock[bank].lpnsListAddr + (pageOffset * CHUNKS_PER_PAGE * sizeof(UINT32)));
        for (UINT32 i=0; i<CHUNKS_PER_PAGE; ++i)
        {
            write_dram_32(addrToClear + (i * sizeof(UINT32)), INVALID);
        }
        incrementValidChunksByN(&heapDataSecondUsage, bank, lbn, CHUNKS_PER_PAGE - nValidChunksInPage);
        return TRUE;
    }
    return FALSE;
}
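The validity test above hinges on mem_search_equ_dram_4_bytes: it scans the CHUNKS_PER_PAGE consecutive mapping words of victimLpn and, judging by the i<CHUNKS_PER_PAGE check, returns the index of the first word equal to logChunkAddr, or a value >= CHUNKS_PER_PAGE on a miss. A plain-C equivalent of that back-pointer check, as a sketch (the findBackPointer name is hypothetical):

/* Sketch: software equivalent of the accelerated DRAM search above.
 * A chunk in the log page is still valid iff some mapping entry of the
 * victim lpn points back at it. */
static UINT32 findBackPointer(UINT32 victimLpn, UINT32 logChunkAddr)
{
    for (UINT32 i = 0; i < CHUNKS_PER_PAGE; ++i)
        if (read_dram_32(ChunksMapTable(victimLpn, i)) == logChunkAddr)
            return i;               /* chunk offset inside the data lpn */
    return CHUNKS_PER_PAGE;         /* not found: the chunk is stale here */
}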
Example no. 14
void writePage(UINT32 bank)
{
    uart_print("writePage: bank="); uart_print_int(bank);
    uart_print(" victimLbn "); uart_print_int(victimLbn[bank]);
    uart_print(" pageOffset "); uart_print_int(pageOffset[bank]); uart_print(" ");

    if(nValidChunksInPage[bank] == CHUNKS_PER_PAGE)
    {

        UINT32 logChunkBase = ((bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK) + (victimLbn[bank]*CHUNKS_PER_BLK) + (pageOffset[bank]*CHUNKS_PER_PAGE));
        if (gcOnRecycledPage[bank])
        {
            logChunkBase = logChunkBase | ColdLogBufBitFlag;
        }

        for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; chunkOffset++)
        {
            UINT32 chunkAddr = read_dram_32(ChunksMapTable(dataLpns[bank][chunkOffset], dataChunkOffsets[bank][chunkOffset]));

            // note (fabio): here we check against the normal chunkAddr (not recycled) because if there are 8 valid chunks the blk cannot be a recycled one
            if(chunkAddr != logChunkBase + chunkOffset)
            {
                // note(fabio): here invalidate only the first chunk that was moved by another write. If other chunks were also moved they'll be found by the code after the goto
                validChunks[bank][chunkOffset]=FALSE;
                nValidChunksInPage[bank]--;
                nValidChunksInBlk[bank]--;
                goto WritePartialPage;
            }

        }

        UINT32 dstLpn = getRWLpn(bank, coldLogCtrl);
        UINT32 dstVbn = get_log_vbn(bank, LogPageToLogBlk(dstLpn));
        UINT32 dstPageOffset = LogPageToOffset(dstLpn);

        uart_print(" dstLpn="); uart_print_int(dstLpn);
        uart_print(" dstVbn="); uart_print_int(dstVbn); uart_print(" dstPageOffset="); uart_print_int(dstPageOffset); uart_print("\r\n");

#if PrintStats
        uart_print_level_1("^\r\n");
#endif

        nand_page_program(bank, dstVbn, dstPageOffset, GC_BUF(bank), RETURN_ON_ISSUE);

        mem_copy(chunkInLpnsList(coldLogCtrl[bank].lpnsListAddr, dstPageOffset, 0), dataLpns[bank], CHUNKS_PER_PAGE * sizeof(UINT32));

        for (UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; ++chunkOffset)
        {
            write_dram_32(ChunksMapTable(dataLpns[bank][chunkOffset], dataChunkOffsets[bank][chunkOffset]), (bank * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (dstLpn * CHUNKS_PER_PAGE) + chunkOffset);
        }

        nValidChunksInPage[bank] = 0;
        gcOnRecycledPage[bank]=FALSE;

        gcState[bank] = GcRead;

        pageOffset[bank]++;

        coldLogCtrl[bank].increaseLpn(bank, coldLogCtrl);

    }
    else
WritePartialPage:
    {
        uart_print("write partial ");
        UINT32 chunkOffset=0;
        UINT32 logChunkBase=((bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK) + (victimLbn[bank]*CHUNKS_PER_BLK) + (pageOffset[bank]*CHUNKS_PER_PAGE));
        if (gcOnRecycledPage[bank])
        {
            logChunkBase = logChunkBase | ColdLogBufBitFlag;
            // note(fabio): Here we should decode
        }
        while(nValidChunksInPage[bank] > 0)
        {

            if(validChunks[bank][chunkOffset])
            {
                validChunks[bank][chunkOffset] = FALSE;
                nValidChunksInPage[bank]--;

                UINT32 chunkAddr = read_dram_32(ChunksMapTable(dataLpns[bank][chunkOffset], dataChunkOffsets[bank][chunkOffset]));

                if(chunkAddr == logChunkBase+chunkOffset)
                {

                    writeChunkOnLogBlockDuringGC(bank,
                                                 dataLpns[bank][chunkOffset],
                                                 dataChunkOffsets[bank][chunkOffset],
                                                 chunkOffset,
                                                 GC_BUF(bank));
                }
                else
                {
                    uart_print(" one chunk was moved during GC ");
                    nValidChunksInBlk[bank]--;
                }
            }
            chunkOffset++;
        }
        uart_print(" current nValidChunksInBlk="); uart_print_int(nValidChunksInBlk[bank]); uart_print("\r\n");

        if (gcState[bank] == GcWrite)
        {
            gcState[bank] = GcRead;
            gcOnRecycledPage[bank]=FALSE;
            pageOffset[bank]++;
        }
    }
}
Example no. 15
static void overwritePageOldInOrderInOwLog()
{
    #if MeasureDetailedOverwrite
    start_interval_measurement(TIMER_CH3, TIMER_PRESCALE_0);
    #endif
    uart_print("overwritePageOldInOrderInOwLog\r\n");
    UINT32 firstChunk = sectOffset_ / SECTORS_PER_CHUNK;
    UINT32 chunk = read_dram_32(ChunksMapTable(lpn_, firstChunk));
    chunk = chunk & 0x7FFFFFFF; // strip the StartOwLogLpn tag bit, leaving the bare chunk address
    UINT32 bank = ChunkToBank(chunk);
    UINT32 lbn = ChunkToLbn(chunk);
    UINT32 vBlk = get_log_vbn(bank, lbn);
    UINT32 pageOffset = ChunkToPageOffset(chunk);
    if(readOwCounter(bank, lbn, pageOffset) < OwLimit)
    {
#if DetailedOwStats == 1
        uart_print_level_1("*\r\n");
#endif
        uart_print("Can overwrite in place\r\n");
        nand_page_ptprogram_from_host(bank, vBlk, pageOffset, sectOffset_, nSects_);
        increaseOwCounter(bank, lbn, pageOffset);
        #if MeasureOwEfficiency
        write_dram_32(OwEffBuf(bank_, ChunkToLbn(chunk)), read_dram_32(OwEffBuf(bank_, ChunkToLbn(chunk))) + nSects_);
        #endif
    }
    else
    {
        uart_print("Exceeding limit, must find a new page\r\n");
        if (remainingSects_ == SECTORS_PER_PAGE)
        {
            overwriteCompletePageInOwLog();
        }
        else
        {
            syncWithWriteLimit();
            UINT16 invalidChunksToDecrement = 0;
            chooseNewBank_();
            while(remainingSects_)
            {
                invalidChunksToDecrement++;
                UINT32 nSectsToWrite = (((sectOffset_ % SECTORS_PER_CHUNK) + remainingSects_) < SECTORS_PER_CHUNK) ?
                                       remainingSects_ :
                                       (SECTORS_PER_CHUNK - (sectOffset_ % SECTORS_PER_CHUNK));
                if(nSectsToWrite == SECTORS_PER_CHUNK)
                {
                    uart_print("Copy chunk "); uart_print_int( (sectOffset_ % SECTORS_PER_CHUNK) / SECTORS_PER_CHUNK); uart_print(" to OW_LOG_BUF\r\n");
                    overwriteCompleteChunkNew();
                    updateOwDramBufMetadata();
                    updateOwChunkPtr();
                }
                else
                {
                    UINT32 chunkIdx = sectOffset_ / SECTORS_PER_CHUNK;
                    chunk = read_dram_32(ChunksMapTable(lpn_, chunkIdx));
                    chunk = chunk & 0x7FFFFFFF;
                    overwritePartialChunkWhenOldChunkIsInExhaustedOWLog(nSectsToWrite, chunk);
                    updateOwDramBufMetadata();
                    updateOwChunkPtr();
                }
                sectOffset_ += nSectsToWrite;
                remainingSects_ -= nSectsToWrite;
            }
            decrementValidChunksByN(&heapDataOverwrite, bank, lbn, invalidChunksToDecrement);
            g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
            SETREG (BM_STACK_WRSET, g_ftl_write_buf_id);
            SETREG (BM_STACK_RESET, 0x01);
        }
    }
    #if MeasureDetailedOverwrite
    UINT32 timerValue=GET_TIMER_VALUE(TIMER_CH3);
    UINT32 nTicks = 0xFFFFFFFF - timerValue;
    uart_print_level_2("OPIO "); uart_print_level_2_int(nTicks); uart_print_level_2("\r\n");
    #endif
}
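The three lines closing the partial-page path above (advance g_ftl_write_buf_id, then the two SETREG writes) also end appendPageToSWBlock in Example no. 3; they hand the consumed SATA write buffer back to the buffer manager. Factored out as a helper, purely as a sketch of the recurring pattern:

/* Sketch: release the current SATA write buffer to the buffer manager,
 * mirroring the recurring three-line pattern in these examples. */
static void releaseWriteBuffer(void)
{
    g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
    SETREG(BM_STACK_WRSET, g_ftl_write_buf_id);
    SETREG(BM_STACK_RESET, 0x01);
}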
Example no. 16
static void evict_mapping(void)
{
	if(cmt[cmt_hand].lpn == INVALID)
		return;
	// Second-chance (clock) scan: skip entries whose reference bit is set,
	// clearing the bit as we pass; stop at the first entry without it.
	while(1)
	{
		if(cmt[cmt_hand].sc == TRUE)
		{
			cmt[cmt_hand].sc = FALSE;
			cmt_hand = (cmt_hand + 1) % CMT_SIZE;
		}
		else
			break;
	}

	UINT32 gtd_index;
	UINT32 victim_lpn, victim_vpn;
	UINT32 mapping_vpn;
	UINT32 mapping_bank;
	victim_vpn = cmt[cmt_hand].vpn;

	/*
	 * VICTIM: the entry at cmt_hand.
	 * dirty: flush it together with every other dirty entry that belongs
	 *        to the same translation page.
	 * clean: simply drop it.
	 */
	if(IS_CLEAN(victim_vpn))
	{
		return;
	}

	//Dirty
	victim_lpn = cmt[cmt_hand].lpn;

	gtd_index = victim_lpn / (MAPPINGS_PER_PAGE*NUM_BANKS);
	mapping_bank = get_num_bank(victim_lpn);
	mapping_vpn = gtd[mapping_bank][gtd_index];

	if(mapping_vpn != INVALID)
	{
		map_read++;

		nand_page_read(mapping_bank,
				mapping_vpn / PAGES_PER_BLK,
				mapping_vpn % PAGES_PER_BLK,
				TRANS_BUF(mapping_bank));
	}
	else
	{
		mem_set_dram(TRANS_BUF(mapping_bank), 0, BYTES_PER_PAGE);
	}

	int index;
	for(index = 0; index < CMT_SIZE; index++)
	{
		if(get_num_bank(cmt[index].lpn) == mapping_bank)
		{
			if((!IS_CLEAN(cmt[index].vpn)) &&
					((cmt[index].lpn / (MAPPINGS_PER_PAGE*NUM_BANKS)) == gtd_index))
			{
				cmt[index].vpn = SET_CLEAN(cmt[index].vpn);
				write_dram_32(TRANS_BUF(mapping_bank) +
						sizeof(UINT32) * ((cmt[index].lpn/NUM_BANKS) % MAPPINGS_PER_PAGE),
						cmt[index].vpn);
			}
		}
	}

	mapping_vpn = assign_new_map_write_vpn(mapping_bank);

	gtd[mapping_bank][gtd_index] = mapping_vpn;

	map_prog++;
	nand_page_program(mapping_bank,
			mapping_vpn / PAGES_PER_BLK,
			mapping_vpn % PAGES_PER_BLK,
			TRANS_BUF(mapping_bank));
}
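IS_CLEAN and SET_CLEAN are used above but not defined in these examples; from the way vpn values round-trip through them, they presumably test and set a tag bit carried inside the cached vpn. A plausible definition, offered purely as an assumption:

/* Assumed encoding: a spare high bit of the cached vpn tags clean entries.
 * The real macros may use a different bit or a separate field entirely. */
#define CLEAN_BIT       0x80000000
#define IS_CLEAN(vpn)   (((vpn) & CLEAN_BIT) != 0)
#define SET_CLEAN(vpn)  ((vpn) | CLEAN_BIT)
#define SET_DIRTY(vpn)  ((vpn) & ~CLEAN_BIT)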