Example #1
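// Flush the bank's RW log write buffer to the next RW log page and persist the per-chunk metadata:
// each chunk's data lpn goes into the log block's lpns list, and its forward mapping is pointed at the new location.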
static void flushLogBufferDuringGC(const UINT32 bank)
{
    uart_print("flushLogBufferDuringGC bank="); uart_print_int(bank); uart_print("\r\n");
    UINT32 newLogLpn = getRWLpn(bank); // TODO: completely ignoring SW Log. Should use prepare_to_new_write if we want to use it
    uart_print("FlushLog to lpn="); uart_print_int(newLogLpn); uart_print("\r\n");
    UINT32 vBlk = get_log_vbn(bank, LogPageToLogBlk(newLogLpn));
    UINT32 pageOffset = LogPageToOffset(newLogLpn);
    nand_page_program(bank, vBlk, pageOffset, LOG_BUF(bank), RETURN_ON_ISSUE);
    for(UINT32 i=0; i<CHUNKS_PER_PAGE; i++)
    {
        UINT32 lChunkAddr = (newLogLpn * CHUNKS_PER_PAGE) + i;
        if( (chunkInLpnsList(RWCtrl[bank].lpnsListPtr, LogPageToOffset(newLogLpn), i)) >= (DRAM_BASE + DRAM_SIZE))
        {
            uart_print_level_1("ERROR in write::flushLogBufferDuringGC 1: writing to "); uart_print_level_1_int(chunkInLpnsList(RWCtrl[bank].lpnsListPtr, LogPageToOffset(newLogLpn), i)); uart_print_level_1("\r\n");
        }
        write_dram_32(chunkInLpnsList(RWCtrl[bank].lpnsListPtr, LogPageToOffset(newLogLpn), i), logBufMeta[bank].dataLpn[i]);
        if (logBufMeta[bank].dataLpn[i] != INVALID)
        {
            write_dram_32(ChunksMapTable(logBufMeta[bank].dataLpn[i], logBufMeta[bank].chunkIdx[i]), (bank * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + lChunkAddr);
        }
        else
        {
            decrementValidChunks(&heapDataWrite, bank, LogPageToLogBlk(newLogLpn)); // slot holds no valid data: decrement valid chunks of the block we are writing to
        }
    }
    increaseRWLpn(bank);
}
Example #2
/* NOTE: This function calls rebuildPageToFtlBuf with GcMode, therefore the valid-chunk counters of the old blocks are already updated.
 * Do not call manageOldChunks before calling this!
 */
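/* Rebuilds the whole target page into FTL_BUF(0), overlays the freshly written sectors, programs the merged page to the head of the sequential-write (SW) log, and then fixes up the chunk mappings. */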
static void appendPageToSWBlock (const UINT32 dataLpn, const UINT32 sectOffset, const UINT32 nSects)
{
    uart_print("appendPageToSWBlock dataLpn="); uart_print_int(dataLpn);
    uart_print(", sectOffset="); uart_print_int(sectOffset);
    uart_print(", nSects="); uart_print_int(nSects); uart_print("\r\n");
    UINT32 nSectsToWrite = SECTORS_PER_PAGE - sectOffset;
    UINT32 logLpn = getSWLpn(bank_);
    UINT32 vBlk = get_log_vbn(bank_, LogPageToLogBlk(logLpn));
    UINT32 dst = FTL_BUF(0) + (sectOffset*BYTES_PER_SECTOR);
    UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id)+(sectOffset*BYTES_PER_SECTOR);
    rebuildPageToFtlBuf(dataLpn, 0, SECTORS_PER_PAGE, GcMode); // Rebuild rest of the page in FTL buffer (rebuild entire page to be sure that all chunks are correctly garbage collected, especially if they are in DRAM)
    //waitBusyBank(bank_);
    flash_finish();
    mem_copy(dst, src, nSectsToWrite * BYTES_PER_SECTOR);                                       // Fill FTL buffer with new data
    //TODO: this program operation shouldn't be synchronous; we need a global variable storing the last bank writing data from FTL_BUF(0)
    nand_page_program(bank_, vBlk, LogPageToOffset(logLpn), FTL_BUF(0), RETURN_WHEN_DONE);      // Write FTL buffer to the next sequential page
    UINT32 chunkIdx;
    for(chunkIdx=0; chunkIdx<sectOffset / SECTORS_PER_CHUNK; ++chunkIdx)
    { // For sectors before the start of the new data we update the mapping only if there was previously valid data, which is now in the new page; otherwise we insert INVALID in the lpns list to speed up GC later
        if (ChunksMapTable(dataLpn, chunkIdx) >= DRAM_BASE + DRAM_SIZE)
        {
            uart_print_level_1("ERROR in appendPageToSWBlk 1: reading above DRAM address space\r\n");
        }
        if (read_dram_32(ChunksMapTable(dataLpn, chunkIdx)) != INVALID)
        {
            UINT32 lChunkAddr = (logLpn * CHUNKS_PER_PAGE) + chunkIdx;
            if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >= (DRAM_BASE + DRAM_SIZE))
            {
                uart_print_level_1("ERROR in write::appendPageToSWBlk 1: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
            }
            write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), dataLpn);
            write_dram_32(ChunksMapTable(dataLpn, chunkIdx), (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + lChunkAddr);
        }
        else
        { // Decrement valid chunks in the blk we're going to write in because we inserted null data
            if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >= (DRAM_BASE + DRAM_SIZE))
            {
                uart_print_level_1("ERROR in write::appendPageToSWBlk 2: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
            }
            write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), INVALID);
            decrementValidChunks(&heapDataWrite, bank_, LogPageToLogBlk(logLpn));
        }
    }
    for( ; chunkIdx < CHUNKS_PER_PAGE; ++chunkIdx)
    { // The new sectors are all valid, so we don't bother checking whether they were valid before
        UINT32 lChunkAddr = (logLpn * CHUNKS_PER_PAGE) + chunkIdx;
        if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >= (DRAM_BASE + DRAM_SIZE))
        {
            uart_print_level_1("ERROR in write::appendPageToSWBlk 3: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
        }
        write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), dataLpn);
        write_dram_32(ChunksMapTable(dataLpn, chunkIdx), (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + lChunkAddr);
    }
    SWCtrl[bank_].nextDataLpn = dataLpn + 1;
    increaseSWLpn(bank_);
    g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
    SETREG (BM_STACK_WRSET, g_ftl_write_buf_id);
    SETREG (BM_STACK_RESET, 0x01);
}
Example #3
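// Flush the bank's overwrite (OW) log buffer to the next OW log page and update the per-chunk mappings; mirrors flushLogBufferDuringGC, but for the OW log.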
static void flushOwLogBuffer()
{
    uart_print("bank "); uart_print_int(bank_);
    uart_print(" flushOwLogBuffer\r\n");
    UINT32 newLogLpn = getOWLpn(bank_);
    if (newLogLpn == INVALID)
    {
        uart_print_level_1("ERROR in flushOwLogBuffer: got INVALID lpn\r\n");
        while(1); // halt: unrecoverable error
    }
    uart_print("new log lpn="); uart_print_int(newLogLpn); uart_print("\r\n");
    UINT32 lbn = LogPageToLogBlk(newLogLpn);
    UINT32 vBlk = get_log_vbn(bank_, lbn);
    UINT32 pageOffset = LogPageToOffset(newLogLpn);
    nand_page_program(bank_, vBlk, pageOffset, OW_LOG_BUF(bank_), RETURN_ON_ISSUE);
    increaseOwCounter(bank_, lbn, pageOffset);
#if MeasureOwEfficiency
    write_dram_32(OwEffBuf(bank_, LogPageToLogBlk(newLogLpn)), read_dram_32(OwEffBuf(bank_, LogPageToLogBlk(newLogLpn))) + SECTORS_PER_PAGE);
#endif
    for(UINT32 i=0; i<CHUNKS_PER_PAGE; i++)
    {
        uart_print("Chunk "); uart_print_int(i); uart_print(" ");
        UINT32 lChunkAddr = ( (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (newLogLpn * CHUNKS_PER_PAGE) + i );
        uart_print("lChunkAddr "); uart_print_int(lChunkAddr); uart_print(" ");
        lChunkAddr = lChunkAddr | StartOwLogLpn;
        uart_print("full "); uart_print_int(lChunkAddr); uart_print("\r\n");
        write_dram_32(chunkInLpnsList(OWCtrl[bank_].lpnsListPtr, LogPageToOffset(newLogLpn), i), owLogBufMeta[bank_].dataLpn[i]);
        if (owLogBufMeta[bank_].dataLpn[i] != INVALID)
        {
            write_dram_32(ChunksMapTable(owLogBufMeta[bank_].dataLpn[i], owLogBufMeta[bank_].chunkIdx[i]), lChunkAddr);
        }
        else
        {
            decrementValidChunks(&heapDataOverwrite, bank_, LogPageToLogBlk(newLogLpn));
        }
    }
    increaseOWLpn(bank_);
}
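/* One GC write-back step: writes the valid chunks of the victim page (already staged in GC_BUF) to the cold log.
 * If every chunk is still valid the page is programmed in one shot; otherwise the surviving chunks are copied individually. */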
void writePage(UINT32 bank)
{
    uart_print("writePage: bank="); uart_print_int(bank);
    uart_print(" victimLbn "); uart_print_int(victimLbn[bank]);
    uart_print(" pageOffset "); uart_print_int(pageOffset[bank]); uart_print(" ");

    if(nValidChunksInPage[bank] == CHUNKS_PER_PAGE) // the whole victim page is still valid
    {

        UINT32 logChunkBase = ((bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK) + (victimLbn[bank]*CHUNKS_PER_BLK) + (pageOffset[bank]*CHUNKS_PER_PAGE));
        if (gcOnRecycledPage[bank])
        {
            logChunkBase = logChunkBase | ColdLogBufBitFlag;
        }

        for(UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; chunkOffset++)
        {
            UINT32 chunkAddr = read_dram_32(ChunksMapTable(dataLpns[bank][chunkOffset], dataChunkOffsets[bank][chunkOffset]));

        // note (fabio): here we check against the normal chunkAddr (not recycled) because if all CHUNKS_PER_PAGE chunks are valid the blk cannot be a recycled one
            if(chunkAddr != logChunkBase + chunkOffset)
            {
                // note (fabio): here we invalidate only the first chunk that was moved by another write. If other chunks were also moved, they'll be found by the code after the goto
                validChunks[bank][chunkOffset]=FALSE;
                nValidChunksInPage[bank]--;
                nValidChunksInBlk[bank]--;
                goto WritePartialPage;
            }

        }

        UINT32 dstLpn = getRWLpn(bank, coldLogCtrl);
        UINT32 dstVbn = get_log_vbn(bank, LogPageToLogBlk(dstLpn));
        UINT32 dstPageOffset = LogPageToOffset(dstLpn);

        uart_print(" dstLpn="); uart_print_int(dstLpn);
        uart_print(" dstVbn="); uart_print_int(dstVbn); uart_print(" dstPageOffset="); uart_print_int(dstPageOffset); uart_print("\r\n");

#if PrintStats
        uart_print_level_1("^\r\n");
#endif

        nand_page_program(bank, dstVbn, dstPageOffset, GC_BUF(bank), RETURN_ON_ISSUE);

        mem_copy(chunkInLpnsList(coldLogCtrl[bank].lpnsListAddr, dstPageOffset, 0), dataLpns[bank], CHUNKS_PER_PAGE * sizeof(UINT32));

        for (UINT32 chunkOffset=0; chunkOffset<CHUNKS_PER_PAGE; ++chunkOffset)
        {
            write_dram_32(ChunksMapTable(dataLpns[bank][chunkOffset], dataChunkOffsets[bank][chunkOffset]), (bank * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (dstLpn * CHUNKS_PER_PAGE) + chunkOffset);
        }

        nValidChunksInPage[bank] = 0;
        gcOnRecycledPage[bank] = FALSE;

        gcState[bank] = GcRead;

        pageOffset[bank]++;

        coldLogCtrl[bank].increaseLpn(bank, coldLogCtrl);

    }
    else
WritePartialPage:
    {
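        // Slow path (also the goto target): at least one chunk is stale, so re-check each chunk's mapping and copy only the chunks that are still valid, one at a time.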
        uart_print("write partial ");
        UINT32 chunkOffset=0;
        UINT32 logChunkBase=((bank*LOG_BLK_PER_BANK*CHUNKS_PER_BLK) + (victimLbn[bank]*CHUNKS_PER_BLK) + (pageOffset[bank]*CHUNKS_PER_PAGE));
        if (gcOnRecycledPage[bank])
        {
            logChunkBase = logChunkBase | ColdLogBufBitFlag;
            // note(fabio): Here we should decode
        }
        while(nValidChunksInPage[bank] > 0)
        {

            if(validChunks[bank][chunkOffset])
            {
                validChunks[bank][chunkOffset] = FALSE;
                nValidChunksInPage[bank]--;

                UINT32 chunkAddr = read_dram_32(ChunksMapTable(dataLpns[bank][chunkOffset], dataChunkOffsets[bank][chunkOffset]));

                if(chunkAddr == logChunkBase+chunkOffset)
                {

                    writeChunkOnLogBlockDuringGC(bank,
                                                 dataLpns[bank][chunkOffset],
                                                 dataChunkOffsets[bank][chunkOffset],
                                                 chunkOffset,
                                                 GC_BUF(bank));
                }
                else
                {
                    uart_print(" one chunk was moved during GC ");
                    nValidChunksInBlk[bank]--;
                }
            }
            chunkOffset++;
        }
        uart_print(" current nValidChunksInBlk="); uart_print_int(nValidChunksInBlk[bank]); uart_print("\r\n");

        if (gcState[bank] == GcWrite)
        {
            gcState[bank] = GcRead;
            gcOnRecycledPage[bank] = FALSE;
            pageOffset[bank]++;
        }
    }
}
Example #5
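/* Evict an entry from the cached mapping table (CMT), DFTL-style: pick a victim with the clock (second-chance)
 * policy; a clean victim is simply dropped, a dirty one is written back to its translation page together with
 * every other dirty entry that belongs to the same page. */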
static void evict_mapping(void)
{
	if(cmt[cmt_hand].lpn == INVALID)
		return;
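	/* Second-chance (clock) scan: an entry whose reference bit (sc) is set gets another chance;
	 * clear the bit and advance the hand until an entry with sc == FALSE is found. */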
	while(1)
	{
		if(cmt[cmt_hand].sc == TRUE)
		{
			cmt[cmt_hand].sc = FALSE;
			cmt_hand = (cmt_hand + 1) % CMT_SIZE;
		}
		else
			break;
	}

	UINT32 gtd_index;
	UINT32 victim_lpn, victim_vpn;
	UINT32 mapping_vpn;
	UINT32 mapping_bank;
	victim_vpn = cmt[cmt_hand].vpn;

	/*
	 * VICTIM : cmt_hand
	 * dirty : dirty entries that belong to the same translation page are updated together
	 * clean : simply discarded
	 */
	if(IS_CLEAN(victim_vpn))
	{
		return;
	}

	// Dirty victim: write the mapping back through its translation page
	victim_lpn = cmt[cmt_hand].lpn;

	gtd_index = victim_lpn / (MAPPINGS_PER_PAGE*NUM_BANKS);
	mapping_bank = get_num_bank(victim_lpn);
	mapping_vpn = gtd[mapping_bank][gtd_index];

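	/* Load the translation page holding the victim's mapping, or start from a zeroed buffer if this GTD entry has never been written. */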
	if(mapping_vpn != INVALID)
	{
		map_read++;

		nand_page_read(mapping_bank,
				mapping_vpn / PAGES_PER_BLK,
				mapping_vpn % PAGES_PER_BLK,
				TRANS_BUF(mapping_bank));
	}
	else
	{
		mem_set_dram(TRANS_BUF(mapping_bank), 0, BYTES_PER_PAGE);
	}

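	/* Batch write-back: copy every dirty CMT entry belonging to the same translation page into the buffer and mark it clean, saving those entries a future eviction write. */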
	int index;
	for(index = 0; index < CMT_SIZE; index++)
	{
		if(get_num_bank(cmt[index].lpn) == mapping_bank)
		{
			if((!IS_CLEAN(cmt[index].vpn)) && \
					((cmt[index].lpn / (MAPPINGS_PER_PAGE*NUM_BANKS)) == gtd_index))
			{
				cmt[index].vpn = SET_CLEAN(cmt[index].vpn);
				write_dram_32(TRANS_BUF(mapping_bank) + \
						sizeof(UINT32 ) * ((cmt[index].lpn/NUM_BANKS) % MAPPINGS_PER_PAGE),
						cmt[index].vpn);
			}
		}
	}

	mapping_vpn = assign_new_map_write_vpn(mapping_bank);

	gtd[mapping_bank][gtd_index] = mapping_vpn;

	map_prog++;
	nand_page_program(mapping_bank,
			mapping_vpn / PAGES_PER_BLK,
			mapping_vpn % PAGES_PER_BLK,
			TRANS_BUF(mapping_bank));
}