Example #1
void ftl_trim(UINT32 const lba, UINT32 const num_sectors)
{
	ASSERT(num_sectors > 0);

	uart_printf("Num sectors: %u", num_sectors);
	uart_printf("SATA_WBUF_PTR: %u", GETREG(SATA_WBUF_PTR));
	uart_printf("g_ftl_write_buf_id: %u", g_ftl_write_buf_id);

	UINT32 next_write_buf_id = (g_ftl_write_buf_id + num_sectors) % NUM_WR_BUFFERS;
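	// Buffer id the buffer manager should advance to once all TRIM payload sectors are consumed (see BM_STACK_WRSET below).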

	for (UINT32 i=0;i<num_sectors;i++)
	{
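		// TRIM payload format: each 8-byte entry packs a 48-bit LBA (low 32 bits in the first
		// word, bits 32-47 in the low half of the second word) and a 16-bit range length in
		// the high half of the second word.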
		for (UINT32 j=0;j<512/4;j=j+2) // 128 words per 512-byte sector = 64 entries of two words each
		{
			UINT32 address = read_dram_32(WR_BUF_PTR(g_ftl_write_buf_id)+j*sizeof(UINT32));
			UINT32 reg2 = read_dram_32(WR_BUF_PTR(g_ftl_write_buf_id)+(j+1)*sizeof(UINT32));
			UINT32 count = reg2 & 0xFFFF0000; // Range length, kept in the upper 16 bits of the second word.

			// Skip empty entries (count == 0). Also skip entries whose LBA needs bits 32-47
			// (the low half of the second word); we shouldn't see those unless something is wrong.
			if (count == 0 || (reg2 & 0x0000FFFF) > 0)
				continue;

//			uart_print_hex(address);
//			uart_print_hex(count);
		}

		g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
	}
	SETREG(BM_STACK_WRSET, next_write_buf_id);	// change bm_write_limit
	SETREG(BM_STACK_RESET, 0x02);				// change bm_write_limit
}
Example #2
void nand_page_ptprogram_from_host(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const sect_offset, UINT32 const num_sectors)
{
    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    row = (vblock * PAGES_PER_BLK) + page_num;

    SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
#if OPTION_FTL_TEST == TRUE
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
#else
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_SATA_W);
#endif
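    // FO_B_SATA_W makes the flash controller wait until the host has filled this SATA
    // write-buffer slot before programming; the FTL-test build waits on the internal
    // buffer-ready flag (FO_B_W_DRDY) instead, since no host transfer is involved.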
    SETREG(FCP_DMA_ADDR, WR_BUF_PTR(g_ftl_write_buf_id));
    SETREG(FCP_DMA_CNT, num_sectors * BYTES_PER_SECTOR);

    SETREG(FCP_COL, sect_offset);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);
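    // Same row in both row registers: a plain single-plane program operation.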

    flash_issue_cmd(bank, RETURN_ON_ISSUE);

    g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
}
Example #3
/* NOTE: This function calls rebuildPageToFtlBuf in GcMode, so the valid-chunk counters of the old blocks are already managed.
 * Do not call manageOldChunks before calling this!
 */
static void appendPageToSWBlock (const UINT32 dataLpn, const UINT32 sectOffset, const UINT32 nSects)
{
    uart_print("appendPageToSWBlock dataLpn="); uart_print_int(dataLpn);
    uart_print(", sectOffset="); uart_print_int(sectOffset);
    uart_print(", nSects="); uart_print_int(nSects); uart_print("\r\n");
    UINT32 nSectsToWrite = SECTORS_PER_PAGE - sectOffset;
    UINT32 logLpn = getSWLpn(bank_);
    UINT32 vBlk = get_log_vbn(bank_, LogPageToLogBlk(logLpn));
    UINT32 dst = FTL_BUF(0) + (sectOffset*BYTES_PER_SECTOR);
    UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id)+(sectOffset*BYTES_PER_SECTOR);
    rebuildPageToFtlBuf(dataLpn, 0, SECTORS_PER_PAGE, GcMode); // Rebuild rest of the page in FTL buffer (rebuild entire page to be sure that all chunks are correctly garbage collected, especially if they are in DRAM)
    //waitBusyBank(bank_);
    flash_finish();
    mem_copy(dst, src, nSectsToWrite * BYTES_PER_SECTOR);                                       // Fill FTL buffer with new data
    //TODO: this program shouldn't be synchronous; we need a global variable storing the last bank writing data from FTL_BUF(0)
    nand_page_program(bank_, vBlk, LogPageToOffset(logLpn), FTL_BUF(0), RETURN_WHEN_DONE);      // Write FTL buffer to the next sequential page
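    // Update the per-chunk mappings for the page just programmed: the first loop covers
    // chunks before the new data, the second the chunks the new write touches.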
    UINT32 chunkIdx;
    for(chunkIdx=0; chunkIdx<sectOffset / SECTORS_PER_CHUNK; ++chunkIdx)
    { // For chunks before the start of the new data, update the mapping only if they previously held valid data (now rebuilt into the new page); otherwise insert INVALID into the lpns list to speed up GC later
        if (ChunksMapTable(dataLpn, chunkIdx) > DRAM_BASE + DRAM_SIZE)
        {
            uart_print_level_1("ERROR in appendPageToSWBlk 1: reading above DRAM address space\r\n");
        }
        if (read_dram_32(ChunksMapTable(dataLpn, chunkIdx)) != INVALID)
        {
            UINT32 lChunkAddr = (logLpn * CHUNKS_PER_PAGE) + chunkIdx;
            if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >=(DRAM_BASE + DRAM_SIZE))
            {
                uart_print_level_1("ERROR in write::appendPageToSWBlk 1: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
            }
            write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), dataLpn);
            write_dram_32(ChunksMapTable(dataLpn, chunkIdx), (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + lChunkAddr);
        }
        else
        { //Decrement valid chunks in the blk we're going to write in because we inserted null data
            if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >=(DRAM_BASE + DRAM_SIZE))
            {
                uart_print_level_1("ERROR in write::appendPageToSWBlk 2: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
            }
            write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), INVALID);
            decrementValidChunks(&heapDataWrite, bank_, LogPageToLogBlk(logLpn));
        }
    }
    for( ; chunkIdx < CHUNKS_PER_PAGE; ++chunkIdx)
    { // The new sectors are instead all valid, therefore we don't bother checking if they were valid before
            UINT32 lChunkAddr = (logLpn * CHUNKS_PER_PAGE) + chunkIdx;
            if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >=(DRAM_BASE + DRAM_SIZE))
            {
                uart_print_level_1("ERROR in write::appendPageToSWBlk 3: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
            }
            write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), dataLpn);
            write_dram_32(ChunksMapTable(dataLpn, chunkIdx), (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + lChunkAddr);
    }
    SWCtrl[bank_].nextDataLpn=dataLpn+1;
    increaseSWLpn(bank_);
    g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
    SETREG (BM_STACK_WRSET, g_ftl_write_buf_id);
    SETREG (BM_STACK_RESET, 0x01);
}
Example #4
static void overwriteCompleteChunkNew() {
    //uart_print_level_1("26 ");
    uart_print("overwriteCompleteChunkNew\r\n");
    chooseNewBank_();
    UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id)+(sectOffset_*BYTES_PER_SECTOR);
    UINT32 dst = OW_LOG_BUF(bank_)+(owChunkPtr[bank_]*BYTES_PER_CHUNK); // base address of the destination chunk
    waitBusyBank(bank_);
    mem_copy(dst, src, BYTES_PER_CHUNK);
}
Example #5
static void overwritePartialChunkNew(UINT32 nSectsToWrite) {
    //uart_print_level_1("27 ");
    uart_print("overwritePartialChunkNew\r\n");
    chooseNewBank_();
    UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id)+(sectOffset_*BYTES_PER_SECTOR);
    UINT32 chunkBufStartAddr = OW_LOG_BUF(bank_)+(owChunkPtr[bank_]*BYTES_PER_CHUNK); // base address of the destination chunk
    waitBusyBank(bank_);
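    // Pre-fill the chunk with 0xFF so the sectors this partial write does not cover read as erased.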
    mem_set_dram (chunkBufStartAddr, 0xFFFFFFFF, BYTES_PER_CHUNK);
    UINT32 dst = chunkBufStartAddr + (sectOffset_ % SECTORS_PER_CHUNK) * BYTES_PER_SECTOR;
    mem_copy(dst, src, nSectsToWrite * BYTES_PER_SECTOR);
}
Example #6
static void writeChunkNew(UINT32 nSectsToWrite)
{
    uart_print("writeChunkNew\r\n");
    UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id)+(sectOffset_*BYTES_PER_SECTOR);
    UINT32 dst = LOG_BUF(bank_)+(chunkPtr[bank_]*BYTES_PER_CHUNK); // base address of the destination chunk
    waitBusyBank(bank_);
    if (nSectsToWrite != SECTORS_PER_CHUNK)
    {
        mem_set_dram (dst, 0xFFFFFFFF, BYTES_PER_CHUNK); // Initialize chunk in dram log buffer with 0xFF
    }
    mem_copy(dst+((sectOffset_ % SECTORS_PER_CHUNK) * BYTES_PER_SECTOR), src, nSectsToWrite*BYTES_PER_SECTOR);
}
Example #7
static void writePartialChunkWhenOldIsInOWBuf(UINT32 nSectsToWrite, UINT32 oldChunkAddr)
{
    uart_print("writePartialChunkWhenOldIsInOWBuf\r\n");
    // Old Chunk Location
    UINT32 oldBank = ChunkToBank(oldChunkAddr);
    UINT32 oldSectOffset = ChunkToSectOffset(oldChunkAddr);
    // Buffers
    UINT32 dstWBufStart = LOG_BUF(bank_) + (chunkPtr[bank_] * BYTES_PER_CHUNK); // base address of the destination chunk
    UINT32 srcOWBufStart = OW_LOG_BUF(oldBank)+(oldSectOffset*BYTES_PER_SECTOR); // location of old chunk
    // Sizes
    waitBusyBank(bank_);
    mem_copy(dstWBufStart, srcOWBufStart, BYTES_PER_CHUNK);                                                         // First copy old data from OW Buf
    UINT32 startOffsetWrite = (sectOffset_ % SECTORS_PER_CHUNK) * BYTES_PER_SECTOR;
    UINT32 srcSataBufStart = WR_BUF_PTR(g_ftl_write_buf_id)+((sectOffset_ / SECTORS_PER_CHUNK)*BYTES_PER_CHUNK);
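    // srcSataBufStart is chunk-aligned within the SATA buffer; the in-chunk byte offset is added below.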
    mem_copy(dstWBufStart+startOffsetWrite, srcSataBufStart+startOffsetWrite, nSectsToWrite*BYTES_PER_SECTOR);      // Then copy new data from SATA Buf
}
Example #8
static void writePartialChunkWhenOldIsInWBuf(UINT32 nSectsToWrite, UINT32 oldChunkAddr) {
    uart_print("writePartialChunkWhenOldIsInWBuf\r\n");
    // Old Chunk Location
    UINT32 oldBank = ChunkToBank(oldChunkAddr);
    UINT32 oldSectOffset = ChunkToSectOffset(oldChunkAddr);
    // Buffers
    UINT32 dstWBufStart = LOG_BUF(oldBank)+(oldSectOffset*BYTES_PER_SECTOR); // location of old chunk
    UINT32 srcSataBufStart = WR_BUF_PTR(g_ftl_write_buf_id)+((sectOffset_ / SECTORS_PER_CHUNK)*BYTES_PER_CHUNK);
    // Sizes
    UINT32 startOffsetWrite = (sectOffset_ % SECTORS_PER_CHUNK) * BYTES_PER_SECTOR;
    //waitBusyBank(bank_);
    waitBusyBank(oldBank);
    mem_copy(dstWBufStart+startOffsetWrite, srcSataBufStart+startOffsetWrite, nSectsToWrite*BYTES_PER_SECTOR);
    #if MeasureDramAbsorb
    uart_print_level_1("WRDRAM "); uart_print_level_1_int(nSectsToWrite); uart_print_level_1("\r\n");
    #endif
}
Example #9
static void overwriteChunkOldInOwBuf(UINT32 chunkAddr) {
    //uart_print_level_1("21 ");
    uart_print("overwriteChunkOldInOwBuf\r\n");
    UINT32 nSectsToWrite = (((sectOffset_ % SECTORS_PER_CHUNK) + remainingSects_) < SECTORS_PER_CHUNK)
                               ? remainingSects_                                          // write fits inside the current chunk
                               : (SECTORS_PER_CHUNK - (sectOffset_ % SECTORS_PER_CHUNK)); // clamp to the chunk boundary
    UINT32 startOffset = (sectOffset_ % SECTORS_PER_CHUNK) * BYTES_PER_SECTOR;
    UINT32 oldBank = ChunkToBank(chunkAddr);
    UINT32 oldSectOffset = ChunkToSectOffset(chunkAddr);
    UINT32 startBufAddr = OW_LOG_BUF(oldBank)+(oldSectOffset*BYTES_PER_SECTOR)+startOffset; // location of old chunk, overwrite in place
    UINT32 startSataAddr = WR_BUF_PTR(g_ftl_write_buf_id) + (sectOffset_*BYTES_PER_SECTOR);
    mem_copy(startBufAddr, startSataAddr, nSectsToWrite*BYTES_PER_SECTOR);
    sectOffset_ += nSectsToWrite;
    remainingSects_ -= nSectsToWrite;
    #if MeasureDramAbsorb
    uart_print_level_1("WRDRAM "); uart_print_level_1_int(nSectsToWrite); uart_print_level_1("\r\n");
    #endif
}
Example #10
static void overwriteChunkOldInOwLog(UINT32 chunkAddr)
{
    //uart_print_level_1("22 ");
    uart_print("overwriteChunkOldInOwLog\r\n");
    UINT32 nSectsToWrite = (((sectOffset_ % SECTORS_PER_CHUNK) + remainingSects_) < SECTORS_PER_CHUNK)
                               ? remainingSects_                                          // write fits inside the current chunk
                               : (SECTORS_PER_CHUNK - (sectOffset_ % SECTORS_PER_CHUNK)); // clamp to the chunk boundary
    UINT32 bank = ChunkToBank(chunkAddr);
    UINT32 lbn = ChunkToLbn(chunkAddr);
    UINT32 vbn = get_log_vbn(bank, lbn);
    UINT32 pageOffset = ChunkToPageOffset(chunkAddr);
    if (readOwCounter(bank, lbn, pageOffset) < OwLimit)
    { // Can overwrite in place
        UINT32 sectOffset = ChunkToSectOffset(chunkAddr) + (sectOffset_ % SECTORS_PER_CHUNK);
        //UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id) + (sectOffset_ * BYTES_PER_SECTOR) - (sectOffset * BYTES_PER_SECTOR); // startBuf + srcOffset - dstOffset
        if (lastBankUsingFtlBuf1 != INVALID)
        {
            waitBusyBank(lastBankUsingFtlBuf1);
        }
        mem_copy(FTL_BUF(1)+(sectOffset_*BYTES_PER_SECTOR), WR_BUF_PTR(g_ftl_write_buf_id) + (sectOffset_*BYTES_PER_SECTOR), nSectsToWrite*BYTES_PER_SECTOR);
        UINT32 src = FTL_BUF(1) + (sectOffset_ * BYTES_PER_SECTOR) - (sectOffset * BYTES_PER_SECTOR); // startBuf + srcOffset - dstOffset
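        // nand_page_ptprogram DMAs from src + sectOffset * BYTES_PER_SECTOR (the column
        // offset), hence the subtraction above so the DMA lands on the data staged in FTL_BUF(1).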
        lastBankUsingFtlBuf1 = bank;
        nand_page_ptprogram(bank, vbn, pageOffset, sectOffset, nSectsToWrite, src, RETURN_ON_ISSUE);
        increaseOwCounter(bank, lbn, pageOffset);
    }
    else
    { // Need a new page
        if (nSectsToWrite == SECTORS_PER_CHUNK)
        { // Write chunk in ow log and decrease valid chunks in previous ow blk
            decrementValidChunks(&heapDataOverwrite, bank, lbn);
            overwriteCompleteChunkNew();
        }
        else
        { // Must read old chunk and update in ow log
            decrementValidChunks(&heapDataOverwrite, bank, lbn);
            overwritePartialChunkWhenOldChunkIsInExhaustedOWLog(nSectsToWrite, chunkAddr);
        }
        updateOwDramBufMetadata();
        updateOwChunkPtr();
    }
    #if MeasureOwEfficiency
    write_dram_32(OwEffBuf(bank_, ChunkToLbn(chunkAddr)), read_dram_32(OwEffBuf(bank_, ChunkToLbn(chunkAddr))) + nSectsToWrite);
    #endif
    sectOffset_ += nSectsToWrite;
    remainingSects_ -= nSectsToWrite;
}
Example #11
static void writePartialChunkWhenOldChunkIsInFlashLog(UINT32 nSectsToWrite, UINT32 oldChunkAddr) {
    uart_print("writePartialChunkWhenOldChunkIsInFlashLog\r\n");
    UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id)+((sectOffset_ / SECTORS_PER_CHUNK)*BYTES_PER_CHUNK);
    UINT32 dstWBufChunkStart = LOG_BUF(bank_) + (chunkPtr[bank_] * BYTES_PER_CHUNK); // base address of the destination chunk
    UINT32 startOffsetWrite = (sectOffset_ % SECTORS_PER_CHUNK) * BYTES_PER_SECTOR;
    // Old Chunk Location
    UINT32 oldBank = ChunkToBank(oldChunkAddr);
    UINT32 oldVbn = get_log_vbn(oldBank, ChunkToLbn(oldChunkAddr));
    UINT32 oldPageOffset = ChunkToPageOffset(oldChunkAddr);
    UINT32 oldSectOffset = ChunkToSectOffset(oldChunkAddr);
    // Offsets
    UINT32 dstByteOffset = chunkPtr[bank_] * BYTES_PER_CHUNK;
    UINT32 srcByteOffset = ChunkToChunkOffset(oldChunkAddr) * BYTES_PER_CHUNK;
    UINT32 alignedWBufAddr = LOG_BUF(bank_) + dstByteOffset - srcByteOffset;
    waitBusyBank(bank_);
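    // nand_page_ptread DMAs to buf + source sector offset; the base was biased above by
    // (dstByteOffset - srcByteOffset) so the old chunk lands exactly in this chunk's slot of LOG_BUF.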
    nand_page_ptread(oldBank, oldVbn, oldPageOffset, oldSectOffset, SECTORS_PER_CHUNK, alignedWBufAddr, RETURN_WHEN_DONE);
    mem_copy(dstWBufChunkStart + startOffsetWrite, src + startOffsetWrite, nSectsToWrite*BYTES_PER_SECTOR);
}
Example #12
void nand_page_ptprogram_from_host(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const sect_offset, UINT32 const num_sectors)
{
#if PrintStats
    uart_print_level_1("FP ");
    uart_print_level_1_int(num_sectors);
    uart_print_level_1("\r\n");
#endif

    totSecWrites += num_sectors;

    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    row = (vblock * PAGES_PER_BLK) + page_num;
    uart_print("nand_page_ptprogram_from_host bank="); uart_print_int(bank);
    uart_print(", vblock="); uart_print_int(vblock);
    uart_print(", page="); uart_print_int(page_num);
    uart_print(", sect_offset="); uart_print_int(sect_offset);
    uart_print(", num_sectors="); uart_print_int(num_sectors); uart_print("\r\n");
    uart_print("Writing row="); uart_print_int(row); uart_print("\r\n");

    SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
#if OPTION_FTL_TEST == TRUE
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
#else
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_SATA_W);
#endif
    SETREG(FCP_DMA_ADDR, WR_BUF_PTR(g_ftl_write_buf_id));
    SETREG(FCP_DMA_CNT, num_sectors * BYTES_PER_SECTOR);

    SETREG(FCP_COL, sect_offset);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    //flash_issue_cmd(bank, RETURN_WHEN_DONE);
    flash_issue_cmd(bank, RETURN_ON_ISSUE);

    g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
}
Example #13
static void overwriteChunkOldInWBuf(UINT32 chunkAddr) {
    //uart_print_level_1("23 ");
    /* Question: is it better to copy to the ow buf, or to overwrite in place in the w buf?
     * Current implementation: copy to the ow buf, because it will probably be overwritten again in the future.
     */
    uart_print("overwriteChunkOldInWBuf\r\n");
    chooseNewBank_();
    UINT32 nSectsToWrite = (((sectOffset_ % SECTORS_PER_CHUNK) + remainingSects_) < SECTORS_PER_CHUNK)
                               ? remainingSects_                                          // write fits inside the current chunk
                               : (SECTORS_PER_CHUNK - (sectOffset_ % SECTORS_PER_CHUNK)); // clamp to the chunk boundary
    UINT32 srcBank = ChunkToBank(chunkAddr);
    UINT32 srcChunkIdx = ChunkToChunkOffset(chunkAddr);
    UINT32 startOffsetOverwrite = (sectOffset_ % SECTORS_PER_CHUNK) * BYTES_PER_SECTOR;
    UINT32 endOffsetOverwrite =  ((sectOffset_ % SECTORS_PER_CHUNK)  + nSectsToWrite) * BYTES_PER_SECTOR;
    UINT32 wBufAddr = LOG_BUF(srcBank) + srcChunkIdx*BYTES_PER_CHUNK;
    UINT32 owBufAddr = OW_LOG_BUF(bank_) + owChunkPtr[bank_] * BYTES_PER_CHUNK;
    UINT32 sataBufAddr = WR_BUF_PTR(g_ftl_write_buf_id)+(sectOffset_*BYTES_PER_SECTOR);
    UINT32 leftHoleSize = startOffsetOverwrite;
    UINT32 rightHoleSize =  BYTES_PER_CHUNK - endOffsetOverwrite;
    waitBusyBank(bank_); // (Fabio) probably should wait. In contrast to overwriteChunkOldInOwBuf, here we are writing to a new chunk in ow buf, thus it might be that a previous operation involving ow buf is in flight
    mem_copy(owBufAddr + leftHoleSize, sataBufAddr, nSectsToWrite*BYTES_PER_SECTOR);
    if(leftHoleSize > 0) {
        uart_print("copy left hole\r\n");
        mem_copy(owBufAddr, wBufAddr, leftHoleSize); // copy left hole
    }
    if(rightHoleSize > 0) {
        uart_print("copy right hole\r\n");
        mem_copy(owBufAddr+endOffsetOverwrite, wBufAddr+endOffsetOverwrite, rightHoleSize); // copy right hole
    }
    logBufMeta[srcBank].dataLpn[srcChunkIdx]=INVALID; // invalidate in w buf
    updateOwDramBufMetadata();
    updateOwChunkPtr();
    sectOffset_ += nSectsToWrite;
    remainingSects_ -= nSectsToWrite;
    #if MeasureDramAbsorb
    uart_print_level_1("OWDRAM "); uart_print_level_1_int(nSectsToWrite); uart_print_level_1("\r\n");
    #endif
}
Example #14
static void write_page(UINT32 const lpn, UINT32 const sect_offset, UINT32 const num_sectors)
{
	write_p++;

	UINT32 bank, old_vpn, new_vpn;
	UINT32 vblock, page_num, page_offset, column_cnt;

	bank        = get_num_bank(lpn); // page striping
	page_offset = sect_offset;
	column_cnt  = num_sectors;

	new_vpn  = assign_new_write_vpn(bank);
	old_vpn  = get_vpn(lpn);
	if (old_vpn != NULL)
	{
		vblock   = old_vpn / PAGES_PER_BLK;
		page_num = old_vpn % PAGES_PER_BLK;
		if (num_sectors != SECTORS_PER_PAGE)
		{
			if ((num_sectors <= 8) && (page_offset != 0))
			{
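				// Small write that does not start at sector 0: one full-page read into
				// FTL_BUF plus one or two mem_copy calls beats two partial-page reads.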
				// one page async read
				data_read++;
				nand_page_read(bank,
						vblock,
						page_num,
						FTL_BUF(bank));
				// copy `left hole sectors' into SATA write buffer
				if (page_offset != 0)
				{
					mem_copy(WR_BUF_PTR(g_ftl_write_buf_id),
							FTL_BUF(bank),
							page_offset * BYTES_PER_SECTOR);
				}
				// copy `right hole sectors' into SATA write buffer
				if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
				{
					UINT32 const rhole_base = (page_offset + column_cnt) * BYTES_PER_SECTOR;

					mem_copy(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base,
							FTL_BUF(bank) + rhole_base,
							BYTES_PER_PAGE - rhole_base);
				}
			}
			// left/right hole async read operation (two partial page reads)
			else
			{
				// read `left hole sectors'
				if (page_offset != 0)
				{
					data_read++;
					nand_page_ptread(bank,
							vblock,
							page_num,
							0,
							page_offset,
							WR_BUF_PTR(g_ftl_write_buf_id),
							RETURN_WHEN_DONE);
				}
				// read `right hole sectors'
				if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
				{
					data_read++;
					nand_page_ptread(bank,
							vblock,
							page_num,
							page_offset + column_cnt,
							SECTORS_PER_PAGE - (page_offset + column_cnt),
							WR_BUF_PTR(g_ftl_write_buf_id),
							RETURN_WHEN_DONE);
				}
			}
		}
		set_vcount(bank, vblock, get_vcount(bank, vblock) - 1);
	}
	else if (num_sectors != SECTORS_PER_PAGE)
	{
		if(page_offset != 0)
			mem_set_dram(WR_BUF_PTR(g_ftl_write_buf_id),
					0,
					page_offset * BYTES_PER_SECTOR);
		if((page_offset + num_sectors) < SECTORS_PER_PAGE)
		{
			UINT32 const rhole_base = (page_offset + num_sectors) * BYTES_PER_SECTOR;
			mem_set_dram(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base, 0, BYTES_PER_PAGE - rhole_base);
		}
	}
	vblock   = new_vpn / PAGES_PER_BLK;
	page_num = new_vpn % PAGES_PER_BLK;

	// write new data (make sure that the new data is ready in the write buffer frame)
	// (c.f FO_B_SATA_W flag in flash.h)
	data_prog++;
	nand_page_program_from_host(bank,
			vblock,
			page_num);
	// update metadata
	set_lpn(bank, page_num, lpn);
	set_vpn(lpn, new_vpn);
	set_vcount(bank, vblock, get_vcount(bank, vblock) + 1);
}
Example #15
static void tc_write_rand(const UINT32 start_lsn, const UINT32 io_num, const UINT32 sector_size)
{
    UINT32 i, j, wr_buf_addr, rd_buf_addr, data, r_data;
    UINT32 lba, num_sectors = sector_size;
    UINT32 io_cnt = io_num;

    /* UINT32 volatile g_barrier = 0; while (g_barrier == 0); */
    led(0);
    srand(RANDOM_SEED);

    for (UINT32 loop = 0; loop < 1; loop++) {
        wr_buf_addr = WR_BUF_ADDR;
        data = 0;
        uart_printf("test loop cnt: %d", loop);

        for (i = 0; i < io_cnt; i++) {
            do {
                lba = rand() % IO_LIMIT;
            }while(lba + num_sectors >= IO_LIMIT);
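            // lba + num_sectors now fits below IO_LIMIT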

            wr_buf_addr = WR_BUF_PTR(g_ftl_write_buf_id) + ((lba % SECTORS_PER_PAGE) * BYTES_PER_SECTOR);
            r_data = data;

            for (j = 0; j < num_sectors; j++) {
                mem_set_dram(wr_buf_addr, data, BYTES_PER_SECTOR);

                wr_buf_addr += BYTES_PER_SECTOR;

                if (wr_buf_addr >= WR_BUF_ADDR + WR_BUF_BYTES) {
                    wr_buf_addr = WR_BUF_ADDR;
                }
                data++;
            }
/*             ptimer_start(); */
            ftl_write(lba, num_sectors);
/*             ptimer_stop_and_uart_print(); */
            rd_buf_addr = RD_BUF_PTR(g_ftl_read_buf_id) + ((lba % SECTORS_PER_PAGE) * BYTES_PER_SECTOR);
/*             ptimer_start(); */
            ftl_read(lba, num_sectors);
/*             ptimer_stop_and_uart_print(); */

            flash_finish();

            for (j = 0; j < num_sectors; j++) {
                UINT32 sample = read_dram_32(rd_buf_addr);

                if (sample != r_data) {
                    uart_printf("ftl test fail...io#: %d, %d", lba, num_sectors);
                    uart_printf("sample data %d should be %d", sample, r_data);
                    led_blink();
                }
                rd_buf_addr += BYTES_PER_SECTOR;

                if (rd_buf_addr >= RD_BUF_ADDR + RD_BUF_BYTES) {
                    rd_buf_addr = RD_BUF_ADDR;
                }
                r_data++;
            }
        } // end for
    }
    ftl_flush();
}
Example #16
void ftl_write_sector(UINT32 const lba, UINT32 const totals)
{
	UINT32 new_bank, vsect_num, new_row;
	UINT32 new_psn;
	UINT32 temp;
	UINT32 dst,src;
	UINT32 index = lba % SECTORS_PER_PAGE;
	int i;
	//new_bank = lba % NUM_BANKS; // get bank number of sector
	new_bank = g_target_bank;
	

	// copy SATA host data into the DRAM merge buffer page
	//vsect_num = g_misc_meta[new_bank].g_merge_buff_sect;
	vsect_num = g_target_sect;

	dst = MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE + vsect_num * BYTES_PER_SECTOR;
	src = WR_BUF_PTR(g_ftl_write_buf_id) + index * BYTES_PER_SECTOR;

	// The firmware does not know the status of the previous NAND flash command,
	// so wait until the target bank is IDLE
	// (i.e., the targeted DRAM region has been fully flushed).
	while(_BSP_FSM(new_bank) != BANK_IDLE);
	mem_copy(dst, src, BYTES_PER_SECTOR * totals);

	// Record the LBAs buffered in this merge page so the vsn -> psn mapping
	// can be fixed up when the page is flushed to NAND.
	for(i = 0 ;i < totals;i++)
		g_merge_buffer_lsn[vsect_num + i ] = lba + i;
	vsect_num += totals;
	if( vsect_num >= SECTORS_PER_PAGE ){
		/* get free page */
		new_row = get_free_page(new_bank);
		SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
		SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
		SETREG(FCP_DMA_ADDR, MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE);
		SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
		SETREG(FCP_COL,0);
		SETREG(FCP_ROW_L(new_bank),new_row);
		SETREG(FCP_ROW_H(new_bank),new_row);

		flash_issue_cmd(new_bank,RETURN_ON_ISSUE);

		/* initialize merge buffer page's sector point */
		//	g_misc_meta[new_bank].g_merge_buff_sect = 0;
		g_target_sect = 0;
		g_target_bank = (g_target_bank + 1 ) % NUM_BANKS;
		// allocate new psn 
		//new_psn = new_row * SECTORS_PER_PAGE;

		new_psn = new_bank * SECTORS_PER_BANK + new_row * SECTORS_PER_PAGE;
		// vsn -> psn mapping
		for(i = 0 ;i < SECTORS_PER_PAGE; i++ )
		{
			set_psn( g_merge_buffer_lsn[i],
					new_psn + i );
		}
	}
	else
	{
		//g_misc_meta[new_bank].g_merge_buff_sect++;
		for(i = 0 ;i < totals;i++)
			set_psn(lba+i, ((UINT32)BIT31 | (g_target_sect+i) ));
		g_target_sect+=totals;
	}
}
Example #17
static void tc_write_seq(const UINT32 start_lsn, const UINT32 io_num, const UINT32 sector_size)
{
    UINT32 i, j, wr_buf_addr, rd_buf_addr, data;
    UINT32 lba, num_sectors = sector_size;
    UINT32 io_cnt = io_num;
    UINT32 const start_lba = start_lsn;

    /* UINT32 volatile g_barrier = 0; while (g_barrier == 0); */
    led(0);

    // STEP 1 - write
    for (UINT32 loop = 0; loop < 5; loop++)
    {
        wr_buf_addr = WR_BUF_ADDR;
        data = 0;
        lba  = start_lba;

        uart_print_32(loop); uart_print("");

        for (i = 0; i < io_cnt; i++)
        {
            wr_buf_addr = WR_BUF_PTR(g_ftl_write_buf_id) + ((lba % SECTORS_PER_PAGE) * BYTES_PER_SECTOR);
            for (j = 0; j < num_sectors; j++)
            {
                mem_set_dram(wr_buf_addr, data, BYTES_PER_SECTOR);

                wr_buf_addr += BYTES_PER_SECTOR;

                if (wr_buf_addr >= WR_BUF_ADDR + WR_BUF_BYTES)
                {
                    wr_buf_addr = WR_BUF_ADDR;
                }
                data++;
            }
	    if( i == 0x0000081C)
		    i = i; // no-op: anchor for a debugger breakpoint on this iteration
            ptimer_start();
            ftl_write(lba, num_sectors);
            ptimer_stop_and_uart_print();

            lba += num_sectors;

            if (lba >= (UINT32)NUM_LSECTORS)
            {
                uart_print("adjust lba because of out of lba");
                lba = 0;
            }
        }

        // STEP 2 - read and verify
        rd_buf_addr = RD_BUF_ADDR;
        data = 0;
        lba  = start_lba;
        num_sectors = MIN(num_sectors, NUM_RD_BUFFERS * SECTORS_PER_PAGE);

        for (i = 0; i < io_cnt; i++)
        {
            rd_buf_addr = RD_BUF_PTR(g_ftl_read_buf_id) + ((lba % SECTORS_PER_PAGE) * BYTES_PER_SECTOR);
            /* ptimer_start(); */
	    if( i == 0x0000081C)
		    i = i; // no-op: anchor for a debugger breakpoint on this iteration
            ftl_read(lba, num_sectors);

            flash_finish();
            /* ptimer_stop_and_uart_print(); */

            for (j = 0; j < num_sectors; j++)
            {
                UINT32 sample = read_dram_32(rd_buf_addr);

                if (sample != data)
                {
                    uart_printf("ftl test fail...io#: %d, %d", lba, num_sectors);
                    uart_printf("sample data %d should be %d", sample, data);
                    led_blink();
                }

                rd_buf_addr += BYTES_PER_SECTOR;

                if (rd_buf_addr >= RD_BUF_ADDR + RD_BUF_BYTES)
                {
                    rd_buf_addr = RD_BUF_ADDR;
                }
                data++;
            }

            lba += num_sectors;

            if (lba >= IO_LIMIT + num_sectors)
            {
                lba = 0;
            }
        }
    }
    ftl_flush();
}
Example #18
void ftl_write_sector(UINT32 const lba)
{
	UINT32 new_bank, vsect_num, new_row;
	UINT32 new_psn;
	UINT32 temp;
	UINT32 dst,src;
	UINT32 index = lba % SECTORS_PER_PAGE;
	int i;
	//new_bank = lba % NUM_BANKS; // get bank number of sector
	
	temp = get_psn(lba);

	if( (temp & (UINT32)BIT31) != 0 ){
		// The data at this LBA is already buffered in DRAM:
		// copy the SATA host data onto the same merge buffer sector.
		vsect_num = (temp ^ (UINT32)BIT31); 
		new_bank = vsect_num / SECTORS_PER_PAGE;
		vsect_num = vsect_num % SECTORS_PER_PAGE;

		dst = MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE + vsect_num * BYTES_PER_SECTOR;
		src = WR_BUF_PTR(g_ftl_write_buf_id) + index * BYTES_PER_SECTOR;
		mem_copy(dst,src, BYTES_PER_SECTOR);
	}
	else{
		// copy SATA host data into the DRAM merge buffer page
		//vsect_num = g_misc_meta[new_bank].g_merge_buff_sect;
		new_bank = g_target_bank;
		vsect_num = g_target_sect[new_bank];

		dst = MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE + vsect_num * BYTES_PER_SECTOR;
		src = WR_BUF_PTR(g_ftl_write_buf_id) + index * BYTES_PER_SECTOR;

		// The firmware does not know the status of the previous NAND flash command,
		// so wait until the target bank is IDLE
		// (i.e., the target DRAM region has been fully flushed).
		while(g_prev_bank[new_bank] != (UINT32)-1 && _BSP_FSM(g_prev_bank[new_bank]) != BANK_IDLE);
		mem_copy(dst, src, BYTES_PER_SECTOR);

		// Set BIT31 in the psn to mark the data as buffered in DRAM.
		set_psn(lba, ((UINT32)BIT31 | (new_bank * SECTORS_PER_PAGE + vsect_num)));

		// record the LBA for the vsn -> psn fix-up at flush time
		g_merge_buffer_lsn[new_bank][vsect_num] = lba;

		vsect_num++;

		// If this bank's merge buffer page is full,
		// then flush it to NAND flash
		// and assign a psn to every buffered sector.
		if( vsect_num >= SECTORS_PER_PAGE ){
			/* get free page */
			SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
			SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
			SETREG(FCP_DMA_ADDR, MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE);
			SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
			SETREG(FCP_COL,0);

			flash_issue_cmd(AUTO_SEL,RETURN_ON_ACCEPT);

			g_prev_bank[new_bank] = GETREG(WR_BANK);

			new_row = get_free_page(g_prev_bank[new_bank]);
			SETREG(FCP_ROW_L(g_prev_bank[new_bank]),new_row);
			SETREG(FCP_ROW_H(g_prev_bank[new_bank]),new_row);

			/* initialize merge buffer page's sector point */
		//	g_misc_meta[new_bank].g_merge_buff_sect = 0;
			g_target_sect[new_bank] = 0;
			// allocate new psn 
			//new_psn = new_row * SECTORS_PER_PAGE;

			new_psn = g_prev_bank[new_bank] * SECTORS_PER_BANK + g_target_row[g_prev_bank[new_bank]] * SECTORS_PER_PAGE;
			// vsn -> psn mapping
			for(i = 0 ;i < SECTORS_PER_PAGE; i++ )
			{
				set_psn( g_merge_buffer_lsn[new_bank][i],
						new_psn + i );
			}
			g_target_row[g_prev_bank[new_bank]] = new_row;
		}
		else
		{
			//g_misc_meta[new_bank].g_merge_buff_sect++;
			g_target_sect[new_bank]++;
		}
		g_target_bank = (g_target_bank + 1 ) % NUM_BANKS;
	}
}
Example #19
void ftl_write_sector(UINT32 const lba)
{
	UINT32 new_bank, vsect_num, new_row;
	UINT32 new_psn;
	UINT32 temp;
	UINT32 dst,src;
	UINT32 index = lba % SECTORS_PER_PAGE;
	int i;
	//new_bank = lba % NUM_BANKS; // get bank number of sector
	new_bank = g_target_bank;
	
	temp = get_psn(lba);

	if( (temp & (UINT32)BIT31) != 0 ){
		// The data at this LBA is already buffered in DRAM:
		// copy the SATA host data onto the same merge buffer sector.
		vsect_num = (temp ^ (UINT32)BIT31); 

		dst = MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE + vsect_num * BYTES_PER_SECTOR;
		src = WR_BUF_PTR(g_ftl_write_buf_id) + index * BYTES_PER_SECTOR;
		mem_copy(dst,src, BYTES_PER_SECTOR);
	}
	else{
		// copy SATA host data into the DRAM merge buffer page
		//vsect_num = g_misc_meta[new_bank].g_merge_buff_sect;
		vsect_num = g_target_sect;

		dst = MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE + vsect_num * BYTES_PER_SECTOR;
		src = WR_BUF_PTR(g_ftl_write_buf_id) + index * BYTES_PER_SECTOR;
		mem_copy(dst, src, BYTES_PER_SECTOR);
		// Set BIT31 in the psn to mark the data as buffered in DRAM.
		set_psn(lba, ((UINT32)BIT31 | vsect_num ));

		// record the LBA for the vsn -> psn fix-up at flush time
		g_merge_buffer_lsn[vsect_num] = lba;

		vsect_num++;

		// If this bank's merge buffer page is full,
		// then flush it to NAND flash
		// and assign a psn to every buffered sector.
		if( vsect_num >= SECTORS_PER_PAGE ){
			/* get free page */
			new_row = get_free_page(new_bank);
			SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
			SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
			SETREG(FCP_DMA_ADDR, MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE);
			SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
			SETREG(FCP_COL,0);
			SETREG(FCP_ROW_L(new_bank),new_row);
			SETREG(FCP_ROW_H(new_bank),new_row);

			flash_issue_cmd(new_bank,RETURN_ON_ISSUE);

			/* initialize merge buffer page's sector point */
		//	g_misc_meta[new_bank].g_merge_buff_sect = 0;
			g_target_sect = 0;
			g_target_bank = (g_target_bank + 1 ) % NUM_BANKS;
			// allocate new psn 
			//new_psn = new_row * SECTORS_PER_PAGE;

			new_psn = new_bank * SECTORS_PER_BANK + new_row * SECTORS_PER_PAGE;
			// vsn -> psn mapping
			for(i = 0 ;i < SECTORS_PER_PAGE; i++ )
			{
				set_psn( g_merge_buffer_lsn[i],
						new_psn + i );
			}
		}
		else
		{
			//g_misc_meta[new_bank].g_merge_buff_sect++;
			g_target_sect++;
		}
	}
}
Example #20
static void write_page(UINT32 const lpn, UINT32 const sect_offset, UINT32 const num_sectors)
{
    CHECK_LPAGE(lpn);
    ASSERT(sect_offset < SECTORS_PER_PAGE);
    ASSERT(num_sectors > 0 && num_sectors <= SECTORS_PER_PAGE);

    UINT32 bank, old_vpn, new_vpn;
    UINT32 vblock, page_num, page_offset, column_cnt;

    bank        = get_num_bank(lpn); // page striping
    page_offset = sect_offset;
    column_cnt  = num_sectors;

    new_vpn  = assign_new_write_vpn(bank);
    old_vpn  = get_vpn(lpn);

    CHECK_VPAGE (old_vpn);
    CHECK_VPAGE (new_vpn);
    ASSERT(old_vpn != new_vpn);

    g_ftl_statistics[bank].page_wcount++;

    // if old data already exists,
    if (old_vpn != NULL)
    {
        vblock   = old_vpn / PAGES_PER_BLK;
        page_num = old_vpn % PAGES_PER_BLK;

        //--------------------------------------------------------------------------------------
        // `Partial programming'
        // We cannot assume that the hole sectors are already loaded in the SATA write buffer.
        // Thus, read the left/right hole sectors of the old valid page into the write buffer,
        // then program the whole page of valid data.
        //--------------------------------------------------------------------------------------
        if (num_sectors != SECTORS_PER_PAGE)
        {
            // Performance optimization (not proved):
            // to reduce flash accesses, read the whole old page once and copy the valid holes
            // into the SATA write buffer: one full-page read plus one or two mem_copy calls.
            if ((num_sectors <= 8) && (page_offset != 0))
            {
                // one page async read
                nand_page_read(bank,
                               vblock,
                               page_num,
                               FTL_BUF(bank));
                // copy `left hole sectors' into SATA write buffer
                if (page_offset != 0)
                {
                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id),
                             FTL_BUF(bank),
                             page_offset * BYTES_PER_SECTOR);
                }
                // copy `right hole sectors' into SATA write buffer
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    UINT32 const rhole_base = (page_offset + column_cnt) * BYTES_PER_SECTOR;

                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base,
                             FTL_BUF(bank) + rhole_base,
                             BYTES_PER_PAGE - rhole_base);
                }
            }
            // left/right hole async read operation (two partial page reads)
            else
            {
                // read `left hole sectors'
                if (page_offset != 0)
                {
                    nand_page_ptread(bank,
                                     vblock,
                                     page_num,
                                     0,
                                     page_offset,
                                     WR_BUF_PTR(g_ftl_write_buf_id),
                                     RETURN_ON_ISSUE);
                }
                // read `right hole sectors'
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    nand_page_ptread(bank,
                                     vblock,
                                     page_num,
                                     page_offset + column_cnt,
                                     SECTORS_PER_PAGE - (page_offset + column_cnt),
                                     WR_BUF_PTR(g_ftl_write_buf_id),
                                     RETURN_ON_ISSUE);
                }
            }
        }
        // full page write
        page_offset = 0;
        column_cnt  = SECTORS_PER_PAGE;
    // invalidate the old page (decrease vcount)
        set_vcount(bank, vblock, get_vcount(bank, vblock) - 1);
    }
    vblock   = new_vpn / PAGES_PER_BLK;
    page_num = new_vpn % PAGES_PER_BLK;
    ASSERT(get_vcount(bank,vblock) < (PAGES_PER_BLK - 1));

    // write new data (make sure that the new data is ready in the write buffer frame)
    // (c.f FO_B_SATA_W flag in flash.h)
    nand_page_ptprogram_from_host(bank,
                                  vblock,
                                  page_num,
                                  page_offset,
                                  column_cnt);
    // update metadata
    set_lpn(bank, page_num, lpn);
    set_vpn(lpn, new_vpn);
    set_vcount(bank, vblock, get_vcount(bank, vblock) + 1);
}