Example #1
/* NOTE: This function calls rebuildPageToFtlBuf in GcMode, so the valid-chunk counters of the old blocks are already managed.
 * Do not call manageOldChunks before calling this!
 */
static void appendPageToSWBlock (const UINT32 dataLpn, const UINT32 sectOffset, const UINT32 nSects)
{
    uart_print("appendPageToSWBlock dataLpn="); uart_print_int(dataLpn);
    uart_print(", sectOffset="); uart_print_int(sectOffset);
    uart_print(", nSects="); uart_print_int(nSects); uart_print("\r\n");
    UINT32 nSectsToWrite = SECTORS_PER_PAGE - sectOffset;
    UINT32 logLpn = getSWLpn(bank_);
    UINT32 vBlk = get_log_vbn(bank_, LogPageToLogBlk(logLpn));
    UINT32 dst = FTL_BUF(0) + (sectOffset*BYTES_PER_SECTOR);
    UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id)+(sectOffset*BYTES_PER_SECTOR);
    rebuildPageToFtlBuf(dataLpn, 0, SECTORS_PER_PAGE, GcMode); // Rebuild rest of the page in FTL buffer (rebuild entire page to be sure that all chunks are correctly garbage collected, especially if they are in DRAM)
    //waitBusyBank(bank_);
    flash_finish();
    mem_copy(dst, src, nSectsToWrite * BYTES_PER_SECTOR);                                       // Fill FTL buffer with new data
    // TODO: this program operation shouldn't be synchronous; we need a global variable storing the last bank writing data from FTL_BUF(0)
    nand_page_program(bank_, vBlk, LogPageToOffset(logLpn), FTL_BUF(0), RETURN_WHEN_DONE);      // Write FTL buffer to the next sequential page
    UINT32 chunkIdx;
    for(chunkIdx=0; chunkIdx<sectOffset / SECTORS_PER_CHUNK; ++chunkIdx)
    { // For chunks before the start of the new data, update the mapping only if there previously was valid data (now copied into the new page); otherwise insert INVALID into the lpns list to speed up GC later
        if (ChunksMapTable(dataLpn, chunkIdx) > DRAM_BASE + DRAM_SIZE)
        {
            uart_print_level_1("ERROR in appendPageToSWBlk 1: reading above DRAM address space\r\n");
        }
        if (read_dram_32(ChunksMapTable(dataLpn, chunkIdx)) != INVALID)
        {
            UINT32 lChunkAddr = (logLpn * CHUNKS_PER_PAGE) + chunkIdx;
            if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >=(DRAM_BASE + DRAM_SIZE))
            {
                uart_print_level_1("ERROR in write::appendPageToSWBlk 1: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
            }
            write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), dataLpn);
            write_dram_32(ChunksMapTable(dataLpn, chunkIdx), (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + lChunkAddr);
        }
        else
        { // Decrement valid chunks in the blk we are writing to, because we inserted null data
            if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >=(DRAM_BASE + DRAM_SIZE))
            {
                uart_print_level_1("ERROR in write::appendPageToSWBlk 2: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
            }
            write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), INVALID);
            decrementValidChunks(&heapDataWrite, bank_, LogPageToLogBlk(logLpn));
        }
    }
    for( ; chunkIdx < CHUNKS_PER_PAGE; ++chunkIdx)
    { // The new chunks are all valid, so we don't bother checking whether they were valid before
            UINT32 lChunkAddr = (logLpn * CHUNKS_PER_PAGE) + chunkIdx;
            if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >=(DRAM_BASE + DRAM_SIZE))
            {
                uart_print_level_1("ERROR in write::appendPageToSWBlk 3: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
            }
            write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), dataLpn);
            write_dram_32(ChunksMapTable(dataLpn, chunkIdx), (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + lChunkAddr);
    }
    SWCtrl[bank_].nextDataLpn=dataLpn+1;
    increaseSWLpn(bank_);
    g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
    SETREG (BM_STACK_WRSET, g_ftl_write_buf_id);
    SETREG (BM_STACK_RESET, 0x01);
}
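
A standalone sketch of the chunk-address arithmetic used in appendPageToSWBlock when filling ChunksMapTable. The geometry constants below are assumed example values, not the firmware's real ones; the point is only how the stored entry packs the bank and the in-bank chunk position:

#include <stdio.h>
#include <stdint.h>

/* Assumed example geometry, not the real firmware constants */
#define CHUNKS_PER_PAGE   8
#define PAGES_PER_BLK     128
#define CHUNKS_PER_BLK    (CHUNKS_PER_PAGE * PAGES_PER_BLK)
#define LOG_BLK_PER_BANK  32

int main(void)
{
    uint32_t bank = 2, logLpn = 300, chunkIdx = 5;
    /* In-bank logical chunk address, as computed in appendPageToSWBlock */
    uint32_t lChunkAddr = logLpn * CHUNKS_PER_PAGE + chunkIdx;
    /* Value written into ChunksMapTable: banks laid out back to back */
    uint32_t mapEntry = bank * LOG_BLK_PER_BANK * CHUNKS_PER_BLK + lChunkAddr;
    printf("lChunkAddr=%u mapEntry=%u\n", (unsigned)lChunkAddr, (unsigned)mapEntry);
    /* Decoding the entry again (what helpers like ChunkToBank presumably do) */
    printf("bank=%u inBankChunk=%u\n",
           (unsigned)(mapEntry / (LOG_BLK_PER_BANK * CHUNKS_PER_BLK)),
           (unsigned)(mapEntry % (LOG_BLK_PER_BANK * CHUNKS_PER_BLK)));
    return 0;
}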
Example #2
static void readCompletePage(UINT32 *chunksInPage, UINT32 *srcChunkByteOffsets, UINT32 *chunkIdxs, UINT8 *decreaseInOW)
{
    uart_print("readCompletePage\r\n");
    UINT32 oldLogBank = ChunkToBank(oldChunkAddr_);
    UINT32 oldLogVbn = get_log_vbn(oldLogBank, ChunkToLbn(oldChunkAddr_));
    UINT32 oldLogPageOffset = ChunkToPageOffset(oldChunkAddr_);
    nand_page_ptread(oldLogBank, oldLogVbn, oldLogPageOffset, 0, SECTORS_PER_PAGE, TEMP_BUF_ADDR, RETURN_WHEN_DONE);
    for (int i=0; i<*chunksInPage; i++)
    {
        UINT32 src = TEMP_BUF_ADDR + srcChunkByteOffsets[i];
        UINT32 dst = FTL_BUF(0) + chunkIdxs[i] * BYTES_PER_CHUNK;
        mem_copy(dst, src, BYTES_PER_CHUNK);
        chunksDone_[chunkIdxs[i]]=1;
        if(mode_ == GcMode)
        {
#if Overwrite
            if (decreaseInOW[i])
            {
                decrementValidChunks(&heapDataOverwrite, oldLogBank, ChunkToLbn(oldChunkAddr_));
            }
            else
#endif
            {
                decrementValidChunks(&heapDataWrite, oldLogBank, ChunkToLbn(oldChunkAddr_));
            }
        }
    }
}
Example #3
static void chunkInOWDramBuf()
{
    uart_print("chunkInOWDramBuf\r\n");
    UINT32 dst = FTL_BUF(0) + (chunkIdx_*BYTES_PER_CHUNK);
    UINT32 src = OW_LOG_BUF(ChunkToBank(oldChunkAddr_))+(ChunkToSectOffset(oldChunkAddr_)*BYTES_PER_SECTOR);
    mem_copy(dst, src, BYTES_PER_CHUNK);
    if (mode_ == GcMode) owLogBufMeta[ChunkToBank(oldChunkAddr_)].dataLpn[oldChunkAddr_ % CHUNKS_PER_PAGE] = INVALID;
}
Example #4
static void overwriteChunkOldInOwLog(UINT32 chunkAddr)
{
    //uart_print_level_1("22 ");
    uart_print("overwriteChunkOldInOwLog\r\n");
    UINT32 nSectsToWrite = (((sectOffset_ % SECTORS_PER_CHUNK) + remainingSects_) < SECTORS_PER_CHUNK) ?
                           remainingSects_ :
                           (SECTORS_PER_CHUNK - (sectOffset_ % SECTORS_PER_CHUNK));
    UINT32 bank = ChunkToBank(chunkAddr);
    UINT32 lbn = ChunkToLbn(chunkAddr);
    UINT32 vbn = get_log_vbn(bank, lbn);
    UINT32 pageOffset = ChunkToPageOffset(chunkAddr);
    if (readOwCounter(bank, lbn, pageOffset) < OwLimit)
    { // Can overwrite in place
        UINT32 sectOffset = ChunkToSectOffset(chunkAddr) + (sectOffset_ % SECTORS_PER_CHUNK);
        //UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id) + (sectOffset_ * BYTES_PER_SECTOR) - (sectOffset * BYTES_PER_SECTOR); // startBuf + srcOffset - dstOffset
        if (lastBankUsingFtlBuf1 != INVALID)
        {
            waitBusyBank(lastBankUsingFtlBuf1);
        }
        mem_copy(FTL_BUF(1)+(sectOffset_*BYTES_PER_SECTOR), WR_BUF_PTR(g_ftl_write_buf_id) + (sectOffset_*BYTES_PER_SECTOR), nSectsToWrite*BYTES_PER_SECTOR);
        UINT32 src = FTL_BUF(1) + (sectOffset_ * BYTES_PER_SECTOR) - (sectOffset * BYTES_PER_SECTOR); // startBuf + srcOffset - dstOffset
        lastBankUsingFtlBuf1 = bank;
        nand_page_ptprogram(bank, vbn, pageOffset, sectOffset, nSectsToWrite, src, RETURN_ON_ISSUE);
        increaseOwCounter(bank, lbn, pageOffset);
    }
    else
    { // Need a new page
        if (nSectsToWrite == SECTORS_PER_CHUNK)
        { // Write chunk in ow log and decrease valid chunks in previous ow blk
            decrementValidChunks(&heapDataOverwrite, bank, lbn);
            overwriteCompleteChunkNew();
        }
        else
        { // Must read old chunk and update in ow log
            decrementValidChunks(&heapDataOverwrite, bank, lbn);
            overwritePartialChunkWhenOldChunkIsInExhaustedOWLog(nSectsToWrite, chunkAddr);
        }
        updateOwDramBufMetadata();
        updateOwChunkPtr();
    }
    #if MeasureOwEfficiency
    write_dram_32(OwEffBuf(bank_, ChunkToLbn(chunkAddr)), read_dram_32(OwEffBuf(bank_, ChunkToLbn(chunkAddr))) + nSectsToWrite);
    #endif
    sectOffset_ += nSectsToWrite;
    remainingSects_ -= nSectsToWrite;
}
Example #5
// logging misc + vcount metadata
static void logging_misc_metadata(void)
{
    UINT32 misc_meta_bytes = NUM_MISC_META_SECT * BYTES_PER_SECTOR; // per bank
    UINT32 vcount_addr     = VCOUNT_ADDR;
    UINT32 vcount_bytes    = NUM_VCOUNT_SECT * BYTES_PER_SECTOR; // per bank
    UINT32 vcount_boundary = VCOUNT_ADDR + VCOUNT_BYTES; // entire vcount data
    UINT32 bank;

    flash_finish();

    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        inc_miscblk_vpn(bank);

        // note: if misc. meta block is full, just erase old block & write offset #0
        if ((get_miscblk_vpn(bank) / PAGES_PER_BLK) != MISCBLK_VBN)
        {
            nand_block_erase(bank, MISCBLK_VBN);
            set_miscblk_vpn(bank, MISCBLK_VBN * PAGES_PER_BLK); // vpn = 128
        }
        // copy misc. metadata to FTL buffer
        mem_copy(FTL_BUF(bank), &g_misc_meta[bank], misc_meta_bytes);

        // copy vcount metadata to FTL buffer
        if (vcount_addr <= vcount_boundary)
        {
            mem_copy(FTL_BUF(bank) + misc_meta_bytes, vcount_addr, vcount_bytes);
            vcount_addr += vcount_bytes;
        }
    }
    // logging the misc. metadata to nand flash
    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        nand_page_ptprogram(bank,
                            get_miscblk_vpn(bank) / PAGES_PER_BLK,
                            get_miscblk_vpn(bank) % PAGES_PER_BLK,
                            0,
                            NUM_MISC_META_SECT + NUM_VCOUNT_SECT,
                            FTL_BUF(bank));
    }
    flash_finish();
}
Example #6
static UINT32 assign_new_write_vpn(UINT32 const bank)
{
    ASSERT(bank < NUM_BANKS);

    UINT32 write_vpn;
    UINT32 vblock;

    write_vpn = get_cur_write_vpn(bank);
    vblock    = write_vpn / PAGES_PER_BLK;

    // NOTE: if the next new write page's offset is
    // the last page offset of the vblock (i.e. PAGES_PER_BLK - 1),
    if ((write_vpn % PAGES_PER_BLK) == (PAGES_PER_BLK - 2))
    {
        // then, because of a flash controller limitation
        // (accessing the spare area, i.e. OOB, is prohibited),
        // we persistently write the lpn list into the last page of the vblock.
        mem_copy(FTL_BUF(bank), g_misc_meta[bank].lpn_list_of_cur_vblock, sizeof(UINT32) * PAGES_PER_BLK);
        // fix minor bug
        nand_page_ptprogram(bank, vblock, PAGES_PER_BLK - 1, 0,
                            ((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR), FTL_BUF(bank));

        mem_set_sram(g_misc_meta[bank].lpn_list_of_cur_vblock, 0x00000000, sizeof(UINT32) * PAGES_PER_BLK);

        inc_full_blk_cnt(bank);

        // do garbage collection if necessary
        if (is_full_all_blks(bank))
        {
            garbage_collection(bank);
            return get_cur_write_vpn(bank);
        }
        do
        {
            vblock++;

            ASSERT(vblock != VBLKS_PER_BANK);
        }while (get_vcount(bank, vblock) == VC_MAX);
    }
    // write page -> next block
    if (vblock != (write_vpn / PAGES_PER_BLK))
    {
        write_vpn = vblock * PAGES_PER_BLK;
    }
    else
    {
        write_vpn++;
    }
    set_new_write_vpn(bank, write_vpn);

    return write_vpn;
}
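
The sector count passed to nand_page_ptprogram above is a ceiling division of the lpn-list size. A small standalone check of that arithmetic; PAGES_PER_BLK and BYTES_PER_SECTOR are assumed example values, not the firmware's real ones:

#include <stdio.h>
#include <stdint.h>

/* Assumed example geometry, not the real firmware constants */
#define PAGES_PER_BLK    128
#define BYTES_PER_SECTOR 512

int main(void)
{
    /* Bytes occupied by the per-block lpn list (one UINT32 per page) */
    uint32_t lpn_list_bytes = sizeof(uint32_t) * PAGES_PER_BLK;
    /* Round up to whole sectors, as in the nand_page_ptprogram call above */
    uint32_t sectors = (lpn_list_bytes + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR;
    printf("lpn list: %u bytes -> %u sector(s)\n",
           (unsigned)lpn_list_bytes, (unsigned)sectors);
    return 0;
}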
Example #7
void readFromLogBlk (UINT32 const dataLpn, UINT32 const sectOffset, UINT32 const nSects){
    uart_print("readFromLogBlk dataLpn="); uart_print_int(dataLpn);
    uart_print(", sect_offset="); uart_print_int(sectOffset);
    uart_print(", num_sectors="); uart_print_int(nSects); uart_print("\r\n");

    UINT32 dst = RD_BUF_PTR(g_ftl_read_buf_id)+(sectOffset*BYTES_PER_SECTOR);
    UINT32 src = FTL_BUF(0)+(sectOffset*BYTES_PER_SECTOR);
    rebuildPageToFtlBuf(dataLpn, sectOffset, nSects, ReadMode);
    mem_copy(dst, src, nSects*BYTES_PER_SECTOR);
    g_ftl_read_buf_id = (g_ftl_read_buf_id + 1) % NUM_RD_BUFFERS;
    SETREG (BM_STACK_RDSET, g_ftl_read_buf_id);    // change bm_read_limit
    SETREG (BM_STACK_RESET, 0x02);    // change bm_read_limit
    //UINT32 bank = choose new bank
    //int bankToGarbageCollect = (bank + NUM_BANKS - 1) % NUM_BANKS;
    //callPM(bankToGarbageCollect);
}
Example #8
static void readOneChunk(UINT32 *chunksInPage, UINT32 *srcChunkByteOffsets, UINT32 *chunkIdxs, UINT8 *decreaseInOW)
{
    uart_print("readOneChunk\r\n");
    UINT32 bank = ChunkToBank(oldChunkAddr_);
    UINT32 vbn = get_log_vbn(bank, ChunkToLbn(oldChunkAddr_));
    UINT32 pageOffset = ChunkToPageOffset(oldChunkAddr_);
    UINT32 dst = FTL_BUF(0) + (chunkIdxs[0]*BYTES_PER_CHUNK) - srcChunkByteOffsets[0]; // buf addr + dst - src
    nand_page_ptread(bank, vbn, pageOffset, srcChunkByteOffsets[0]/BYTES_PER_SECTOR, SECTORS_PER_CHUNK, dst, RETURN_WHEN_DONE);
    if(mode_ == GcMode)
    {
#if Overwrite
        if (decreaseInOW[0])
        {
            decrementValidChunks(&heapDataOverwrite, bank, ChunkToLbn(oldChunkAddr_));
        }
        else
#endif
        {
            decrementValidChunks(&heapDataWrite, bank, ChunkToLbn(oldChunkAddr_));
        }
    }
}
Example #9
// misc + VCOUNT
static void load_misc_metadata(void)
{
    UINT32 misc_meta_bytes = NUM_MISC_META_SECT * BYTES_PER_SECTOR;
    UINT32 vcount_bytes    = NUM_VCOUNT_SECT * BYTES_PER_SECTOR;
    UINT32 vcount_addr     = VCOUNT_ADDR;
    UINT32 vcount_boundary = VCOUNT_ADDR + VCOUNT_BYTES;

    UINT32 load_flag = 0;
    UINT32 bank, page_num;
    UINT32 load_cnt = 0;

    flash_finish();

	disable_irq();
	flash_clear_irq();	// clear any flash interrupt flags that might have been set

    // scan valid metadata in descending order from last page offset
    for (page_num = PAGES_PER_BLK - 1; page_num != ((UINT32) -1); page_num--)
    {
        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            if (load_flag & (0x1 << bank))
            {
                continue;
            }
            // read valid metadata from misc. metadata area
            nand_page_ptread(bank,
                             MISCBLK_VBN,
                             page_num,
                             0,
                             NUM_MISC_META_SECT + NUM_VCOUNT_SECT,
                             FTL_BUF(bank),
                             RETURN_ON_ISSUE);
        }
        flash_finish();

        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            if (!(load_flag & (0x1 << bank)) && !(BSP_INTR(bank) & FIRQ_ALL_FF))
            {
                load_flag = load_flag | (0x1 << bank);
                load_cnt++;
            }
            CLR_BSP_INTR(bank, 0xFF);
        }
    }
    ASSERT(load_cnt == NUM_BANKS);

    for (bank = 0; bank < NUM_BANKS; bank++)
    {
        // misc. metadata
        mem_copy(&g_misc_meta[bank], FTL_BUF(bank), sizeof(misc_metadata));

        // vcount metadata
        if (vcount_addr <= vcount_boundary)
        {
            mem_copy(vcount_addr, FTL_BUF(bank) + misc_meta_bytes, vcount_bytes);
            vcount_addr += vcount_bytes;

        }
    }
	enable_irq();
}
Example #10
static void logging_pmap_table(void)
{
    UINT32 pmap_addr  = PAGE_MAP_ADDR;
    UINT32 pmap_bytes = BYTES_PER_PAGE; // per bank
    UINT32 mapblk_vpn;
    UINT32 bank;
    UINT32 pmap_boundary = PAGE_MAP_ADDR + PAGE_MAP_BYTES;
    BOOL32 finished = FALSE;

    for (UINT32 mapblk_lbn = 0; mapblk_lbn < MAPBLKS_PER_BANK; mapblk_lbn++)
    {
        flash_finish();

        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            if (finished)
            {
                break;
            }
            else if (pmap_addr >= pmap_boundary)
            {
                finished = TRUE;
                break;
            }
            else if (pmap_addr + BYTES_PER_PAGE >= pmap_boundary)
            {
                finished = TRUE;
                pmap_bytes = (pmap_boundary - pmap_addr + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR * BYTES_PER_SECTOR ;
            }
            inc_mapblk_vpn(bank, mapblk_lbn);

            mapblk_vpn = get_mapblk_vpn(bank, mapblk_lbn);

            // note: if there is no free page, then erase old map block first.
            if ((mapblk_vpn % PAGES_PER_BLK) == 0)
            {
                // erase full map block
                nand_block_erase(bank, (mapblk_vpn - 1) / PAGES_PER_BLK);

                // next vpn of mapblk is offset #0
                set_mapblk_vpn(bank, mapblk_lbn, ((mapblk_vpn - 1) / PAGES_PER_BLK) * PAGES_PER_BLK);
                mapblk_vpn = get_mapblk_vpn(bank, mapblk_lbn);
            }
            // copy the page mapping table to FTL buffer
            mem_copy(FTL_BUF(bank), pmap_addr, pmap_bytes);

            // log the updated page mapping table into the map block
            nand_page_ptprogram(bank,
                                mapblk_vpn / PAGES_PER_BLK,
                                mapblk_vpn % PAGES_PER_BLK,
                                0,
                                pmap_bytes / BYTES_PER_SECTOR,
                                FTL_BUF(bank));
            pmap_addr += pmap_bytes;
        }
        if (finished)
        {
            break;
        }
    }
    flash_finish();
}
Example #11
//------------------------------------------------------------
// if all blocks except one free block are full,
// do garbage collection for making at least one free page
//-------------------------------------------------------------
static void garbage_collection(UINT32 const bank)
{
    ASSERT(bank < NUM_BANKS);
    g_ftl_statistics[bank].gc_cnt++;

    UINT32 src_lpn;
    UINT32 vt_vblock;
    UINT32 free_vpn;
    UINT32 vcount; // valid page count in victim block
    UINT32 src_page;
    UINT32 gc_vblock;

    vt_vblock = get_vt_vblock(bank);   // get victim block
    vcount    = get_vcount(bank, vt_vblock);
    gc_vblock = get_gc_vblock(bank);
    free_vpn  = gc_vblock * PAGES_PER_BLK;

/*     uart_printf("garbage_collection bank %d, vblock %d",bank, vt_vblock); */

    ASSERT(vt_vblock != gc_vblock);
    ASSERT(vt_vblock >= META_BLKS_PER_BANK && vt_vblock < VBLKS_PER_BANK);
    ASSERT(vcount < (PAGES_PER_BLK - 1));
    ASSERT(get_vcount(bank, gc_vblock) == VC_MAX);
    ASSERT(!is_bad_block(bank, gc_vblock));

    // 1. load p2l list from last page offset of victim block (4B x PAGES_PER_BLK)
    // fix minor bug
    nand_page_ptread(bank, vt_vblock, PAGES_PER_BLK - 1, 0,
                     ((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR), FTL_BUF(bank), RETURN_WHEN_DONE);
    mem_copy(g_misc_meta[bank].lpn_list_of_cur_vblock, FTL_BUF(bank), sizeof(UINT32) * PAGES_PER_BLK);
    // 2. copy-back all valid pages to free space
    for (src_page = 0; src_page < (PAGES_PER_BLK - 1); src_page++)
    {
        // get lpn of victim block from a read lpn list
        src_lpn = get_lpn(bank, src_page);
        CHECK_VPAGE(get_vpn(src_lpn));

        // determine whether the page is valid or not
        if (get_vpn(src_lpn) !=
            ((vt_vblock * PAGES_PER_BLK) + src_page))
        {
            // invalid page
            continue;
        }
        ASSERT(get_lpn(bank, src_page) != INVALID);
        CHECK_LPAGE(src_lpn);
        // if the page is valid,
        // then do copy-back op. to free space
        nand_page_copyback(bank,
                           vt_vblock,
                           src_page,
                           free_vpn / PAGES_PER_BLK,
                           free_vpn % PAGES_PER_BLK);
        ASSERT((free_vpn / PAGES_PER_BLK) == gc_vblock);
        // update metadata
        set_vpn(src_lpn, free_vpn);
        set_lpn(bank, (free_vpn % PAGES_PER_BLK), src_lpn);

        free_vpn++;
    }
#if OPTION_ENABLE_ASSERT
    if (vcount == 0)
    {
        ASSERT(free_vpn == (gc_vblock * PAGES_PER_BLK));
    }
#endif
    // 3. erase victim block
    nand_block_erase(bank, vt_vblock);
    ASSERT((free_vpn % PAGES_PER_BLK) < (PAGES_PER_BLK - 2));
    ASSERT((free_vpn % PAGES_PER_BLK == vcount));

/*     uart_printf("gc page count : %d", vcount); */

    // 4. update metadata
    set_vcount(bank, vt_vblock, VC_MAX);
    set_vcount(bank, gc_vblock, vcount);
    set_new_write_vpn(bank, free_vpn); // set a free page for new write
    set_gc_vblock(bank, vt_vblock); // next free block (reserve for GC)
    dec_full_blk_cnt(bank); // decrease full block count
    /* uart_print("garbage_collection end"); */
}
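
The validity test in step 2 works because a page in the victim block is live only if the page map still points at it; the p2l list restored from the block's last page supplies the lpn for each page offset. A toy sketch of that check with made-up map contents (PAGES_PER_BLK, the lpns and vpns here are hypothetical):

#include <stdio.h>
#include <stdint.h>

#define PAGES_PER_BLK 128   /* assumed example value */

/* Toy stand-in for get_vpn(): a few hand-picked lpn -> vpn mappings */
static uint32_t toy_get_vpn(uint32_t lpn)
{
    switch (lpn) {
    case 10: return 5 * PAGES_PER_BLK + 0;  /* still lives in the victim block, page 0 */
    case 11: return 7 * PAGES_PER_BLK + 3;  /* was rewritten elsewhere -> stale copy */
    default: return 0;                      /* never written */
    }
}

int main(void)
{
    uint32_t vt_vblock = 5;                 /* hypothetical victim block */
    uint32_t lpn_list[2] = { 10, 11 };      /* toy p2l list for page offsets 0..1 */

    for (uint32_t src_page = 0; src_page < 2; src_page++) {
        uint32_t src_lpn = lpn_list[src_page];
        int valid = (toy_get_vpn(src_lpn) == vt_vblock * PAGES_PER_BLK + src_page);
        printf("page %u (lpn %u): %s\n", (unsigned)src_page, (unsigned)src_lpn,
               valid ? "valid -> copyback" : "stale -> skip");
    }
    return 0;
}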
Example #12
static void write_page(UINT32 const lpn, UINT32 const sect_offset, UINT32 const num_sectors)
{
    CHECK_LPAGE(lpn);
    ASSERT(sect_offset < SECTORS_PER_PAGE);
    ASSERT(num_sectors > 0 && num_sectors <= SECTORS_PER_PAGE);

    UINT32 bank, old_vpn, new_vpn;
    UINT32 vblock, page_num, page_offset, column_cnt;

    bank        = get_num_bank(lpn); // page striping
    page_offset = sect_offset;
    column_cnt  = num_sectors;

    new_vpn  = assign_new_write_vpn(bank);
    old_vpn  = get_vpn(lpn);

    CHECK_VPAGE (old_vpn);
    CHECK_VPAGE (new_vpn);
    ASSERT(old_vpn != new_vpn);

    g_ftl_statistics[bank].page_wcount++;

    // if old data already exist,
    if (old_vpn != NULL)
    {
        vblock   = old_vpn / PAGES_PER_BLK;
        page_num = old_vpn % PAGES_PER_BLK;

        //--------------------------------------------------------------------------------------
        // `Partial programming'
        // We cannot determine whether the hole sectors are already loaded in the SATA write buffer.
        // Thus, read the left/right hole sectors of the old valid page and copy them into the write buffer.
        // Then program the whole page of valid data.
        //--------------------------------------------------------------------------------------
        if (num_sectors != SECTORS_PER_PAGE)
        {
            // Performance optimization (not proven):
            // to reduce flash memory accesses, read the whole page once and copy the valid hole sectors into the SATA write buffer.
            // In this case we need just one full page read plus one or two mem_copy operations.
            if ((num_sectors <= 8) && (page_offset != 0))
            {
                // one page async read
                nand_page_read(bank,
                               vblock,
                               page_num,
                               FTL_BUF(bank));
                // copy `left hole sectors' into SATA write buffer
                if (page_offset != 0)
                {
                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id),
                             FTL_BUF(bank),
                             page_offset * BYTES_PER_SECTOR);
                }
                // copy `right hole sectors' into SATA write buffer
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    UINT32 const rhole_base = (page_offset + column_cnt) * BYTES_PER_SECTOR;

                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base,
                             FTL_BUF(bank) + rhole_base,
                             BYTES_PER_PAGE - rhole_base);
                }
            }
            // left/right hole async read operation (two partial page read)
            else
            {
                // read `left hole sectors'
                if (page_offset != 0)
                {
                    nand_page_ptread(bank,
                                     vblock,
                                     page_num,
                                     0,
                                     page_offset,
                                     WR_BUF_PTR(g_ftl_write_buf_id),
                                     RETURN_ON_ISSUE);
                }
                // read `right hole sectors'
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    nand_page_ptread(bank,
                                     vblock,
                                     page_num,
                                     page_offset + column_cnt,
                                     SECTORS_PER_PAGE - (page_offset + column_cnt),
                                     WR_BUF_PTR(g_ftl_write_buf_id),
                                     RETURN_ON_ISSUE);
                }
            }
        }
        // full page write
        page_offset = 0;
        column_cnt  = SECTORS_PER_PAGE;
        // invalid old page (decrease vcount)
        set_vcount(bank, vblock, get_vcount(bank, vblock) - 1);
    }
    vblock   = new_vpn / PAGES_PER_BLK;
    page_num = new_vpn % PAGES_PER_BLK;
    ASSERT(get_vcount(bank,vblock) < (PAGES_PER_BLK - 1));

    // write new data (make sure that the new data is ready in the write buffer frame)
    // (c.f FO_B_SATA_W flag in flash.h)
    nand_page_ptprogram_from_host(bank,
                                  vblock,
                                  page_num,
                                  page_offset,
                                  column_cnt);
    // update metadata
    set_lpn(bank, page_num, lpn);
    set_vpn(lpn, new_vpn);
    set_vcount(bank, vblock, get_vcount(bank, vblock) + 1);
}
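
A side note on the hole bookkeeping in the partial-write path above: only the sectors the host actually wrote are guaranteed to be in the SATA write buffer, so the left hole (before page_offset) and the right hole (from page_offset + column_cnt onward) must be filled from the old page. A standalone sketch of the offsets involved; SECTORS_PER_PAGE, BYTES_PER_SECTOR and the write extent are assumed example values:

#include <stdio.h>
#include <stdint.h>

/* Assumed example geometry, not the real firmware constants */
#define SECTORS_PER_PAGE 64
#define BYTES_PER_SECTOR 512

int main(void)
{
    /* Example partial write: the host sends sectors 3..6 of a page */
    uint32_t page_offset = 3, column_cnt = 4;

    /* Left hole: sectors 0 .. page_offset-1, copied to the start of the buffer */
    if (page_offset != 0)
        printf("left hole : sectors 0..%u (%u bytes)\n",
               (unsigned)(page_offset - 1),
               (unsigned)(page_offset * BYTES_PER_SECTOR));

    /* Right hole: sectors page_offset+column_cnt .. SECTORS_PER_PAGE-1 */
    if (page_offset + column_cnt < SECTORS_PER_PAGE) {
        uint32_t rhole_base = (page_offset + column_cnt) * BYTES_PER_SECTOR;
        printf("right hole: sectors %u..%u (%u bytes at buffer offset %u)\n",
               (unsigned)(page_offset + column_cnt),
               (unsigned)(SECTORS_PER_PAGE - 1),
               (unsigned)(SECTORS_PER_PAGE * BYTES_PER_SECTOR - rhole_base),
               (unsigned)rhole_base);
    }
    return 0;
}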
Example #13
static void load_pmap_table(void)
{
    UINT32 pmap_addr = PAGE_MAP_ADDR;
    UINT32 temp_page_addr;
    UINT32 pmap_bytes = BYTES_PER_PAGE; // per bank
    UINT32 pmap_boundary = PAGE_MAP_ADDR + (NUM_LPAGES * sizeof(UINT32));
    UINT32 mapblk_lbn, bank;
    BOOL32 finished = FALSE;

    flash_finish();

    for (mapblk_lbn = 0; mapblk_lbn < MAPBLKS_PER_BANK; mapblk_lbn++)
    {
        temp_page_addr = pmap_addr; // backup page mapping addr

        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            if (finished)
            {
                break;
            }
            else if (pmap_addr >= pmap_boundary)
            {
                finished = TRUE;
                break;
            }
            else if (pmap_addr + BYTES_PER_PAGE >= pmap_boundary)
            {
                finished = TRUE;
                pmap_bytes = (pmap_boundary - pmap_addr + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR * BYTES_PER_SECTOR;
            }
            // read page mapping table from map_block
            nand_page_ptread(bank,
                             get_mapblk_vpn(bank, mapblk_lbn) / PAGES_PER_BLK,
                             get_mapblk_vpn(bank, mapblk_lbn) % PAGES_PER_BLK,
                             0,
                             pmap_bytes / BYTES_PER_SECTOR,
                             FTL_BUF(bank),
                             RETURN_ON_ISSUE);
            pmap_addr += pmap_bytes;
        }
        flash_finish();

        pmap_bytes = BYTES_PER_PAGE;
        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            if (temp_page_addr >= pmap_boundary)
            {
                break;
            }
            else if (temp_page_addr + BYTES_PER_PAGE >= pmap_boundary)
            {
                pmap_bytes = (pmap_boundary - temp_page_addr + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR * BYTES_PER_SECTOR;
            }
            // copy page mapping table to PMAP_ADDR from FTL buffer
            mem_copy(temp_page_addr, FTL_BUF(bank), pmap_bytes);

            temp_page_addr += pmap_bytes;
        }
        if (finished)
        {
            break;
        }
    }
}
Example #14
static void chunkInvalid()
{
    uart_print("chunkInvalid\n");
    UINT32 dst = FTL_BUF(0) + (chunkIdx_*BYTES_PER_CHUNK);
    mem_set_dram (dst, INVALID, BYTES_PER_CHUNK);
}
Example #15
static void write_page(UINT32 const lpn, UINT32 const sect_offset, UINT32 const num_sectors)
{
	write_p++;

	UINT32 bank, old_vpn, new_vpn;
	UINT32 vblock, page_num, page_offset, column_cnt;

	bank        = get_num_bank(lpn); // page striping
	page_offset = sect_offset;
	column_cnt  = num_sectors;

	new_vpn  = assign_new_write_vpn(bank);
	old_vpn  = get_vpn(lpn);
	if (old_vpn != NULL)
	{
		vblock   = old_vpn / PAGES_PER_BLK;
		page_num = old_vpn % PAGES_PER_BLK;
		if (num_sectors != SECTORS_PER_PAGE)
		{
			if ((num_sectors <= 8) && (page_offset != 0))
			{
				// one page async read
				data_read++;
				nand_page_read(bank,
						vblock,
						page_num,
						FTL_BUF(bank));
				// copy `left hole sectors' into SATA write buffer
				if (page_offset != 0)
				{
					mem_copy(WR_BUF_PTR(g_ftl_write_buf_id),
							FTL_BUF(bank),
							page_offset * BYTES_PER_SECTOR);
				}
				// copy `right hole sectors' into SATA write buffer
				if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
				{
					UINT32 const rhole_base = (page_offset + column_cnt) * BYTES_PER_SECTOR;

					mem_copy(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base,
							FTL_BUF(bank) + rhole_base,
							BYTES_PER_PAGE - rhole_base);
				}
			}
			// left/right hole async read operation (two partial page read)
			else
			{
				// read `left hole sectors'
				if (page_offset != 0)
				{
					data_read++;
					nand_page_ptread(bank,
							vblock,
							page_num,
							0,
							page_offset,
							WR_BUF_PTR(g_ftl_write_buf_id),
							RETURN_WHEN_DONE);
				}
				// read `right hole sectors'
				if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
				{
					data_read++;
					nand_page_ptread(bank,
							vblock,
							page_num,
							page_offset + column_cnt,
							SECTORS_PER_PAGE - (page_offset + column_cnt),
							WR_BUF_PTR(g_ftl_write_buf_id),
							RETURN_WHEN_DONE);
				}
			}
		}
		set_vcount(bank, vblock, get_vcount(bank, vblock) - 1);
	}
	else if (num_sectors != SECTORS_PER_PAGE)
	{
		if(page_offset != 0)
			mem_set_dram(WR_BUF_PTR(g_ftl_write_buf_id),
					0,
					page_offset * BYTES_PER_SECTOR);
		if((page_offset + num_sectors) < SECTORS_PER_PAGE)
		{
			UINT32 const rhole_base = (page_offset + num_sectors) * BYTES_PER_SECTOR;
			mem_set_dram(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base, 0, BYTES_PER_PAGE - rhole_base);
		}
	}
	vblock   = new_vpn / PAGES_PER_BLK;
	page_num = new_vpn % PAGES_PER_BLK;

	// write new data (make sure that the new data is ready in the write buffer frame)
	// (c.f FO_B_SATA_W flag in flash.h)
	data_prog++;
	nand_page_program_from_host(bank,
			vblock,
			page_num);
	// update metadata
	set_lpn(bank, page_num, lpn);
	set_vpn(lpn, new_vpn);
	set_vcount(bank, vblock, get_vcount(bank, vblock) + 1);
}