static void overwriteCompletePageInOwLog() { #if DetailedOwStats == 1 uart_print_level_1("-\r\n"); #endif uart_print("overwriteCompletePageInOwLog\r\n"); #if MeasureDetailedOverwrite start_interval_measurement(TIMER_CH3, TIMER_PRESCALE_0); #endif chooseNewBank_(); manageOldCompletePage(); UINT32 newLogLpn = getOWLpn(bank_); UINT32 lbn = LogPageToLogBlk(newLogLpn); UINT32 vBlk = get_log_vbn(bank_, lbn); UINT32 pageOffset = LogPageToOffset(newLogLpn); nand_page_ptprogram_from_host(bank_, vBlk, pageOffset, 0, SECTORS_PER_PAGE); increaseOwCounter(bank_, lbn, pageOffset); #if MeasureOwEfficiency write_dram_32(OwEffBuf(bank_, LogPageToLogBlk(newLogLpn)), read_dram_32(OwEffBuf(bank_, LogPageToLogBlk(newLogLpn))) + SECTORS_PER_PAGE); #endif for(UINT32 i=0; i<CHUNKS_PER_PAGE; i++) { write_dram_32(chunkInLpnsList(OWCtrl[bank_].lpnsListPtr, LogPageToOffset(newLogLpn), i), lpn_); write_dram_32(ChunksMapTable(lpn_, i), ( (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (newLogLpn * CHUNKS_PER_PAGE) + i ) | StartOwLogLpn); } increaseOWLpn(bank_); #if MeasureDetailedOverwrite UINT32 timerValue=GET_TIMER_VALUE(TIMER_CH3); UINT32 nTicks = 0xFFFFFFFF - timerValue; uart_print_level_2("OPN0 "); uart_print_level_2_int(nTicks); uart_print_level_2("\r\n"); #endif }
static void writeCompletePage() { //uart_print_level_1("3 "); uart_print("writeCompletePage\r\n"); UINT32 newLogLpn = getRWLpn(bank_); UINT32 vBlk = get_log_vbn(bank_, LogPageToLogBlk(newLogLpn)); UINT32 pageOffset = LogPageToOffset(newLogLpn); nand_page_ptprogram_from_host (bank_, vBlk, pageOffset, 0, SECTORS_PER_PAGE); // write new data (make sure that the new data is ready in the write buffer frame) (c.f FO_B_SATA_W flag in flash.h) for(UINT32 i=0; i<CHUNKS_PER_PAGE; i++) { if((chunkInLpnsList(RWCtrl[bank_].lpnsListPtr, LogPageToOffset(newLogLpn), i)) >=(DRAM_BASE + DRAM_SIZE)) { uart_print_level_1("ERROR in write::writeCompletePage 1: writing to "); uart_print_level_1_int(chunkInLpnsList(RWCtrl[bank_].lpnsListPtr, LogPageToOffset(newLogLpn), i)); uart_print_level_1("\r\n"); } write_dram_32(chunkInLpnsList(RWCtrl[bank_].lpnsListPtr, LogPageToOffset(newLogLpn), i), lpn_); //shashtblUpdate(lpn_, i, (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (newLogLpn * CHUNKS_PER_PAGE) + i); write_dram_32(ChunksMapTable(lpn_, i), (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + (newLogLpn * CHUNKS_PER_PAGE) + i); } increaseRWLpn(bank_); }
/* Page-mapped FTL write of num_sectors sectors at sect_offset within
 * logical page lpn.
 *
 * If an old copy of the page exists and the host write is partial, the
 * missing ("hole") sectors are first read from the old physical page into
 * the SATA write buffer so that a full page can always be programmed.
 * The old page's valid count is then decremented, the new page programmed,
 * and lpn/vpn/vcount metadata updated.
 *
 * Preconditions (asserted): sect_offset < SECTORS_PER_PAGE,
 * 0 < num_sectors <= SECTORS_PER_PAGE.
 */
static void write_page(UINT32 const lpn, UINT32 const sect_offset, UINT32 const num_sectors)
{
    CHECK_LPAGE(lpn);
    ASSERT(sect_offset < SECTORS_PER_PAGE);
    ASSERT(num_sectors > 0 && num_sectors <= SECTORS_PER_PAGE);

    UINT32 bank, old_vpn, new_vpn;
    UINT32 vblock, page_num, page_offset, column_cnt;

    bank = get_num_bank(lpn); // page striping
    page_offset = sect_offset;
    column_cnt = num_sectors;

    new_vpn = assign_new_write_vpn(bank);
    old_vpn = get_vpn(lpn);

    CHECK_VPAGE(old_vpn);
    CHECK_VPAGE(new_vpn);
    ASSERT(old_vpn != new_vpn);

    g_ftl_statistics[bank].page_wcount++;

    // if old data already exist,
    if (old_vpn != NULL)
    {
        vblock = old_vpn / PAGES_PER_BLK;
        page_num = old_vpn % PAGES_PER_BLK;

        //--------------------------------------------------------------------
        // `Partial programming'
        // We cannot determine whether the new data is loaded in the SATA
        // write buffer, so read the left/right hole sectors of the valid
        // old page, copy them into the write buffer, and then program the
        // whole page.
        //--------------------------------------------------------------------
        if (num_sectors != SECTORS_PER_PAGE)
        {
            // Performance optimization (but, not proved):
            // for small writes not starting at sector 0, one full page read
            // plus one or two mem_copy calls beats two partial page reads.
            if ((num_sectors <= 8) && (page_offset != 0))
            {
                // one page async read
                nand_page_read(bank, vblock, page_num, FTL_BUF(bank));

                // copy `left hole sectors' into SATA write buffer.
                // page_offset != 0 is guaranteed by the branch condition
                // above, so no re-check is needed (the original re-tested
                // it redundantly).
                mem_copy(WR_BUF_PTR(g_ftl_write_buf_id),
                         FTL_BUF(bank),
                         page_offset * BYTES_PER_SECTOR);

                // copy `right hole sectors' into SATA write buffer
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    UINT32 const rhole_base = (page_offset + column_cnt) * BYTES_PER_SECTOR;
                    mem_copy(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base,
                             FTL_BUF(bank) + rhole_base,
                             BYTES_PER_PAGE - rhole_base);
                }
            }
            // left/right hole async read operation (two partial page reads)
            else
            {
                // read `left hole sectors'
                if (page_offset != 0)
                {
                    nand_page_ptread(bank, vblock, page_num,
                                     0, page_offset,
                                     WR_BUF_PTR(g_ftl_write_buf_id), RETURN_ON_ISSUE);
                }
                // read `right hole sectors'
                if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
                {
                    nand_page_ptread(bank, vblock, page_num,
                                     page_offset + column_cnt,
                                     SECTORS_PER_PAGE - (page_offset + column_cnt),
                                     WR_BUF_PTR(g_ftl_write_buf_id), RETURN_ON_ISSUE);
                }
            }
        }

        // After hole filling, the write buffer holds a full page.
        page_offset = 0;
        column_cnt = SECTORS_PER_PAGE;

        // invalidate old page (decrease vcount)
        set_vcount(bank, vblock, get_vcount(bank, vblock) - 1);
    }

    vblock = new_vpn / PAGES_PER_BLK;
    page_num = new_vpn % PAGES_PER_BLK;
    ASSERT(get_vcount(bank, vblock) < (PAGES_PER_BLK - 1));

    // write new data (make sure that the new data is ready in the write
    // buffer frame) (c.f FO_B_SATA_W flag in flash.h)
    nand_page_ptprogram_from_host(bank, vblock, page_num, page_offset, column_cnt);

    // update metadata
    set_lpn(bank, page_num, lpn);
    set_vpn(lpn, new_vpn);
    set_vcount(bank, vblock, get_vcount(bank, vblock) + 1);
}
/* Overwrite a (possibly partial) host write whose old data lives in the
 * OW log, in order.
 *
 * If the target page's overwrite counter is still below OwLimit, the new
 * sectors are programmed in place over the old physical page. Otherwise
 * the data must move: a full-page write goes through
 * overwriteCompletePageInOwLog(); a partial write is relocated chunk by
 * chunk into a freshly chosen bank's OW buffer.
 *
 * Uses module globals: lpn_, sectOffset_, nSects_, remainingSects_, bank_.
 * No return value.
 */
static void overwritePageOldInOrderInOwLog()
{
#if MeasureDetailedOverwrite
    start_interval_measurement(TIMER_CH3, TIMER_PRESCALE_0);
#endif
    uart_print("overwritePageOldInOrderInOwLog\r\n");

    /* Locate the old physical position from the first affected chunk's
     * forward-map entry; the top bit is a tag, masked off here. */
    UINT32 firstChunk = sectOffset_ / SECTORS_PER_CHUNK;
    UINT32 chunk = read_dram_32(ChunksMapTable(lpn_, firstChunk));
    chunk = chunk & 0x7FFFFFFF;
    UINT32 bank = ChunkToBank(chunk);
    UINT32 lbn = ChunkToLbn(chunk);
    UINT32 vBlk = get_log_vbn(bank, lbn);
    UINT32 pageOffset = ChunkToPageOffset(chunk);

    if(readOwCounter(bank, lbn, pageOffset) < OwLimit)
    {
#if DetailedOwStats == 1
        uart_print_level_1("*\r\n");
#endif
        uart_print("Can overwrite in place\r\n");
        /* In-place overwrite of just the affected sectors of the old page. */
        nand_page_ptprogram_from_host(bank, vBlk, pageOffset, sectOffset_, nSects_);
        increaseOwCounter(bank, lbn, pageOffset);
#if MeasureOwEfficiency
        /* NOTE(review): this accounting uses bank_ while the in-place
         * program above uses the chunk's bank — verify bank_ vs bank is
         * intentional here. */
        write_dram_32(OwEffBuf(bank_, ChunkToLbn(chunk)), read_dram_32(OwEffBuf(bank_, ChunkToLbn(chunk))) + nSects_);
#endif
    }
    else
    {
        uart_print("Exceeding limit, must find a new page\r\n");
        if (remainingSects_ == SECTORS_PER_PAGE)
        {
            /* Whole page: take the complete-page OW path. */
            overwriteCompletePageInOwLog();
        }
        else
        {
            /* Partial page: relocate chunk by chunk into a new bank. */
            syncWithWriteLimit();
            UINT16 invalidChunksToDecrement = 0;
            chooseNewBank_();
            while(remainingSects_)
            {
                invalidChunksToDecrement++;
                /* Sectors to handle this iteration: either the rest of the
                 * write, or up to the end of the current chunk. */
                UINT32 nSectsToWrite = (((sectOffset_ % SECTORS_PER_CHUNK) + remainingSects_) < SECTORS_PER_CHUNK) ? remainingSects_ : (SECTORS_PER_CHUNK - (sectOffset_ % SECTORS_PER_CHUNK));
                if(nSectsToWrite == SECTORS_PER_CHUNK)
                {
                    /* Chunk fully covered by new data: copy it whole. */
                    uart_print("Copy chunk ");
                    /* NOTE(review): (x % SECTORS_PER_CHUNK) / SECTORS_PER_CHUNK
                     * is always 0 — this debug print probably intended
                     * sectOffset_ / SECTORS_PER_CHUNK; verify. */
                    uart_print_int( (sectOffset_ % SECTORS_PER_CHUNK) / SECTORS_PER_CHUNK);
                    uart_print(" to OW_LOG_BUF\r\n");
                    overwriteCompleteChunkNew();
                    updateOwDramBufMetadata();
                    updateOwChunkPtr();
                }
                else
                {
                    /* Chunk partially covered: merge with the old chunk
                     * from the exhausted OW log. */
                    UINT32 chunkIdx = sectOffset_ / SECTORS_PER_CHUNK;
                    chunk = read_dram_32(ChunksMapTable(lpn_, chunkIdx));
                    chunk = chunk & 0x7FFFFFFF;
                    overwritePartialChunkWhenOldChunkIsInExhaustedOWLog(nSectsToWrite, chunk);
                    updateOwDramBufMetadata();
                    updateOwChunkPtr();
                }
                sectOffset_ += nSectsToWrite;
                remainingSects_ -= nSectsToWrite;
            }
            /* All relocated chunks in the old block are now invalid. */
            decrementValidChunksByN(&heapDataOverwrite, bank, lbn, invalidChunksToDecrement);
            /* Hand the SATA write buffer frame back to the buffer manager. */
            g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
            SETREG (BM_STACK_WRSET, g_ftl_write_buf_id);
            SETREG (BM_STACK_RESET, 0x01);
        }
    }
#if MeasureDetailedOverwrite
    /* Timer counts down from 0xFFFFFFFF; elapsed ticks = max - current. */
    UINT32 timerValue=GET_TIMER_VALUE(TIMER_CH3);
    UINT32 nTicks = 0xFFFFFFFF - timerValue;
    uart_print_level_2("OPIO ");
    uart_print_level_2_int(nTicks);
    uart_print_level_2("\r\n");
#endif
}