__s32 _flush_w_cache(void) { __u32 i, pos; __u32 sec_index; __u64 tempsecbitmap; for(i = 0; i < N_NAND_W_CACHE; i++) { if(nand_w_cache[i].hit_page != 0xffffffff) { #if 0 if(nand_w_cache[i].secbitmap != FULL_BITMAP_OF_LOGIC_PAGE) LML_PageRead(nand_w_cache[i].hit_page,(nand_w_cache[i].secbitmap ^ FULL_BITMAP_OF_LOGIC_PAGE)&FULL_BITMAP_OF_LOGIC_PAGE,nand_w_cache[i].data); LML_PageWrite(nand_w_cache[i].hit_page,FULL_BITMAP_OF_LOGIC_PAGE,nand_w_cache[i].data); #else //PRINT("_fill_nand_cache, LR, 0x%x, 0x%x, 0x%x\n", nand_w_cache[pos].hit_page, nand_w_cache[pos].secbitmap, nand_r_cache.data); pos = i; if(nand_w_cache[pos].secbitmap != FULL_BITMAP_OF_LOGIC_PAGE) { sec_index =0; tempsecbitmap = nand_w_cache[pos].secbitmap; LML_PageRead(nand_w_cache[pos].hit_page,FULL_BITMAP_OF_LOGIC_PAGE,CacheMainBuf); while(tempsecbitmap) { if(tempsecbitmap&((__u64)0x1)) { MEMCPY(CacheMainBuf+ 512*sec_index, nand_w_cache[pos].data + 512*sec_index, 512); } tempsecbitmap>>=1; sec_index++; } LML_PageWrite(nand_w_cache[pos].hit_page, FULL_BITMAP_OF_LOGIC_PAGE, CacheMainBuf); } else { LML_PageWrite(nand_w_cache[pos].hit_page, FULL_BITMAP_OF_LOGIC_PAGE, nand_w_cache[pos].data); } #endif #ifdef NAND_R_CACHE_EN /*disable read cache with current page*/ if (nand_r_cache.hit_page == nand_w_cache[i].hit_page){ nand_r_cache.hit_page = 0xffffffff; nand_r_cache.secbitmap = 0; } #endif nand_w_cache[i].hit_page = 0xffffffff; nand_w_cache[i].secbitmap = 0; nand_w_cache[i].access_count = 0; }
/*
 * NAND_CacheFlush - flush all dirty write-cache entries to NAND flash.
 *
 * For every occupied slot, the sectors the cache does not hold are first
 * read from flash into the slot's buffer so the whole logic page can be
 * written back in one operation.  Any read-cache entry for the flushed
 * page is invalidated, then the slot is marked free.
 *
 * Bug fix: the previous code reset hit_page to 0xffffffff BEFORE
 * comparing it against nand_r_cache.hit_page, so the read cache was
 * never invalidated for the flushed page.  The invalidation now happens
 * before the slot is reset, matching _flush_w_cache_simple.
 *
 * Returns 0 on completion.
 */
__s32 NAND_CacheFlush(void)
{
    __u32 i;

    for (i = 0; i < N_NAND_W_CACHE; i++) {
        if (nand_w_cache[i].hit_page == 0xffffffff)
            continue;

        /* Complete a partial page by reading the missing sectors, then
         * write the whole page back. */
        if (nand_w_cache[i].secbitmap != FULL_BITMAP_OF_LOGIC_PAGE)
            LML_PageRead(nand_w_cache[i].hit_page,
                         (nand_w_cache[i].secbitmap ^ FULL_BITMAP_OF_LOGIC_PAGE) & FULL_BITMAP_OF_LOGIC_PAGE,
                         nand_w_cache[i].data);
        LML_PageWrite(nand_w_cache[i].hit_page, FULL_BITMAP_OF_LOGIC_PAGE, nand_w_cache[i].data);

        /* Disable the read cache for this page — must run while
         * hit_page still holds the flushed page number. */
        if (nand_r_cache.hit_page == nand_w_cache[i].hit_page) {
            nand_r_cache.hit_page = 0xffffffff;
            nand_r_cache.secbitmap = 0;
        }

        /* Release the slot. */
        nand_w_cache[i].hit_page = 0xffffffff;
        nand_w_cache[i].secbitmap = 0;
        nand_w_cache[i].access_count = 0;
    }

    return 0;
}
/*
 * Flush a single write-cache slot to NAND flash and release it.
 *
 * If the slot holds only part of a logic page, the missing sectors are
 * read from flash into the slot buffer first so a full page can be
 * written back.  A matching read-cache entry is invalidated before the
 * slot is reset.
 *
 * Returns 0 always.
 */
__s32 _flush_w_cache_simple(__u32 i)
{
    if (nand_w_cache[i].hit_page == 0xffffffff)
        return 0;   /* slot is empty, nothing to flush */

    /* Complete a partial page by reading the sectors we do not hold. */
    if (nand_w_cache[i].secbitmap != FULL_BITMAP_OF_LOGIC_PAGE)
        LML_PageRead(nand_w_cache[i].hit_page,
                     (nand_w_cache[i].secbitmap ^ FULL_BITMAP_OF_LOGIC_PAGE) & FULL_BITMAP_OF_LOGIC_PAGE,
                     nand_w_cache[i].data);
    LML_PageWrite(nand_w_cache[i].hit_page, FULL_BITMAP_OF_LOGIC_PAGE, nand_w_cache[i].data);

    /* Drop any read-cache copy of the page just written. */
    if (nand_r_cache.hit_page == nand_w_cache[i].hit_page) {
        nand_r_cache.hit_page = 0xffffffff;
        nand_r_cache.secbitmap = 0;
    }

    /* Release the slot. */
    nand_w_cache[i].hit_page = 0xffffffff;
    nand_w_cache[i].secbitmap = 0;
    nand_w_cache[i].access_count = 0;
    nand_w_cache[i].dev_num = 0xffffffff;

    return 0;
}
/*
************************************************************************************************************************
* NAND FLASH LOGIC MANAGE LAYER READ-RECLAIM
*
*Description: Repair a logic page whose data has reached the limit of what
*             the HW ECC module can correct.
*
*Arguments  : nPage   the page address that needs to be repaired.
*
*Return     : read-reclaim result;
*               =  0   read-reclaim succeeded;
*               = -1   read-reclaim failed.
*
*Notes      : Reading a physical page millions of times can introduce bit
*             errors that grow with the read count; once the error count
*             nears the ECC limit the data should be read out and written
*             to another physical block.  Compiled to a no-op unless
*             CFG_SUPPORT_READ_RECLAIM is enabled.
************************************************************************************************************************
*/
__s32 LML_ReadReclaim(__u32 nPage)
{
    __s32 result;

#if CFG_SUPPORT_READ_RECLAIM
    LOGICCTL_ERR("[LOGICCTL_ERR] read reclaim go\n");

    /* The page cache buffer is reused below, so flush it first. */
    result = LML_FlushPageCache();
    if (result < 0) {
        LOGICCTL_ERR("[LOGICCTL_ERR] Flush page cache failed when do read reclaim! Error:0x%x\n", result);
        return -1;
    }

    /* Read the full logic page into the write-page cache buffer... */
    result = LML_PageRead(nPage, FULL_BITMAP_OF_LOGIC_PAGE, LML_WRITE_PAGE_CACHE);
    if (result < 0)
        return -1;

    /* ...and write that full page straight back out. */
    result = LML_PageWrite(nPage, FULL_BITMAP_OF_LOGIC_PAGE, LML_WRITE_PAGE_CACHE);
    if (result < 0)
        return -1;
#endif

    return 0;
}
/*
 * NAND_CacheWrite - write nblk 512-byte sectors starting at sector blk.
 *
 * Consecutive sectors are combined into logic pages: a fully-populated
 * page is written straight to flash (bypassing and invalidating both
 * caches for that page); a partial page is merged into the write cache
 * via _fill_nand_cache.
 *
 * blk  : start sector number
 * nblk : number of sectors to write
 * buf  : source data, 512 bytes per sector
 *
 * Returns 0 always.
 */
__s32 NAND_CacheWrite(__u32 blk, __u32 nblk, void *buf)
{
    __u32 nSector,StartSec;
    __u32 page;
    __u32 SecBitmap,SecWithinPage;
    __u32 i;
    __u8 *pdata;

    nSector = nblk;
    StartSec = blk;
    SecBitmap = 0;
    page = 0xffffffff;
    pdata = (__u8 *)buf;

    /*combine consecutive sectors into logic pages*/
    while(nSector)
    {
        SecWithinPage = StartSec % SECTOR_CNT_OF_LOGIC_PAGE;
        /* NOTE(review): int-width shift; assumes SECTOR_CNT_OF_LOGIC_PAGE <= 32
         * even though other paths treat the sector bitmap as __u64 -- confirm */
        SecBitmap |= (1 << SecWithinPage);
        page = StartSec / SECTOR_CNT_OF_LOGIC_PAGE;

        /*close page if last sector*/
        if (SecWithinPage == (SECTOR_CNT_OF_LOGIC_PAGE - 1))
        {
            /*write to nand flash if align one logic page*/
            if(SecBitmap == FULL_BITMAP_OF_LOGIC_PAGE)
            {
                /*disable write cache with current page*/
                for (i = 0; i < N_NAND_W_CACHE; i++)
                {
                    if (nand_w_cache[i].hit_page == page){
                        nand_w_cache[i].hit_page = 0xffffffff;
                        nand_w_cache[i].secbitmap = 0;
                    }
                }

                /*disable read cache with current page*/
                if (nand_r_cache.hit_page == page){
                    nand_r_cache.hit_page = 0xffffffff;
                    nand_r_cache.secbitmap = 0;
                }

                /* pdata still points at the page's LAST sector here, so the
                 * page data starts SECTOR_CNT_OF_LOGIC_PAGE-1 sectors back */
                LML_PageWrite(page,FULL_BITMAP_OF_LOGIC_PAGE,pdata + 512 - 512*SECTOR_CNT_OF_LOGIC_PAGE);
            }
            /*fill to cache if unalign one logic page*/
            else
                /* partial page: its data starts (valid_bits - 1) sectors back */
                _fill_nand_cache(page, SecBitmap, pdata + 512 - 512*_get_valid_bits(SecBitmap));

            SecBitmap = 0;
        }

        /*advance to next sector*/
        nSector--;
        StartSec++;
        pdata += 512;
    }

    /*fill still-open page: trailing sectors that never reached a page boundary
     *(pdata has already advanced past them, hence the back-step)*/
    if (SecBitmap)
        _fill_nand_cache(page,SecBitmap,pdata - 512*_get_valid_bits(SecBitmap));

    return 0;
}
/*
 * _fill_nand_cache - merge a run of sectors (for logic page 'page',
 * marked by 'secbitmap') into the write cache.
 *
 * If a slot already holds the page, the new sectors are merged into it.
 * Otherwise a free slot is used; if none is free, the slot with the
 * smallest access stamp (least recently written) is flushed to flash
 * and reused.
 *
 * NOTE(review): the MEMCPYs copy one contiguous run starting at the
 * first set bit of secbitmap -- this assumes the set bits are contiguous,
 * which holds for the sequential fills done by NAND_CacheWrite; confirm
 * for any other caller.
 *
 * Returns 0 always.
 */
__s32 _fill_nand_cache(__u32 page, __u32 secbitmap, __u8 *pdata)
{
    __u8 hit;
    __u8 i;
    __u8 pos = 0xff;

    /* monotonically increasing stamp used as the LRU ordering key */
    g_w_access_cnt++;

    hit = 0;
    for (i = 0; i < N_NAND_W_CACHE; i++)
    {
        /*merge data if cache hit*/
        if (nand_w_cache[i].hit_page == page){
            hit = 1;
            MEMCPY(nand_w_cache[i].data + 512 * _get_first_valid_bit(secbitmap),pdata, 512 * _get_valid_bits(secbitmap));
            nand_w_cache[i].secbitmap |= secbitmap;
            nand_w_cache[i].access_count = g_w_access_cnt;
            pos = i;
            break;
        }
    }

    /*post data if cache miss*/
    if (!hit)
    {
        /*find a free slot to post into*/
        for (i = 0; i < N_NAND_W_CACHE; i++)
        {
            if (nand_w_cache[i].hit_page == 0xffffffff)
            {
                pos = i;
                break;
            }
        }

        /*no free slot: evict the entry with the oldest (smallest) stamp*/
        if (pos == 0xff)
        {
            __u32 access_cnt = nand_w_cache[0].access_count;
            pos = 0;
            for (i = 1; i < N_NAND_W_CACHE; i++)
            {
                if (access_cnt > nand_w_cache[i].access_count)
                {
                    pos = i;
                    access_cnt = nand_w_cache[i].access_count;
                }
            }

            /* flush the victim: read the sectors it lacks, then write the
             * full page.  NOTE(review): sibling flush paths mask this XOR
             * with & FULL_BITMAP_OF_LOGIC_PAGE -- confirm secbitmap never
             * carries bits outside the page here. */
            if(nand_w_cache[pos].secbitmap != FULL_BITMAP_OF_LOGIC_PAGE)
                LML_PageRead(nand_w_cache[pos].hit_page,nand_w_cache[pos].secbitmap ^ FULL_BITMAP_OF_LOGIC_PAGE,nand_w_cache[pos].data);
            LML_PageWrite(nand_w_cache[pos].hit_page, FULL_BITMAP_OF_LOGIC_PAGE, nand_w_cache[pos].data);

            nand_w_cache[pos].access_count = 0;
        }

        /*merge data into the chosen slot and claim it for this page*/
        MEMCPY(nand_w_cache[pos].data + 512 * _get_first_valid_bit(secbitmap),pdata, 512 * _get_valid_bits(secbitmap));
        nand_w_cache[pos].hit_page = page;
        nand_w_cache[pos].secbitmap = secbitmap;
        nand_w_cache[pos].access_count = g_w_access_cnt;
    }

    /*stamp counter wrapped around to 0: reset all stamps so the LRU
     *ordering stays consistent, keeping the just-touched slot newest*/
    if (g_w_access_cnt == 0)
    {
        for (i = 0; i < N_NAND_W_CACHE; i++)
            nand_w_cache[i].access_count = 0;
        g_w_access_cnt = 1;
        nand_w_cache[pos].access_count = g_w_access_cnt;
    }

    return 0;
}