/* fetch block map table from flash */
static __s32 _read_block_map_tbl(__u8 nZone)
{
    __s32 TablePage;
    __u32 TableBlk;
    struct __PhysicOpPara_t param;

    /* set table block number and table page number */
    TableBlk = NandDriverInfo.ZoneTblPstInfo[nZone].PhyBlkNum;
    TablePage = NandDriverInfo.ZoneTblPstInfo[nZone].TablePst;

    /* read data block and free block map tbl (first 2 KB) */
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = NULL;
    param.SectBitmap = 0xf;
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    if (LML_VirtualPageRead(&param) < 0) {
        MAPPING_ERR("_read_block_map_tbl : read block map table0 err\n");
        return NAND_OP_FALSE;
    }
    MEMCPY(DATA_BLK_TBL, LML_PROCESS_TBL_BUF, 2048);

    /* read the second 2 KB of the data block and free block map tbl */
    TablePage++;
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    if (LML_VirtualPageRead(&param) < 0) {
        MAPPING_ERR("_read_block_map_tbl : read block map table1 err\n");
        return NAND_OP_FALSE;
    }
    MEMCPY(&DATA_BLK_TBL[512], LML_PROCESS_TBL_BUF, 2048);

    /* the last word of the table holds its checksum */
    if (((__u32 *)DATA_BLK_TBL)[1023] !=
        _GetTblCheckSum((__u32 *)DATA_BLK_TBL, (DATA_BLK_CNT_OF_ZONE + FREE_BLK_CNT_OF_ZONE))) {
        MAPPING_ERR("_read_block_map_tbl : read data block map table checksum err\n");
        dump((void *)DATA_BLK_TBL, 1024 * 4, 4, 8);
        return NAND_OP_FALSE;
    }

    /* read log block table */
    TablePage++;
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    if (LML_VirtualPageRead(&param) < 0) {
        MAPPING_ERR("_read_block_map_tbl : read block map table2 err\n");
        return NAND_OP_FALSE;
    }
    if (((__u32 *)LML_PROCESS_TBL_BUF)[511] !=
        _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF,
            LOG_BLK_CNT_OF_ZONE * sizeof(struct __LogBlkType_t) / sizeof(__u32))) {
        MAPPING_ERR("_read_block_map_tbl : read log block table checksum err\n");
        dump((void *)LML_PROCESS_TBL_BUF, 512 * 8, 2, 8);
        return NAND_OP_FALSE;
    }
    MEMCPY(LOG_BLK_TBL, LML_PROCESS_TBL_BUF, LOG_BLK_CNT_OF_ZONE * sizeof(struct __LogBlkType_t));

    return NAND_OP_TRUE;
}
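/*
 * NOTE: the table read/write routines in this file validate their payload
 * with _GetTblCheckSum(), which is defined elsewhere in the driver.  A
 * minimal sketch of such an additive word checksum is shown below purely
 * for reference; this is an assumption about its behaviour, not the
 * driver's actual implementation.
 *
 *     static __u32 _GetTblCheckSum(__u32 *tbl, __u32 length)
 *     {
 *         __u32 i, checksum = 0;
 *
 *         for (i = 0; i < length; i++)
 *             checksum += tbl[i];    // sum of all 32-bit words in the table
 *
 *         return checksum;
 *     }
 */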
/* write block map table back to flash */
static __s32 _write_back_block_map_tbl(__u8 nZone)
{
    __s32 TablePage;
    __u32 TableBlk;
    struct __NandUserData_t UserData[2];
    struct __PhysicOpPara_t param;
    struct __SuperPhyBlkType_t BadBlk, NewBlk;

    /* write back all page map tables within this zone */
    if (NAND_OP_TRUE != _write_back_all_page_map_tbl(nZone)) {
        MAPPING_ERR("write back all page map tbl err\n");
        return NAND_OP_FALSE;
    }

    /* set table block number and table page number */
    TableBlk = NandDriverInfo.ZoneTblPstInfo[nZone].PhyBlkNum;
    TablePage = NandDriverInfo.ZoneTblPstInfo[nZone].TablePst;

    /* each table copy occupies 4 pages; erase the table block when there is not enough room left */
    if (TablePage >= PAGE_CNT_OF_SUPER_BLK - 4) {
        if (NAND_OP_TRUE != LML_VirtualBlkErase(nZone, TableBlk)) {
            BadBlk.PhyBlkNum = TableBlk;
            if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk, CUR_MAP_ZONE, 0, &NewBlk)) {
                MAPPING_ERR("write back block tbl : bad block manage err when erasing table block\n");
                return NAND_OP_FALSE;
            }
            TableBlk = NewBlk.PhyBlkNum;
        }
        TablePage = -4;
    }
    TablePage += 4;

    /* calculate checksum for data block table and free block table */
    ((__u32 *)DATA_BLK_TBL)[1023] =
        _GetTblCheckSum((__u32 *)DATA_BLK_TBL, (DATA_BLK_CNT_OF_ZONE + FREE_BLK_CNT_OF_ZONE));

    /* clear full page data */
    MEMSET(LML_PROCESS_TBL_BUF, 0xff, SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);

rewrite:
    /* write back data block and free block map table (first 2 KB) */
    MEMSET((void *)&UserData, 0xff, sizeof(struct __NandUserData_t) * 2);
    MEMCPY(LML_PROCESS_TBL_BUF, DATA_BLK_TBL, 2048);

    /* when writing page 0, the spare area must carry the logic info */
    if (TablePage == 0) {
        UserData[0].LogicInfo = (1 << 14) | ((nZone % ZONE_CNT_OF_DIE) << 10) | 0xaa;
    }
    UserData[0].PageStatus = 0x55;
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if (NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE)) {
        BadBlk.PhyBlkNum = TableBlk;
        if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk, nZone, 0, &NewBlk)) {
            MAPPING_ERR("write blk map table : bad block manage err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        TablePage = 0;
        goto rewrite;
    }

    /* write back the second 2 KB of the data block and free block map table */
    MEMCPY(LML_PROCESS_TBL_BUF, &DATA_BLK_TBL[512], 2048);
    TablePage++;
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    MEMSET((void *)&UserData, 0xff, sizeof(struct __NandUserData_t) * 2);
    UserData[0].PageStatus = 0x55;
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if (NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE)) {
        BadBlk.PhyBlkNum = TableBlk;
        if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk, nZone, 0, &NewBlk)) {
            MAPPING_ERR("write blk map table : bad block manage err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        TablePage = 0;
        goto rewrite;
    }

    /* write back log block map table */
    TablePage++;
    MEMSET(LML_PROCESS_TBL_BUF, 0xff, SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);
    MEMCPY(LML_PROCESS_TBL_BUF, LOG_BLK_TBL, LOG_BLK_CNT_OF_ZONE * sizeof(struct __LogBlkType_t));

    /* calculate checksum */
    ((__u32 *)LML_PROCESS_TBL_BUF)[511] =
        _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF,
            LOG_BLK_CNT_OF_ZONE * sizeof(struct __LogBlkType_t) / sizeof(__u32));

    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if (NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE)) {
        BadBlk.PhyBlkNum = TableBlk;
        if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk, nZone, 0, &NewBlk)) {
            MAPPING_ERR("write blk map table : bad block manage err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        TablePage = 0;
        goto rewrite;
    }

    /* reset zone info: TablePst points back at the first page of this table copy */
    NandDriverInfo.ZoneTblPstInfo[nZone].PhyBlkNum = TableBlk;
    NandDriverInfo.ZoneTblPstInfo[nZone].TablePst = TablePage - 2;

    return NAND_OP_TRUE;
}
/* write page map table of one log block back to flash */
static __s32 _write_back_page_map_tbl(__u32 nLogBlkPst)
{
    __u16 TablePage;
    __u32 TableBlk;
    struct __NandUserData_t UserData[2];
    struct __PhysicOpPara_t param;
    struct __SuperPhyBlkType_t BadBlk, NewBlk;

    /* check page position, merge the log block if it has no free page */
    TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
    TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
    if (TablePage == PAGE_CNT_OF_SUPER_BLK) {
        /* block is full, need merge */
        if (LML_MergeLogBlk(SPECIAL_MERGE_MODE, LOG_BLK_TBL[nLogBlkPst].LogicBlkNum)) {
            MAPPING_ERR("write back page tbl : merge err\n");
            return NAND_OP_FALSE;
        }

        if (PAGE_MAP_CACHE->ZoneNum != 0xff) {
            /* move merge: the log block is still active, refresh table position */
            TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
        } else {
            return NAND_OP_TRUE;
        }
    }

rewrite:
    //PRINT("-------------------write back page tbl for blk %x\n",TableBlk);
    /* write page map table */
    MEMSET((void *)&UserData, 0xff, sizeof(struct __NandUserData_t) * 2);
    UserData[0].PageStatus = 0xaa;
    MEMSET(LML_PROCESS_TBL_BUF, 0xff, SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);
    if (PAGE_CNT_OF_SUPER_BLK >= 512) {
        /* for large super blocks, store the table as a packed array of physical page numbers */
        __u32 page;

        for (page = 0; page < PAGE_CNT_OF_SUPER_BLK; page++)
            *((__u16 *)LML_PROCESS_TBL_BUF + page) = PAGE_MAP_TBL[page].PhyPageNum;
        ((__u32 *)LML_PROCESS_TBL_BUF)[511] =
            _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK * 2 / sizeof(__u32));
    } else {
        MEMCPY(LML_PROCESS_TBL_BUF, PAGE_MAP_TBL,
            PAGE_CNT_OF_SUPER_BLK * sizeof(struct __PageMapTblItem_t));
        ((__u32 *)LML_PROCESS_TBL_BUF)[511] =
            _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF,
                PAGE_CNT_OF_SUPER_BLK * sizeof(struct __PageMapTblItem_t) / sizeof(__u32));
    }
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
    //rewrite:
    LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if (NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE)) {
        BadBlk.PhyBlkNum = TableBlk;
        if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk, CUR_MAP_ZONE, TablePage, &NewBlk)) {
            MAPPING_ERR("write page map table : bad block manage err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        LOG_BLK_TBL[nLogBlkPst].PhyBlk = NewBlk;
        goto rewrite;
    }

    LOG_BLK_TBL[nLogBlkPst].LastUsedPage = TablePage;
    PAGE_MAP_CACHE->ZoneNum = 0xff;
    PAGE_MAP_CACHE->LogBlkPst = 0xff;

    return NAND_OP_TRUE;
}
/* fetch page map table of one log block from flash */
static __s32 _read_page_map_tbl(__u32 nLogBlkPst)
{
    __s32 ret;
    __u16 TablePage;
    __u32 TableBlk, checksum;
    __u16 logicpagenum;
    __u8 status;
    struct __NandUserData_t UserData[2];
    struct __PhysicOpPara_t param;

    /* the page map table is expected in the last used page of the log block */
    TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage;
    TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
    if (TablePage == 0xffff) {
        /* log block is empty */
        MEMSET(PAGE_MAP_TBL, 0xff, PAGE_CNT_OF_SUPER_BLK * sizeof(struct __PageMapTblItem_t));
        return NAND_OP_TRUE;
    }

    /* read page map table */
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = 0xf;
    LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk, TablePage);
    ret = LML_VirtualPageRead(&param);

    if (PAGE_CNT_OF_SUPER_BLK >= 512) {
        checksum = _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF,
            PAGE_CNT_OF_SUPER_BLK * 2 / sizeof(__u32));
    } else {
        checksum = _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF,
            PAGE_CNT_OF_SUPER_BLK * sizeof(struct __PageMapTblItem_t) / sizeof(__u32));
    }
    status = UserData[0].PageStatus;
    logicpagenum = UserData[0].LogicPageNum;

    /* if the read failed or the page does not hold a valid table, rebuild the page map table */
    if ((ret < 0) || (status != 0xaa) || (logicpagenum != 0xffff) ||
        (checksum != ((__u32 *)LML_PROCESS_TBL_BUF)[511])) {
        if (NAND_OP_TRUE != _rebuild_page_map_tbl(nLogBlkPst)) {
            MAPPING_ERR("rebuild page map table err\n");
            return NAND_OP_FALSE;
        }
    } else {
        if (PAGE_CNT_OF_SUPER_BLK >= 512) {
            __u32 page;

            for (page = 0; page < PAGE_CNT_OF_SUPER_BLK; page++)
                PAGE_MAP_TBL[page].PhyPageNum = *((__u16 *)LML_PROCESS_TBL_BUF + page);
        } else {
            MEMCPY(PAGE_MAP_TBL, LML_PROCESS_TBL_BUF,
                PAGE_CNT_OF_SUPER_BLK * sizeof(struct __PageMapTblItem_t));
        }
    }

    return NAND_OP_TRUE;
}
static __s32 _write_back_page_map_tbl(__u32 nLogBlkPst)
{
    __u16 TablePage;
    __u32 TableBlk;
    struct __NandUserData_t UserData[2];
    struct __PhysicOpPara_t param, tmpPage0;
    struct __SuperPhyBlkType_t BadBlk, NewBlk;
    __s32 result;

    /* check page position, merge the log block if it has no free page */
    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE)) {
        TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
        TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;

        DBUG_MSG("[DBUG] _write_back_page_map_tbl, log block: %x, bak log block %x\n",
            LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);
        DBUG_MSG("[DBUG] _write_back_page_map_tbl, select bak log block\n");

        /* for LSB-type log blocks, adjust to the next page position that may be used */
        TablePage = PMM_CalNextLogPage(TablePage);

        if ((TablePage >= PAGE_CNT_OF_SUPER_BLK) && (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 0)) {
            DBUG_MSG("[DBUG] _write_back_page_map_tbl, change to log block 1, phyblock1: %x\n",
                LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);
            LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex = 1;
            TablePage = TablePage - PAGE_CNT_OF_SUPER_BLK;
        }

        if (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 1)
            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum;
        else
            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;

        if (TablePage >= PAGE_CNT_OF_SUPER_BLK) {
            //DBUG_INF("[DBUG] _write_back_page_map_tbl, log block full, need merge\n");
            /* block is full, need merge */
            if (LML_MergeLogBlk(SPECIAL_MERGE_MODE, LOG_BLK_TBL[nLogBlkPst].LogicBlkNum)) {
                MAPPING_ERR("write back page tbl : merge err\n");
                return NAND_OP_FALSE;
            }

            DBUG_MSG("[DBUG] _write_back_page_map_tbl, log block merge end\n");

            if (PAGE_MAP_CACHE->ZoneNum != 0xff) {
                /* move merge: the log block is still active, refresh table position */
                TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
                TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
                TablePage = PMM_CalNextLogPage(TablePage);
                //DBUG_INF("[DBUG] _write_back_page_map_tbl, after move merge, table block: %x, table page %x\n", TableBlk, TablePage);
            } else {
                return NAND_OP_TRUE;
            }
        }
    } else {
        TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
        TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
        if (TablePage == PAGE_CNT_OF_SUPER_BLK) {
            /* block is full, need merge */
            if (LML_MergeLogBlk(SPECIAL_MERGE_MODE, LOG_BLK_TBL[nLogBlkPst].LogicBlkNum)) {
                MAPPING_ERR("write back page tbl : merge err\n");
                return NAND_OP_FALSE;
            }

            if (PAGE_MAP_CACHE->ZoneNum != 0xff) {
                /* move merge: the log block is still active, refresh table position */
                TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
                TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
            } else {
                return NAND_OP_TRUE;
            }
        }
    }

rewrite:
    //PRINT("-------------------write back page tbl for blk %x\n",TableBlk);
    /* write page map table */
    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE)) {
        if ((TablePage == 0) && (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 1)) {
            MEMSET((void *)(&UserData[0]), 0xff, sizeof(struct __NandUserData_t) * 2);

            /* the target page is page 0 of log block 1: copy page 0 of log block 0 into it, then skip the page */
            LML_CalculatePhyOpPar(&tmpPage0, CUR_MAP_ZONE, LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, 0);
            tmpPage0.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
            tmpPage0.MDataPtr = LML_TEMP_BUF;
            tmpPage0.SDataPtr = (void *)UserData;
            result = LML_VirtualPageRead(&tmpPage0);
            if (result < 0) {
                LOGICCTL_ERR("[LOGICCTL_ERR] Read page 0 of log block failed when writing back page map table, Err:0x%x!\n", result);
                return -ERR_PHYSIC;
            }

            UserData[0].LogType = LSB_TYPE | (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex << 4);
            UserData[1].LogType = LSB_TYPE | (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex << 4);

            LML_CalculatePhyOpPar(&tmpPage0, CUR_MAP_ZONE, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum, 0);
            tmpPage0.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
            tmpPage0.MDataPtr = LML_TEMP_BUF;
            tmpPage0.SDataPtr = (void *)UserData;
            result = LML_VirtualPageWrite(&tmpPage0);

            TablePage++;
        }
    }

    MEMSET((void *)(&UserData[0]), 0xff, sizeof(struct __NandUserData_t) * 2);
    UserData[0].PageStatus = 0xaa;
    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE)) {
        UserData[0].LogType = LSB_TYPE | (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex << 4);
        UserData[1].LogType = LSB_TYPE | (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex << 4);
    } else {
        UserData[0].LogType = 0xff;
        UserData[1].LogType = 0xff;
    }

    //if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE)&&(TablePage== 0))
    //{
    //    DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, TablePage: %x, TableBlk: %x\n", TablePage, TableBlk);
    //    DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, logicNum: %x, log0: %x, log1: %x\n", LOG_BLK_TBL[nLogBlkPst].LogicBlkNum, LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);
    //    DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, logicinfo: %x, logicpage: %x\n", UserData[0].LogicInfo, UserData[0].LogicPageNum);
    //    DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, logtype: %x, pagestatus: %x\n", UserData[0].LogType, UserData[0].PageStatus);
    //}

    MEMSET(LML_PROCESS_TBL_BUF, 0xff, SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);
    if (PAGE_CNT_OF_SUPER_BLK >= 512) {
        /* for large super blocks, store the table as a packed array of physical page numbers */
        __u32 page;

        for (page = 0; page < PAGE_CNT_OF_SUPER_BLK; page++)
            *((__u16 *)LML_PROCESS_TBL_BUF + page) = PAGE_MAP_TBL[page].PhyPageNum;
        ((__u32 *)LML_PROCESS_TBL_BUF)[511] =
            _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK * 2 / sizeof(__u32));
    } else {
        MEMCPY(LML_PROCESS_TBL_BUF, PAGE_MAP_TBL,
            PAGE_CNT_OF_SUPER_BLK * sizeof(struct __PageMapTblItem_t));
        ((__u32 *)LML_PROCESS_TBL_BUF)[511] =
            _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF,
                PAGE_CNT_OF_SUPER_BLK * sizeof(struct __PageMapTblItem_t) / sizeof(__u32));
    }

    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
    //rewrite:
    LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if (NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE)) {
        BadBlk.PhyBlkNum = TableBlk;
        if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk, CUR_MAP_ZONE, TablePage, &NewBlk)) {
            MAPPING_ERR("write page map table : bad block manage err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        LOG_BLK_TBL[nLogBlkPst].PhyBlk = NewBlk;
        goto rewrite;
    }

    LOG_BLK_TBL[nLogBlkPst].LastUsedPage = TablePage;
    PAGE_MAP_CACHE->ZoneNum = 0xff;
    PAGE_MAP_CACHE->LogBlkPst = 0xff;

    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
        DBUG_MSG("[DBUG] _write_back_page_map_tbl end, lastusedpage: %x, write_index: %x\n",
            LOG_BLK_TBL[nLogBlkPst].LastUsedPage, LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex);

    return NAND_OP_TRUE;
}