/*
************************************************************************************************************************
*                                    RESTORE VALID PAGE DATA FROM BAD BLOCK
*
*Description: Restore the valid page data from the bad block.
*
*Arguments  : pBadBlk    the pointer to the bad physical block parameter;
*             nZoneNum   the number of the zone which the bad block belongs to;
*             nErrPage   the number of the error page;
*             pNewBlk    the pointer to the new valid block parameter.
*
*Return     : restore page data result;
*               = 0     restore data successful;
*               = -1    restore data failed.
************************************************************************************************************************
*/
static __s32 _RestorePageData(struct __SuperPhyBlkType_t *pBadBlk, __u32 nZoneNum, __u32 nErrPage, struct __SuperPhyBlkType_t *pNewBlk)
{
    __s32 i, result;
    struct __PhysicOpPara_t tmpSrcPage, tmpDstPage;

    //set sector bitmap and buffer pointer for copy nand flash page
    tmpSrcPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
    tmpDstPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
    tmpSrcPage.MDataPtr = NULL;
    tmpSrcPage.SDataPtr = NULL;

    for(i=0; i<nErrPage; i++)
    {
        //calculate source page and destination page parameter for copy nand page
        LML_CalculatePhyOpPar(&tmpSrcPage, nZoneNum, pBadBlk->PhyBlkNum, i);
        LML_CalculatePhyOpPar(&tmpDstPage, nZoneNum, pNewBlk->PhyBlkNum, i);

        PHY_PageCopyback(&tmpSrcPage, &tmpDstPage);

        //check page copy result
        result = PHY_SynchBank(tmpDstPage.BankNum, SYNC_CHIP_MODE);
        if(result < 0)
        {
            LOGICCTL_DBG("[LOGICCTL_DBG] Copy page failed when restore bad block data!\n");
            return -1;
        }
    }

    return 0;
}
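/*
 * For context, a minimal sketch of how a bad-block handler might invoke _RestorePageData()
 * after a program on page nErrPage has failed: take a replacement super block, copy the
 * pages already written (0 .. nErrPage-1) into it, then retire the old block. This caller,
 * its name and its simplified error handling are illustrative assumptions, not the driver's
 * confirmed call sequence; only _RestorePageData() and BMM_GetFreeBlk() appear in this section.
 */
static __s32 _ReplaceBadBlockSketch(struct __SuperPhyBlkType_t *pBadBlk, __u32 nZoneNum, __u32 nErrPage)
{
    struct __SuperPhyBlkType_t tmpNewBlk;

    //take a spare block from the free block table
    if(BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpNewBlk) < 0)
    {
        return -1;
    }

    //copy the pages that were already valid in the failing block
    if(_RestorePageData(pBadBlk, nZoneNum, nErrPage, &tmpNewBlk) < 0)
    {
        return -1;
    }

    //the failing block would then be marked bad and dropped from the tables
    //(marker write and table update are omitted in this sketch)
    return 0;
}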
static __s32 _write_dirty_flag(__u8 nZone)
{
    __s32 TablePage;
    __u32 TableBlk;
    struct __PhysicOpPara_t param;
    struct __NandUserData_t UserData[2];

    /*set table block number and table page number*/
    TableBlk = NandDriverInfo.ZoneTblPstInfo[nZone].PhyBlkNum;
    TablePage = NandDriverInfo.ZoneTblPstInfo[nZone].TablePst;
    TablePage += 3;

    MEMSET((void *)&UserData, 0xff, sizeof(struct __NandUserData_t) * 2);
    UserData[0].PageStatus = 0x55;
    MEMSET(LML_PROCESS_TBL_BUF, 0x55, 512);

    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE);

    return NAND_OP_TRUE;
}
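/*
 * The dirty flag above lands at TablePst + 3. Together with _write_back_block_map_tbl()
 * further below, this implies the following page layout for one zone-table record inside
 * the table block (offsets relative to NandDriverInfo.ZoneTblPstInfo[nZone].TablePst).
 * The enum is illustrative documentation only, not a type used by the driver.
 */
enum zone_tbl_page_offset {
    ZONE_TBL_PAGE_DATA_FREE_LOW  = 0,   /*data block table, first half, plus free block table*/
    ZONE_TBL_PAGE_DATA_FREE_HIGH = 1,   /*data block table, second half*/
    ZONE_TBL_PAGE_LOG            = 2,   /*log block table with checksum*/
    ZONE_TBL_PAGE_DIRTY_FLAG     = 3    /*0x55 pattern written by _write_dirty_flag()*/
};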
/*
************************************************************************************************************************
*                                     ANALYZE NAND FLASH STORAGE SYSTEM
*
*Description: Analyze nand flash storage system, generate the nand flash physical
*             architecture parameter and connect information.
*
*Arguments  : none
*
*Return     : analyze result;
*               = 0     analyze successful;
*               < 0     analyze failed, can't recognize or some other error.
************************************************************************************************************************
*/
__s32 BOOT_AnalyzeNandSystem(void)
{
    __s32 result;
    __u32 i;
    __u8  tmpChipID[8];
    boot_nand_para_t nand_info;

    if( BOOT_NandGetPara( &nand_info ) != 0 ){
        return -1;
    }

    _InitNandPhyInfo(&nand_info);

    //reset the nand flash chip on boot chip select
    result = PHY_ResetChip(BOOT_CHIP_SELECT_NUM);
    result |= PHY_SynchBank(BOOT_CHIP_SELECT_NUM, SYNC_CHIP_MODE);
    if(result)
        return -1;

    //read nand flash chip ID from boot chip
    result = PHY_ReadNandId(BOOT_CHIP_SELECT_NUM, tmpChipID);
    if(result)
        return -1;

    //check nand ID
    result = _CheckNandID(tmpChipID);
    if(result)
        return -1;

    /*configure page size*/
    {
        NFC_INIT_INFO nfc_info;
        nfc_info.bus_width = 0x0;
        nfc_info.ce_ctl = 0x0;
        nfc_info.ce_ctl1 = 0x0;
        nfc_info.debug = 0x0;
        nfc_info.pagesize = SECTOR_CNT_OF_SINGLE_PAGE;
        nfc_info.rb_sel = 1;
        nfc_info.serial_access_mode = 1;
        nfc_info.ddr_type = DDR_TYPE;
        NFC_ChangMode(&nfc_info);
        NandIndex = 0;
    }

    PHY_ChangeMode(1);

    if(SUPPORT_READ_RETRY&&(((READ_RETRY_TYPE>>16)&0xff) <0x10))   //boot0 only support hynix readretry
    {
        SCAN_DBG("NFC Read Retry Init. \n");
        NFC_ReadRetryInit(READ_RETRY_TYPE);
        PHY_GetDefaultParam(0);
    }
    else
    {
/* ************************************************************************************************************************ * ANALYZE NAND FLASH STORAGE SYSTEM * *Description: Analyze nand flash storage system, generate the nand flash physical * architecture parameter and connect information. * *Arguments : none * *Return : analyze result; * = 0 analyze successful; * < 0 analyze failed, can't recognize or some other error. ************************************************************************************************************************ */ __s32 SCN_AnalyzeNandSystem(void) { __s32 i,result; __u8 tmpChipID[8]; __u8 uniqueID[32]; struct __NandPhyInfoPar_t tmpNandPhyInfo; //init nand flash storage information to default value NandStorageInfo.ChipCnt = 1; NandStorageInfo.ChipConnectInfo = 1; NandStorageInfo.RbConnectMode= 1; NandStorageInfo.RbCnt= 1; NandStorageInfo.RbConnectInfo= 1; NandStorageInfo.BankCntPerChip = 1; NandStorageInfo.DieCntPerChip = 1; NandStorageInfo.PlaneCntPerDie = 1; NandStorageInfo.SectorCntPerPage = 4; NandStorageInfo.PageCntPerPhyBlk = 64; NandStorageInfo.BlkCntPerDie = 1024; NandStorageInfo.OperationOpt = 0; NandStorageInfo.FrequencePar = 10; NandStorageInfo.EccMode = 0; NandStorageInfo.ReadRetryType= 0; //reset the nand flash chip on boot chip select result = PHY_ResetChip(BOOT_CHIP_SELECT_NUM); result |= PHY_SynchBank(BOOT_CHIP_SELECT_NUM, SYNC_CHIP_MODE); if(result) { SCAN_ERR("[SCAN_ERR] Reset boot nand flash chip failed!\n"); return -1; } //read nand flash chip ID from boot chip result = PHY_ReadNandId(BOOT_CHIP_SELECT_NUM, tmpChipID); if(result) { SCAN_ERR("[SCAN_ERR] Read chip ID from boot chip failed!\n"); return -1; } SCAN_DBG("[SCAN_DBG] Nand flash chip id is:0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", tmpChipID[0],tmpChipID[1],tmpChipID[2],tmpChipID[3], tmpChipID[4],tmpChipID[5]); //search the nand flash physical architecture parameter by nand ID result = _SearchNandArchi(tmpChipID, &tmpNandPhyInfo); if(result) { SCAN_ERR("[SCAN_ERR] search nand physical architecture parameter failed!\n"); return -1; } //set the nand flash physical architecture parameter NandStorageInfo.BankCntPerChip = tmpNandPhyInfo.DieCntPerChip; NandStorageInfo.DieCntPerChip = tmpNandPhyInfo.DieCntPerChip; NandStorageInfo.PlaneCntPerDie = 2; NandStorageInfo.SectorCntPerPage = tmpNandPhyInfo.SectCntPerPage; NandStorageInfo.PageCntPerPhyBlk = tmpNandPhyInfo.PageCntPerBlk; NandStorageInfo.BlkCntPerDie = tmpNandPhyInfo.BlkCntPerDie; NandStorageInfo.OperationOpt = tmpNandPhyInfo.OperationOpt; NandStorageInfo.FrequencePar = tmpNandPhyInfo.AccessFreq; NandStorageInfo.EccMode = tmpNandPhyInfo.EccMode; NandStorageInfo.NandChipId[0] = tmpNandPhyInfo.NandID[0]; NandStorageInfo.NandChipId[1] = tmpNandPhyInfo.NandID[1]; NandStorageInfo.NandChipId[2] = tmpNandPhyInfo.NandID[2]; NandStorageInfo.NandChipId[3] = tmpNandPhyInfo.NandID[3]; NandStorageInfo.NandChipId[4] = tmpNandPhyInfo.NandID[4]; NandStorageInfo.NandChipId[5] = tmpNandPhyInfo.NandID[5]; NandStorageInfo.NandChipId[6] = tmpNandPhyInfo.NandID[6]; NandStorageInfo.NandChipId[7] = tmpNandPhyInfo.NandID[7]; NandStorageInfo.ValidBlkRatio = tmpNandPhyInfo.ValidBlkRatio; NandStorageInfo.ReadRetryType = tmpNandPhyInfo.ReadRetryType; NandStorageInfo.DDRType = tmpNandPhyInfo.DDRType; //set the optional operation parameter NandStorageInfo.OptPhyOpPar.MultiPlaneReadCmd[0] = tmpNandPhyInfo.OptionOp->MultiPlaneReadCmd[0]; NandStorageInfo.OptPhyOpPar.MultiPlaneReadCmd[1] = tmpNandPhyInfo.OptionOp->MultiPlaneReadCmd[1]; 
NandStorageInfo.OptPhyOpPar.MultiPlaneWriteCmd[0] = tmpNandPhyInfo.OptionOp->MultiPlaneWriteCmd[0]; NandStorageInfo.OptPhyOpPar.MultiPlaneWriteCmd[1] = tmpNandPhyInfo.OptionOp->MultiPlaneWriteCmd[1]; NandStorageInfo.OptPhyOpPar.MultiPlaneCopyReadCmd[0] = tmpNandPhyInfo.OptionOp->MultiPlaneCopyReadCmd[0]; NandStorageInfo.OptPhyOpPar.MultiPlaneCopyReadCmd[1] = tmpNandPhyInfo.OptionOp->MultiPlaneCopyReadCmd[1]; NandStorageInfo.OptPhyOpPar.MultiPlaneCopyReadCmd[2] = tmpNandPhyInfo.OptionOp->MultiPlaneCopyReadCmd[2]; NandStorageInfo.OptPhyOpPar.MultiPlaneCopyWriteCmd[0] = tmpNandPhyInfo.OptionOp->MultiPlaneCopyWriteCmd[0]; NandStorageInfo.OptPhyOpPar.MultiPlaneCopyWriteCmd[1] = tmpNandPhyInfo.OptionOp->MultiPlaneCopyWriteCmd[1]; NandStorageInfo.OptPhyOpPar.MultiPlaneCopyWriteCmd[2] = tmpNandPhyInfo.OptionOp->MultiPlaneCopyWriteCmd[2]; NandStorageInfo.OptPhyOpPar.MultiPlaneStatusCmd = tmpNandPhyInfo.OptionOp->MultiPlaneStatusCmd; NandStorageInfo.OptPhyOpPar.InterBnk0StatusCmd = tmpNandPhyInfo.OptionOp->InterBnk0StatusCmd; NandStorageInfo.OptPhyOpPar.InterBnk1StatusCmd = tmpNandPhyInfo.OptionOp->InterBnk1StatusCmd; NandStorageInfo.OptPhyOpPar.BadBlockFlagPosition = tmpNandPhyInfo.OptionOp->BadBlockFlagPosition; NandStorageInfo.OptPhyOpPar.MultiPlaneBlockOffset = tmpNandPhyInfo.OptionOp->MultiPlaneBlockOffset; //set some configurable optional operation parameter if(!CFG_SUPPORT_MULTI_PLANE_PROGRAM) { NandStorageInfo.OperationOpt &= ~NAND_MULTI_READ; NandStorageInfo.OperationOpt &= ~NAND_MULTI_PROGRAM; } if(!CFG_SUPPORT_INT_INTERLEAVE) { NandStorageInfo.OperationOpt &= ~NAND_INT_INTERLEAVE; } if(!CFG_SUPPORT_RANDOM) { NandStorageInfo.OperationOpt &= ~NAND_RANDOM; } if(!CFG_SUPPORT_READ_RETRY) { NandStorageInfo.OperationOpt &= ~NAND_READ_RETRY; } if(!CFG_SUPPORT_ALIGN_NAND_BNK) { NandStorageInfo.OperationOpt |= NAND_PAGE_ADR_NO_SKIP; } //process the plane count of a die and the bank count of a chip if(!SUPPORT_MULTI_PROGRAM) { NandStorageInfo.PlaneCntPerDie = 1; } if(!SUPPORT_INT_INTERLEAVE) { NandStorageInfo.BankCntPerChip = 1; } //process the rb connect infomation for(i=1; i<MAX_CHIP_SELECT_CNT; i++) { //reset current nand flash chip PHY_ResetChip((__u32)i); //read the nand chip ID from current nand flash chip PHY_ReadNandId((__u32)i, tmpChipID); //check if the nand flash id same as the boot chip if((tmpChipID[0] == NandStorageInfo.NandChipId[0]) && (tmpChipID[1] == NandStorageInfo.NandChipId[1]) && (tmpChipID[2] == NandStorageInfo.NandChipId[2]) && (tmpChipID[3] == NandStorageInfo.NandChipId[3]) && ((tmpChipID[4] == NandStorageInfo.NandChipId[4])||(NandStorageInfo.NandChipId[4]==0xff)) && ((tmpChipID[5] == NandStorageInfo.NandChipId[5])||(NandStorageInfo.NandChipId[5]==0xff))) { NandStorageInfo.ChipCnt++; NandStorageInfo.ChipConnectInfo |= (1<<i); } } //process the rb connect infomation { NandStorageInfo.RbConnectMode = 0xff; if((NandStorageInfo.ChipCnt == 1) && (NandStorageInfo.ChipConnectInfo & (1<<0))) { NandStorageInfo.RbConnectMode =1; } else if(NandStorageInfo.ChipCnt == 2) { if((NandStorageInfo.ChipConnectInfo & (1<<0)) && (NandStorageInfo.ChipConnectInfo & (1<<1))) NandStorageInfo.RbConnectMode =2; else if((NandStorageInfo.ChipConnectInfo & (1<<0)) && (NandStorageInfo.ChipConnectInfo & (1<<2))) NandStorageInfo.RbConnectMode =3; else if((NandStorageInfo.ChipConnectInfo & (1<<0)) && (NandStorageInfo.ChipConnectInfo & (1<<7))) NandStorageInfo.RbConnectMode =0; //special use, only one rb } else if(NandStorageInfo.ChipCnt == 4) { if((NandStorageInfo.ChipConnectInfo & (1<<0)) && 
(NandStorageInfo.ChipConnectInfo & (1<<1)) && (NandStorageInfo.ChipConnectInfo & (1<<2)) && (NandStorageInfo.ChipConnectInfo & (1<<3)) ) NandStorageInfo.RbConnectMode =4; else if((NandStorageInfo.ChipConnectInfo & (1<<0)) && (NandStorageInfo.ChipConnectInfo & (1<<2)) && (NandStorageInfo.ChipConnectInfo & (1<<4)) && (NandStorageInfo.ChipConnectInfo & (1<<6)) ) NandStorageInfo.RbConnectMode =5; } else if(NandStorageInfo.ChipCnt == 8) { NandStorageInfo.RbConnectMode =8; } if( NandStorageInfo.RbConnectMode == 0xff) { SCAN_ERR("%s : check nand rb connect fail, ChipCnt = %x, ChipConnectInfo = %x \n",__FUNCTION__, NandStorageInfo.ChipCnt, NandStorageInfo.ChipConnectInfo); return -1; } } //process the external inter-leave operation if(CFG_SUPPORT_EXT_INTERLEAVE) { if(NandStorageInfo.ChipCnt > 1) { NandStorageInfo.OperationOpt |= NAND_EXT_INTERLEAVE; } } else { NandStorageInfo.OperationOpt &= ~NAND_EXT_INTERLEAVE; } if(SUPPORT_READ_UNIQUE_ID) { for(i=0; i<NandStorageInfo.ChipCnt; i++) { PHY_ReadNandUniqueId(i, uniqueID); } } /*configure page size*/ { NFC_INIT_INFO nand_info; nand_info.bus_width = 0x0; nand_info.ce_ctl = 0x0; nand_info.ce_ctl1 = 0x0; nand_info.debug = 0x0; nand_info.pagesize = SECTOR_CNT_OF_SINGLE_PAGE; nand_info.rb_sel = 1; nand_info.serial_access_mode = 1; nand_info.ddr_type = DDR_TYPE; NFC_ChangMode(&nand_info); } PHY_ChangeMode(1); if(SUPPORT_READ_RETRY) { PHY_DBG("NFC Read Retry Init. \n"); NFC_ReadRetryInit(READ_RETRY_TYPE); for(i=0; i<NandStorageInfo.ChipCnt;i++) { PHY_GetDefaultParam(i); } } //print nand flash physical architecture parameter SCAN_DBG("\n\n"); SCAN_DBG("[SCAN_DBG] ==============Nand Architecture Parameter==============\n"); SCAN_DBG("[SCAN_DBG] Nand Chip ID: 0x%x 0x%x\n", (NandStorageInfo.NandChipId[0] << 0) | (NandStorageInfo.NandChipId[1] << 8) | (NandStorageInfo.NandChipId[2] << 16) | (NandStorageInfo.NandChipId[3] << 24), (NandStorageInfo.NandChipId[4] << 0) | (NandStorageInfo.NandChipId[5] << 8) | (NandStorageInfo.NandChipId[6] << 16) | (NandStorageInfo.NandChipId[7] << 24)); SCAN_DBG("[SCAN_DBG] Nand Chip Count: 0x%x\n", NandStorageInfo.ChipCnt); SCAN_DBG("[SCAN_DBG] Nand Chip Connect: 0x%x\n", NandStorageInfo.ChipConnectInfo); SCAN_DBG("[SCAN_DBG] Nand Rb Connect Mode: 0x%x\n", NandStorageInfo.RbConnectMode); SCAN_DBG("[SCAN_DBG] Sector Count Of Page: 0x%x\n", NandStorageInfo.SectorCntPerPage); SCAN_DBG("[SCAN_DBG] Page Count Of Block: 0x%x\n", NandStorageInfo.PageCntPerPhyBlk); SCAN_DBG("[SCAN_DBG] Block Count Of Die: 0x%x\n", NandStorageInfo.BlkCntPerDie); SCAN_DBG("[SCAN_DBG] Plane Count Of Die: 0x%x\n", NandStorageInfo.PlaneCntPerDie); SCAN_DBG("[SCAN_DBG] Die Count Of Chip: 0x%x\n", NandStorageInfo.DieCntPerChip); SCAN_DBG("[SCAN_DBG] Bank Count Of Chip: 0x%x\n", NandStorageInfo.BankCntPerChip); SCAN_DBG("[SCAN_DBG] Optional Operation: 0x%x\n", NandStorageInfo.OperationOpt); SCAN_DBG("[SCAN_DBG] Access Frequence: 0x%x\n", NandStorageInfo.FrequencePar); SCAN_DBG("[SCAN_DBG] ECC Mode: 0x%x\n", NandStorageInfo.EccMode); SCAN_DBG("[SCAN_DBG] Read Retry Type: 0x%x\n", NandStorageInfo.ReadRetryType); SCAN_DBG("[SCAN_DBG] DDR Type: 0x%x\n", NandStorageInfo.DDRType); SCAN_DBG("[SCAN_DBG] =======================================================\n\n"); //print nand flash optional operation parameter SCAN_DBG("[SCAN_DBG] ==============Optional Operaion Parameter==============\n"); SCAN_DBG("[SCAN_DBG] MultiPlaneReadCmd: 0x%x, 0x%x\n", NandStorageInfo.OptPhyOpPar.MultiPlaneReadCmd[0],NandStorageInfo.OptPhyOpPar.MultiPlaneReadCmd[1]); SCAN_DBG("[SCAN_DBG] 
MultiPlaneWriteCmd: 0x%x, 0x%x\n", NandStorageInfo.OptPhyOpPar.MultiPlaneWriteCmd[0],NandStorageInfo.OptPhyOpPar.MultiPlaneWriteCmd[1]); SCAN_DBG("[SCAN_DBG] MultiPlaneCopyReadCmd: 0x%x, 0x%x, 0x%x\n", NandStorageInfo.OptPhyOpPar.MultiPlaneCopyReadCmd[0],NandStorageInfo.OptPhyOpPar.MultiPlaneCopyReadCmd[1], NandStorageInfo.OptPhyOpPar.MultiPlaneCopyReadCmd[2]); SCAN_DBG("[SCAN_DBG] MultiPlaneCopyWriteCmd: 0x%x, 0x%x, 0x%x\n", NandStorageInfo.OptPhyOpPar.MultiPlaneCopyWriteCmd[0], NandStorageInfo.OptPhyOpPar.MultiPlaneCopyWriteCmd[1], NandStorageInfo.OptPhyOpPar.MultiPlaneCopyWriteCmd[2]); SCAN_DBG("[SCAN_DBG] MultiPlaneStatusCmd: 0x%x\n", NandStorageInfo.OptPhyOpPar.MultiPlaneStatusCmd); SCAN_DBG("[SCAN_DBG] InterBnk0StatusCmd: 0x%x\n", NandStorageInfo.OptPhyOpPar.InterBnk0StatusCmd); SCAN_DBG("[SCAN_DBG] InterBnk1StatusCmd: 0x%x\n", NandStorageInfo.OptPhyOpPar.InterBnk1StatusCmd); SCAN_DBG("[SCAN_DBG] BadBlockFlagPosition: 0x%x\n", NandStorageInfo.OptPhyOpPar.BadBlockFlagPosition); SCAN_DBG("[SCAN_DBG] MultiPlaneBlockOffset: 0x%x\n", NandStorageInfo.OptPhyOpPar.MultiPlaneBlockOffset); SCAN_DBG("[SCAN_DBG] =======================================================\n"); return 0; }
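/*
 * The multi-chip scan above treats a chip select as populated when its ID matches the boot
 * chip's table entry on bytes 0-3, with bytes 4 and 5 acting as wildcards when the table
 * entry holds 0xff. A standalone sketch of that comparison rule; the helper name is
 * illustrative, the driver performs the test inline.
 */
static __s32 _id_matches_boot_chip(const __u8 id[8], const __u8 tblId[8])
{
    /*bytes 0..3 must match exactly*/
    if((id[0] != tblId[0]) || (id[1] != tblId[1]) ||
       (id[2] != tblId[2]) || (id[3] != tblId[3]))
        return 0;

    /*bytes 4 and 5 only matter when the table entry is not the 0xff wildcard*/
    if((tblId[4] != 0xff) && (id[4] != tblId[4]))
        return 0;
    if((tblId[5] != 0xff) && (id[5] != tblId[5]))
        return 0;

    return 1;
}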
/* ************************************************************************************************************************ * GET LOG PAGE FOR WRITE * *Description: Get a log page for write. * *Arguments : nBlk the logical block number of the log block; * nPage the number of the logical page, which page need log page; * pLogPage the pointer to the log page number, for return value; * pLogPst the pointer to the position of the log block in the log block table. * *Return : get log page result. * = 0 get log page for write successful; * =-1 get log page for write failed. ************************************************************************************************************************ */ static __s32 _GetLogPageForWrite(__u32 nBlk, __u32 nPage, __u16 *pLogPage, __u32 *pLogPst) { __s32 result, tmpLogPst; __u16 tmpPage, tempBank; struct __PhysicOpPara_t tmpPhyPage; struct __NandUserData_t tmpSpare[2]; tmpLogPst = _GetLogBlkPst(nBlk); if(tmpLogPst < 0) { //get log block position failed, there is no such log block, need create a new one result = _CreateNewLogBlk(nBlk, (__u32 *)&tmpLogPst); if(result < 0) { MAPPING_ERR("[MAPPING_ERR] Create new log block failed!\n"); return -1; } } //need swap the page mapping table to ram which is accessing currently result = PMM_SwitchMapTbl(tmpLogPst); if(result < 0) { MAPPING_ERR("[MAPPING_ERR] Switch page mapping table failed when get log page! Err:0x%x\n", result); return -1; } //need get log page by write mode, tmpPage = LOG_BLK_TBL[tmpLogPst].LastUsedPage; if(SUPPORT_ALIGN_NAND_BNK) { if(tmpPage == 0xffff) { //the log block is empty, need get log page in the first page line tmpPage = nPage % INTERLEAVE_BANK_CNT; } else { //need bank align, the log page and the data page should be in the same bank if((nPage % INTERLEAVE_BANK_CNT) > (tmpPage % INTERLEAVE_BANK_CNT)) { //get the log page in the same page line with last used page tmpPage = tmpPage + ((nPage % INTERLEAVE_BANK_CNT) - (tmpPage % INTERLEAVE_BANK_CNT)); } else { //need get the log page in the next page line of the last used page tmpPage = tmpPage + (nPage % INTERLEAVE_BANK_CNT) + (INTERLEAVE_BANK_CNT - (tmpPage % INTERLEAVE_BANK_CNT)); } } } else { //use the page which is the next of the last used page tmpPage = tmpPage + 1; } if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE)) { DBUG_MSG("[DBUG_MSG] _GetLogPageForWrite, select bak log block\n"); if(SUPPORT_ALIGN_NAND_BNK) { tempBank = tmpPage%INTERLEAVE_BANK_CNT; tmpPage =PMM_CalNextLogPage(tmpPage); while(tmpPage%INTERLEAVE_BANK_CNT != tempBank) { tmpPage++; tmpPage =PMM_CalNextLogPage(tmpPage); if(tmpPage>=PAGE_CNT_OF_SUPER_BLK) break; } } else { tmpPage =PMM_CalNextLogPage(tmpPage); } if((tmpPage >= PAGE_CNT_OF_SUPER_BLK)&&(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex == 0)) { LOG_BLK_TBL[tmpLogPst].WriteBlkIndex = 1; tmpPage = tmpPage - PAGE_CNT_OF_SUPER_BLK; } if(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex == 1) DBUG_MSG("[DBUG_MSG] _GetLogPageForWrite, log block index: %x, log block num: %x, page: %x \n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex, LOG_BLK_TBL[tmpLogPst].PhyBlk1.PhyBlkNum, tmpPage); else DBUG_MSG("[DBUG_MSG] _GetLogPageForWrite, log block index: %x, log block num: %x, page: %x \n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex, LOG_BLK_TBL[tmpLogPst].PhyBlk.PhyBlkNum, tmpPage); } __CHECK_WRITE_LOGICAL_INFO_OF_LOG_BLOCK: //check if need write the logical information in the first page of the log block if((LOG_BLK_TBL[tmpLogPst].LastUsedPage == 0xffff) && (tmpPage != 0)) { //get logical information from the data block 
LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, DATA_BLK_TBL[nBlk].PhyBlkNum, 0); tmpPhyPage.SectBitmap = 0x03; tmpPhyPage.MDataPtr = LML_TEMP_BUF; tmpPhyPage.SDataPtr = (void *)tmpSpare; LML_VirtualPageRead(&tmpPhyPage); //if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE)) //{ // PRINT("_GetLogPageForWrite log %x page 0, data age: %x, log age: %x\n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex, tmpSpare[0].PageStatus, tmpSpare[0].PageStatus+1); //} tmpSpare[0].BadBlkFlag = 0xff; tmpSpare[1].BadBlkFlag = 0xff; tmpSpare[0].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk; tmpSpare[1].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk; tmpSpare[0].LogicPageNum = 0xffff; tmpSpare[1].LogicPageNum = 0xffff; tmpSpare[0].PageStatus = tmpSpare[0].PageStatus + 1; tmpSpare[1].PageStatus = tmpSpare[0].PageStatus; if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE)) { tmpSpare[0].LogType = LSB_TYPE|(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex<<4); tmpSpare[1].LogType = LSB_TYPE|(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex<<4); } else { tmpSpare[0].LogType = 0xff; tmpSpare[1].LogType = 0xff; } //write the logical information to the spare area of the data block if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE)) { DBUG_MSG("[DBUG] _GetLogPageForWrite, write the logical information to log page 0, writeblkindex: %x\n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex); LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, LOG_BLK_TBL[tmpLogPst].PhyBlk.PhyBlkNum, 0); } else LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, LOG_BLK_TBL[tmpLogPst].PhyBlk.PhyBlkNum, 0); tmpPhyPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE; result = LML_VirtualPageWrite(&tmpPhyPage); if(result < 0) { LOGICCTL_ERR("[MAPPING_ERR] Physical write module failed when write logical information, Err:0x%x!\n", result); return -1; } result = PHY_SynchBank(tmpPhyPage.BankNum, SYNC_CHIP_MODE); if(result < 0) { //the last write operation on current bank is failed, the block is bad, need proccess it LOGICCTL_DBG("[LOGICCTL_DBG] Find a bad block when write logical page! bank:0x%x, block:0x%x, page:0x%x\n", tmpPhyPage.BankNum, tmpPhyPage.BlkNum, tmpPhyPage.PageNum); //process the bad block result = LML_BadBlkManage(&LOG_BLK_TBL[tmpLogPst].PhyBlk, CUR_MAP_ZONE, 0, &LOG_BLK_TBL[tmpLogPst].PhyBlk); if(result < 0) { LOGICCTL_ERR("[MAPPING_ERR] Bad block process failed when get log page for write, Err:0x%x!\n", result); return -1; } goto __CHECK_WRITE_LOGICAL_INFO_OF_LOG_BLOCK; } } //set the log page number for return *pLogPage = tmpPage; *pLogPst = tmpLogPst; return 0; }
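/*
 * The bank-alignment arithmetic in _GetLogPageForWrite() above picks the next log page so
 * that it sits in the same interleave bank as the logical page being written. Pulled out
 * as a standalone sketch; the helper name is illustrative, the driver computes this inline.
 */
static __u16 _next_banked_log_page(__u16 lastUsedPage, __u32 nPage, __u32 bankCnt)
{
    __u32 wantBank = nPage % bankCnt;

    if(lastUsedPage == 0xffff)
    {
        /*empty log block: take the wanted bank in the first page line*/
        return (__u16)wantBank;
    }

    if(wantBank > (lastUsedPage % bankCnt))
    {
        /*stay in the page line of the last used page, move forward to the wanted bank*/
        return lastUsedPage + (wantBank - (lastUsedPage % bankCnt));
    }

    /*otherwise step into the next page line and select the wanted bank there*/
    return lastUsedPage + wantBank + (bankCnt - (lastUsedPage % bankCnt));
}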
/*
************************************************************************************************************************
*                                          CREATE A NEW LOG BLOCK
*
*Description: Create a new log block.
*
*Arguments  : nBlk      the logical block number of the log block;
*             pLogPst   the pointer to the log block position in the log block table.
*
*Return     : create new log block result.
*               = 0     create new log block successful;
*               =-1     create new log block failed.
************************************************************************************************************************
*/
static __s32 _CreateNewLogBlk(__u32 nBlk, __u32 *pLogPst)
{
    __s32 i, result, LogBlkType, tmpPst=-1;
    __u16 tmpLogAccessAge = 0xffff;
    struct __SuperPhyBlkType_t tmpFreeBlk, tmpFreeBlk1;
    struct __PhysicOpPara_t tmpPhyPage;
    struct __NandUserData_t tmpSpare[2];

#if CFG_SUPPORT_WEAR_LEVELLING
    //check if need do wear-levelling
    if(BLK_ERASE_CNTER >= WEAR_LEVELLING_FREQUENCY)
    {
        LML_WearLevelling();
    }
#endif

    //try to search an empty item in the log block table
    for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
    {
        if(LOG_BLK_TBL[i].LogicBlkNum == 0xffff)
        {
            //found an empty item
            tmpPst = i;
            break;
        }
    }

    //there is no empty item in the log block table, need merge a log block
    if(tmpPst == -1)
    {
        //check if there is some full log block
        for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
        {
            if(LOG_BLK_TBL[i].LastUsedPage == PAGE_CNT_OF_SUPER_BLK-1)
            {
                tmpPst = i;
                break;
            }
        }

        if(tmpPst == -1)
        {
            //there is no full log block, look for the oldest log block to merge
            for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
            {
                if(LOG_ACCESS_AGE[i] < tmpLogAccessAge)
                {
                    tmpLogAccessAge = LOG_ACCESS_AGE[i];
                    tmpPst = i;
                }
            }
        }

        //switch the page mapping table to merge the log block
        result = PMM_SwitchMapTbl(tmpPst);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Switch page mapping table failed when create new log block! Err:0x%x\n", result);
            return -1;
        }

        //merge the log block with normal type, to make an empty item
        result = LML_MergeLogBlk(NORMAL_MERGE_MODE, LOG_BLK_TBL[tmpPst].LogicBlkNum);
        if(result < 0)
        {
            //merge log block failed, report error
            MAPPING_ERR("[MAPPING_ERR] Merge log block failed when create new log block! Err:0x%x\n", result);
            return -1;
        }
    }

    LogBlkType = BMM_CalLogBlkType(nBlk);

    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlkType == LSB_TYPE))
    {
        DBUG_MSG("[DBUG_MSG] _CreateNewLogBlk, select bak log block\n");

        //get a free block to create a new log block
        result = BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Get free block failed when create new log block!\n");
            return -1;
        }

        //get a second free block for the bak log block
        result = BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk1);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Get free block failed when create new log block!\n");
            return -1;
        }

        //DBUG_INF("[DBUG] _CreateNewLogBlk, logic: %x, logblk0: %x, logblk1:%x \n", nBlk, tmpFreeBlk.PhyBlkNum, tmpFreeBlk1.PhyBlkNum);

        //make a new log item in the log block table
        LOG_BLK_TBL[tmpPst].LogicBlkNum = nBlk;
        LOG_BLK_TBL[tmpPst].LastUsedPage = 0xffff;
        LOG_BLK_TBL[tmpPst].LogBlkType = LogBlkType;
        LOG_BLK_TBL[tmpPst].WriteBlkIndex = 0;
        LOG_BLK_TBL[tmpPst].ReadBlkIndex = 0;
        LOG_BLK_TBL[tmpPst].PhyBlk = tmpFreeBlk;
        LOG_BLK_TBL[tmpPst].PhyBlk1 = tmpFreeBlk1;

        //set the return value of the log position
        *pLogPst = tmpPst;
    }
    else
    {
        //get a free block to create a new log block
        result = BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Get free block failed when create new log block!\n");
            return -1;
        }

        //make a new log item in the log block table
        LOG_BLK_TBL[tmpPst].LogicBlkNum = nBlk;
        LOG_BLK_TBL[tmpPst].LastUsedPage = 0xffff;
        LOG_BLK_TBL[tmpPst].LogBlkType = LogBlkType;
        LOG_BLK_TBL[tmpPst].WriteBlkIndex = 0;
        LOG_BLK_TBL[tmpPst].ReadBlkIndex = 0;
        LOG_BLK_TBL[tmpPst].PhyBlk = tmpFreeBlk;

        //set the return value of the log position
        *pLogPst = tmpPst;
    }

__CHECK_LOGICAL_INFO_OF_DATA_BLOCK:

    //check if the data block is an empty block, if so, need update the logic information in the spare area
    LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, DATA_BLK_TBL[nBlk].PhyBlkNum, 0);
    tmpPhyPage.SectBitmap = 0x03;
    tmpPhyPage.MDataPtr = LML_TEMP_BUF;
    tmpPhyPage.SDataPtr = (void *)tmpSpare;
    LML_VirtualPageRead(&tmpPhyPage);

    if(tmpSpare[0].LogicInfo == 0xffff)
    {
        tmpSpare[0].BadBlkFlag = 0xff;
        tmpSpare[1].BadBlkFlag = 0xff;
        tmpSpare[0].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
        tmpSpare[1].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
        tmpSpare[0].LogicPageNum = 0xffff;
        tmpSpare[1].LogicPageNum = 0xffff;
        tmpSpare[0].PageStatus = 0xff;
        tmpSpare[1].PageStatus = 0xff;

        //write the logical information to the spare area of the data block
        tmpPhyPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
        result = LML_VirtualPageWrite(&tmpPhyPage);
        if(result < 0)
        {
            LOGICCTL_ERR("[MAPPING_ERR] Physical write module failed when write logical information, Err:0x%x!\n", result);
            return -1;
        }

        result = PHY_SynchBank(tmpPhyPage.BankNum, SYNC_CHIP_MODE);
        if(result < 0)
        {
            //the last write operation on the current bank failed, the block is bad, need process it
            LOGICCTL_DBG("[LOGICCTL_DBG] Find a bad block when write logical page! bank:0x%x, block:0x%x, page:0x%x\n",
                    tmpPhyPage.BankNum, tmpPhyPage.BlkNum, tmpPhyPage.PageNum);

            //process the bad block
            result = LML_BadBlkManage(&DATA_BLK_TBL[nBlk], CUR_MAP_ZONE, 0, &tmpFreeBlk);
            if(result < 0)
            {
                LOGICCTL_ERR("[MAPPING_ERR] Bad block process failed when create new log block, Err:0x%x!\n", result);
                return -1;
            }

            DATA_BLK_TBL[nBlk] = tmpFreeBlk;

            goto __CHECK_LOGICAL_INFO_OF_DATA_BLOCK;
        }
    }

    return 0;
}
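/*
 * When no empty log-table entry exists, the code above first prefers a log block that is
 * already completely written and otherwise evicts the one with the smallest access age.
 * The same selection as a standalone sketch (illustrative helper name):
 */
static __s32 _pick_log_victim(void)
{
    __s32 i, victim = -1;
    __u16 oldestAge = 0xffff;

    /*prefer a log block that is already full*/
    for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
    {
        if(LOG_BLK_TBL[i].LastUsedPage == PAGE_CNT_OF_SUPER_BLK-1)
            return i;
    }

    /*otherwise take the least recently accessed log block*/
    for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
    {
        if(LOG_ACCESS_AGE[i] < oldestAge)
        {
            oldestAge = LOG_ACCESS_AGE[i];
            victim = i;
        }
    }

    return victim;
}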
/*
************************************************************************************************************************
*                              NAND FLASH LOGIC MANAGE LAYER WEAR-LEVELLING
*
*Description: Equate the erase cycles among all physical blocks.
*
*Arguments  : none
*
*Return     : do wear-levelling result;
*               = 0     do wear-levelling successful;
*               = -1    do wear-levelling failed.
*
*Notes      : The erase cycle of a physical block is limited; if the erase cycle overruns this
*             limit, the physical block may become invalid. So a policy is needed to spread
*             the erase cycles evenly over every physical block.
************************************************************************************************************************
*/
__s32 LML_WearLevelling(void)
{
#if CFG_SUPPORT_WEAR_LEVELLING
    __s32 i, result;
    __u32 tmpLogicBlk;
    __u16 tmpLowEc = 0xffff;
    struct __SuperPhyBlkType_t tmpFreeBlk, tmpDataBlk;
    struct __NandUserData_t tmpSpare[2];
    struct __PhysicOpPara_t tmpSrcPage, tmpDstPage;

    BLK_ERASE_CNTER = 0;

    //scan the data block table, to look for a physical block with the lowest erase count
    for(i=DATA_BLK_CNT_OF_ZONE-1; i>=0; i--)
    {
        if(DATA_BLK_TBL[i].BlkEraseCnt < tmpLowEc)
        {
            tmpLowEc = DATA_BLK_TBL[i].BlkEraseCnt;
            tmpLogicBlk = i;
        }
    }

    //get a free block which has the highest erase count
    result = BMM_GetFreeBlk(HIGHEST_EC_TYPE, &tmpFreeBlk);
    if(result < 0)
    {
        LOGICCTL_ERR("[LOGICCTL_ERR] Get free block failed when do wear-levelling!\n");
        return -1;
    }

    //clear the block erase counter
    BLK_ERASE_CNTER = 0;

    if(tmpLowEc >= tmpFreeBlk.BlkEraseCnt)
    {
        if(tmpLowEc == 0xffff)
        {
            //the lowest erase count reached the highest value, clear the erase count of all physical blocks
            for(i=0; i<DATA_BLK_CNT_OF_ZONE; i++)
            {
                //clear the erase count for the data block
                DATA_BLK_TBL[i].BlkEraseCnt = 0x00;
            }

            for(i=0; i<FREE_BLK_CNT_OF_ZONE; i++)
            {
                //clear the erase count for the free block
                if(FREE_BLK_TBL[i].PhyBlkNum != 0xffff)
                {
                    FREE_BLK_TBL[i].BlkEraseCnt = 0x00;
                }
            }

            for(i=0; i<MAX_LOG_BLK_CNT; i++)
            {
                //clear the erase count for the log block
                if(LOG_BLK_TBL[i].LogicBlkNum != 0xffff)
                {
                    LOG_BLK_TBL[i].PhyBlk.BlkEraseCnt = 0x00;
                }
            }
        }

        BMM_SetFreeBlk(&tmpFreeBlk);
        return 0;
    }

    BMM_GetDataBlk(tmpLogicBlk, &tmpDataBlk);
    result = BMM_GetLogBlk(tmpLogicBlk, NULL);
    if(result < 0)
    {
        //check if the data block is empty
        LML_CalculatePhyOpPar(&tmpSrcPage, CUR_MAP_ZONE, tmpDataBlk.PhyBlkNum, 0);
        tmpSrcPage.SectBitmap = 0x03;
        tmpSrcPage.MDataPtr = LML_TEMP_BUF;
        tmpSrcPage.SDataPtr = (void *)tmpSpare;
        LML_VirtualPageRead(&tmpSrcPage);

        if(tmpSpare[0].LogicInfo != 0xffff)
        {
            //need copy data from the data block to the free block
            tmpSrcPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
            tmpSrcPage.MDataPtr = NULL;
            tmpSrcPage.SDataPtr = NULL;
            tmpDstPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
            tmpDstPage.MDataPtr = NULL;
            tmpDstPage.SDataPtr = NULL;

            for(i=0; i<PAGE_CNT_OF_SUPER_BLK; i++)
            {
                LML_CalculatePhyOpPar(&tmpSrcPage, CUR_MAP_ZONE, tmpDataBlk.PhyBlkNum, i);
                LML_CalculatePhyOpPar(&tmpDstPage, CUR_MAP_ZONE, tmpFreeBlk.PhyBlkNum, i);

                PHY_PageCopyback(&tmpSrcPage, &tmpDstPage);

                //check page copy result
                result = PHY_SynchBank(tmpDstPage.BankNum, SYNC_CHIP_MODE);
                if(result < 0)
                {
                    LOGICCTL_DBG("[LOGICCTL_DBG] Copy page failed when doing wear-levelling!\n");

                    result = LML_BadBlkManage(&tmpFreeBlk, CUR_MAP_ZONE, 0, NULL);
                    if(result < 0)
                    {
                        LOGICCTL_ERR("[LOGICCTL_ERR] Bad block manage failed when doing wear-levelling!\n");
                        return -1;
                    }

                    return 0;
                }
            }
        }

        //set the data block item by the free block
        BMM_SetDataBlk(tmpLogicBlk, &tmpFreeBlk);

        if(tmpSpare[0].LogicInfo != 0xffff)
        {
            //erase the data block so it can be reused as a free block
            result = LML_VirtualBlkErase(CUR_MAP_ZONE, tmpDataBlk.PhyBlkNum);
            if(result < 0)
            {
                LOGICCTL_DBG("[LOGICCTL_DBG] Erase super block failed when doing wear-levelling!\n");

                result = LML_BadBlkManage(&tmpDataBlk, CUR_MAP_ZONE, 0, NULL);
                if(result < 0)
                {
                    LOGICCTL_ERR("[LOGICCTL_ERR] Bad block manage failed when doing wear-levelling!\n");
                    return -1;
                }

                return 0;
            }
        }

        //set the data block to the free block table
        tmpDataBlk.BlkEraseCnt++;
        BMM_SetFreeBlk(&tmpDataBlk);
    }
    else
    {
        //set the free block back to the free block table
        BMM_SetFreeBlk(&tmpFreeBlk);
    }
#endif

    return 0;
}
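/*
 * LML_WearLevelling() is driven by the global erase counter: callers such as
 * _CreateNewLogBlk() bump BLK_ERASE_CNTER on erases and call it once the counter reaches
 * WEAR_LEVELLING_FREQUENCY. A minimal sketch of that trigger plus the lowest-erase-count
 * scan the routine starts with; the helper names are illustrative.
 */
static void _maybe_wear_level(void)
{
#if CFG_SUPPORT_WEAR_LEVELLING
    if(BLK_ERASE_CNTER >= WEAR_LEVELLING_FREQUENCY)
    {
        LML_WearLevelling();        /*also resets BLK_ERASE_CNTER*/
    }
#endif
}

static __s32 _find_coldest_data_block(__u16 *pLowEc)
{
    __s32 i, coldest = -1;
    __u16 lowEc = 0xffff;

    for(i=DATA_BLK_CNT_OF_ZONE-1; i>=0; i--)
    {
        if(DATA_BLK_TBL[i].BlkEraseCnt < lowEc)
        {
            lowEc = DATA_BLK_TBL[i].BlkEraseCnt;
            coldest = i;
        }
    }

    *pLowEc = lowEc;
    return coldest;     /*logical block index with the lowest erase count, -1 if none found*/
}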
/*write block map table to flash*/
static __s32 _write_back_block_map_tbl(__u8 nZone)
{
    __s32 TablePage;
    __u32 TableBlk;
    struct __NandUserData_t UserData[2];
    struct __PhysicOpPara_t param;
    struct __SuperPhyBlkType_t BadBlk, NewBlk;

    /*write back all page map tables within this zone*/
    if (NAND_OP_TRUE != _write_back_all_page_map_tbl(nZone))
    {
        MAPPING_ERR("write back all page map tbl err\n");
        return NAND_OP_FALSE;
    }

    /*set table block number and table page number*/
    TableBlk = NandDriverInfo.ZoneTblPstInfo[nZone].PhyBlkNum;
    TablePage = NandDriverInfo.ZoneTblPstInfo[nZone].TablePst;

    if(TablePage >= PAGE_CNT_OF_SUPER_BLK - 4)
    {
        if(NAND_OP_TRUE != LML_VirtualBlkErase(nZone, TableBlk))
        {
            BadBlk.PhyBlkNum = TableBlk;
            if(NAND_OP_TRUE != LML_BadBlkManage(&BadBlk, CUR_MAP_ZONE, 0, &NewBlk))
            {
                MAPPING_ERR("write back block tbl : bad block manage err erase data block\n");
                return NAND_OP_FALSE;
            }
            TableBlk = NewBlk.PhyBlkNum;
        }

        TablePage = -4;
    }

    TablePage += 4;

    //calculate checksum for data block table and free block table
    ((__u32 *)DATA_BLK_TBL)[1023] = \
        _GetTblCheckSum((__u32 *)DATA_BLK_TBL, (DATA_BLK_CNT_OF_ZONE + FREE_BLK_CNT_OF_ZONE));

    //clear full page data
    MEMSET(LML_PROCESS_TBL_BUF, 0xff, SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);

rewrite:
    /*write back data block and free block map table*/
    MEMSET((void *)&UserData, 0xff, sizeof(struct __NandUserData_t) * 2);
    MEMCPY(LML_PROCESS_TBL_BUF, DATA_BLK_TBL, 2048);

    /*write page 0, need set spare info*/
    if (TablePage == 0)
    {
        UserData[0].LogicInfo = (1<<14) | ((nZone % ZONE_CNT_OF_DIE) << 10) | 0xaa;
    }

    UserData[0].PageStatus = 0x55;
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if (NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE))
    {
        BadBlk.PhyBlkNum = TableBlk;
        if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk, nZone, 0, &NewBlk))
        {
            MAPPING_ERR("write blk map table : bad block manage err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        TablePage = 0;
        goto rewrite;
    }

    MEMCPY(LML_PROCESS_TBL_BUF, &DATA_BLK_TBL[512], 2048);
    TablePage++;
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    MEMSET((void *)&UserData, 0xff, sizeof(struct __NandUserData_t) * 2);
    UserData[0].PageStatus = 0x55;
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if(NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE))
    {
        BadBlk.PhyBlkNum = TableBlk;
        if(NAND_OP_TRUE != LML_BadBlkManage(&BadBlk, nZone, 0, &NewBlk))
        {
            MAPPING_ERR("write blk map table : bad block manage err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        TablePage = 0;
        goto rewrite;
    }

    /*write back log block map table*/
    TablePage++;
    MEMSET(LML_PROCESS_TBL_BUF, 0xff, SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);
    MEMCPY(LML_PROCESS_TBL_BUF, LOG_BLK_TBL, LOG_BLK_CNT_OF_ZONE*sizeof(struct __LogBlkType_t));
    /*cal checksum*/
    ((__u32 *)LML_PROCESS_TBL_BUF)[511] = \
        _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, LOG_BLK_CNT_OF_ZONE*sizeof(struct __LogBlkType_t)/sizeof(__u32));
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if(NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE))
    {
        BadBlk.PhyBlkNum = TableBlk;
        if(NAND_OP_TRUE != LML_BadBlkManage(&BadBlk, nZone, 0, &NewBlk))
        {
            MAPPING_ERR("write blk map table : bad block manage err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        TablePage = 0;
        goto rewrite;
    }

    /*reset zone info*/
    NandDriverInfo.ZoneTblPstInfo[nZone].PhyBlkNum = TableBlk;
    NandDriverInfo.ZoneTblPstInfo[nZone].TablePst = TablePage - 2;

    return NAND_OP_TRUE;
}
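/*
 * Both table writes above protect the buffer with a checksum stored in the last 32-bit slot
 * of the area ([1023] for the 4 KiB data/free table, [511] for the log and page tables).
 * _GetTblCheckSum() itself is not shown in this section; the sketch below assumes it is a
 * plain 32-bit sum over the given number of words, which is how the call sites use it.
 */
static __u32 _tbl_checksum_sketch(const __u32 *pTbl, __u32 nWords)
{
    __u32 i, sum = 0;

    for(i=0; i<nWords; i++)
        sum += pTbl[i];

    return sum;
}

/*
 * Verification on load would then recompute the sum and compare it with the stored slot,
 * e.g. for the log block table page:
 *   ok = (_tbl_checksum_sketch((__u32 *)buf,
 *              LOG_BLK_CNT_OF_ZONE*sizeof(struct __LogBlkType_t)/sizeof(__u32))
 *         == ((__u32 *)buf)[511]);
 */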
static __s32 _write_back_page_map_tbl(__u32 nLogBlkPst)
{
    __u16 TablePage;
    __u32 TableBlk;
    struct __NandUserData_t UserData[2];
    struct __PhysicOpPara_t param;
    struct __SuperPhyBlkType_t BadBlk, NewBlk;

    /*check page position, merge if no free page*/
    TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
    TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
    if (TablePage == PAGE_CNT_OF_SUPER_BLK)
    {
        /*block is full, need merge*/
        if (LML_MergeLogBlk(SPECIAL_MERGE_MODE, LOG_BLK_TBL[nLogBlkPst].LogicBlkNum))
        {
            MAPPING_ERR("write back page tbl : merge err\n");
            return NAND_OP_FALSE;
        }

        if (PAGE_MAP_CACHE->ZoneNum != 0xff)
        {
            /*move merge*/
            TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
        }
        else
            return NAND_OP_TRUE;
    }

rewrite:
    //PRINT("-------------------write back page tbl for blk %x\n",TableBlk);
    /*write page map table*/
    MEMSET((void *)&UserData, 0xff, sizeof(struct __NandUserData_t) * 2);
    UserData[0].PageStatus = 0xaa;
    MEMSET(LML_PROCESS_TBL_BUF, 0xff, SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);
    if(PAGE_CNT_OF_SUPER_BLK >= 512)
    {
        __u32 page;
        for(page = 0; page < PAGE_CNT_OF_SUPER_BLK; page++)
            *((__u16 *)LML_PROCESS_TBL_BUF + page) = PAGE_MAP_TBL[page].PhyPageNum;

        ((__u32 *)LML_PROCESS_TBL_BUF)[511] = \
            _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK*2/(sizeof (__u32)));
    }
    else
    {
        MEMCPY(LML_PROCESS_TBL_BUF, PAGE_MAP_TBL, PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t));
        ((__u32 *)LML_PROCESS_TBL_BUF)[511] = \
            _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t)/(sizeof (__u32)));
    }

    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
    //rewrite:
    LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if (NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE))
    {
        BadBlk.PhyBlkNum = TableBlk;
        if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk, CUR_MAP_ZONE, TablePage, &NewBlk))
        {
            MAPPING_ERR("write page map table : bad block manage err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        LOG_BLK_TBL[nLogBlkPst].PhyBlk = NewBlk;
        goto rewrite;
    }

    LOG_BLK_TBL[nLogBlkPst].LastUsedPage = TablePage;
    PAGE_MAP_CACHE->ZoneNum = 0xff;
    PAGE_MAP_CACHE->LogBlkPst = 0xff;

    return NAND_OP_TRUE;
}
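/*
 * After a successful write the page-map cache header is invalidated by setting ZoneNum and
 * LogBlkPst to 0xff; the same 0xff sentinel is what the merge path checks to decide whether
 * the table still needs to be flushed after a move merge. A tiny sketch of that convention
 * (illustrative helper names):
 */
static void _invalidate_page_map_cache(void)
{
    PAGE_MAP_CACHE->ZoneNum   = 0xff;
    PAGE_MAP_CACHE->LogBlkPst = 0xff;
}

static __s32 _page_map_cache_is_dirty(void)
{
    return (PAGE_MAP_CACHE->ZoneNum != 0xff);
}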
/*! * * \par Description: * This function copy valuable data from log block or dat block to free block, change free to data ,change * data and log to free. * * \param [in] LogNum,serial number within log block space * \return sucess or failed. * \note this function was called when log block is not suit for swap or move. **/ __s32 _free2data_simple_merge(__u32 nlogical) { __u8 InData; __u16 SuperPage; __u16 SrcPage,DstPage; __u32 SrcBlk,DstBlk; struct __SuperPhyBlkType_t DataBlk; struct __SuperPhyBlkType_t FreeBlk; struct __LogBlkType_t LogBlk; struct __PhysicOpPara_t SrcParam,DstParam; /*init block info*/ BMM_GetDataBlk(nlogical,&DataBlk); BMM_GetLogBlk(nlogical,&LogBlk); if (NAND_OP_TRUE != BMM_GetFreeBlk(LOWEST_EC_TYPE, &FreeBlk)) return NAND_OP_FALSE; /*copy data from data block or log block to free block*/ if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE)) { //DBUG_INF("[DBUG] nand lsb type simple merge block %x\n", nlogical); for (SuperPage = 0; SuperPage < PAGE_CNT_OF_LOGIC_BLK; SuperPage++) { /*set source address and destination address*/ DstPage = SuperPage; DstBlk = FreeBlk.PhyBlkNum; SrcPage = PMM_GetCurMapPage(SuperPage); InData = (SrcPage == 0xffff)?1 : 0; if(InData) { SrcBlk = DataBlk.PhyBlkNum; } else { if(SrcPage&(0x1<<15)) SrcBlk = LogBlk.PhyBlk1.PhyBlkNum; else SrcBlk = LogBlk.PhyBlk.PhyBlkNum; } SrcPage = InData?SuperPage:(SrcPage&0x7fff); LML_CalculatePhyOpPar(&SrcParam, CUR_MAP_ZONE,SrcBlk, SrcPage); LML_CalculatePhyOpPar(&DstParam, CUR_MAP_ZONE,DstBlk, DstPage); if (DstPage == 0) { __u8 SeqPlus; //SeqPlus = InData?1:0; SeqPlus = InData?2:1; if(NAND_OP_FALSE == _copy_page0(SrcBlk, SrcPage, DstBlk,SeqPlus)) { LOGICCTL_ERR("simple_merge : copy page 0 err\n"); return NAND_OP_FALSE; } } else { if(NAND_OP_TRUE != PHY_PageCopyback(&SrcParam,&DstParam)) { LOGICCTL_ERR("simple merge : copy back err\n"); return NAND_OP_FALSE; } } if(NAND_OP_TRUE != PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE)) { struct __SuperPhyBlkType_t SubBlk; if(NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk,CUR_MAP_ZONE,DstPage, &SubBlk)) { LOGICCTL_ERR("simgple merge : bad block manage err after copy back\n"); return NAND_OP_FALSE; } FreeBlk = SubBlk; SuperPage -= 1; } } /*move free block to data block*/ BMM_SetDataBlk(nlogical, &FreeBlk); /*move erased data block to free block*/ if ( NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, DataBlk.PhyBlkNum)){ if (NAND_OP_TRUE != LML_BadBlkManage(&DataBlk,CUR_MAP_ZONE,0,NULL)){ LOGICCTL_ERR("swap merge : bad block manage err erase data block\n"); return NAND_OP_FALSE; } } /*move erased data block to free block*/ if (DataBlk.BlkEraseCnt < 0xffff) DataBlk.BlkEraseCnt ++; BMM_SetFreeBlk(&DataBlk); //DBUG_INF("[DBUG] logic %x simple merge: erase data block: %x\n", LogBlk.LogicBlkNum, DataBlk.PhyBlkNum); /*move erased log block to free block*/ if ( NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk.PhyBlkNum)){ if (NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk,CUR_MAP_ZONE,0,NULL)){ LOGICCTL_ERR("simple merge : bad block manage err after erase log block\n"); return NAND_OP_FALSE; } } if (LogBlk.PhyBlk.BlkEraseCnt < 0xffff) LogBlk.PhyBlk.BlkEraseCnt ++; BMM_SetFreeBlk(&LogBlk.PhyBlk); //DBUG_INF("[DBUG] logic %x simple merge: erase log block 0: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk.PhyBlkNum); /*move erased log block to free block*/ if ( NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk1.PhyBlkNum)){ if (NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk1,CUR_MAP_ZONE,0,NULL)){ LOGICCTL_ERR("simple merge : bad block manage err 
after erase log block\n"); return NAND_OP_FALSE; } } if (LogBlk.PhyBlk1.BlkEraseCnt < 0xffff) LogBlk.PhyBlk1.BlkEraseCnt ++; BMM_SetFreeBlk(&LogBlk.PhyBlk1); //DBUG_INF("[DBUG] logic %x simple merge: erase log block 1: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk1.PhyBlkNum); MEMSET(&LogBlk, 0xff, sizeof(struct __LogBlkType_t)); LogBlk.LogBlkType = 0; LogBlk.WriteBlkIndex = 0; LogBlk.ReadBlkIndex = 0; BMM_SetLogBlk(nlogical, &LogBlk); /*clear page map table*/ PMM_ClearCurMapTbl(); } else { for (SuperPage = 0; SuperPage < PAGE_CNT_OF_LOGIC_BLK; SuperPage++) { /*set source address and destination address*/ DstPage = SuperPage; DstBlk = FreeBlk.PhyBlkNum; SrcPage = PMM_GetCurMapPage(SuperPage); InData = (SrcPage == 0xffff)?1 : 0; SrcBlk = InData?DataBlk.PhyBlkNum : LogBlk.PhyBlk.PhyBlkNum; SrcPage = InData?SuperPage:SrcPage; LML_CalculatePhyOpPar(&SrcParam, CUR_MAP_ZONE,SrcBlk, SrcPage); LML_CalculatePhyOpPar(&DstParam, CUR_MAP_ZONE,DstBlk, DstPage); if (DstPage == 0) { __u8 SeqPlus; //SeqPlus = InData?1:0; SeqPlus = InData?2:1; if(NAND_OP_FALSE == _copy_page0(SrcBlk, SrcPage, DstBlk,SeqPlus)) { LOGICCTL_ERR("simple_merge : copy page 0 err\n"); return NAND_OP_FALSE; } } else { if(NAND_OP_TRUE != PHY_PageCopyback(&SrcParam,&DstParam)) { LOGICCTL_ERR("simple merge : copy back err\n"); return NAND_OP_FALSE; } } if(NAND_OP_TRUE != PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE)) { struct __SuperPhyBlkType_t SubBlk; if(NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk,CUR_MAP_ZONE,DstPage, &SubBlk)) { LOGICCTL_ERR("simgple merge : bad block manage err after copy back\n"); return NAND_OP_FALSE; } FreeBlk = SubBlk; SuperPage -= 1; } } /*move free block to data block*/ BMM_SetDataBlk(nlogical, &FreeBlk); /*move erased data block to free block*/ if ( NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, DataBlk.PhyBlkNum)){ if (NAND_OP_TRUE != LML_BadBlkManage(&DataBlk,CUR_MAP_ZONE,0,NULL)){ LOGICCTL_ERR("swap merge : bad block manage err erase data block\n"); return NAND_OP_FALSE; } } /*move erased data block to free block*/ if (DataBlk.BlkEraseCnt < 0xffff) DataBlk.BlkEraseCnt ++; BMM_SetFreeBlk(&DataBlk); /*move erased log block to free block*/ if ( NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk.PhyBlkNum)){ if (NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk,CUR_MAP_ZONE,0,NULL)){ LOGICCTL_ERR("move merge : bad block manage err after erase log block\n"); return NAND_OP_FALSE; } } if (LogBlk.PhyBlk.BlkEraseCnt < 0xffff) LogBlk.PhyBlk.BlkEraseCnt ++; BMM_SetFreeBlk(&LogBlk.PhyBlk); MEMSET(&LogBlk, 0xff, sizeof(struct __LogBlkType_t)); LogBlk.LogBlkType = 0; LogBlk.WriteBlkIndex = 0; LogBlk.ReadBlkIndex = 0; BMM_SetLogBlk(nlogical, &LogBlk); /*clear page map table*/ PMM_ClearCurMapTbl(); } return NAND_OP_TRUE; }
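/*
 * In the LSB-managed branch above, a page-map entry of 0xffff means the sector still lives
 * in the data block; otherwise bit 15 selects between the two physical log blocks (PhyBlk1
 * when set, PhyBlk when clear) and the low 15 bits give the page inside it. A standalone
 * decode sketch of that encoding; the helper name is illustrative.
 */
static void _decode_map_entry(__u16 mapEntry, __u16 logicalPage,
                              const struct __LogBlkType_t *pLogBlk,
                              const struct __SuperPhyBlkType_t *pDataBlk,
                              __u32 *pSrcBlk, __u16 *pSrcPage)
{
    if(mapEntry == 0xffff)
    {
        /*page was never logged: read it from the data block at the same offset*/
        *pSrcBlk  = pDataBlk->PhyBlkNum;
        *pSrcPage = logicalPage;
    }
    else
    {
        /*bit15 selects log block 1, the low 15 bits hold the physical page number*/
        *pSrcBlk  = (mapEntry & (0x1<<15)) ? pLogBlk->PhyBlk1.PhyBlkNum : pLogBlk->PhyBlk.PhyBlkNum;
        *pSrcPage = mapEntry & 0x7fff;
    }
}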
/*! * * \par Description: * This function move valuable data from log block to free block,then replace them. * * \param [in] LogNum,serial number within log block space * \return sucess or failed. * \note this function was called when log block is full, and valid pages is less than half of one block. **/ __s32 _free2log_move_merge(__u32 nlogical) { __u8 bank; __u16 LastUsedPage,SuperPage; __u16 SrcPage,DstPage, SrcBlock, DstBlock; struct __SuperPhyBlkType_t FreeBlk,FreeBlk1; struct __LogBlkType_t LogBlk; struct __PhysicOpPara_t SrcParam,DstParam; struct __NandUserData_t UserData[2]; /*init info of log block , and get one free block */ BMM_GetLogBlk(nlogical, &LogBlk); if (NAND_OP_TRUE != BMM_GetFreeBlk(LOWEST_EC_TYPE, &FreeBlk)) return NAND_OP_FALSE; if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE)) { if (NAND_OP_TRUE != BMM_GetFreeBlk(LOWEST_EC_TYPE, &FreeBlk1)) return NAND_OP_FALSE; //DBUG_INF("[DBUG] lsb move merge, new log0: %x, new log1: %x\n", FreeBlk.PhyBlkNum, FreeBlk1.PhyBlkNum); } SrcParam.MDataPtr = DstParam.MDataPtr = NULL; SrcParam.SDataPtr = DstParam.SDataPtr = NULL; SrcParam.SectBitmap = DstParam.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE; if(SUPPORT_ALIGN_NAND_BNK) { redo: /*copy data bank by bank, for copy-back using*/ LastUsedPage = 0; for (bank = 0; bank < INTERLEAVE_BANK_CNT; bank++) { DstPage = bank; for (SuperPage = bank; SuperPage < PAGE_CNT_OF_SUPER_BLK; SuperPage+= INTERLEAVE_BANK_CNT) { SrcPage = PMM_GetCurMapPage(SuperPage); if (SrcPage != 0xffff) { /*set source and destinate address*/ if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE)) { DBUG_MSG("[DBUG_MSG] _free2log_move_merge 2, select bak log block\n"); DstPage = PMM_CalNextLogPage(DstPage); while((DstPage%INTERLEAVE_BANK_CNT)!=bank) { DstPage++; DstPage = PMM_CalNextLogPage(DstPage); if(DstPage>=PAGE_CNT_OF_SUPER_BLK) break; } if(DstPage >= PAGE_CNT_OF_SUPER_BLK) { LOGICCTL_ERR("move merge : dst page cal error\n"); return NAND_OP_FALSE; } if(SrcPage&(0x1<<15)) SrcBlock = LogBlk.PhyBlk1.PhyBlkNum; else SrcBlock = LogBlk.PhyBlk.PhyBlkNum; DstBlock = FreeBlk.PhyBlkNum; SrcPage &= (~(0x1<<15)); } else { SrcBlock = LogBlk.PhyBlk.PhyBlkNum; DstBlock = FreeBlk.PhyBlkNum; } LML_CalculatePhyOpPar(&SrcParam,CUR_MAP_ZONE, SrcBlock, SrcPage); LML_CalculatePhyOpPar(&DstParam,CUR_MAP_ZONE, DstBlock, DstPage); if (DstPage == 0) { if ( NAND_OP_FALSE == _copy_page0(SrcBlock,SrcPage,FreeBlk.PhyBlkNum,0)) { LOGICCTL_ERR("move merge : copy page 0 err1\n"); return NAND_OP_FALSE; } } else { if (NAND_OP_TRUE != PHY_PageCopyback(&SrcParam,&DstParam)) { LOGICCTL_ERR("move merge : copy back err\n"); return NAND_OP_FALSE; } } if (NAND_OP_TRUE != PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE)) { struct __SuperPhyBlkType_t SubBlk; if (NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk,CUR_MAP_ZONE,0,&SubBlk)) { LOGICCTL_ERR("move merge : bad block manage err after copy back\n"); return NAND_OP_FALSE; } FreeBlk = SubBlk; goto redo; } PMM_SetCurMapPage(SuperPage,DstPage); DstPage += INTERLEAVE_BANK_CNT; } } /*if bank 0 is empty, need write mange info in page 0*/ if ((bank == 0) && (DstPage == 0)) { if ( NAND_OP_FALSE == _copy_page0(LogBlk.PhyBlk.PhyBlkNum,0,FreeBlk.PhyBlkNum,0)) { LOGICCTL_ERR("move merge : copy page 0 err2\n"); return NAND_OP_FALSE; } LML_CalculatePhyOpPar(&DstParam, CUR_MAP_ZONE, FreeBlk.PhyBlkNum, 0); if (NAND_OP_TRUE != PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE)) { struct __SuperPhyBlkType_t SubBlk; if (NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk,CUR_MAP_ZONE,0,&SubBlk)) { LOGICCTL_ERR("move merge : 
bad block manage err after copy back\n"); return NAND_OP_FALSE; } FreeBlk = SubBlk; goto redo; } } /*reset LastUsedPage*/ if ((DstPage - INTERLEAVE_BANK_CNT) > LastUsedPage) { LastUsedPage = DstPage - INTERLEAVE_BANK_CNT; } } } else { /*copy data page by page*/ DstPage = 0; LastUsedPage = 0; for (SuperPage = 0; SuperPage < PAGE_CNT_OF_LOGIC_BLK; SuperPage++) { SrcPage = PMM_GetCurMapPage(SuperPage); if (SrcPage != 0xffff) { /*set source and destinate address*/ if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE)) { DBUG_MSG("[DBUG_MSG] _free2log_move_merge 3, select bak log block\n"); DstPage = PMM_CalNextLogPage(DstPage); if(DstPage >= PAGE_CNT_OF_SUPER_BLK) { LOGICCTL_ERR("move merge : dst page cal error\n"); return NAND_OP_FALSE; } if(SrcPage&(0x1<<15)) SrcBlock = LogBlk.PhyBlk1.PhyBlkNum; else SrcBlock = LogBlk.PhyBlk.PhyBlkNum; DstBlock = FreeBlk.PhyBlkNum; SrcPage &= 0x7fff; } else { SrcBlock = LogBlk.PhyBlk.PhyBlkNum; DstBlock = FreeBlk.PhyBlkNum; } LML_CalculatePhyOpPar(&SrcParam,CUR_MAP_ZONE, SrcBlock, SrcPage); LML_CalculatePhyOpPar(&DstParam,CUR_MAP_ZONE, DstBlock, DstPage); if (0 == DstPage) { if ( NAND_OP_FALSE == _copy_page0(SrcBlock,SrcPage,FreeBlk.PhyBlkNum,0)) { LOGICCTL_ERR("move merge : copy page 0 err1\n"); return NAND_OP_FALSE; } } else { SrcParam.MDataPtr = DstParam.MDataPtr = LML_TEMP_BUF; SrcParam.SDataPtr = DstParam.SDataPtr = (void *)&UserData; MEMSET((void *)&UserData,0xff,sizeof(struct __NandUserData_t) * 2); SrcParam.SectBitmap = DstParam.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE; if (LML_VirtualPageRead(&SrcParam) < 0){ LOGICCTL_ERR("move merge : read main data err\n"); return NAND_OP_FALSE; } if (NAND_OP_TRUE != LML_VirtualPageWrite(&DstParam)){ LOGICCTL_ERR("move merge : write err\n"); return NAND_OP_FALSE; } } if (NAND_OP_TRUE != PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE)) { struct __SuperPhyBlkType_t SubBlk; if (NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk,CUR_MAP_ZONE,LastUsedPage,&SubBlk)) { LOGICCTL_ERR("move merge : bad block manage err after copy back\n"); return NAND_OP_FALSE; } FreeBlk = SubBlk; SuperPage -= 1; } PMM_SetCurMapPage(SuperPage,DstPage); LastUsedPage = DstPage; DstPage++; } } } /*erase log block*/ if(NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk.PhyBlkNum)) { if(NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk,CUR_MAP_ZONE,0,NULL)) { LOGICCTL_ERR("move merge : bad block manage err after erase log block\n"); return NAND_OP_FALSE; } } /*move erased log block to free block*/ if(LogBlk.PhyBlk.BlkEraseCnt < 0xffff) { LogBlk.PhyBlk.BlkEraseCnt ++; } BMM_SetFreeBlk(&LogBlk.PhyBlk); if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE)) { //DBUG_INF("[DBUG] logic %x move merge: erase log block 0: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk.PhyBlkNum); if(NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk1.PhyBlkNum)) { if(NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk1,CUR_MAP_ZONE,0,NULL)) { LOGICCTL_ERR("move merge : bad block manage err after erase log block\n"); return NAND_OP_FALSE; } } /*move erased log block to free block*/ if(LogBlk.PhyBlk1.BlkEraseCnt < 0xffff) { LogBlk.PhyBlk1.BlkEraseCnt ++; } BMM_SetFreeBlk(&LogBlk.PhyBlk1); //DBUG_INF("[DBUG] logic %x move merge: erase log block 1: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk1.PhyBlkNum); } /*move free block to log block*/ LogBlk.PhyBlk.PhyBlkNum= FreeBlk.PhyBlkNum; LogBlk.PhyBlk.BlkEraseCnt= FreeBlk.BlkEraseCnt; if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE)) { DBUG_MSG("[DBUG_MSG] _free2log_move_merge 4, select bak log 
block\n"); LogBlk.PhyBlk1.PhyBlkNum= FreeBlk1.PhyBlkNum; LogBlk.PhyBlk1.BlkEraseCnt= FreeBlk1.BlkEraseCnt; LogBlk.WriteBlkIndex = 0; LogBlk.ReadBlkIndex = 0; DBUG_MSG("[DBUG] move merge to new log block, logic block: %x, logblock0: %x, logblock1: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk.PhyBlkNum, LogBlk.PhyBlk1.PhyBlkNum); } else { LogBlk.LogBlkType = 0; LogBlk.WriteBlkIndex = 0; LogBlk.ReadBlkIndex = 0; } LogBlk.LastUsedPage = LastUsedPage; BMM_SetLogBlk(nlogical, &LogBlk); //if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE)) // DBUG_INF("logic %x move merge, lastusedpage: %x\n", LogBlk.LogicBlkNum, LogBlk.LastUsedPage); return NAND_OP_TRUE; }
static __s32 _write_back_page_map_tbl(__u32 nLogBlkPst)
{
    __u16 TablePage;
    __u32 TableBlk;
    struct __NandUserData_t UserData[2];
    struct __PhysicOpPara_t param, tmpPage0;
    struct __SuperPhyBlkType_t BadBlk, NewBlk;
    __s32 result;

    /*check page position, merge if no free page*/
    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
    {
        TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
        TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;

        DBUG_MSG("[DBUG] _write_back_page_map_tbl, log block: %x, bak log block %x\n",
                LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);
        DBUG_MSG("[DBUG] _write_back_page_map_tbl, select bak log block\n");

        TablePage = PMM_CalNextLogPage(TablePage);

        if((TablePage >= PAGE_CNT_OF_SUPER_BLK)&&(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 0))
        {
            DBUG_MSG("[DBUG] _write_back_page_map_tbl, change to log block 1, phyblock1: %x\n",
                    LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);
            LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex = 1;
            TablePage = TablePage - PAGE_CNT_OF_SUPER_BLK;
        }

        if(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 1)
            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum;
        else
            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;

        if (TablePage >= PAGE_CNT_OF_SUPER_BLK)
        {
            //DBUG_INF("[DBUG] _write_back_page_map_tbl, log block full, need merge\n");
            /*block is full, need merge*/
            if (LML_MergeLogBlk(SPECIAL_MERGE_MODE, LOG_BLK_TBL[nLogBlkPst].LogicBlkNum))
            {
                MAPPING_ERR("write back page tbl : merge err\n");
                return NAND_OP_FALSE;
            }

            DBUG_MSG("[DBUG] _write_back_page_map_tbl, log block merge end\n");

            if (PAGE_MAP_CACHE->ZoneNum != 0xff)
            {
                /*move merge*/
                TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
                TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
                TablePage = PMM_CalNextLogPage(TablePage);
                //DBUG_INF("[DBUG] _write_back_page_map_tbl, after move merge, table block: %x, table page %x\n", TableBlk, TablePage);
            }
            else
                return NAND_OP_TRUE;
        }
    }
    else
    {
        TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
        TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
        if (TablePage == PAGE_CNT_OF_SUPER_BLK)
        {
            /*block is full, need merge*/
            if (LML_MergeLogBlk(SPECIAL_MERGE_MODE, LOG_BLK_TBL[nLogBlkPst].LogicBlkNum))
            {
                MAPPING_ERR("write back page tbl : merge err\n");
                return NAND_OP_FALSE;
            }

            if (PAGE_MAP_CACHE->ZoneNum != 0xff)
            {
                /*move merge*/
                TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
                TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
            }
            else
                return NAND_OP_TRUE;
        }
    }

rewrite:
    //PRINT("-------------------write back page tbl for blk %x\n",TableBlk);
    /*write page map table*/
    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
    {
        if((TablePage == 0)&&(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 1))
        {
            MEMSET((void *)(&UserData[0]), 0xff, sizeof(struct __NandUserData_t) * 2);

            //log page is the page0 of the logblk1, should copy page0 of logblock0, and skip the page
            LML_CalculatePhyOpPar(&tmpPage0, CUR_MAP_ZONE, LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, 0);
            tmpPage0.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
            tmpPage0.MDataPtr = LML_TEMP_BUF;
            tmpPage0.SDataPtr = (void *)UserData;
            result = LML_VirtualPageRead(&tmpPage0);
            if(result < 0)
            {
                LOGICCTL_ERR("[LOGICCTL_ERR] Get log age of data block failed when write logical page, Err:0x%x!\n", result);
                return -ERR_PHYSIC;
            }

            //log page is the page0 of the logblk1, should skip the page
            UserData[0].LogType = LSB_TYPE|(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex<<4);
            UserData[1].LogType = LSB_TYPE|(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex<<4);
            LML_CalculatePhyOpPar(&tmpPage0, CUR_MAP_ZONE, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum, 0);
            tmpPage0.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
            tmpPage0.MDataPtr = LML_TEMP_BUF;
            tmpPage0.SDataPtr = (void *)UserData;
            result = LML_VirtualPageWrite(&tmpPage0);
            TablePage++;
        }
    }

    MEMSET((void *)(&UserData[0]), 0xff, sizeof(struct __NandUserData_t) * 2);
    UserData[0].PageStatus = 0xaa;
    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
    {
        UserData[0].LogType = LSB_TYPE|(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex<<4);
        UserData[1].LogType = LSB_TYPE|(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex<<4);
    }
    else
    {
        UserData[0].LogType = 0xff;
        UserData[1].LogType = 0xff;
    }

    //if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE)&&(TablePage== 0))
    //{
    //    DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, TablePage: %x, TableBlk: %x\n", TablePage, TableBlk);
    //    DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, logicNum: %x, log0: %x, log1: %x\n", LOG_BLK_TBL[nLogBlkPst].LogicBlkNum,LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);
    //    DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, logicinfo: %x, logicpage: %x\n", UserData[0].LogicInfo, UserData[0].LogicPageNum);
    //    DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, logtype: %x, pagestatus: %x\n", UserData[0].LogType, UserData[0].PageStatus);
    //}

    MEMSET(LML_PROCESS_TBL_BUF, 0xff, SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);
    if(PAGE_CNT_OF_SUPER_BLK >= 512)
    {
        __u32 page;
        for(page = 0; page < PAGE_CNT_OF_SUPER_BLK; page++)
            *((__u16 *)LML_PROCESS_TBL_BUF + page) = PAGE_MAP_TBL[page].PhyPageNum;

        ((__u32 *)LML_PROCESS_TBL_BUF)[511] = \
            _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK*2/(sizeof (__u32)));
    }
    else
    {
        MEMCPY(LML_PROCESS_TBL_BUF, PAGE_MAP_TBL, PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t));
        ((__u32 *)LML_PROCESS_TBL_BUF)[511] = \
            _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t)/(sizeof (__u32)));
    }

    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
    //rewrite:
    LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if (NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE))
    {
        BadBlk.PhyBlkNum = TableBlk;
        if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk, CUR_MAP_ZONE, TablePage, &NewBlk))
        {
            MAPPING_ERR("write page map table : bad block manage err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        LOG_BLK_TBL[nLogBlkPst].PhyBlk = NewBlk;
        goto rewrite;
    }

    LOG_BLK_TBL[nLogBlkPst].LastUsedPage = TablePage;
    PAGE_MAP_CACHE->ZoneNum = 0xff;
    PAGE_MAP_CACHE->LogBlkPst = 0xff;

    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
        DBUG_MSG("[DBUG] _write_back_page_map_tbl end, lastusedpage: %x, write_index: %x\n",
                LOG_BLK_TBL[nLogBlkPst].LastUsedPage, LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex);

    return NAND_OP_TRUE;
}
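/*
 * The LSB branch above treats the pair (PhyBlk, PhyBlk1) as one contiguous log area: page
 * indices returned by PMM_CalNextLogPage() that run past PAGE_CNT_OF_SUPER_BLK roll over
 * into the second block and flip WriteBlkIndex. A small sketch of that address split,
 * mirroring the handling above; the helper name is illustrative.
 */
static void _resolve_lsb_log_page(struct __LogBlkType_t *pLogBlk, __u16 *pPage, __u32 *pBlk)
{
    if((*pPage >= PAGE_CNT_OF_SUPER_BLK) && (pLogBlk->WriteBlkIndex == 0))
    {
        /*first block exhausted: continue in the bak log block*/
        pLogBlk->WriteBlkIndex = 1;
        *pPage -= PAGE_CNT_OF_SUPER_BLK;
    }

    *pBlk = (pLogBlk->WriteBlkIndex == 1) ? pLogBlk->PhyBlk1.PhyBlkNum
                                          : pLogBlk->PhyBlk.PhyBlkNum;
}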