/*!
*
* \par  Description:
*       This function copies the valid data from the data block or log block to a free block,
*       then makes the free block the new data block and returns the old data and log blocks
*       to the free block pool.
*
* \param  [in]  nlogical, serial number within the log block space
* \return       success or failed.
* \note         This function is called when the log block is not suitable for a swap or move merge.
**/
__s32 _free2data_simple_merge(__u32 nlogical)
{
    __u8    InData;
    __u16   SuperPage;
    __u16   SrcPage, DstPage;
    __u32   SrcBlk, DstBlk;
    struct __SuperPhyBlkType_t DataBlk;
    struct __SuperPhyBlkType_t FreeBlk;
    struct __LogBlkType_t LogBlk;
    struct __PhysicOpPara_t SrcParam, DstParam;

    /*init block info*/
    BMM_GetDataBlk(nlogical, &DataBlk);
    BMM_GetLogBlk(nlogical, &LogBlk);
    if (NAND_OP_TRUE != BMM_GetFreeBlk(LOWEST_EC_TYPE, &FreeBlk))
        return NAND_OP_FALSE;

    /*copy data from data block or log block to free block*/
    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LogBlk.LogBlkType == LSB_TYPE))
    {
        //DBUG_INF("[DBUG] nand lsb type simple merge block %x\n", nlogical);
        for (SuperPage = 0; SuperPage < PAGE_CNT_OF_LOGIC_BLK; SuperPage++)
        {
            /*set source address and destination address*/
            DstPage = SuperPage;
            DstBlk = FreeBlk.PhyBlkNum;
            SrcPage = PMM_GetCurMapPage(SuperPage);
            InData = (SrcPage == 0xffff) ? 1 : 0;
            if (InData)
            {
                SrcBlk = DataBlk.PhyBlkNum;
            }
            else
            {
                /*bit 15 of the map entry selects which of the two log blocks holds the page*/
                if (SrcPage & (0x1 << 15))
                    SrcBlk = LogBlk.PhyBlk1.PhyBlkNum;
                else
                    SrcBlk = LogBlk.PhyBlk.PhyBlkNum;
            }
            SrcPage = InData ? SuperPage : (SrcPage & 0x7fff);

            LML_CalculatePhyOpPar(&SrcParam, CUR_MAP_ZONE, SrcBlk, SrcPage);
            LML_CalculatePhyOpPar(&DstParam, CUR_MAP_ZONE, DstBlk, DstPage);

            if (DstPage == 0)
            {
                __u8 SeqPlus;
                //SeqPlus = InData ? 1 : 0;
                SeqPlus = InData ? 2 : 1;
                if (NAND_OP_FALSE == _copy_page0(SrcBlk, SrcPage, DstBlk, SeqPlus))
                {
                    LOGICCTL_ERR("simple merge : copy page 0 err\n");
                    return NAND_OP_FALSE;
                }
            }
            else
            {
                if (NAND_OP_TRUE != PHY_PageCopyback(&SrcParam, &DstParam))
                {
                    LOGICCTL_ERR("simple merge : copy back err\n");
                    return NAND_OP_FALSE;
                }
            }

            if (NAND_OP_TRUE != PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE))
            {
                struct __SuperPhyBlkType_t SubBlk;
                if (NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk, CUR_MAP_ZONE, DstPage, &SubBlk))
                {
                    LOGICCTL_ERR("simple merge : bad block manage err after copy back\n");
                    return NAND_OP_FALSE;
                }
                FreeBlk = SubBlk;
                SuperPage -= 1;
            }
        }

        /*move free block to data block*/
        BMM_SetDataBlk(nlogical, &FreeBlk);

        /*move erased data block to free block*/
        if (NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, DataBlk.PhyBlkNum))
        {
            if (NAND_OP_TRUE != LML_BadBlkManage(&DataBlk, CUR_MAP_ZONE, 0, NULL))
            {
                LOGICCTL_ERR("simple merge : bad block manage err after erase data block\n");
                return NAND_OP_FALSE;
            }
        }
        else
        {
            if (DataBlk.BlkEraseCnt < 0xffff)
                DataBlk.BlkEraseCnt++;
            BMM_SetFreeBlk(&DataBlk);
            //DBUG_INF("[DBUG] logic %x simple merge: erase data block: %x\n", LogBlk.LogicBlkNum, DataBlk.PhyBlkNum);
        }

        /*move erased log block 0 to free block*/
        if (NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk.PhyBlkNum))
        {
            if (NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk, CUR_MAP_ZONE, 0, NULL))
            {
                LOGICCTL_ERR("simple merge : bad block manage err after erase log block\n");
                return NAND_OP_FALSE;
            }
        }
        else
        {
            if (LogBlk.PhyBlk.BlkEraseCnt < 0xffff)
                LogBlk.PhyBlk.BlkEraseCnt++;
            BMM_SetFreeBlk(&LogBlk.PhyBlk);
            //DBUG_INF("[DBUG] logic %x simple merge: erase log block 0: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk.PhyBlkNum);
        }

        /*move erased log block 1 to free block*/
        if (NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk1.PhyBlkNum))
        {
            if (NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk1, CUR_MAP_ZONE, 0, NULL))
            {
                LOGICCTL_ERR("simple merge : bad block manage err after erase log block\n");
                return NAND_OP_FALSE;
            }
        }
        else
        {
            if (LogBlk.PhyBlk1.BlkEraseCnt < 0xffff)
                LogBlk.PhyBlk1.BlkEraseCnt++;
            BMM_SetFreeBlk(&LogBlk.PhyBlk1);
            //DBUG_INF("[DBUG] logic %x simple merge: erase log block 1: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk1.PhyBlkNum);
        }

        /*clear log block item*/
        MEMSET(&LogBlk, 0xff, sizeof(struct __LogBlkType_t));
        LogBlk.LogBlkType = 0;
        LogBlk.WriteBlkIndex = 0;
        LogBlk.ReadBlkIndex = 0;
        BMM_SetLogBlk(nlogical, &LogBlk);

        /*clear page map table*/
        PMM_ClearCurMapTbl();
    }
    else
    {
        for (SuperPage = 0; SuperPage < PAGE_CNT_OF_LOGIC_BLK; SuperPage++)
        {
            /*set source address and destination address*/
            DstPage = SuperPage;
            DstBlk = FreeBlk.PhyBlkNum;
            SrcPage = PMM_GetCurMapPage(SuperPage);
            InData = (SrcPage == 0xffff) ? 1 : 0;
            SrcBlk = InData ? DataBlk.PhyBlkNum : LogBlk.PhyBlk.PhyBlkNum;
            SrcPage = InData ? SuperPage : SrcPage;

            LML_CalculatePhyOpPar(&SrcParam, CUR_MAP_ZONE, SrcBlk, SrcPage);
            LML_CalculatePhyOpPar(&DstParam, CUR_MAP_ZONE, DstBlk, DstPage);

            if (DstPage == 0)
            {
                __u8 SeqPlus;
                //SeqPlus = InData ? 1 : 0;
                SeqPlus = InData ? 2 : 1;
                if (NAND_OP_FALSE == _copy_page0(SrcBlk, SrcPage, DstBlk, SeqPlus))
                {
                    LOGICCTL_ERR("simple merge : copy page 0 err\n");
                    return NAND_OP_FALSE;
                }
            }
            else
            {
                if (NAND_OP_TRUE != PHY_PageCopyback(&SrcParam, &DstParam))
                {
                    LOGICCTL_ERR("simple merge : copy back err\n");
                    return NAND_OP_FALSE;
                }
            }

            if (NAND_OP_TRUE != PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE))
            {
                struct __SuperPhyBlkType_t SubBlk;
                if (NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk, CUR_MAP_ZONE, DstPage, &SubBlk))
                {
                    LOGICCTL_ERR("simple merge : bad block manage err after copy back\n");
                    return NAND_OP_FALSE;
                }
                FreeBlk = SubBlk;
                SuperPage -= 1;
            }
        }

        /*move free block to data block*/
        BMM_SetDataBlk(nlogical, &FreeBlk);

        /*move erased data block to free block*/
        if (NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, DataBlk.PhyBlkNum))
        {
            if (NAND_OP_TRUE != LML_BadBlkManage(&DataBlk, CUR_MAP_ZONE, 0, NULL))
            {
                LOGICCTL_ERR("simple merge : bad block manage err after erase data block\n");
                return NAND_OP_FALSE;
            }
        }
        else
        {
            if (DataBlk.BlkEraseCnt < 0xffff)
                DataBlk.BlkEraseCnt++;
            BMM_SetFreeBlk(&DataBlk);
        }

        /*move erased log block to free block*/
        if (NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk.PhyBlkNum))
        {
            if (NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk, CUR_MAP_ZONE, 0, NULL))
            {
                LOGICCTL_ERR("simple merge : bad block manage err after erase log block\n");
                return NAND_OP_FALSE;
            }
        }
        else
        {
            if (LogBlk.PhyBlk.BlkEraseCnt < 0xffff)
                LogBlk.PhyBlk.BlkEraseCnt++;
            BMM_SetFreeBlk(&LogBlk.PhyBlk);
        }

        /*clear log block item*/
        MEMSET(&LogBlk, 0xff, sizeof(struct __LogBlkType_t));
        LogBlk.LogBlkType = 0;
        LogBlk.WriteBlkIndex = 0;
        LogBlk.ReadBlkIndex = 0;
        BMM_SetLogBlk(nlogical, &LogBlk);

        /*clear page map table*/
        PMM_ClearCurMapTbl();
    }

    return NAND_OP_TRUE;
}
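/*
 * For reference, the LSB-type branch above resolves each page-map entry as follows:
 * 0xffff means the logical page still lives in the data block; otherwise bit 15
 * selects which of the two log blocks holds it and the low 15 bits give the physical
 * page number. The standalone sketch below only restates that decoding; the type
 * __MapEntrySrc_t and the helper _decode_map_entry are illustrative names, not part
 * of the driver. Example: _decode_map_entry(0x8003, 7) yields log block 1, page 3.
 */
struct __MapEntrySrc_t {
    __u8    InDataBlk;      /* 1: page still lives in the data block             */
    __u8    InLogBlk1;      /* 1: page lives in log block 1, 0: in log block 0   */
    __u16   PhyPage;        /* physical page number inside the source block      */
};

static struct __MapEntrySrc_t _decode_map_entry(__u16 MapEntry, __u16 LogicPage)
{
    struct __MapEntrySrc_t Src;

    Src.InDataBlk = (MapEntry == 0xffff) ? 1 : 0;                           /* unmapped entry -> data block */
    Src.InLogBlk1 = (!Src.InDataBlk && (MapEntry & (0x1 << 15))) ? 1 : 0;   /* bit 15 -> log block 1        */
    Src.PhyPage   = Src.InDataBlk ? LogicPage : (MapEntry & 0x7fff);        /* low 15 bits -> physical page */

    return Src;
}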
/*!
*
* \par  Description:
*       This function copies the valid data from the data block to the log block, then makes the
*       log block the new data block and returns the old data block to the free block pool.
*
* \param  [in]  nlogical, serial number within the log block space
* \return       success or failed.
* \note         This function is called when the log block was written in order, that is, every
*               physical page number is the same as its logical page number.
**/
__s32 _log2data_swap_merge(__u32 nlogical)
{
    __u16 LastUsedPage, SuperPage;
    struct __SuperPhyBlkType_t DataBlk;
    struct __LogBlkType_t LogBlk;
    struct __PhysicOpPara_t SrcParam, DstParam;

    /*init info of data block and log block*/
    BMM_GetDataBlk(nlogical, &DataBlk);
    BMM_GetLogBlk(nlogical, &LogBlk);
    LastUsedPage = LogBlk.LastUsedPage;

    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LogBlk.LogBlkType == LSB_TYPE))
    {
        LOGICCTL_ERR("swap merge : LSB type does not support swap merge\n");
        return NAND_OP_FALSE;
    }

    /*copy data from data block to log block*/
    for (SuperPage = LastUsedPage + 1; SuperPage < PAGE_CNT_OF_SUPER_BLK; SuperPage++)
    {
        /*set source address and destination address*/
        LML_CalculatePhyOpPar(&SrcParam, CUR_MAP_ZONE, DataBlk.PhyBlkNum, SuperPage);
        LML_CalculatePhyOpPar(&DstParam, CUR_MAP_ZONE, LogBlk.PhyBlk.PhyBlkNum, SuperPage);

        if (NAND_OP_TRUE != PHY_PageCopyback(&SrcParam, &DstParam))
        {
            LOGICCTL_ERR("swap merge : copy back err\n");
            return NAND_OP_FALSE;
        }

        if (NAND_OP_TRUE != PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE))
        {
            struct __SuperPhyBlkType_t SubBlk;
            if (NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk, CUR_MAP_ZONE, SuperPage, &SubBlk))
            {
                LOGICCTL_ERR("swap merge : bad block manage err after copy back\n");
                return NAND_OP_FALSE;
            }
            LogBlk.PhyBlk = SubBlk;
            SuperPage -= 1;
        }
    }

    /*move log block to data block*/
    BMM_SetDataBlk(nlogical, &LogBlk.PhyBlk);

    /*clear log block item*/
    MEMSET(&LogBlk, 0xff, sizeof(struct __LogBlkType_t));
    LogBlk.LogBlkType = 0;
    LogBlk.WriteBlkIndex = 0;
    LogBlk.ReadBlkIndex = 0;
    BMM_SetLogBlk(nlogical, &LogBlk);

    /*erase the old data block*/
    if (NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, DataBlk.PhyBlkNum))
    {
        if (NAND_OP_TRUE != LML_BadBlkManage(&DataBlk, CUR_MAP_ZONE, 0, NULL))
        {
            LOGICCTL_ERR("swap merge : bad block manage err after erase data block\n");
            return NAND_OP_FALSE;
        }
    }
    else
    {
        /*move erased data block to free block*/
        if (DataBlk.BlkEraseCnt < 0xffff)
            DataBlk.BlkEraseCnt++;
        BMM_SetFreeBlk(&DataBlk);
    }

    /*clear page map table*/
    PMM_ClearCurMapTbl();

    return NAND_OP_TRUE;
}
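/*
 * The notes in the two merge functions above imply a selection step in the caller:
 * a log block whose pages were written strictly in order (and that is not
 * LSB-managed) can take the cheap swap merge, while anything else falls back to the
 * simple merge. The fragment below is only a hedged sketch of that decision; the
 * driver's real dispatcher is not part of this section, and _is_log_blk_in_order is
 * a hypothetical helper, not a driver API.
 */
static __s32 _merge_log_blk_sketch(__u32 nlogical)
{
    struct __LogBlkType_t LogBlk;

    BMM_GetLogBlk(nlogical, &LogBlk);

    /*LSB-managed log blocks never qualify for the swap merge*/
    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LogBlk.LogBlkType == LSB_TYPE))
        return _free2data_simple_merge(nlogical);

    /*pages written in order: adopting the log block as the data block is cheapest*/
    if (_is_log_blk_in_order(&LogBlk))
        return _log2data_swap_merge(nlogical);

    /*otherwise gather the valid pages into a fresh free block*/
    return _free2data_simple_merge(nlogical);
}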
/*
************************************************************************************************************************
*                               NAND FLASH LOGIC MANAGE LAYER WEAR-LEVELLING
*
* Description: Equalize the erase cycles among all physical blocks.
*
* Arguments  : none
*
* Return     : wear-levelling result;
*                  =  0     wear-levelling successful;
*                  = -1     wear-levelling failed.
*
* Notes      : The erase cycle of a physical block is limited; if the erase cycle exceeds this
*              limit, the physical block may become invalid. So a policy is needed to spread
*              the millions of erase cycles evenly over every physical block.
************************************************************************************************************************
*/
__s32 LML_WearLevelling(void)
{
#if CFG_SUPPORT_WEAR_LEVELLING
    __s32 i, result;
    __u32 tmpLogicBlk;
    __u16 tmpLowEc = 0xffff;
    struct __SuperPhyBlkType_t tmpFreeBlk, tmpDataBlk;
    struct __NandUserData_t tmpSpare[2];
    struct __PhysicOpPara_t tmpSrcPage, tmpDstPage;

    BLK_ERASE_CNTER = 0;

    //scan the data block table to look for the physical block with the lowest erase count
    for (i = DATA_BLK_CNT_OF_ZONE - 1; i >= 0; i--)
    {
        if (DATA_BLK_TBL[i].BlkEraseCnt < tmpLowEc)
        {
            tmpLowEc = DATA_BLK_TBL[i].BlkEraseCnt;
            tmpLogicBlk = i;
        }
    }

    //get a free block which has the highest erase count
    result = BMM_GetFreeBlk(HIGHEST_EC_TYPE, &tmpFreeBlk);
    if (result < 0)
    {
        LOGICCTL_ERR("[LOGICCTL_ERR] Get free block failed when doing wear-levelling!\n");
        return -1;
    }

    //clear the block erase counter
    BLK_ERASE_CNTER = 0;

    if (tmpLowEc >= tmpFreeBlk.BlkEraseCnt)
    {
        if (tmpLowEc == 0xffff)
        {
            //the lowest erase count has reached the highest value, clear the erase count of every physical block
            for (i = 0; i < DATA_BLK_CNT_OF_ZONE; i++)
            {
                //clear the erase count for the data block
                DATA_BLK_TBL[i].BlkEraseCnt = 0x00;
            }
            for (i = 0; i < FREE_BLK_CNT_OF_ZONE; i++)
            {
                //clear the erase count for the free block
                if (FREE_BLK_TBL[i].PhyBlkNum != 0xffff)
                {
                    FREE_BLK_TBL[i].BlkEraseCnt = 0x00;
                }
            }
            for (i = 0; i < MAX_LOG_BLK_CNT; i++)
            {
                //clear the erase count for the log block
                if (LOG_BLK_TBL[i].LogicBlkNum != 0xffff)
                {
                    LOG_BLK_TBL[i].PhyBlk.BlkEraseCnt = 0x00;
                }
            }
        }

        BMM_SetFreeBlk(&tmpFreeBlk);
        return 0;
    }

    BMM_GetDataBlk(tmpLogicBlk, &tmpDataBlk);
    result = BMM_GetLogBlk(tmpLogicBlk, NULL);
    if (result < 0)
    {
        //check if the data block is empty
        LML_CalculatePhyOpPar(&tmpSrcPage, CUR_MAP_ZONE, tmpDataBlk.PhyBlkNum, 0);
        tmpSrcPage.SectBitmap = 0x03;
        tmpSrcPage.MDataPtr = LML_TEMP_BUF;
        tmpSrcPage.SDataPtr = (void *)tmpSpare;
        LML_VirtualPageRead(&tmpSrcPage);

        if (tmpSpare[0].LogicInfo != 0xffff)
        {
            //need to copy data from the data block to the free block
            tmpSrcPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
            tmpSrcPage.MDataPtr = NULL;
            tmpSrcPage.SDataPtr = NULL;
            tmpDstPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
            tmpDstPage.MDataPtr = NULL;
            tmpDstPage.SDataPtr = NULL;
            for (i = 0; i < PAGE_CNT_OF_SUPER_BLK; i++)
            {
                LML_CalculatePhyOpPar(&tmpSrcPage, CUR_MAP_ZONE, tmpDataBlk.PhyBlkNum, i);
                LML_CalculatePhyOpPar(&tmpDstPage, CUR_MAP_ZONE, tmpFreeBlk.PhyBlkNum, i);
                PHY_PageCopyback(&tmpSrcPage, &tmpDstPage);

                //check page copy result
                result = PHY_SynchBank(tmpDstPage.BankNum, SYNC_CHIP_MODE);
                if (result < 0)
                {
                    LOGICCTL_DBG("[LOGICCTL_DBG] Copy page failed when doing wear-levelling!\n");
                    result = LML_BadBlkManage(&tmpFreeBlk, CUR_MAP_ZONE, 0, NULL);
                    if (result < 0)
                    {
                        LOGICCTL_ERR("[LOGICCTL_ERR] Bad block manage failed when doing wear-levelling!\n");
                        return -1;
                    }
                    return 0;
                }
            }
        }

        //set the data block item by the free block
        BMM_SetDataBlk(tmpLogicBlk, &tmpFreeBlk);

        if (tmpSpare[0].LogicInfo != 0xffff)
        {
            //erase the old data block to make a new free block
            result = LML_VirtualBlkErase(CUR_MAP_ZONE, tmpDataBlk.PhyBlkNum);
            if (result < 0)
            {
                LOGICCTL_DBG("[LOGICCTL_DBG] Erase super block failed when doing wear-levelling!\n");
                result = LML_BadBlkManage(&tmpDataBlk, CUR_MAP_ZONE, 0, NULL);
                if (result < 0)
                {
                    LOGICCTL_ERR("[LOGICCTL_ERR] Bad block manage failed when doing wear-levelling!\n");
                    return -1;
                }
                return 0;
            }
        }

        //set the data block into the free block table
        tmpDataBlk.BlkEraseCnt++;
        BMM_SetFreeBlk(&tmpDataBlk);
    }
    else
    {
        //set the free block back to the free block table
        BMM_SetFreeBlk(&tmpFreeBlk);
    }

#endif
    return 0;
}
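/*
 * LML_WearLevelling() resets BLK_ERASE_CNTER itself, but the trigger that decides
 * when to run a wear-levelling pass lives elsewhere in the logic manage layer. The
 * fragment below is only a sketch of such a trigger, assuming a hypothetical
 * threshold constant WLV_ERASE_THRESHOLD and wrapper function that are not defined
 * in this section.
 */
#define WLV_ERASE_THRESHOLD     0x800   /* hypothetical value, for illustration only */

static void _check_wear_levelling_sketch(void)
{
    /* BLK_ERASE_CNTER accumulates block erases since the last pass; once it crosses
       the threshold, run one wear-levelling step, which clears the counter again
       (see LML_WearLevelling above). */
    if (BLK_ERASE_CNTER >= WLV_ERASE_THRESHOLD)
    {
        if (LML_WearLevelling() < 0)
        {
            LOGICCTL_DBG("[LOGICCTL_DBG] wear-levelling step failed\n");
        }
    }
}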