/*
************************************************************************************************************************
*                       NAND FLASH LOGIC MANAGE LAYER READ-RECLAIM
*
*Description: Repair the logical block whose data has reached the correction limit of the
*             hardware ECC module.
*
*Arguments  : nPage     the page address that needs to be repaired.
*
*Return     : read-reclaim result;
*               = 0     do read-reclaim successful;
*               = -1    do read-reclaim failed.
*
*Notes      : if a physical page is read millions of times, some bit errors may appear in
*             the page, and the number of error bits will increase along with the read count,
*             so, if the number of error bits reaches the ECC limit, the data should be
*             read out and written to another physical block.
************************************************************************************************************************
*/
__s32 LML_ReadReclaim(__u32 nPage)
{
    __s32   result;

    #if CFG_SUPPORT_READ_RECLAIM

	LOGICCTL_ERR("[LOGICCTL_ERR] read reclaim go\n");

    //flush the page cache to nand flash first, because its buffer is needed below
    result = LML_FlushPageCache();
    if(result < 0)
    {
        LOGICCTL_ERR("[LOGICCTL_ERR] Flush page cache failed when do read reclaim! Error:0x%x\n", result);
        return -1;
    }

    //read the full page data to buffer
    result = LML_PageRead(nPage, FULL_BITMAP_OF_LOGIC_PAGE, LML_WRITE_PAGE_CACHE);
    if(result < 0)
    {
        return -1;
    }

    //the page cache now holds the full page data, write it to nand flash
    result = LML_PageWrite(nPage, FULL_BITMAP_OF_LOGIC_PAGE, LML_WRITE_PAGE_CACHE);
    if(result < 0)
    {
        return -1;
    }

    #endif
    return 0;
}
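/*
*   A minimal caller sketch (not part of the original driver): it assumes the read path can obtain
*   the corrected-bit count from the ECC engine, and that the threshold macro below is a
*   driver-chosen margin under the hardware correction limit. Both names are hypothetical.
*/
#define EXAMPLE_ECC_RECLAIM_THRESHOLD   6

static __s32 _Example_ReadWithReclaim(__u32 nPage, __u32 nCorrectedBits)
{
    //hypothetical policy: once the corrected-bit count nears the ECC limit,
    //rewrite the logical page so its data moves to another physical block
    if(nCorrectedBits >= EXAMPLE_ECC_RECLAIM_THRESHOLD)
    {
        if(LML_ReadReclaim(nPage) < 0)
        {
            return -1;
        }
    }

    return 0;
}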
/*
************************************************************************************************************************
*                       NAND FLASH LOGIC MANAGE LAYER BAD BLOCK MANAGE
*
*Description: Nand flash bad block manage.
*
*Arguments  : pBadBlk   the pointer to the bad physical block parameter;
*             nZoneNum  the number of the zone which the bad block belongs to;
*             nErrPage  the number of the error page;
*             pNewBlk   the pointer to the new valid block parameter.
*
*Return     : bad block manage result;
*               = 0     do bad block manage successful;
*               = -1    do bad block manage failed.
************************************************************************************************************************
*/
__s32 LML_BadBlkManage(struct __SuperPhyBlkType_t *pBadBlk, __u32 nZoneNum, __u32 nErrPage, struct __SuperPhyBlkType_t *pNewBlk)
{
    __s32   result;
    struct __SuperPhyBlkType_t tmpFreeBlk;
    struct __SuperPhyBlkType_t tmpBadBlk;

    tmpBadBlk = *pBadBlk;

	LOGICCTL_ERR("%s : %d : bad block manage go ,bad block: %x\n",__FUNCTION__,__LINE__,tmpBadBlk.PhyBlkNum);

__PROCESS_BAD_BLOCK:

    if(pNewBlk)
    {
        //get a new free block to replace the bad block
        BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk);
        if(tmpFreeBlk.PhyBlkNum == 0xffff)
        {
            LOGICCTL_ERR("[LOGICCTL_ERR] Look for free block failed when replace bad block\n");
            return -1;
        }

        //restore the valid page data from the bad block
        if(nErrPage)
        {
            result = _RestorePageData(&tmpBadBlk, nZoneNum, nErrPage, &tmpFreeBlk);
            if(result < 0)
            {
                //restore data failed, mark the new free block as a bad block
                _MarkBadBlk(&tmpFreeBlk, nZoneNum);

                goto __PROCESS_BAD_BLOCK;
            }
        }

        *pNewBlk = tmpFreeBlk;
    }

    //write bad flag to the bad block
    _MarkBadBlk(&tmpBadBlk, nZoneNum);

    return 0;
}
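/*
*   A minimal usage sketch (not part of the original source), mirroring how the routines below call
*   LML_BadBlkManage after a failed bank synchronization: the failed data block is replaced and the
*   data block table entry is updated. The wrapper name is hypothetical.
*/
static __s32 _Example_ReplaceFailedDataBlk(__u32 nLogicBlk, __u32 nZone, __u32 nErrPage)
{
    struct __SuperPhyBlkType_t tmpNewBlk;

    //mark the failed block as bad and restore the pages before nErrPage into a free block
    if(LML_BadBlkManage(&DATA_BLK_TBL[nLogicBlk], nZone, nErrPage, &tmpNewBlk) < 0)
    {
        return -1;
    }

    //let the logical block point to the replacement block
    DATA_BLK_TBL[nLogicBlk] = tmpNewBlk;

    return 0;
}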
/*
************************************************************************************************************************
*                       GET LOG PAGE FOR WRITE
*
*Description: Get a log page for write.
*
*Arguments  : nBlk      the logical block number of the log block;
*             nPage     the number of the logical page that needs a log page;
*             pLogPage  the pointer to the log page number, for return value;
*             pLogPst   the pointer to the position of the log block in the log block table.
*
*Return     : get log page result.
*               = 0     get log page for write successful;
*               =-1     get log page for write failed.
************************************************************************************************************************
*/
static __s32 _GetLogPageForWrite(__u32 nBlk, __u32 nPage, __u16 *pLogPage, __u32 *pLogPst)
{
    __s32   result, tmpLogPst;
    __u16   tmpPage, tempBank;
    struct __PhysicOpPara_t tmpPhyPage;
    struct __NandUserData_t tmpSpare[2];

    tmpLogPst = _GetLogBlkPst(nBlk);
    if(tmpLogPst < 0)
    {
        //get log block position failed, there is no such log block, need to create a new one
        result = _CreateNewLogBlk(nBlk, (__u32 *)&tmpLogPst);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Create new log block failed!\n");
            return -1;
        }
    }

    //swap in the page mapping table of the log block that is being accessed currently
    result = PMM_SwitchMapTbl(tmpLogPst);
    if(result < 0)
    {
        MAPPING_ERR("[MAPPING_ERR] Switch page mapping table failed when get log page! Err:0x%x\n", result);
        return -1;
    }

    //get the log page to use for the write operation
    tmpPage = LOG_BLK_TBL[tmpLogPst].LastUsedPage;

	if(SUPPORT_ALIGN_NAND_BNK)
    {
        if(tmpPage == 0xffff)
        {
            //the log block is empty, need get log page in the first page line
            tmpPage = nPage % INTERLEAVE_BANK_CNT;
        }
        else
        {
            //need bank align, the log page and the data page should be in the same bank
            if((nPage % INTERLEAVE_BANK_CNT) > (tmpPage % INTERLEAVE_BANK_CNT))
            {
                //get the log page in the same page line with last used page
                tmpPage = tmpPage + ((nPage % INTERLEAVE_BANK_CNT) - (tmpPage % INTERLEAVE_BANK_CNT));
            }
            else
            {
                //need get the log page in the next page line of the last used page
                tmpPage = tmpPage + (nPage % INTERLEAVE_BANK_CNT) + (INTERLEAVE_BANK_CNT - (tmpPage % INTERLEAVE_BANK_CNT));
            }
        }
    }
    else
    {

        //use the page which is the next of the last used page
        tmpPage = tmpPage + 1;
    }
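    /*
    *   Worked example of the bank alignment above (illustration only, not original code),
    *   assuming INTERLEAVE_BANK_CNT == 2:
    *     - last used page 4 (bank 0), logical page in bank 1: 1 > 0, so the log page is
    *       4 + (1 - 0) = 5, i.e. bank 1 of the same page line;
    *     - last used page 5 (bank 1), logical page in bank 0: 0 > 1 is false, so the log page is
    *       5 + 0 + (2 - 1) = 6, i.e. bank 0 of the next page line.
    */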

	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
	{
		DBUG_MSG("[DBUG_MSG] _GetLogPageForWrite, select bak log block\n");

		if(SUPPORT_ALIGN_NAND_BNK)
		{
			tempBank = tmpPage%INTERLEAVE_BANK_CNT;
			tmpPage =PMM_CalNextLogPage(tmpPage);
			while(tmpPage%INTERLEAVE_BANK_CNT != tempBank)
			{
				tmpPage++;
				tmpPage =PMM_CalNextLogPage(tmpPage);
				if(tmpPage>=PAGE_CNT_OF_SUPER_BLK)
					break;
			}
		}
		else
		{
			tmpPage =PMM_CalNextLogPage(tmpPage);
		}

		if((tmpPage >= PAGE_CNT_OF_SUPER_BLK)&&(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex == 0))
		{
			LOG_BLK_TBL[tmpLogPst].WriteBlkIndex = 1;
			tmpPage = tmpPage - PAGE_CNT_OF_SUPER_BLK;
		}
		if(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex == 1)
			DBUG_MSG("[DBUG_MSG] _GetLogPageForWrite, log block index: %x, log block num: %x, page: %x \n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex, LOG_BLK_TBL[tmpLogPst].PhyBlk1.PhyBlkNum, tmpPage);
		else
			DBUG_MSG("[DBUG_MSG] _GetLogPageForWrite, log block index: %x, log block num: %x, page: %x \n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex, LOG_BLK_TBL[tmpLogPst].PhyBlk.PhyBlkNum, tmpPage);
	}

__CHECK_WRITE_LOGICAL_INFO_OF_LOG_BLOCK:
    //check if the logical information needs to be written in the first page of the log block
    if((LOG_BLK_TBL[tmpLogPst].LastUsedPage == 0xffff) && (tmpPage != 0))
    {
        //get logical information from the data block
        LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, DATA_BLK_TBL[nBlk].PhyBlkNum, 0);
        tmpPhyPage.SectBitmap = 0x03;
        tmpPhyPage.MDataPtr = LML_TEMP_BUF;
        tmpPhyPage.SDataPtr = (void *)tmpSpare;
        LML_VirtualPageRead(&tmpPhyPage);

		//if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
		//{
		//	PRINT("_GetLogPageForWrite log %x page 0, data age: %x, log age: %x\n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex, tmpSpare[0].PageStatus, tmpSpare[0].PageStatus+1);
		//}

        tmpSpare[0].BadBlkFlag = 0xff;
        tmpSpare[1].BadBlkFlag = 0xff;
        tmpSpare[0].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
        tmpSpare[1].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
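        /*
        *   Encoding note (added comment): LogicInfo packs the zone number within the die into
        *   bits 10 and above and the logical block number into the low 10 bits, e.g. zone 2 and
        *   block 0x155 give (2<<10) | 0x155 = 0x955.
        */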
        tmpSpare[0].LogicPageNum = 0xffff;
        tmpSpare[1].LogicPageNum = 0xffff;
        tmpSpare[0].PageStatus =  tmpSpare[0].PageStatus + 1;
        tmpSpare[1].PageStatus = tmpSpare[0].PageStatus;
		if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
		{
			tmpSpare[0].LogType = LSB_TYPE|(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex<<4);
			tmpSpare[1].LogType = LSB_TYPE|(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex<<4);
		}
		else
		{
			tmpSpare[0].LogType = 0xff;
			tmpSpare[1].LogType = 0xff;
		}

       //write the logical information to the spare area of the first page of the log block
       	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
       	{
       		DBUG_MSG("[DBUG] _GetLogPageForWrite, write the logical information to log page 0, writeblkindex: %x\n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex);
       		LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, LOG_BLK_TBL[tmpLogPst].PhyBlk.PhyBlkNum, 0);
       	}
		else
        	LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, LOG_BLK_TBL[tmpLogPst].PhyBlk.PhyBlkNum, 0);

		tmpPhyPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
        result = LML_VirtualPageWrite(&tmpPhyPage);
        if(result < 0)
        {
            LOGICCTL_ERR("[MAPPING_ERR] Physical write module failed when write logical information, Err:0x%x!\n", result);
            return -1;
        }

        result = PHY_SynchBank(tmpPhyPage.BankNum, SYNC_CHIP_MODE);
        if(result < 0)
        {
            //the last write operation on the current bank failed, the block is bad and needs to be processed
            LOGICCTL_DBG("[LOGICCTL_DBG] Find a bad block when write logical page! bank:0x%x, block:0x%x, page:0x%x\n",
                    tmpPhyPage.BankNum, tmpPhyPage.BlkNum, tmpPhyPage.PageNum);

            //process the bad block
            result = LML_BadBlkManage(&LOG_BLK_TBL[tmpLogPst].PhyBlk, CUR_MAP_ZONE, 0, &LOG_BLK_TBL[tmpLogPst].PhyBlk);
            if(result < 0)
            {
                LOGICCTL_ERR("[MAPPING_ERR] Bad block process failed when get log page for write, Err:0x%x!\n", result);
                return -1;
            }

           goto __CHECK_WRITE_LOGICAL_INFO_OF_LOG_BLOCK;
        }
    }

    //set the log page number for return
    *pLogPage = tmpPage;
    *pLogPst = tmpLogPst;

    return 0;
}
/*
************************************************************************************************************************
*                       CREATE A NEW LOG BLOCK
*
*Description: Create a new log block.
*
*Arguments  : nBlk      the logical block number of the log block;
*             pLogPst   the pointer to the log block position in the log block table.
*
*Return     : create new log block result.
*               = 0     create new log block successful;
*               =-1     create new log block failed.
************************************************************************************************************************
*/
static __s32 _CreateNewLogBlk(__u32 nBlk, __u32 *pLogPst)
{
    __s32   i, result, LogBlkType,tmpPst=-1;
    __u16   tmpLogAccessAge = 0xffff;
    struct __SuperPhyBlkType_t tmpFreeBlk, tmpFreeBlk1;
    struct __PhysicOpPara_t tmpPhyPage;
    struct __NandUserData_t tmpSpare[2];

    #if CFG_SUPPORT_WEAR_LEVELLING

    //check if need do wear-levelling
    if(BLK_ERASE_CNTER >= WEAR_LEVELLING_FREQUENCY)
    {
        LML_WearLevelling();
    }

    #endif
    //try to search an empty item in the log block table
    for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
    {
        if(LOG_BLK_TBL[i].LogicBlkNum == 0xffff)
        {
            //found an empty item
            tmpPst = i;
            break;
        }
    }

    //there is no empty item in the log block table, need to merge a log block
    if(tmpPst == -1)
    {
        //check if there is some full log block
        for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
        {
            if(LOG_BLK_TBL[i].LastUsedPage == PAGE_CNT_OF_SUPER_BLK-1)
            {
                tmpPst = i;
                break;
            }
        }

        if(tmpPst == -1)
        {
            //there is no full log block, look for the oldest log block to merge
            for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
            {
                if(LOG_ACCESS_AGE[i] < tmpLogAccessAge)
                {
                    tmpLogAccessAge = LOG_ACCESS_AGE[i];
                    tmpPst = i;
                }
            }
        }

        //switch the page mapping table for merging the log block
        result = PMM_SwitchMapTbl(tmpPst);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Switch page mapping table failed when create new log block! Err:0x%x\n", result);
            return -1;
        }

        //merge the log block with normal type, to make an empty item
        result = LML_MergeLogBlk(NORMAL_MERGE_MODE, LOG_BLK_TBL[tmpPst].LogicBlkNum);
        if(result < 0)
        {
            //merge log block failed, report error
            MAPPING_ERR("[MAPPING_ERR] Merge log block failed when create new log block! Err:0x%x\n", result);
            return -1;
        }
    }

	LogBlkType = BMM_CalLogBlkType(nBlk);

	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlkType == LSB_TYPE))
	{
		DBUG_MSG("[DBUG_MSG] _CreateNewLogBlk, select bak log block\n");

		//get a free block to create a new log block
	    result = BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk);
	    if(result < 0)
	    {
	        MAPPING_ERR("[MAPPING_ERR] Get free block failed when create new log block!\n");
	        return -1;
	    }

		//get a free block to create a new log block
	    result = BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk1);
	    if(result < 0)
	    {
	        MAPPING_ERR("[MAPPING_ERR] Get free block failed when create new log block!\n");
	        return -1;
	    }

		//DBUG_INF("[DBUG] _CreateNewLogBlk, logic: %x, logblk0: %x, logblk1:%x \n", nBlk, tmpFreeBlk.PhyBlkNum, tmpFreeBlk1.PhyBlkNum);

	    //make a new log item in the log block table
	    LOG_BLK_TBL[tmpPst].LogicBlkNum = nBlk;
	    LOG_BLK_TBL[tmpPst].LastUsedPage = 0xffff;
		LOG_BLK_TBL[tmpPst].LogBlkType = LogBlkType;
		LOG_BLK_TBL[tmpPst].WriteBlkIndex = 0;
		LOG_BLK_TBL[tmpPst].ReadBlkIndex = 0;
	    LOG_BLK_TBL[tmpPst].PhyBlk = tmpFreeBlk;
		LOG_BLK_TBL[tmpPst].PhyBlk1 = tmpFreeBlk1;
	    //set the return value of the log position
	    *pLogPst = tmpPst;
	}
	else
	{
	    //get a free block to create a new log block
	    result = BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk);
	    if(result < 0)
	    {
	        MAPPING_ERR("[MAPPING_ERR] Get free block failed when create new log block!\n");
	        return -1;
	    }

	    //make a new log item in the log block table
	    LOG_BLK_TBL[tmpPst].LogicBlkNum = nBlk;
	    LOG_BLK_TBL[tmpPst].LastUsedPage = 0xffff;
		LOG_BLK_TBL[tmpPst].LogBlkType = LogBlkType;
		LOG_BLK_TBL[tmpPst].WriteBlkIndex = 0;
		LOG_BLK_TBL[tmpPst].ReadBlkIndex = 0;
	    LOG_BLK_TBL[tmpPst].PhyBlk = tmpFreeBlk;
	    //set the return value of the log position
	    *pLogPst = tmpPst;
	}


__CHECK_LOGICAL_INFO_OF_DATA_BLOCK:
    //check if the data block is an empty block, if so, need to update the logical information in the spare area
    LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, DATA_BLK_TBL[nBlk].PhyBlkNum, 0);
    tmpPhyPage.SectBitmap = 0x03;
    tmpPhyPage.MDataPtr = LML_TEMP_BUF;
    tmpPhyPage.SDataPtr = (void *)tmpSpare;
    LML_VirtualPageRead(&tmpPhyPage);

    if(tmpSpare[0].LogicInfo == 0xffff)
    {
        tmpSpare[0].BadBlkFlag = 0xff;
        tmpSpare[1].BadBlkFlag = 0xff;
        tmpSpare[0].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
        tmpSpare[1].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
        tmpSpare[0].LogicPageNum = 0xffff;
        tmpSpare[1].LogicPageNum = 0xffff;
        tmpSpare[0].PageStatus = 0xff;
        tmpSpare[1].PageStatus = 0xff;

        //write the logical information to the spare area of the data block
        tmpPhyPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
        result = LML_VirtualPageWrite(&tmpPhyPage);
        if(result < 0)
        {
            LOGICCTL_ERR("[MAPPING_ERR] Physical write module failed when write logical information, Err:0x%x!\n", result);
            return -1;
        }

        result = PHY_SynchBank(tmpPhyPage.BankNum, SYNC_CHIP_MODE);
        if(result < 0)
        {
            //the last write operation on the current bank failed, the block is bad and needs to be processed
            LOGICCTL_DBG("[LOGICCTL_DBG] Find a bad block when write logical page! bank:0x%x, block:0x%x, page:0x%x\n",
                    tmpPhyPage.BankNum, tmpPhyPage.BlkNum, tmpPhyPage.PageNum);

            //process the bad block
            result = LML_BadBlkManage(&DATA_BLK_TBL[nBlk], CUR_MAP_ZONE, 0, &tmpFreeBlk);
            if(result < 0)
            {
                LOGICCTL_ERR("[MAPPING_ERR] Bad block process failed when create new log block, Err:0x%x!\n", result);
                return -1;
            }
            DATA_BLK_TBL[nBlk] = tmpFreeBlk;

            goto __CHECK_LOGICAL_INFO_OF_DATA_BLOCK;
        }
    }

    return 0;
}
/*
************************************************************************************************************************
*                   NAND FLASH LOGIC MANAGE LAYER WEAR-LEVELLING
*
*Description: Equate the erase cycles among all physical blocks.
*
*Arguments  : none
*
*Return     : do wear-levelling result;
*               = 0     do wear-levelling successful;
*               = -1    do wear-levelling failed.
*
*Notes      : The erase cycle count of a physical block is limited; if the erase count overruns
*             this limit, the physical block may become invalid, so a policy is needed to spread
*             the erase cycles evenly across every physical block.
************************************************************************************************************************
*/
__s32 LML_WearLevelling(void)
{
    #if CFG_SUPPORT_WEAR_LEVELLING

    __s32   i, result;

    __u32   tmpLogicBlk;
    __u16   tmpLowEc = 0xffff;
    struct __SuperPhyBlkType_t tmpFreeBlk, tmpDataBlk;
    struct __NandUserData_t tmpSpare[2];
    struct __PhysicOpPara_t tmpSrcPage, tmpDstPage;
    BLK_ERASE_CNTER = 0;

    //scan the data block table, to look for a physical block with lowest erase count
    for(i=DATA_BLK_CNT_OF_ZONE-1; i>=0; i--)
    {
        if(DATA_BLK_TBL[i].BlkEraseCnt < tmpLowEc)
        {
            tmpLowEc = DATA_BLK_TBL[i].BlkEraseCnt;
            tmpLogicBlk = i;
        }
    }

    //get a free block which has the highest erase count
    result = BMM_GetFreeBlk(HIGHEST_EC_TYPE, &tmpFreeBlk);
    if(result < 0)
    {
        LOGICCTL_ERR("[LOGICCTL_ERR] Get free block failed when do wear-levelling!\n");
        return -1;
    }

    //clear the block erase counter
    BLK_ERASE_CNTER = 0;

    if(tmpLowEc >= tmpFreeBlk.BlkEraseCnt)
    {
        if(tmpLowEc == 0xffff)
        {
            //the lowest erase count reached the highest value, clear the erase count of all physical blocks
            for(i=0; i<DATA_BLK_CNT_OF_ZONE; i++)
            {
                //clear the erase count for the data block
                DATA_BLK_TBL[i].BlkEraseCnt = 0x00;
            }

            for(i=0; i<FREE_BLK_CNT_OF_ZONE; i++)
            {
                //clear the erase count for the free block
                if(FREE_BLK_TBL[i].PhyBlkNum != 0xffff)
                {
                    FREE_BLK_TBL[i].BlkEraseCnt = 0x00;
                }
            }

            for(i=0; i<MAX_LOG_BLK_CNT; i++)
            {
                //clear the erase count for the log block
                if(LOG_BLK_TBL[i].LogicBlkNum != 0xffff)
                {
                    LOG_BLK_TBL[i].PhyBlk.BlkEraseCnt = 0x00;
                }
            }
        }

        BMM_SetFreeBlk(&tmpFreeBlk);

        return 0;
    }

    BMM_GetDataBlk(tmpLogicBlk, &tmpDataBlk);
    result = BMM_GetLogBlk(tmpLogicBlk, NULL);
    if(result < 0)
    {
        //check if the data block is empty
        LML_CalculatePhyOpPar(&tmpSrcPage, CUR_MAP_ZONE, tmpDataBlk.PhyBlkNum, 0);
        tmpSrcPage.SectBitmap = 0x03;
        tmpSrcPage.MDataPtr = LML_TEMP_BUF;
        tmpSrcPage.SDataPtr = (void *)tmpSpare;
        LML_VirtualPageRead(&tmpSrcPage);

        if(tmpSpare[0].LogicInfo != 0xffff)
        {
            //need copy data from the data block to the free block
            tmpSrcPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
            tmpSrcPage.MDataPtr = NULL;
            tmpSrcPage.SDataPtr = NULL;

            tmpDstPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
            tmpDstPage.MDataPtr = NULL;
            tmpDstPage.SDataPtr = NULL;

            for(i=0; i<PAGE_CNT_OF_SUPER_BLK; i++)
            {
                LML_CalculatePhyOpPar(&tmpSrcPage, CUR_MAP_ZONE, tmpDataBlk.PhyBlkNum, i);
                LML_CalculatePhyOpPar(&tmpDstPage, CUR_MAP_ZONE, tmpFreeBlk.PhyBlkNum, i);

                PHY_PageCopyback(&tmpSrcPage, &tmpDstPage);
                //check page copy result
                result = PHY_SynchBank(tmpDstPage.BankNum, SYNC_CHIP_MODE);
                if(result < 0)
                {
                    LOGICCTL_DBG("[LOGICCTL_DBG] Copy page failed when doing wear-levelling!\n");
                    result = LML_BadBlkManage(&tmpFreeBlk, CUR_MAP_ZONE, 0, NULL);
                    if(result < 0)
                    {
                        LOGICCTL_ERR("[LOGICCTL_ERR] Bad block manage failed when doing wear-levelling!\n");
                        return -1;
                    }

                    return 0;
                }
            }
        }

        //set the data block item by the free block
        BMM_SetDataBlk(tmpLogicBlk, &tmpFreeBlk);

        if(tmpSpare[0].LogicInfo != 0xffff)
        {
            //erase the old data block so it can become a free block
            result = LML_VirtualBlkErase(CUR_MAP_ZONE, tmpDataBlk.PhyBlkNum);
            if(result < 0)
            {
                LOGICCTL_DBG("[LOGICCTL_DBG] Erase super block failed when doing wear-levelling!\n");
                result = LML_BadBlkManage(&tmpDataBlk, CUR_MAP_ZONE, 0, NULL);
                if(result < 0)
                {
                    LOGICCTL_ERR("[LOGICCTL_ERR] Bad block manage failed when doing wear-levelling!\n");
                   return -1;
                }

                return 0;
            }
        }

        //put the erased data block into the free block table
        tmpDataBlk.BlkEraseCnt++;
        BMM_SetFreeBlk(&tmpDataBlk);
    }
    else
    {
        //set the free block back to free table
        BMM_SetFreeBlk(&tmpFreeBlk);
    }

    #endif
    return 0;
}
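/*
*   A minimal trigger sketch (not part of the original source), repeating the pattern used in
*   _CreateNewLogBlk above: once the module-level erase counter passes the configured frequency,
*   the write path invokes wear-levelling. The wrapper name is hypothetical.
*/
#if CFG_SUPPORT_WEAR_LEVELLING
static void _Example_WearLevelCheck(void)
{
    //BLK_ERASE_CNTER accumulates erase operations; it is cleared inside LML_WearLevelling
    if(BLK_ERASE_CNTER >= WEAR_LEVELLING_FREQUENCY)
    {
        LML_WearLevelling();
    }
}
#endif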
/*!
*
* \par  Description:
*       This function copies valid data from the log block or data block to a free block, changes the free
*       block to the data block, and changes the old data and log blocks to free blocks.
*
* \param  [in]       nlogical, serial number within the log block space
* \return      success or failure.
* \note         this function is called when the log block is not suitable for a swap or move merge.
**/
__s32  _free2data_simple_merge(__u32 nlogical)
{
    __u8 InData;
    __u16 SuperPage;
    __u16 SrcPage,DstPage;
    __u32 SrcBlk,DstBlk;
    struct __SuperPhyBlkType_t DataBlk;
    struct __SuperPhyBlkType_t FreeBlk;
    struct __LogBlkType_t LogBlk;
    struct __PhysicOpPara_t SrcParam,DstParam;

    /*init block info*/
    BMM_GetDataBlk(nlogical,&DataBlk);
	BMM_GetLogBlk(nlogical,&LogBlk);
    if (NAND_OP_TRUE != BMM_GetFreeBlk(LOWEST_EC_TYPE, &FreeBlk))
        return NAND_OP_FALSE;


    /*copy data from data block or log block to free block*/
	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE))
	{
		//DBUG_INF("[DBUG] nand lsb type simple merge block %x\n", nlogical);
		for (SuperPage = 0; SuperPage < PAGE_CNT_OF_LOGIC_BLK; SuperPage++)
	    {
	        /*set source address and destination address*/
	        DstPage = SuperPage;
	        DstBlk = FreeBlk.PhyBlkNum;
	        SrcPage = PMM_GetCurMapPage(SuperPage);
	        InData = (SrcPage == 0xffff)?1 : 0;
			if(InData)
			{
				SrcBlk = DataBlk.PhyBlkNum;
			}
			else
			{
				if(SrcPage&(0x1<<15))
					SrcBlk = LogBlk.PhyBlk1.PhyBlkNum;
				else
					SrcBlk = LogBlk.PhyBlk.PhyBlkNum;
			}
	        SrcPage = InData?SuperPage:(SrcPage&0x7fff);
			LML_CalculatePhyOpPar(&SrcParam, CUR_MAP_ZONE,SrcBlk, SrcPage);
			LML_CalculatePhyOpPar(&DstParam, CUR_MAP_ZONE,DstBlk, DstPage);

	        if (DstPage == 0)
	        {
	            __u8 SeqPlus;
	            //SeqPlus = InData?1:0;
	            SeqPlus = InData?2:1;
	            if(NAND_OP_FALSE == _copy_page0(SrcBlk, SrcPage, DstBlk,SeqPlus))
	            {
	                LOGICCTL_ERR("simple_merge : copy page 0 err\n");
	                return NAND_OP_FALSE;
	            }
	        }
	        else
	        {
	            if(NAND_OP_TRUE != PHY_PageCopyback(&SrcParam,&DstParam))
	            {
	                LOGICCTL_ERR("simple merge : copy back err\n");
	                return NAND_OP_FALSE;
	            }
	        }

	        if(NAND_OP_TRUE != PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE))
	        {
	            struct __SuperPhyBlkType_t SubBlk;
	            if(NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk,CUR_MAP_ZONE,DstPage, &SubBlk))
	            {
	                LOGICCTL_ERR("simple merge : bad block manage err after copy back\n");
	                return NAND_OP_FALSE;
	            }
	            FreeBlk = SubBlk;
	            SuperPage -= 1;
	        }
	    }

	    /*move free block to data block*/
	    BMM_SetDataBlk(nlogical, &FreeBlk);


		/*move erased data block to free block*/
	    if ( NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, DataBlk.PhyBlkNum)){
	        if (NAND_OP_TRUE != LML_BadBlkManage(&DataBlk,CUR_MAP_ZONE,0,NULL)){
	            LOGICCTL_ERR("simple merge : bad block manage err after erase data block\n");
	            return NAND_OP_FALSE;
	        }
	    }
	    /*move erased data block to free block*/
	    if (DataBlk.BlkEraseCnt < 0xffff)
	        DataBlk.BlkEraseCnt ++;
	    BMM_SetFreeBlk(&DataBlk);
		//DBUG_INF("[DBUG] logic %x simple merge: erase data block: %x\n", LogBlk.LogicBlkNum, DataBlk.PhyBlkNum);

	    /*move erased log block to free block*/
	    if ( NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk.PhyBlkNum)){
	        if (NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk,CUR_MAP_ZONE,0,NULL)){
	            LOGICCTL_ERR("simple merge : bad block manage err after erase log block\n");
	            return NAND_OP_FALSE;
	        }
	    }
	    if (LogBlk.PhyBlk.BlkEraseCnt < 0xffff)
	        LogBlk.PhyBlk.BlkEraseCnt ++;
	    BMM_SetFreeBlk(&LogBlk.PhyBlk);
		//DBUG_INF("[DBUG] logic %x simple merge: erase log block 0: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk.PhyBlkNum);

		 /*move erased log block to free block*/

		if ( NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk1.PhyBlkNum)){
	        if (NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk1,CUR_MAP_ZONE,0,NULL)){
	            LOGICCTL_ERR("simple merge : bad block manage err after erase log block\n");
	            return NAND_OP_FALSE;
	        }
	    }
	    if (LogBlk.PhyBlk1.BlkEraseCnt < 0xffff)
	        LogBlk.PhyBlk1.BlkEraseCnt ++;
	    BMM_SetFreeBlk(&LogBlk.PhyBlk1);
		//DBUG_INF("[DBUG] logic %x simple merge: erase log block 1: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk1.PhyBlkNum);

	    MEMSET(&LogBlk, 0xff, sizeof(struct __LogBlkType_t));
		LogBlk.LogBlkType = 0;
		LogBlk.WriteBlkIndex = 0;
		LogBlk.ReadBlkIndex = 0;
	    BMM_SetLogBlk(nlogical, &LogBlk);

	    /*clear page map table*/
	    PMM_ClearCurMapTbl();
	}
	else
	{
		for (SuperPage = 0; SuperPage < PAGE_CNT_OF_LOGIC_BLK; SuperPage++)
	    {
	        /*set source address and destination address*/
	        DstPage = SuperPage;
	        DstBlk = FreeBlk.PhyBlkNum;
	        SrcPage = PMM_GetCurMapPage(SuperPage);
	        InData = (SrcPage == 0xffff)?1 : 0;
	        SrcBlk = InData?DataBlk.PhyBlkNum : LogBlk.PhyBlk.PhyBlkNum;
	        SrcPage = InData?SuperPage:SrcPage;
			LML_CalculatePhyOpPar(&SrcParam, CUR_MAP_ZONE,SrcBlk, SrcPage);
			LML_CalculatePhyOpPar(&DstParam, CUR_MAP_ZONE,DstBlk, DstPage);

	        if (DstPage == 0)
	        {
	            __u8 SeqPlus;
	            //SeqPlus = InData?1:0;
	            SeqPlus = InData?2:1;
	            if(NAND_OP_FALSE == _copy_page0(SrcBlk, SrcPage, DstBlk,SeqPlus))
	            {
	                LOGICCTL_ERR("simple_merge : copy page 0 err\n");
	                return NAND_OP_FALSE;
	            }
	        }
	        else
	        {
	            if(NAND_OP_TRUE != PHY_PageCopyback(&SrcParam,&DstParam))
	            {
	                LOGICCTL_ERR("simple merge : copy back err\n");
	                return NAND_OP_FALSE;
	            }
	        }

	        if(NAND_OP_TRUE != PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE))
	        {
	            struct __SuperPhyBlkType_t SubBlk;
	            if(NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk,CUR_MAP_ZONE,DstPage, &SubBlk))
	            {
	                LOGICCTL_ERR("simple merge : bad block manage err after copy back\n");
	                return NAND_OP_FALSE;
	            }
	            FreeBlk = SubBlk;
	            SuperPage -= 1;
	        }
	    }

	    /*move free block to data block*/
	    BMM_SetDataBlk(nlogical, &FreeBlk);


		/*move erased data block to free block*/
	    if ( NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, DataBlk.PhyBlkNum)){
	        if (NAND_OP_TRUE != LML_BadBlkManage(&DataBlk,CUR_MAP_ZONE,0,NULL)){
	            LOGICCTL_ERR("simple merge : bad block manage err after erase data block\n");
	            return NAND_OP_FALSE;
	        }
	    }
	    /*move erased data block to free block*/
	    if (DataBlk.BlkEraseCnt < 0xffff)
	        DataBlk.BlkEraseCnt ++;
	    BMM_SetFreeBlk(&DataBlk);


	    /*move erased log block to free block*/
	    if ( NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk.PhyBlkNum)){
	        if (NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk,CUR_MAP_ZONE,0,NULL)){
	            LOGICCTL_ERR("simple merge : bad block manage err after erase log block\n");
	            return NAND_OP_FALSE;
	        }
	    }
	    if (LogBlk.PhyBlk.BlkEraseCnt < 0xffff)
	        LogBlk.PhyBlk.BlkEraseCnt ++;
	    BMM_SetFreeBlk(&LogBlk.PhyBlk);
	    MEMSET(&LogBlk, 0xff, sizeof(struct __LogBlkType_t));
		LogBlk.LogBlkType = 0;
		LogBlk.WriteBlkIndex = 0;
		LogBlk.ReadBlkIndex = 0;
	    BMM_SetLogBlk(nlogical, &LogBlk);

	    /*clear page map table*/
	    PMM_ClearCurMapTbl();
	}


    return NAND_OP_TRUE;

}
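/*
*   A small helper sketch (not part of the original source) that isolates the source-selection
*   logic used by the simple merge above: an unmapped page (0xffff) is still valid in the data
*   block at the same offset, otherwise bit 15 of the map entry selects the backup log block
*   (LSB type) and the low 15 bits give the page inside it. The function name is hypothetical.
*/
static __u32 _Example_ResolveMergeSource(__u16 nMapPage, __u16 nSuperPage,
                                         __u32 nDataBlk, __u32 nLogBlk0, __u32 nLogBlk1,
                                         __u16 *pSrcPage)
{
    if(nMapPage == 0xffff)
    {
        //page was never written to the log block, copy it from the data block
        *pSrcPage = nSuperPage;
        return nDataBlk;
    }

    //page lives in one of the two log blocks
    *pSrcPage = nMapPage & 0x7fff;
    return (nMapPage & (0x1<<15)) ? nLogBlk1 : nLogBlk0;
}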
/*!
*
* \par  Description:
*       This function moves valid data from the log block to a free block, then replaces the log block with it.
*
* \param  [in]       nlogical, serial number within the log block space
* \return      success or failure.
* \note         this function is called when the log block is full and the number of valid pages is less than half of one block.
**/
__s32  _free2log_move_merge(__u32 nlogical)
{
    __u8 bank;
    __u16 LastUsedPage,SuperPage;
    __u16 SrcPage,DstPage, SrcBlock, DstBlock;
    struct __SuperPhyBlkType_t FreeBlk,FreeBlk1;
    struct __LogBlkType_t LogBlk;
    struct __PhysicOpPara_t SrcParam,DstParam;
	struct __NandUserData_t UserData[2];

    /*init info of log block , and get one free block */
    BMM_GetLogBlk(nlogical, &LogBlk);

    if (NAND_OP_TRUE != BMM_GetFreeBlk(LOWEST_EC_TYPE, &FreeBlk))
        return NAND_OP_FALSE;

	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE))
	{
		if (NAND_OP_TRUE != BMM_GetFreeBlk(LOWEST_EC_TYPE, &FreeBlk1))
        	return NAND_OP_FALSE;
		//DBUG_INF("[DBUG] lsb move merge, new log0: %x, new log1: %x\n", FreeBlk.PhyBlkNum, FreeBlk1.PhyBlkNum);
	}


    SrcParam.MDataPtr = DstParam.MDataPtr = NULL;
    SrcParam.SDataPtr = DstParam.SDataPtr = NULL;
    SrcParam.SectBitmap = DstParam.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;

    if(SUPPORT_ALIGN_NAND_BNK)
    {
        redo:
        /*copy data bank by bank, for copy-back using*/
        LastUsedPage = 0;
        for (bank = 0; bank < INTERLEAVE_BANK_CNT; bank++)
        {
            DstPage = bank;
            for (SuperPage  = bank; SuperPage < PAGE_CNT_OF_SUPER_BLK; SuperPage+= INTERLEAVE_BANK_CNT)
            {
                SrcPage = PMM_GetCurMapPage(SuperPage);
                if (SrcPage != 0xffff)
                {
                	  /*set source and destination address*/
	                if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE))
					{
						DBUG_MSG("[DBUG_MSG] _free2log_move_merge 2, select bak log block\n");
						DstPage = PMM_CalNextLogPage(DstPage);
						while((DstPage%INTERLEAVE_BANK_CNT)!=bank)
						{
							DstPage++;
							DstPage = PMM_CalNextLogPage(DstPage);
							if(DstPage>=PAGE_CNT_OF_SUPER_BLK)
								break;
						}

						if(DstPage >= PAGE_CNT_OF_SUPER_BLK)
						{
							LOGICCTL_ERR("move merge : dst page cal error\n");
							return NAND_OP_FALSE;
						}

						if(SrcPage&(0x1<<15))
							SrcBlock = LogBlk.PhyBlk1.PhyBlkNum;
						else
							SrcBlock = LogBlk.PhyBlk.PhyBlkNum;
						DstBlock = FreeBlk.PhyBlkNum;
						SrcPage &= (~(0x1<<15));

					}
					else
					{
						SrcBlock = LogBlk.PhyBlk.PhyBlkNum;
						DstBlock = FreeBlk.PhyBlkNum;
					}

    		 		LML_CalculatePhyOpPar(&SrcParam,CUR_MAP_ZONE, SrcBlock, SrcPage);
                   	LML_CalculatePhyOpPar(&DstParam,CUR_MAP_ZONE, DstBlock, DstPage);
                    if (DstPage == 0)
                    {
                        if ( NAND_OP_FALSE == _copy_page0(SrcBlock,SrcPage,FreeBlk.PhyBlkNum,0))
                        {
                            LOGICCTL_ERR("move merge : copy page 0 err1\n");
                            return NAND_OP_FALSE;
                        }
                    }
                    else
                    {
                        if (NAND_OP_TRUE != PHY_PageCopyback(&SrcParam,&DstParam))
                        {
                            LOGICCTL_ERR("move merge : copy back err\n");
                            return NAND_OP_FALSE;
                        }
                    }

                    if (NAND_OP_TRUE !=  PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE))
                    {
                        struct __SuperPhyBlkType_t SubBlk;
                        if (NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk,CUR_MAP_ZONE,0,&SubBlk))
                        {
                            LOGICCTL_ERR("move merge : bad block manage err after copy back\n");
                            return NAND_OP_FALSE;
                        }
                        FreeBlk = SubBlk;
                        goto redo;
                    }

                    PMM_SetCurMapPage(SuperPage,DstPage);
                    DstPage += INTERLEAVE_BANK_CNT;
                }
            }

            /*if bank 0 is empty, need write manage info in page 0*/
            if ((bank == 0) && (DstPage == 0))
            {
                if ( NAND_OP_FALSE == _copy_page0(LogBlk.PhyBlk.PhyBlkNum,0,FreeBlk.PhyBlkNum,0))
                {
                    LOGICCTL_ERR("move merge : copy page 0 err2\n");
                    return NAND_OP_FALSE;
                }
    			LML_CalculatePhyOpPar(&DstParam, CUR_MAP_ZONE, FreeBlk.PhyBlkNum, 0);
                if (NAND_OP_TRUE !=  PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE))
                {
                    struct __SuperPhyBlkType_t SubBlk;
                    if (NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk,CUR_MAP_ZONE,0,&SubBlk))
                    {
                        LOGICCTL_ERR("move merge : bad block manage err after copy back\n");
                        return NAND_OP_FALSE;
                    }
                    FreeBlk = SubBlk;
                    goto redo;
                }
            }

            /*reset LastUsedPage*/
            if ((DstPage - INTERLEAVE_BANK_CNT) > LastUsedPage)
            {
                LastUsedPage = DstPage - INTERLEAVE_BANK_CNT;
            }
        }
    }
    else
    {
    	/*copy data page by page*/
    	DstPage = 0;
        LastUsedPage = 0;
    	for (SuperPage = 0; SuperPage < PAGE_CNT_OF_LOGIC_BLK; SuperPage++)
    	{
    		SrcPage = PMM_GetCurMapPage(SuperPage);
    		if (SrcPage != 0xffff)
    		{
    			/*set source and destination address*/
	            if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE))
				{
					DBUG_MSG("[DBUG_MSG] _free2log_move_merge 3, select bak log block\n");
					DstPage = PMM_CalNextLogPage(DstPage);
					if(DstPage >= PAGE_CNT_OF_SUPER_BLK)
					{
						LOGICCTL_ERR("move merge : dst page cal error\n");
						return NAND_OP_FALSE;
					}

					if(SrcPage&(0x1<<15))
						SrcBlock = LogBlk.PhyBlk1.PhyBlkNum;
					else
						SrcBlock = LogBlk.PhyBlk.PhyBlkNum;
					DstBlock = FreeBlk.PhyBlkNum;
					SrcPage &= 0x7fff;

				}
				else
				{
					SrcBlock = LogBlk.PhyBlk.PhyBlkNum;
					DstBlock = FreeBlk.PhyBlkNum;
				}

    		 	LML_CalculatePhyOpPar(&SrcParam,CUR_MAP_ZONE, SrcBlock, SrcPage);
                LML_CalculatePhyOpPar(&DstParam,CUR_MAP_ZONE, DstBlock, DstPage);
    			if (0 == DstPage)
    			{
    				if ( NAND_OP_FALSE == _copy_page0(SrcBlock,SrcPage,FreeBlk.PhyBlkNum,0))
                    {
                         LOGICCTL_ERR("move merge : copy page 0 err1\n");
                         return NAND_OP_FALSE;
                    }
    			}
    			else
    			{
    				SrcParam.MDataPtr = DstParam.MDataPtr = LML_TEMP_BUF;
    				SrcParam.SDataPtr = DstParam.SDataPtr = (void *)&UserData;
        			MEMSET((void *)&UserData,0xff,sizeof(struct __NandUserData_t) * 2);
    				SrcParam.SectBitmap = DstParam.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
    				if (LML_VirtualPageRead(&SrcParam) < 0){
           				 LOGICCTL_ERR("move merge : read main data err\n");
            			 return NAND_OP_FALSE;
        			}

       				if (NAND_OP_TRUE != LML_VirtualPageWrite(&DstParam)){
            			LOGICCTL_ERR("move merge : write err\n");
            			return NAND_OP_FALSE;
        			}
    			}
    			if (NAND_OP_TRUE !=  PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE))
                {
                	struct __SuperPhyBlkType_t SubBlk;
                    if (NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk,CUR_MAP_ZONE,LastUsedPage,&SubBlk))
                    {
                        LOGICCTL_ERR("move merge : bad block manage err after copy back\n");
                    	return NAND_OP_FALSE;
    				}
    				FreeBlk = SubBlk;
    				SuperPage -= 1;
    			}
    			PMM_SetCurMapPage(SuperPage,DstPage);
    			LastUsedPage = DstPage;
    			DstPage++;
    		}
    	}

    }

    /*erase log block*/
    if(NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk.PhyBlkNum))
    {
        if(NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk,CUR_MAP_ZONE,0,NULL))
        {
            LOGICCTL_ERR("move merge : bad block manage err after erase log block\n");
            return NAND_OP_FALSE;
        }
    }
	/*move erased log block to free block*/
    if(LogBlk.PhyBlk.BlkEraseCnt < 0xffff)
    {
        LogBlk.PhyBlk.BlkEraseCnt ++;
    }
    BMM_SetFreeBlk(&LogBlk.PhyBlk);

	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE))
	{
		//DBUG_INF("[DBUG] logic %x move merge: erase log block 0: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk.PhyBlkNum);
		if(NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk1.PhyBlkNum))
	    {
	        if(NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk1,CUR_MAP_ZONE,0,NULL))
	        {
	            LOGICCTL_ERR("move merge : bad block manage err after erase log block\n");
	            return NAND_OP_FALSE;
	        }
	    }
		/*move erased log block to free block*/
	    if(LogBlk.PhyBlk1.BlkEraseCnt < 0xffff)
	    {
	        LogBlk.PhyBlk1.BlkEraseCnt ++;
	    }
	    BMM_SetFreeBlk(&LogBlk.PhyBlk1);
		//DBUG_INF("[DBUG] logic %x move merge: erase log block 1: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk1.PhyBlkNum);
	}


    /*move free block to log block*/
    LogBlk.PhyBlk.PhyBlkNum= FreeBlk.PhyBlkNum;
	LogBlk.PhyBlk.BlkEraseCnt= FreeBlk.BlkEraseCnt;
	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE))
	{
		DBUG_MSG("[DBUG_MSG] _free2log_move_merge 4, select bak log block\n");
		LogBlk.PhyBlk1.PhyBlkNum= FreeBlk1.PhyBlkNum;
	    LogBlk.PhyBlk1.BlkEraseCnt= FreeBlk1.BlkEraseCnt;
	    LogBlk.WriteBlkIndex = 0;
	    LogBlk.ReadBlkIndex = 0;
		DBUG_MSG("[DBUG] move merge to new log block, logic block: %x, logblock0: %x, logblock1: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk.PhyBlkNum, LogBlk.PhyBlk1.PhyBlkNum);

	}
	else
	{
		LogBlk.LogBlkType = 0;
		LogBlk.WriteBlkIndex = 0;
	    LogBlk.ReadBlkIndex = 0;
	}
	LogBlk.LastUsedPage = LastUsedPage;
    BMM_SetLogBlk(nlogical, &LogBlk);

	//if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE))
	//	DBUG_INF("logic %x move merge, lastusedpage: %x\n", LogBlk.LogicBlkNum, LogBlk.LastUsedPage);

    return NAND_OP_TRUE;
}
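/*!
*
* \par  Description:
*       (Added description, inferred from the callers above.) Copy page 0 and rebuild its spare-area
*       info: page 0 carries the logical information and a page-status sequence number, so it cannot
*       be moved with a plain copy-back.
*
* \param  [in]       SrcBlk, source super block; SrcDataPage, source page of the main data;
*                    DstBlk, destination super block; SeqPlus, value added to the page-status
*                    sequence (the merge routines above pass 1 or 2 when the destination becomes
*                    a data block, and 0 when copying from log block to log block).
* \return      success or failure.
**/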
__s32 _copy_page0(__u32 SrcBlk,__u16 SrcDataPage,__u32 DstBlk,__u8 SeqPlus)
{
    __u8 seq;
    __u16 LogicInfo;
    struct __NandUserData_t UserData[2];
    struct __PhysicOpPara_t SrcParam,DstParam;

    SrcParam.MDataPtr = DstParam.MDataPtr = LML_TEMP_BUF;
    SrcParam.SDataPtr = DstParam.SDataPtr = (void *)&UserData;
    MEMSET((void *)&UserData,0xff,sizeof(struct __NandUserData_t) * 2);

    /*get seq and logicinfo*/
    SrcParam.SectBitmap = 0x3;
    LML_CalculatePhyOpPar(&SrcParam,CUR_MAP_ZONE, SrcBlk, 0);
    if (LML_VirtualPageRead(&SrcParam) < 0){
        LOGICCTL_ERR("_copy_page0 : read user data err\n");
        return NAND_OP_FALSE;
    }
    seq = UserData[0].PageStatus;
    LogicInfo = UserData[0].LogicInfo;

    /*copy main data */
    SrcParam.SectBitmap = DstParam.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
    LML_CalculatePhyOpPar(&SrcParam, CUR_MAP_ZONE, SrcBlk, SrcDataPage);
    LML_CalculatePhyOpPar(&DstParam, CUR_MAP_ZONE, DstBlk, 0);

    if (LML_VirtualPageRead(&SrcParam) < 0){
        LOGICCTL_ERR("_copy_page0 : read main data err\n");
        return NAND_OP_FALSE;
    }

	if((SeqPlus!=0)&&(SeqPlus!=1)&&(SeqPlus!=2))
	{
		PRINT("Error, invalid SeqPlus: %x \n", SeqPlus);
		PRINT("copypage0, SeqPlus: %x, old_seq: %x, new_seq: %x\n", SeqPlus, seq, SeqPlus+seq);
		PRINT("SrcBlk: %x, DstBlk: %x, SrcDataPage: %x \n", SrcBlk, DstBlk, SrcDataPage);
		while(1);
	}
	else
	{
		//PRINT("copypage0, SeqPlus:%x, old_seq: %x, new_seq: %x\n", SeqPlus, seq, SeqPlus+seq);
		//PRINT("SrcBlk: %x, DstBlk: %x, SrcDataPage: %x \n", SrcBlk, DstBlk, SrcDataPage);
	}

    UserData[0].LogicInfo = LogicInfo;
    UserData[0].PageStatus = seq + SeqPlus;
	if(SeqPlus) //copy to new data block
	{
		UserData[0].LogType = 0xff;
		UserData[1].LogType = 0xff;
	}
	else //copy from log to log, move merge to log block0
	{
		UserData[0].LogType &= 0xf;
		UserData[1].LogType &= 0xf;
	}

    if (NAND_OP_TRUE != LML_VirtualPageWrite(&DstParam)){
        LOGICCTL_ERR("_copy_page0 : write err\n");
        return NAND_OP_FALSE;
    }


    return NAND_OP_TRUE;
}
static __s32 _write_back_page_map_tbl(__u32 nLogBlkPst)
{
    __u16 TablePage;
    __u32 TableBlk;
    struct  __NandUserData_t  UserData[2];
    struct  __PhysicOpPara_t  param, tmpPage0;
    struct  __SuperPhyBlkType_t BadBlk,NewBlk;
	__s32 result;


    /*check page position, merge if no free page*/
	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
	{
		TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
    	TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
		DBUG_MSG("[DBUG] _write_back_page_map_tbl, log block: %x, bak log block %x\n", LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);
		DBUG_MSG("[DBUG] _write_back_page_map_tbl, select bak log block\n");
		TablePage = PMM_CalNextLogPage(TablePage);

		if((TablePage >= PAGE_CNT_OF_SUPER_BLK)&&(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 0))
		{
			DBUG_MSG("[DBUG] _write_back_page_map_tbl, change to log block 1, phyblock1: %x\n", LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);

			LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex = 1;
			TablePage = TablePage - PAGE_CNT_OF_SUPER_BLK;
		}

		if(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 1)
    		TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum;
		else
			TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;

		if (TablePage >= PAGE_CNT_OF_SUPER_BLK){
			//DBUG_INF("[DBUG] _write_back_page_map_tbl, log block full, need merge\n");
	        /*block is full, need merge*/
	        if (LML_MergeLogBlk(SPECIAL_MERGE_MODE,LOG_BLK_TBL[nLogBlkPst].LogicBlkNum)){
	            MAPPING_ERR("write back page tbl : merge err\n");
	            return NAND_OP_FALSE;
	        }
			DBUG_MSG("[DBUG] _write_back_page_map_tbl, log block merge end\n");
	        if (PAGE_MAP_CACHE->ZoneNum != 0xff){
	            /*move merge*/
	            TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
	            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
				TablePage = PMM_CalNextLogPage(TablePage);
				//DBUG_INF("[DBUG] _write_back_page_map_tbl, after move merge, table block: %x, table page %x\n", TableBlk, TablePage);
	        }
	        else
	            return NAND_OP_TRUE;
	    }
	}
	else
	{
		TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
    	TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;

		if (TablePage == PAGE_CNT_OF_SUPER_BLK){
	        /*block is full, need merge*/
	        if (LML_MergeLogBlk(SPECIAL_MERGE_MODE,LOG_BLK_TBL[nLogBlkPst].LogicBlkNum)){
	            MAPPING_ERR("write back page tbl : merge err\n");
	            return NAND_OP_FALSE;
	        }

	        if (PAGE_MAP_CACHE->ZoneNum != 0xff){
	            /*move merge*/
	            TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
	            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
	        }
	        else
	            return NAND_OP_TRUE;
	    }
	}



rewrite:
//PRINT("-------------------write back page tbl for blk %x\n",TableBlk);
    /*write page map table*/
	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
	{
		if((TablePage== 0)&&(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 1))
	    {
			MEMSET((void *)(&UserData[0]),0xff,sizeof(struct __NandUserData_t) * 2);
	        //log page is the page0 of the logblk1, should copy page0 of logblock0, and skip the page
	        LML_CalculatePhyOpPar(&tmpPage0, CUR_MAP_ZONE, LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, 0);
	        tmpPage0.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
	        tmpPage0.MDataPtr = LML_TEMP_BUF;
	        tmpPage0.SDataPtr = (void *)UserData;
	        result = LML_VirtualPageRead(&tmpPage0);
	        if(result < 0)
	        {
	            LOGICCTL_ERR("[LOGICCTL_ERR] Get log age of data block failed when write logical page, Err:0x%x!\n", result);
	            return -ERR_PHYSIC;
	        }

			//log page is the page0 of the logblk1, should skip the page
			UserData[0].LogType = LSB_TYPE|(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex<<4);
			UserData[1].LogType = LSB_TYPE|(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex<<4);

	        LML_CalculatePhyOpPar(&tmpPage0, CUR_MAP_ZONE, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum, 0);
	        tmpPage0.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
	        tmpPage0.MDataPtr = LML_TEMP_BUF;
	        tmpPage0.SDataPtr = (void *)UserData;
	        result = LML_VirtualPageWrite(&tmpPage0);

			TablePage++;

	    }
	}

	MEMSET((void *)(&UserData[0]),0xff,sizeof(struct __NandUserData_t) * 2);
    UserData[0].PageStatus = 0xaa;
	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
	{
		UserData[0].LogType = LSB_TYPE|(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex<<4);
		UserData[1].LogType = LSB_TYPE|(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex<<4);
	}
	else
	{
		UserData[0].LogType = 0xff;
		UserData[1].LogType = 0xff;
	}

	//if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE)&&(TablePage== 0))
	//{
	//	DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, TablePage: %x, TableBlk: %x\n", TablePage, TableBlk);
	//	DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, logicNum: %x, log0: %x, log1: %x\n", LOG_BLK_TBL[nLogBlkPst].LogicBlkNum,LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);
	//	DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, logicinfo: %x, logicpage: %x\n", UserData[0].LogicInfo, UserData[0].LogicPageNum);
	//	DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, logtype: %x, pagestatus: %x\n", UserData[0].LogType, UserData[0].PageStatus);
	//}

    MEMSET(LML_PROCESS_TBL_BUF,0xff,SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);

	if(PAGE_CNT_OF_SUPER_BLK >= 512)
	{
		__u32 page;

		for(page = 0; page < PAGE_CNT_OF_SUPER_BLK; page++)
			*((__u16 *)LML_PROCESS_TBL_BUF + page) = PAGE_MAP_TBL[page].PhyPageNum;

		((__u32 *)LML_PROCESS_TBL_BUF)[511] = \
        	_GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK*2/(sizeof (__u32)));
	}

	else
	{
		MEMCPY(LML_PROCESS_TBL_BUF, PAGE_MAP_TBL,PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t));
    	((__u32 *)LML_PROCESS_TBL_BUF)[511] = \
        	_GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t)/(sizeof (__u32)));
	}
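	/*
	*   Layout note (added comment): the table buffer is one super page
	*   (SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE bytes). For super blocks of 512 pages or more,
	*   only the 16-bit PhyPageNum of each entry is stored instead of the full
	*   __PageMapTblItem_t, and in both branches the checksum is placed at 32-bit word index 511
	*   of the buffer.
	*/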

    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;

//rewrite:
    LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);

    if (NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE)){
        BadBlk.PhyBlkNum = TableBlk;
        if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk,CUR_MAP_ZONE,TablePage,&NewBlk)){
            MAPPING_ERR("write page map table : bad block manage err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        LOG_BLK_TBL[nLogBlkPst].PhyBlk = NewBlk;
        goto rewrite;
    }

    LOG_BLK_TBL[nLogBlkPst].LastUsedPage = TablePage;
    PAGE_MAP_CACHE->ZoneNum = 0xff;
    PAGE_MAP_CACHE->LogBlkPst = 0xff;

	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
		DBUG_MSG("[DBUG] _write_back_page_map_tbl end, lastusedpage: %x, write_index: %x\n", LOG_BLK_TBL[nLogBlkPst].LastUsedPage, LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex);

    return NAND_OP_TRUE;

}