Example No. 1
/* post the requested zone's block map table into the cache */
static __s32 _blk_map_tbl_cache_post(__u32 nZone)
{
    __u8 poisition;

    /*find the cache entry to post into*/
    poisition = _find_blk_tbl_post_location();
    BLK_MAP_CACHE = &(BLK_MAP_CACHE_POOL->BlkMapTblCachePool[poisition]);

    /* write the currently cached table back to flash if it is dirty */
    if (BLK_MAP_CACHE->DirtyFlag) {
        if (NAND_OP_TRUE != _write_back_block_map_tbl(CUR_MAP_ZONE)) {
            MAPPING_ERR("_blk_map_tbl_cache_post : write back zone tbl err\n");
            return NAND_OP_FALSE;
        }
    }

    /*fetch current zone map table*/
    if (NAND_OP_TRUE != _read_block_map_tbl(nZone)) {
        MAPPING_ERR("_blk_map_tbl_cache_post : read zone tbl err\n");
        return NAND_OP_FALSE;
    }
    CUR_MAP_ZONE = nZone;
    BLK_MAP_CACHE->DirtyFlag = 0;

    return NAND_OP_TRUE;
}
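The function above follows a plain write-back cache pattern: pick a victim slot, flush it to flash if it is dirty, then load the requested zone's table into that slot. Below is a minimal, self-contained sketch of that pattern in isolation; the zone_tbl_cache struct and the two flash_* helpers are illustrative stand-ins, not the driver's real types or routines.

#include <stdint.h>

struct zone_tbl_cache {
    uint8_t  zone;        /* 0xff means the slot holds no zone yet */
    uint8_t  dirty;
    uint8_t  table[64];   /* stand-in for the cached map table */
};

/* stand-ins for the driver's flash accessors */
static int flash_write_tbl(uint8_t zone, const uint8_t *tbl) { (void)zone; (void)tbl; return 0; }
static int flash_read_tbl(uint8_t zone, uint8_t *tbl)        { (void)zone; (void)tbl; return 0; }

/* flush the victim slot if it is dirty, then fetch the requested zone into it */
static int cache_post(struct zone_tbl_cache *victim, uint8_t new_zone)
{
    if (victim->dirty && victim->zone != 0xff) {
        if (flash_write_tbl(victim->zone, victim->table) != 0)
            return -1;                       /* write-back failed */
    }
    if (flash_read_tbl(new_zone, victim->table) != 0)
        return -1;                           /* fetch failed */
    victim->zone  = new_zone;
    victim->dirty = 0;
    return 0;
}

int main(void)
{
    struct zone_tbl_cache slot = { 0xff, 0, {0} };
    return cache_post(&slot, 2);             /* load zone 2 into an empty slot */
}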
Example No. 2
__s32 BMM_RleaseLogBlock(__s32 tmpPst, __s32 start_page, __u32 merge_page_cnt)
{
	__s32 result;


	//PRINT("BMM_RleaseLogBlock: 0x%x, 0x%x, 0x%x\n", tmpPst, start_page, merge_page_cnt);
	BMM_SetDirtyFlag();

    //switch in the page mapping table for merging the log block
    result = PMM_SwitchMapTbl(tmpPst);
    if(result < 0)
    {
        MAPPING_ERR("[MAPPING_ERR] Switch page mapping table failed when create new log block! Err:0x%x\n", result);
        return -1;
    }

    //merge the log block in normal mode to make an empty item
    result = LML_MergeLogBlk_Ext(NORMAL_MERGE_MODE, LOG_BLK_TBL[tmpPst].LogicBlkNum, start_page, merge_page_cnt);
    if(result < 0)
    {
        //merge log block failed, report error
        MAPPING_ERR("[MAPPING_ERR] Merge log block failed when create new log block! Err:0x%x\n", result);
        return -1;
    }

	return (result);

}
Example No. 3
__s32 BMM_MergeAllLogBlock(void)
{
	__u32 tmpZoneNum, ZoneCnt, tmpLogPos;
	__s32 result = 0;   /* default return value if there is nothing to merge */

	ZoneCnt = ZONE_CNT_OF_DIE * DieCntOfNand;
	PRINT("BMM_MergeAllLogBlock, ZoneCnt: %x\n", ZoneCnt);

	for(tmpZoneNum=0; tmpZoneNum<ZoneCnt; tmpZoneNum++)
	{
		PRINT("BMM_MergeAllLogBlock, tmpZoneNum: %x\n", tmpZoneNum);

        //swap in the block mapping table that needs to be accessed currently
        if(tmpLogicalBlk != 0xffff)
            LML_MergeLogBlk_Quit();
        result = BMM_SwitchMapTbl(tmpZoneNum);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] BMM_MergeAllLogBlock, Switch block mapping table failed when read logical page! Err:0x%x\n", result);
            return -ERR_MAPPING;
        }


		BMM_SetDirtyFlag();

		for(tmpLogPos = 0; tmpLogPos<MAX_LOG_BLK_CNT; tmpLogPos++)
		{
			if(LOG_BLK_TBL[tmpLogPos].LogicBlkNum != 0xffff)
			{
				PRINT("BMM_MergeAllLogBlock, logpos: %x, logblock: %x\n", tmpLogPos, LOG_BLK_TBL[tmpLogPos].LogicBlkNum);
				//swap in the page mapping table that needs to be accessed currently
			    result = PMM_SwitchMapTbl(tmpLogPos);
			    if(result < 0)
			    {
			        MAPPING_ERR("[MAPPING_ERR] BMM_MergeAllLogBlock, Switch page mapping table failed when switch page map table! Err:0x%x\n", result);
			        return -1;
			    }


				result = LML_MergeLogBlk(NORMAL_MERGE_MODE,LOG_BLK_TBL[tmpLogPos].LogicBlkNum);
				if(result<0)
				{
		            MAPPING_ERR("[MAPPING_ERR] BMM_MergeAllLogBlock, merge error! Err:0x%x\n", result);
		            return -ERR_MAPPING;
				}

			}
		}
	}

	PRINT("BMM_MergeAllLogBlock end\n");
	return result;

}
Example No. 4
/* post the page map table of the requested log block into the cache */
static __s32 _page_map_tbl_cache_post(__u32 nLogBlkPst)
{
    __u8 poisition;
    __u8 i;

    struct __BlkMapTblCache_t *TmpBmt = BLK_MAP_CACHE;

    /*find the cache entry to post into*/
    poisition = _find_page_tbl_post_location();
    PAGE_MAP_CACHE = &(PAGE_MAP_CACHE_POOL->PageMapTblCachePool[poisition]);

    if (PAGE_MAP_CACHE->DirtyFlag && (PAGE_MAP_CACHE->ZoneNum != 0xff)) {
        /*write back page  map table*/
        if (PAGE_MAP_CACHE->ZoneNum != TmpBmt->ZoneNum) {
            for (i = 0; i < BLOCK_MAP_TBL_CACHE_CNT; i++)
            {
                if (BLK_MAP_CACHE_POOL->BlkMapTblCachePool[i].ZoneNum == PAGE_MAP_CACHE->ZoneNum) {
                    BLK_MAP_CACHE = &(BLK_MAP_CACHE_POOL->BlkMapTblCachePool[i]);
                    break;
                }
            }

            if (i == BLOCK_MAP_TBL_CACHE_CNT) {
                MAPPING_ERR("_page_map_tbl_cache_post : position %d ,page map zone %d,blk map zone %d\n",
                            poisition,PAGE_MAP_CACHE->ZoneNum,BLK_MAP_CACHE->ZoneNum);
                return NAND_OP_FALSE;
            }

        }
        /* write the cached page map table back to flash since it is dirty */
        BMM_SetDirtyFlag();
        if (NAND_OP_TRUE != _write_back_page_map_tbl(PAGE_MAP_CACHE->LogBlkPst)) {
            MAPPING_ERR("write back page tbl err\n");
            return NAND_OP_FALSE;
        }

        BLK_MAP_CACHE = TmpBmt;

    }

    PAGE_MAP_CACHE->DirtyFlag = 0;

    /*fetch current page map table*/
    if (NAND_OP_TRUE != _read_page_map_tbl(nLogBlkPst)) {
        MAPPING_ERR("read page map tbl err\n");
        return NAND_OP_FALSE;
    }

    PAGE_MAP_CACHE->ZoneNum = CUR_MAP_ZONE;
    PAGE_MAP_CACHE->LogBlkPst = nLogBlkPst;

    return NAND_OP_TRUE;
}
Example No. 5
/*
************************************************************************************************************************
*                       INIT BLOCK MAPPING TABLE CACHE
*
*Description: Initialize the block mapping table cache.
*
*Arguments  : none.
*
*Return     : init result;
*               = 0     init successful;
*               = -1    init failed.
************************************************************************************************************************
*/
__s32 BMM_InitMapTblCache(void)
{
    __u32 i;

    BLK_MAP_CACHE_POOL = &BlkMapTblCachePool;

    BLK_MAP_CACHE_POOL->LogBlkAccessTimer = 0x0;
    BLK_MAP_CACHE_POOL->SuperBlkEraseCnt = 0x0;

    /*init block map table cache*/
    for(i=0; i<BLOCK_MAP_TBL_CACHE_CNT; i++)
    {
        //init the parameters for block mapping table cache management
        BLK_MAP_CACHE_POOL->BlkMapTblCachePool[i].ZoneNum = 0xff;
        BLK_MAP_CACHE_POOL->BlkMapTblCachePool[i].DirtyFlag = 0x0;
        BLK_MAP_CACHE_POOL->BlkMapTblCachePool[i].AccessCnt = 0x0;
        BLK_MAP_CACHE_POOL->BlkMapTblCachePool[i].LastFreeBlkPst = 0xff;

        //request buffer for data block table and free block table
        BLK_MAP_CACHE_POOL->BlkMapTblCachePool[i].DataBlkTbl = \
                (struct __SuperPhyBlkType_t *)MALLOC(sizeof(struct __SuperPhyBlkType_t)*BLOCK_CNT_OF_ZONE);
        if(NULL == BLK_MAP_CACHE_POOL->BlkMapTblCachePool[i].DataBlkTbl)
        {
            MAPPING_ERR("BMM_InitMapTblCache : allocate memory err\n");
            return -ERR_MALLOC;
        }
        //set free block table pointer
        BLK_MAP_CACHE_POOL->BlkMapTblCachePool[i].FreeBlkTbl = \
                BLK_MAP_CACHE_POOL->BlkMapTblCachePool[i].DataBlkTbl + DATA_BLK_CNT_OF_ZONE;

        //request buffer for log block table
        BLK_MAP_CACHE_POOL->BlkMapTblCachePool[i].LogBlkTbl =  \
                (struct __LogBlkType_t *)MALLOC(sizeof(struct __LogBlkType_t)*LOG_BLK_CNT_OF_ZONE);
        if(NULL == BLK_MAP_CACHE_POOL->BlkMapTblCachePool[i].LogBlkTbl)
        {
            MAPPING_ERR("BMM_InitMapTblCache : allocate memory err\n");
            FREE(BLK_MAP_CACHE_POOL->BlkMapTblCachePool[i].DataBlkTbl,sizeof(struct __SuperPhyBlkType_t)*BLOCK_CNT_OF_ZONE);
            return -ERR_MALLOC;
        }
    }

    /*init log block access time*/
    MEMSET(BLK_MAP_CACHE_POOL->LogBlkAccessAge, 0x0, MAX_LOG_BLK_CNT);

    return NAND_OP_TRUE;
}
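BMM_InitMapTblCache allocates two tables per cache entry and has to release memory when an allocation fails part-way through. The stand-alone sketch below shows the same allocate-and-clean-up idea using standard malloc/free in place of the driver's MALLOC/FREE wrappers; the map_cache_entry struct is a simplified stand-in, and unlike the function above the rollback here releases every entry built so far rather than only the failing one.

#include <stdlib.h>

struct map_cache_entry {
    unsigned short *data_tbl;   /* stand-in for DataBlkTbl */
    unsigned short *log_tbl;    /* stand-in for LogBlkTbl  */
};

/* allocate every entry of a small cache pool; on any failure, release
 * everything that was already allocated and report an error */
static int init_cache_pool(struct map_cache_entry *pool, int cnt, int tbl_len)
{
    int i;

    for (i = 0; i < cnt; i++) {
        pool[i].data_tbl = malloc(tbl_len * sizeof(unsigned short));
        pool[i].log_tbl  = malloc(tbl_len * sizeof(unsigned short));
        if (pool[i].data_tbl == NULL || pool[i].log_tbl == NULL) {
            /* roll back the partially built pool, including this entry */
            for (; i >= 0; i--) {
                free(pool[i].data_tbl);
                free(pool[i].log_tbl);
            }
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    struct map_cache_entry pool[4];

    /* in this tiny demo the pool is simply released by the OS at exit */
    return init_cache_pool(pool, 4, 128) == 0 ? 0 : 1;
}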
Example No. 6
/*
************************************************************************************************************************
*                       GET FREE BLOCK FROM FREE BLOCK TABLE
*
*Description: Get a free block from the free block table, either the one with the
*             highest erase count or the one with the lowest, depending on nType.
*
*Arguments  : nType     the type of free block to get;
*             pFreeBlk  the pointer to the free block pointer for return.
*
*Return     : get free block result;
*               = 0     get free block successful;
*               =-1     get free block failed.
************************************************************************************************************************
*/
__s32 BMM_GetFreeBlk(__u32 nType, struct __SuperPhyBlkType_t *pFreeBlk)
{
    __s32   i, tmpFreePst = -1;
    __u16   tmpItem = LAST_FREE_BLK_PST + 1;
    __u32   tmpEraseCnt;

    if(nType == LOWEST_EC_TYPE)
    {
        //need look for the free block with the lowest erase count
        tmpEraseCnt = 0xffff;
    }
    else
    {
        //need look for the free block with the highest erase count
        tmpEraseCnt = 0x0000;
    }

    for(i=0; i<FREE_BLK_CNT_OF_ZONE; i++, tmpItem++)
    {
        if(tmpItem >= FREE_BLK_CNT_OF_ZONE)
        {
            tmpItem = 0;
        }

        if(FREE_BLK_TBL[tmpItem].PhyBlkNum != 0xffff)
        {
            //current free block item is valid
            if(((nType == LOWEST_EC_TYPE) && (FREE_BLK_TBL[tmpItem].BlkEraseCnt <= tmpEraseCnt))
                || ((nType != LOWEST_EC_TYPE) && (FREE_BLK_TBL[tmpItem].BlkEraseCnt >= tmpEraseCnt)))
            {
                tmpEraseCnt = FREE_BLK_TBL[tmpItem].BlkEraseCnt;

                tmpFreePst = tmpItem;
            }
        }
    }

    if(tmpFreePst < 0)
    {
        MAPPING_ERR("[MAPPING_ERR] There is none free block in the free block table!\n");
        pFreeBlk->PhyBlkNum = 0xffff;
        pFreeBlk->BlkEraseCnt = 0xffff;

        return -1;
    }

    pFreeBlk->PhyBlkNum = FREE_BLK_TBL[tmpFreePst].PhyBlkNum;
    pFreeBlk->BlkEraseCnt = FREE_BLK_TBL[tmpFreePst].BlkEraseCnt;
    LAST_FREE_BLK_PST = tmpFreePst;

    //delete the free block item from the free block table
    FREE_BLK_TBL[tmpFreePst].PhyBlkNum = 0xffff;
    FREE_BLK_TBL[tmpFreePst].BlkEraseCnt = 0xffff;

	DBUG_MSG("[DBUG] BMM_GetFreeBlk, pos: %x\n", LAST_FREE_BLK_PST);

    return 0;
}
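BMM_GetFreeBlk walks the free block table as a ring, starting one slot past LAST_FREE_BLK_PST, and keeps the valid entry with the smallest (or largest) erase count. A self-contained sketch of the lowest-erase-count case follows; free_blk and pick_lowest_ec are simplified stand-ins for the driver's table entry and lookup, not its real definitions.

#include <stdint.h>
#include <stdio.h>

struct free_blk {
    uint16_t phy_blk;     /* 0xffff marks an empty slot */
    uint16_t erase_cnt;
};

/* pick the valid entry with the smallest erase count, starting the scan one
 * past last_pst so that allocations rotate through the whole table */
static int pick_lowest_ec(const struct free_blk *tbl, int cnt, int last_pst)
{
    int i, item = (last_pst + 1) % cnt, best = -1;
    uint16_t best_ec = 0xffff;

    for (i = 0; i < cnt; i++, item = (item + 1) % cnt) {
        if (tbl[item].phy_blk != 0xffff && tbl[item].erase_cnt <= best_ec) {
            best_ec = tbl[item].erase_cnt;
            best    = item;
        }
    }
    return best;          /* -1 if the table has no valid entry */
}

int main(void)
{
    struct free_blk tbl[4] = { {10, 7}, {0xffff, 0xffff}, {12, 3}, {13, 5} };

    printf("picked slot %d\n", pick_lowest_ec(tbl, 4, 0));   /* prints "picked slot 2" */
    return 0;
}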
Example No. 7
/*
************************************************************************************************************************
*                       GET THE PARAMETER OF DATA BLOCK
*
*Description: Get the parameter of the data block from the data block mapping table.
*
*Arguments  : nBlk      the number of the logical block whose data block need be got;
*             pDataBlk  the pointer to the super block parameter.
*
*Return     : get data block result;
*               = 0    get data block successful;
*               =-1    get data block failed.
************************************************************************************************************************
*/
__s32 BMM_GetDataBlk(__u32 nBlk, struct __SuperPhyBlkType_t *pDataBlk)
{
    if(nBlk >= DATA_BLK_CNT_OF_ZONE)    /*valid logical block numbers are 0 ~ DATA_BLK_CNT_OF_ZONE-1*/
    {
        MAPPING_ERR("[MAPPING_ERR] Logical block number(0x%x) is invalid when get data block!\n", nBlk);
        pDataBlk->BlkEraseCnt = 0xffff;
        pDataBlk->PhyBlkNum = 0xffff;
        return -1;
    }
    else
    {
        pDataBlk->BlkEraseCnt = DATA_BLK_TBL[nBlk].BlkEraseCnt;
        pDataBlk->PhyBlkNum = DATA_BLK_TBL[nBlk].PhyBlkNum;
        return 0;
    }
}
Example No. 8
static __s32 _rebuild_page_map_tbl(__u32 nLogBlkPst)
{
    __s32 ret;
    __u16 TablePage;
    __u32 TableBlk;
    __u16 logicpagenum;
    //__u8  status;
    struct  __NandUserData_t  UserData[2];
    struct  __PhysicOpPara_t  param;

    MEMSET(PAGE_MAP_TBL,0xff, PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t));
    TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;

    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = 0x3;

    //PRINT("-----------------------rebuild page table for blk %x\n",TableBlk);

    for(TablePage = 0; TablePage < PAGE_CNT_OF_SUPER_BLK; TablePage++) {
        LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk, TablePage);
        ret = LML_VirtualPageRead(&param);
        if (ret < 0) {
            MAPPING_ERR("rebuild logic block %x page map table : read err\n",LOG_BLK_TBL[nLogBlkPst].LogicBlkNum);
            return NAND_OP_FALSE;
        }

        //status = UserData[0].PageStatus;
        logicpagenum = UserData[0].LogicPageNum;

        //if(((!TablePage || (status == 0x55))) && (logicpagenum != 0xffff) && (logicpagenum < PAGE_CNT_OF_SUPER_BLK)) /*legal page*/
        if((logicpagenum != 0xffff) && (logicpagenum < PAGE_CNT_OF_SUPER_BLK)) /*legal page*/
        {
            PAGE_MAP_TBL[logicpagenum].PhyPageNum = TablePage; /*l2p:logical to physical*/
        }
    }

    PAGE_MAP_CACHE->DirtyFlag = 1;
    BMM_SetDirtyFlag();

    return NAND_OP_TRUE;
}
Example No. 9
static __s32 _write_back_all_page_map_tbl(__u8 nZone)
{
    __u32 i;

    for(i=0; i<PAGE_MAP_TBL_CACHE_CNT; i++)
    {
        if((PAGE_MAP_CACHE_POOL->PageMapTblCachePool[i].ZoneNum == nZone)\
                && (PAGE_MAP_CACHE_POOL->PageMapTblCachePool[i].DirtyFlag == 1))
        {
            PAGE_MAP_CACHE = &(PAGE_MAP_CACHE_POOL->PageMapTblCachePool[i]);
            if (NAND_OP_TRUE != _write_back_page_map_tbl(PAGE_MAP_CACHE->LogBlkPst))
            {
                MAPPING_ERR("write back all page tbl : write page map table err \n");
                return NAND_OP_FALSE;
            }
            PAGE_MAP_CACHE->DirtyFlag = 0;
        }
    }

    return NAND_OP_TRUE;
}
Example No. 10
/*
************************************************************************************************************************
*                   SET LOG BLOCK PARAMETER IN THE LOG BLOCK TABLE
*
*Description: Set the parameter for log block in the log block table.
*
*Arguments  : nLogicBlk     the logical block number to which the log block belongs;
*             pLogBlk       the pointer to the log block parameters to be set in the log block table.
*
*Return     : set log block result;
*               = 0     set log block successful;
*               < 0     set log block failed.
************************************************************************************************************************
*/
__s32 BMM_SetLogBlk(__u32 nLogicBlk, struct __LogBlkType_t *pLogBlk)
{
    __s32   tmpLogPst;

    tmpLogPst = _GetLogBlkPst(nLogicBlk);
    if(tmpLogPst < 0)
    {
        tmpLogPst = _GetLogBlkPst(0xffff);
        if(tmpLogPst < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Set log block table item failed!\n");

            return -1;
        }
    }

    //set the log block item in the log block table
    LOG_BLK_TBL[tmpLogPst] = *pLogBlk;

    return 0;
}
Example No. 11
/*
************************************************************************************************************************
*                       GET LOG PAGE PARAMETER
*
*Description: Get a page from log block for read or write.
*
*Arguments  : nBlk      the logical block number of the log block;
*             nPage     the number of the logical page that needs a log page;
*             nMode     the access type, 'r' or 'w'; any other value is invalid.
*
*Return     : the number of the log page;
*               != 0xffff   get log page successful, return page number;
*                = 0xffff   get log page failed.
*
*Note       : Scan the log block table to try to get the log block.
*             When the access type is 'r', if the log block exists and the logical
*             page has a log page, return the number of the log page; otherwise,
*             return 0xffff.
*             When the access type is 'w', if the log block does not exist, a log
*             block is created; if getting a log page still fails, the log block is
*             merged and the lookup is retried, so this mode should return a valid
*             page number unless there are not enough valid blocks.
************************************************************************************************************************
*/
__u32 PMM_GetLogPage(__u32 nBlk, __u32 nPage, __u8 nMode)
{
    __s32   result, tmpLogPst;
    __u16   tmpPage, PhyPageNum;

    if(nMode == 'r')
    {
        tmpLogPst = _GetLogBlkPst(nBlk);
        if(tmpLogPst < 0)
        {
            //get log page by read mode, there is no log block, return invalid value
            return INVALID_PAGE_NUM;
        }

        //swap in the page mapping table that needs to be accessed currently
        result = PMM_SwitchMapTbl(tmpLogPst);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Switch page mapping table failed when get log page! Err:0x%x\n", result);
            return INVALID_PAGE_NUM;
        }

        _CalLogAccessCnt(tmpLogPst);

		//get active read log block index
		if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
		{
			PhyPageNum = PAGE_MAP_TBL[nPage].PhyPageNum;
			if((PhyPageNum&(0x1<<15))&&(PhyPageNum!= 0xffff))
			{
				LOG_BLK_TBL[tmpLogPst].ReadBlkIndex = 1;
				PhyPageNum &= 0x7fff;
			}
			else
				LOG_BLK_TBL[tmpLogPst].ReadBlkIndex = 0;

	        return (PhyPageNum|LOG_BLK_TBL[tmpLogPst].ReadBlkIndex<<16);
		}
		else
		{
			LOG_BLK_TBL[tmpLogPst].ReadBlkIndex = 0;
			return PAGE_MAP_TBL[nPage].PhyPageNum;
		}
    }

    result = _GetLogPageForWrite(nBlk, nPage, &tmpPage, (__u32 *)&tmpLogPst);
    if(result < 0)
    {
        //get log page for write failed
        MAPPING_ERR("[MAPPING_ERR] Get log page for write failed!\n");
        return INVALID_PAGE_NUM;
    }

    //check if the log page is valid
    if(!(tmpPage < PAGE_CNT_OF_SUPER_BLK))
    {
        //the log page is not valid, need to merge the log block and try again
        result = LML_MergeLogBlk(SPECIAL_MERGE_MODE, nBlk);
        if(result < 0)
        {
            //merge log block failed, report error
            MAPPING_ERR("[MAPPING_ERR] Merge log block failed when get log page! Err:0x%x\n", result);
            return INVALID_PAGE_NUM;
        }

        //try to get log page for write again
        result = _GetLogPageForWrite(nBlk, nPage, &tmpPage, (__u32 *)&tmpLogPst);
        if(result < 0)
        {
            //get log page for write failed
            MAPPING_ERR("[MAPPING_ERR] Get log page for write failed!\n");
            return INVALID_PAGE_NUM;
        }
    }

    //check if the log page is valid
    if(!(tmpPage < PAGE_CNT_OF_SUPER_BLK))
    {
        //get log page for write failed
        MAPPING_ERR("[MAPPING_ERR] Get log page for write failed!\n");
        return INVALID_PAGE_NUM;
    }
    else
    {
        LOG_BLK_TBL[tmpLogPst].LastUsedPage = tmpPage;
    }

    //update the page mapping table item
    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
    {
    	DBUG_MSG("[DBUG_MSG] PMM_GetLogPage 2, select bak log block\n");
    	PAGE_MAP_TBL[nPage].PhyPageNum = tmpPage|((LOG_BLK_TBL[tmpLogPst].WriteBlkIndex&0x1)<<15);
    }
	else
		PAGE_MAP_TBL[nPage].PhyPageNum = tmpPage;

    //set the flag that marks the page mapping table as needing an update
    PAGE_MAP_CACHE->DirtyFlag = 1;

    _CalLogAccessCnt(tmpLogPst);

	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
		return (tmpPage|LOG_BLK_TBL[tmpLogPst].WriteBlkIndex<<16);
	else
    	return tmpPage;
}
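For the LSB-type path above, a page map entry packs the physical page number in bits 0..14 and the write block index in bit 15, and the value handed back to the caller carries the block index again in bit 16. The sketch below mirrors that packing and unpacking in isolation; the function names and the INVALID_PAGE constant are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define INVALID_PAGE 0xffffu

/* pack a physical page and a log-block index (0 or 1) into one 16-bit entry */
static uint16_t pack_entry(uint16_t phy_page, unsigned blk_index)
{
    return (uint16_t)(phy_page | ((blk_index & 0x1u) << 15));
}

/* unpack: return the page in bits 0..15 and the block index in bit 16,
 * mirroring the (PhyPageNum | ReadBlkIndex << 16) return value above */
static uint32_t unpack_entry(uint16_t entry)
{
    unsigned blk_index;
    uint16_t phy_page;

    if (entry == INVALID_PAGE)
        return INVALID_PAGE;            /* no log page mapped */

    blk_index = (entry >> 15) & 0x1u;
    phy_page  = entry & 0x7fffu;
    return (uint32_t)phy_page | ((uint32_t)blk_index << 16);
}

int main(void)
{
    uint16_t e = pack_entry(0x23, 1);

    printf("entry 0x%04x -> 0x%08lx\n", (unsigned)e, (unsigned long)unpack_entry(e));
    return 0;   /* prints "entry 0x8023 -> 0x00010023" */
}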
Example No. 12
/*
************************************************************************************************************************
*                       GET LOG PAGE FOR WRITE
*
*Description: Get a log page for write.
*
*Arguments  : nBlk      the logical block number of the log block;
*             nPage     the number of the logical page that needs a log page;
*             pLogPage  the pointer to the log page number, for return value;
*             pLogPst   the pointer to the position of the log block in the log block table.
*
*Return     : get log page result.
*               = 0     get log page for write successful;
*               =-1     get log page for write failed.
************************************************************************************************************************
*/
static __s32 _GetLogPageForWrite(__u32 nBlk, __u32 nPage, __u16 *pLogPage, __u32 *pLogPst)
{
    __s32   result, tmpLogPst;
    __u16   tmpPage, tempBank;
    struct __PhysicOpPara_t tmpPhyPage;
    struct __NandUserData_t tmpSpare[2];

    tmpLogPst = _GetLogBlkPst(nBlk);
    if(tmpLogPst < 0)
    {
        //get log block position failed, there is no such log block, need create a new one
        result = _CreateNewLogBlk(nBlk, (__u32 *)&tmpLogPst);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Create new log block failed!\n");
            return -1;
        }
    }

    //swap in the page mapping table that needs to be accessed currently
    result = PMM_SwitchMapTbl(tmpLogPst);
    if(result < 0)
    {
        MAPPING_ERR("[MAPPING_ERR] Switch page mapping table failed when get log page! Err:0x%x\n", result);
        return -1;
    }

    //need get log page by write mode,
    tmpPage = LOG_BLK_TBL[tmpLogPst].LastUsedPage;

	if(SUPPORT_ALIGN_NAND_BNK)
    {
        if(tmpPage == 0xffff)
        {
            //the log block is empty, need get log page in the first page line
            tmpPage = nPage % INTERLEAVE_BANK_CNT;
        }
        else
        {
            //need bank align, the log page and the data page should be in the same bank
            if((nPage % INTERLEAVE_BANK_CNT) > (tmpPage % INTERLEAVE_BANK_CNT))
            {
                //get the log page in the same page line with last used page
                tmpPage = tmpPage + ((nPage % INTERLEAVE_BANK_CNT) - (tmpPage % INTERLEAVE_BANK_CNT));
            }
            else
            {
                //need get the log page in the next page line of the last used page
                tmpPage = tmpPage + (nPage % INTERLEAVE_BANK_CNT) + (INTERLEAVE_BANK_CNT - (tmpPage % INTERLEAVE_BANK_CNT));
            }
        }
    }
    else
    {

        //use the page which is the next of the last used page
        tmpPage = tmpPage + 1;
    }

	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
	{
		DBUG_MSG("[DBUG_MSG] _GetLogPageForWrite, select bak log block\n");

		if(SUPPORT_ALIGN_NAND_BNK)
		{
			tempBank = tmpPage%INTERLEAVE_BANK_CNT;
			tmpPage =PMM_CalNextLogPage(tmpPage);
			while(tmpPage%INTERLEAVE_BANK_CNT != tempBank)
			{
				tmpPage++;
				tmpPage =PMM_CalNextLogPage(tmpPage);
				if(tmpPage>=PAGE_CNT_OF_SUPER_BLK)
					break;
			}
		}
		else
		{
			tmpPage =PMM_CalNextLogPage(tmpPage);
		}

		if((tmpPage >= PAGE_CNT_OF_SUPER_BLK)&&(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex == 0))
		{
			LOG_BLK_TBL[tmpLogPst].WriteBlkIndex = 1;
			tmpPage = tmpPage - PAGE_CNT_OF_SUPER_BLK;
		}
		if(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex == 1)
			DBUG_MSG("[DBUG_MSG] _GetLogPageForWrite, log block index: %x, log block num: %x, page: %x \n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex, LOG_BLK_TBL[tmpLogPst].PhyBlk1.PhyBlkNum, tmpPage);
		else
			DBUG_MSG("[DBUG_MSG] _GetLogPageForWrite, log block index: %x, log block num: %x, page: %x \n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex, LOG_BLK_TBL[tmpLogPst].PhyBlk.PhyBlkNum, tmpPage);
	}

__CHECK_WRITE_LOGICAL_INFO_OF_LOG_BLOCK:
    //check if need write the logical information in the first page of the log block
    if((LOG_BLK_TBL[tmpLogPst].LastUsedPage == 0xffff) && (tmpPage != 0))
    {
        //get logical information from the data block
        LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, DATA_BLK_TBL[nBlk].PhyBlkNum, 0);
        tmpPhyPage.SectBitmap = 0x03;
        tmpPhyPage.MDataPtr = LML_TEMP_BUF;
        tmpPhyPage.SDataPtr = (void *)tmpSpare;
        LML_VirtualPageRead(&tmpPhyPage);

		//if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
		//{
		//	PRINT("_GetLogPageForWrite log %x page 0, data age: %x, log age: %x\n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex, tmpSpare[0].PageStatus, tmpSpare[0].PageStatus+1);
		//}

        tmpSpare[0].BadBlkFlag = 0xff;
        tmpSpare[1].BadBlkFlag = 0xff;
        tmpSpare[0].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
        tmpSpare[1].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
        tmpSpare[0].LogicPageNum = 0xffff;
        tmpSpare[1].LogicPageNum = 0xffff;
        tmpSpare[0].PageStatus =  tmpSpare[0].PageStatus + 1;
        tmpSpare[1].PageStatus = tmpSpare[0].PageStatus;
		if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
		{
			tmpSpare[0].LogType = LSB_TYPE|(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex<<4);
			tmpSpare[1].LogType = LSB_TYPE|(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex<<4);
		}
		else
		{
			tmpSpare[0].LogType = 0xff;
			tmpSpare[1].LogType = 0xff;
		}

       //write the logical information to the spare area of page 0 of the log block
       	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
       	{
       		DBUG_MSG("[DBUG] _GetLogPageForWrite, write the logical information to log page 0, writeblkindex: %x\n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex);
       		LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, LOG_BLK_TBL[tmpLogPst].PhyBlk.PhyBlkNum, 0);
       	}
		else
        	LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, LOG_BLK_TBL[tmpLogPst].PhyBlk.PhyBlkNum, 0);

		tmpPhyPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
        result = LML_VirtualPageWrite(&tmpPhyPage);
        if(result < 0)
        {
            LOGICCTL_ERR("[MAPPING_ERR] Physical write module failed when write logical information, Err:0x%x!\n", result);
            return -1;
        }

        result = PHY_SynchBank(tmpPhyPage.BankNum, SYNC_CHIP_MODE);
        if(result < 0)
        {
            //the last write operation on the current bank failed, the block is bad and needs to be processed
            LOGICCTL_DBG("[LOGICCTL_DBG] Find a bad block when write logical page! bank:0x%x, block:0x%x, page:0x%x\n",
                    tmpPhyPage.BankNum, tmpPhyPage.BlkNum, tmpPhyPage.PageNum);

            //process the bad block
            result = LML_BadBlkManage(&LOG_BLK_TBL[tmpLogPst].PhyBlk, CUR_MAP_ZONE, 0, &LOG_BLK_TBL[tmpLogPst].PhyBlk);
            if(result < 0)
            {
                LOGICCTL_ERR("[MAPPING_ERR] Bad block process failed when get log page for write, Err:0x%x!\n", result);
                return -1;
            }

           goto __CHECK_WRITE_LOGICAL_INFO_OF_LOG_BLOCK;
        }
    }

    //set the log page number for return
    *pLogPage = tmpPage;
    *pLogPst = tmpLogPst;

    return 0;
}
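The SUPPORT_ALIGN_NAND_BNK arithmetic above picks the next log page so that it falls in the same bank (page modulo INTERLEAVE_BANK_CNT) as the logical page being written, either on the same page line as the last used page or on the next one; it does not cover the LSB two-block handling. A small, self-contained sketch of just that calculation, with bank_cnt standing in for INTERLEAVE_BANK_CNT and empty_mark for the 0xffff "empty log block" marker:

#include <stdio.h>

/* pick the next log page so that it sits in the same bank (page % bank_cnt)
 * as the logical page being written, never moving backwards in the block */
static unsigned next_aligned_log_page(unsigned last_used, unsigned logic_page,
                                      unsigned bank_cnt, unsigned empty_mark)
{
    unsigned want_bank = logic_page % bank_cnt;

    if (last_used == empty_mark)            /* empty log block: first page line */
        return want_bank;

    if (want_bank > last_used % bank_cnt)   /* same page line as the last used page */
        return last_used + (want_bank - last_used % bank_cnt);

    /* otherwise move on to the next page line */
    return last_used + want_bank + (bank_cnt - last_used % bank_cnt);
}

int main(void)
{
    /* with 2 banks, after log page 5 a write to an even logical page lands on 6,
     * and a write to an odd logical page lands on 7 */
    printf("%u\n", next_aligned_log_page(5, 8, 2, 0xffff));   /* prints 6 */
    printf("%u\n", next_aligned_log_page(5, 9, 2, 0xffff));   /* prints 7 */
    return 0;
}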
Example No. 13
/*
************************************************************************************************************************
*                       CREATE A NEW LOG BLOCK
*
*Description: Create a new log block.
*
*Arguments  : nBlk      the logical block number of the log block;
*             pLogPst   the pointer to the log block position in the log block table.
*
*Return     : create new log block result.
*               = 0     create new log block successful;
*               =-1     create new log block failed.
************************************************************************************************************************
*/
static __s32 _CreateNewLogBlk(__u32 nBlk, __u32 *pLogPst)
{
    __s32   i, result, LogBlkType,tmpPst=-1;
    __u16   tmpLogAccessAge = 0xffff;
    struct __SuperPhyBlkType_t tmpFreeBlk, tmpFreeBlk1;
    struct __PhysicOpPara_t tmpPhyPage;
    struct __NandUserData_t tmpSpare[2];

    #if CFG_SUPPORT_WEAR_LEVELLING

    //check if need do wear-levelling
    if(BLK_ERASE_CNTER >= WEAR_LEVELLING_FREQUENCY)
    {
        LML_WearLevelling();
    }

    #endif
    //try to search an empty item in the log block table
    for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
    {
        if(LOG_BLK_TBL[i].LogicBlkNum == 0xffff)
        {
            //found an empty item
            tmpPst = i;
            break;
        }
    }

    //there is no empty item in the log block table, need merge a log block
    if(tmpPst == -1)
    {
        //check if there is some full log block
        for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
        {
            if(LOG_BLK_TBL[i].LastUsedPage == PAGE_CNT_OF_SUPER_BLK-1)
            {
                tmpPst = i;
                break;
            }
        }

        if(tmpPst == -1)
        {
            //there is no full log block, look for the oldest log block to merge
            for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
            {
                if(LOG_ACCESS_AGE[i] < tmpLogAccessAge)
                {
                    tmpLogAccessAge = LOG_ACCESS_AGE[i];
                    tmpPst = i;
                }
            }
        }

        //switch in the page mapping table for merging the log block
        result = PMM_SwitchMapTbl(tmpPst);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Switch page mapping table failed when create new log block! Err:0x%x\n", result);
            return -1;
        }

        //merge the log block in normal mode to make an empty item
        result = LML_MergeLogBlk(NORMAL_MERGE_MODE, LOG_BLK_TBL[tmpPst].LogicBlkNum);
        if(result < 0)
        {
            //merge log block failed, report error
            MAPPING_ERR("[MAPPING_ERR] Merge log block failed when create new log block! Err:0x%x\n", result);
            return -1;
        }
    }

	LogBlkType = BMM_CalLogBlkType(nBlk);

	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlkType == LSB_TYPE))
	{
		DBUG_MSG("[DBUG_MSG] _CreateNewLogBlk, select bak log block\n");

		//get a free block to create a new log block
	    result = BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk);
	    if(result < 0)
	    {
	        MAPPING_ERR("[MAPPING_ERR] Get free block failed when create new log block!\n");
	        return -1;
	    }

		//get a free block to create a new log block
	    result = BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk1);
	    if(result < 0)
	    {
	        MAPPING_ERR("[MAPPING_ERR] Get free block failed when create new log block!\n");
	        return -1;
	    }

		//DBUG_INF("[DBUG] _CreateNewLogBlk, logic: %x, logblk0: %x, logblk1:%x \n", nBlk, tmpFreeBlk.PhyBlkNum, tmpFreeBlk1.PhyBlkNum);

	    //make a new log item in the log block table
	    LOG_BLK_TBL[tmpPst].LogicBlkNum = nBlk;
	    LOG_BLK_TBL[tmpPst].LastUsedPage = 0xffff;
		LOG_BLK_TBL[tmpPst].LogBlkType = LogBlkType;
		LOG_BLK_TBL[tmpPst].WriteBlkIndex = 0;
		LOG_BLK_TBL[tmpPst].ReadBlkIndex = 0;
	    LOG_BLK_TBL[tmpPst].PhyBlk = tmpFreeBlk;
		LOG_BLK_TBL[tmpPst].PhyBlk1 = tmpFreeBlk1;
	    //set the return value of the log position
	    *pLogPst = tmpPst;
	}
	else
	{
	    //get a free block to create a new log block
	    result = BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk);
	    if(result < 0)
	    {
	        MAPPING_ERR("[MAPPING_ERR] Get free block failed when create new log block!\n");
	        return -1;
	    }

	    //make a new log item in the log block table
	    LOG_BLK_TBL[tmpPst].LogicBlkNum = nBlk;
	    LOG_BLK_TBL[tmpPst].LastUsedPage = 0xffff;
		LOG_BLK_TBL[tmpPst].LogBlkType = LogBlkType;
		LOG_BLK_TBL[tmpPst].WriteBlkIndex = 0;
		LOG_BLK_TBL[tmpPst].ReadBlkIndex = 0;
	    LOG_BLK_TBL[tmpPst].PhyBlk = tmpFreeBlk;
	    //set the return value of the log position
	    *pLogPst = tmpPst;
	}


__CHECK_LOGICAL_INFO_OF_DATA_BLOCK:
    //check if the data block is an empty block, if so, need update the logic information in the spare area
    LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, DATA_BLK_TBL[nBlk].PhyBlkNum, 0);
    tmpPhyPage.SectBitmap = 0x03;
    tmpPhyPage.MDataPtr = LML_TEMP_BUF;
    tmpPhyPage.SDataPtr = (void *)tmpSpare;
    LML_VirtualPageRead(&tmpPhyPage);

    if(tmpSpare[0].LogicInfo == 0xffff)
    {
        tmpSpare[0].BadBlkFlag = 0xff;
        tmpSpare[1].BadBlkFlag = 0xff;
        tmpSpare[0].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
        tmpSpare[1].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
        tmpSpare[0].LogicPageNum = 0xffff;
        tmpSpare[1].LogicPageNum = 0xffff;
        tmpSpare[0].PageStatus = 0xff;
        tmpSpare[1].PageStatus = 0xff;

        //write the logical information to the spare area of the data block
        tmpPhyPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
        result = LML_VirtualPageWrite(&tmpPhyPage);
        if(result < 0)
        {
            LOGICCTL_ERR("[MAPPING_ERR] Physical write module failed when write logical information, Err:0x%x!\n", result);
            return -1;
        }

        result = PHY_SynchBank(tmpPhyPage.BankNum, SYNC_CHIP_MODE);
        if(result < 0)
        {
            //the last write operation on the current bank failed, the block is bad and needs to be processed
            LOGICCTL_DBG("[LOGICCTL_DBG] Find a bad block when write logical page! bank:0x%x, block:0x%x, page:0x%x\n",
                    tmpPhyPage.BankNum, tmpPhyPage.BlkNum, tmpPhyPage.PageNum);

            //process the bad block
            result = LML_BadBlkManage(&DATA_BLK_TBL[nBlk], CUR_MAP_ZONE, 0, &tmpFreeBlk);
            if(result < 0)
            {
                LOGICCTL_ERR("[MAPPING_ERR] Bad block process failed when create new log block, Err:0x%x!\n", result);
                return -1;
            }
            DATA_BLK_TBL[nBlk] = tmpFreeBlk;

            goto __CHECK_LOGICAL_INFO_OF_DATA_BLOCK;
        }
    }

    return 0;
}
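When the log block table has no empty item, _CreateNewLogBlk picks a victim to merge: a full log block if one exists, otherwise the least recently used one according to LOG_ACCESS_AGE. The sketch below shows that selection in isolation; its types are simplified stand-ins, and it additionally skips unused slots during the age scan, a check the driver's release path also performs.

#include <stdio.h>

#define EMPTY_BLK   0xffffu

struct log_item {
    unsigned short logic_blk;       /* 0xffff marks an unused slot        */
    unsigned short last_used_page;  /* == page_cnt-1 when the block is full */
};

/* choose which log block to merge: a full one if any, otherwise the one
 * with the smallest access age (i.e. least recently used) */
static int pick_victim(const struct log_item *tbl, const unsigned short *age,
                       int cnt, unsigned page_cnt)
{
    int i, victim = -1;
    unsigned short oldest = 0xffff;

    for (i = 0; i < cnt; i++) {
        if (tbl[i].logic_blk != EMPTY_BLK && tbl[i].last_used_page == page_cnt - 1)
            return i;                       /* a full log block gets merged first */
    }

    for (i = 0; i < cnt; i++) {
        if (tbl[i].logic_blk != EMPTY_BLK && age[i] < oldest) {
            oldest = age[i];
            victim = i;
        }
    }
    return victim;                          /* -1 if the table is empty */
}

int main(void)
{
    struct log_item tbl[3] = { {1, 10}, {2, 255}, {3, 4} };
    unsigned short  age[3] = { 7, 9, 2 };

    printf("victim: %d\n", pick_victim(tbl, age, 3, 256));   /* slot 1: it is full */
    return 0;
}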
Example No. 14
/*
************************************************************************************************************************
*                       GET PARAMETER OF LOG BLOCK
*
*Description: Get parameter of log block.
*
*Arguments  : nLogicBlk     the logical block number to which the log block belongs;
*             pLogBlk       the pointer to the log block item for return;
*
*Return     : get log block result;
*               = 0     get log block successful;
*               =-1     get log block failed.
*
*Note       : Scan the log block table that is currently being accessed in the buffer
*             to look for the log block; if the log block exists, return 0,
*             else return -1.
************************************************************************************************************************
*/
__s32 BMM_GetLogBlk(__u32 nLogicBlk, struct __LogBlkType_t *pLogBlk)
{
    __s32   tmpLogPst, result;
	struct __SuperPhyBlkType_t tmpFreeBlk1;
	__u32   LogBlkType;

    tmpLogPst = _GetLogBlkPst(nLogicBlk);

    if(tmpLogPst < 0)
    {
        //if the logic block number is invalid, report error
        if(nLogicBlk > DATA_BLK_CNT_OF_ZONE)
        {
            MAPPING_ERR("[MAPPING_ERR] Logical block number(0x%x) is invalid when get log block!\n", nLogicBlk);
        }

        if(pLogBlk != NULL)
        {
            pLogBlk->LogicBlkNum = 0xffff;
            pLogBlk->LastUsedPage = 0xffff;
			pLogBlk->LogBlkType = 0;
			pLogBlk->ReadBlkIndex = 0;
			pLogBlk->WriteBlkIndex = 0;
            pLogBlk->PhyBlk.PhyBlkNum = 0xffff;
            pLogBlk->PhyBlk.BlkEraseCnt = 0xffff;
			pLogBlk->PhyBlk1.PhyBlkNum = 0xffff;
            pLogBlk->PhyBlk1.BlkEraseCnt = 0xffff;
			pLogBlk->PhyBlk2.PhyBlkNum = 0xffff;
            pLogBlk->PhyBlk2.BlkEraseCnt = 0xffff;
        }

        return -1;
    }
    else
    {
        if(pLogBlk != NULL)
        {

			if(SUPPORT_LOG_BLOCK_MANAGE)
			{
				LogBlkType = BMM_CalLogBlkType(nLogicBlk);
				if(LOG_BLK_TBL[tmpLogPst].LogBlkType == 0xffff)
				{
					PRINT("[DBUG] find a log table item with valid log block type!\n");
					LOG_BLK_TBL[tmpLogPst].LogBlkType = 0;
					LOG_BLK_TBL[tmpLogPst].WriteBlkIndex = 0;
					LOG_BLK_TBL[tmpLogPst].ReadBlkIndex = 0;
				}

	        	//check log type for debug
				if(LOG_BLK_TBL[tmpLogPst].LogBlkType != LogBlkType)
				{
					PRINT("[DBUG] LogBlkTye mismatch: 0x%x, 0x%x\n",LOG_BLK_TBL[tmpLogPst].LogBlkType, LogBlkType);
					if((LOG_BLK_TBL[tmpLogPst].LogBlkType == NORMAL_TYPE)&&(LogBlkType == LSB_TYPE))
					{
						//get a free block to create a new log block
					    result = BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk1);
					    if(result < 0)
					    {
					        MAPPING_ERR("[MAPPING_ERR] Get free block failed when create new log block!\n");
					        return -1;
					    }

					    //make a new log item in the log block table
						LOG_BLK_TBL[tmpLogPst].LogBlkType = LogBlkType;
						LOG_BLK_TBL[tmpLogPst].WriteBlkIndex = 0;
						LOG_BLK_TBL[tmpLogPst].ReadBlkIndex = 0;
						LOG_BLK_TBL[tmpLogPst].PhyBlk1.PhyBlkNum = tmpFreeBlk1.PhyBlkNum;
						LOG_BLK_TBL[tmpLogPst].PhyBlk1.BlkEraseCnt = tmpFreeBlk1.BlkEraseCnt;

						*pLogBlk = LOG_BLK_TBL[tmpLogPst];

					}

				}
				else
				{
                	*pLogBlk = LOG_BLK_TBL[tmpLogPst];
				}
			}
			else
			{
            	*pLogBlk = LOG_BLK_TBL[tmpLogPst];
			}
        }
    }

    return 0;
}
Example No. 15
static __s32 _write_back_page_map_tbl(__u32 nLogBlkPst)
{
    __u16 TablePage;
    __u32 TableBlk;
    struct  __NandUserData_t  UserData[2];
    struct  __PhysicOpPara_t  param, tmpPage0;
    struct  __SuperPhyBlkType_t BadBlk,NewBlk;
	__s32 result;


    /*check page position, merge if no free page*/
	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
	{
		TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
    	TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
		DBUG_MSG("[DBUG] _write_back_page_map_tbl, log block: %x, bak log block %x\n", LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);
		DBUG_MSG("[DBUG] _write_back_page_map_tbl, select bak log block\n");
		TablePage = PMM_CalNextLogPage(TablePage);

		if((TablePage >= PAGE_CNT_OF_SUPER_BLK)&&(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 0))
		{
			DBUG_MSG("[DBUG] _write_back_page_map_tbl, change to log block 1, phyblock1: %x\n", LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);

			LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex = 1;
			TablePage = TablePage - PAGE_CNT_OF_SUPER_BLK;
		}

		if(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 1)
    		TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum;
		else
			TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;

		if (TablePage >= PAGE_CNT_OF_SUPER_BLK){
			//DBUG_INF("[DBUG] _write_back_page_map_tbl, log block full, need merge\n");
	        /*block is full, need merge*/
	        if (LML_MergeLogBlk(SPECIAL_MERGE_MODE,LOG_BLK_TBL[nLogBlkPst].LogicBlkNum)){
	            MAPPING_ERR("write back page tbl : merge err\n");
	            return NAND_OP_FALSE;
	        }
			DBUG_MSG("[DBUG] _write_back_page_map_tbl, log block merge end\n");
	        if (PAGE_MAP_CACHE->ZoneNum != 0xff){
	            /*move merge*/
	            TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
	            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
				TablePage = PMM_CalNextLogPage(TablePage);
				//DBUG_INF("[DBUG] _write_back_page_map_tbl, after move merge, table block: %x, table page %x\n", TableBlk, TablePage);
	        }
	        else
	            return NAND_OP_TRUE;
	    }
	}
	else
	{
		TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
    	TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;

		if (TablePage == PAGE_CNT_OF_SUPER_BLK){
	        /*block is full, need merge*/
	        if (LML_MergeLogBlk(SPECIAL_MERGE_MODE,LOG_BLK_TBL[nLogBlkPst].LogicBlkNum)){
	            MAPPING_ERR("write back page tbl : merge err\n");
	            return NAND_OP_FALSE;
	        }

	        if (PAGE_MAP_CACHE->ZoneNum != 0xff){
	            /*move merge*/
	            TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
	            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
	        }
	        else
	            return NAND_OP_TRUE;
	    }
	}



rewrite:
//PRINT("-------------------write back page tbl for blk %x\n",TableBlk);
    /*write page map table*/
	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
	{
		if((TablePage== 0)&&(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 1))
	    {
			MEMSET((void *)(&UserData[0]),0xff,sizeof(struct __NandUserData_t) * 2);
	        //the log page is page 0 of log block 1; copy page 0 of log block 0, then skip this page
	        LML_CalculatePhyOpPar(&tmpPage0, CUR_MAP_ZONE, LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, 0);
	        tmpPage0.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
	        tmpPage0.MDataPtr = LML_TEMP_BUF;
	        tmpPage0.SDataPtr = (void *)UserData;
	        result = LML_VirtualPageRead(&tmpPage0);
	        if(result < 0)
	        {
	            LOGICCTL_ERR("[LOGICCTL_ERR] Get log age of data block failed when write logical page, Err:0x%x!\n", result);
	            return -ERR_PHYSIC;
	        }

			//log page is the page0 of the logblk1, should skip the page
			UserData[0].LogType = LSB_TYPE|(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex<<4);
			UserData[1].LogType = LSB_TYPE|(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex<<4);

	        LML_CalculatePhyOpPar(&tmpPage0, CUR_MAP_ZONE, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum, 0);
	        tmpPage0.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
	        tmpPage0.MDataPtr = LML_TEMP_BUF;
	        tmpPage0.SDataPtr = (void *)UserData;
	        result = LML_VirtualPageWrite(&tmpPage0);

			TablePage++;

	    }
	}

	MEMSET((void *)(&UserData[0]),0xff,sizeof(struct __NandUserData_t) * 2);
    UserData[0].PageStatus = 0xaa;
	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
	{
		UserData[0].LogType = LSB_TYPE|(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex<<4);
		UserData[1].LogType = LSB_TYPE|(LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex<<4);
	}
	else
	{
		UserData[0].LogType = 0xff;
		UserData[1].LogType = 0xff;
	}

	//if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE)&&(TablePage== 0))
	//{
	//	DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, TablePage: %x, TableBlk: %x\n", TablePage, TableBlk);
	//	DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, logicNum: %x, log0: %x, log1: %x\n", LOG_BLK_TBL[nLogBlkPst].LogicBlkNum,LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);
	//	DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, logicinfo: %x, logicpage: %x\n", UserData[0].LogicInfo, UserData[0].LogicPageNum);
	//	DBUG_INF("[DBUG] _write_back_page_map_tbl in page0, logtype: %x, pagestatus: %x\n", UserData[0].LogType, UserData[0].PageStatus);
	//}

    MEMSET(LML_PROCESS_TBL_BUF,0xff,SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);

	if(PAGE_CNT_OF_SUPER_BLK >= 512)
	{
		__u32 page;

		for(page = 0; page < PAGE_CNT_OF_SUPER_BLK; page++)
			*((__u16 *)LML_PROCESS_TBL_BUF + page) = PAGE_MAP_TBL[page].PhyPageNum;

		((__u32 *)LML_PROCESS_TBL_BUF)[511] = \
        	_GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK*2/(sizeof (__u32)));
	}

	else
	{
		MEMCPY(LML_PROCESS_TBL_BUF, PAGE_MAP_TBL,PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t));
    	((__u32 *)LML_PROCESS_TBL_BUF)[511] = \
        	_GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t)/(sizeof (__u32)));
	}

    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;

//rewrite:
    LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);

    if (NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE)){
        BadBlk.PhyBlkNum = TableBlk;
        if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk,CUR_MAP_ZONE,TablePage,&NewBlk)){
            MAPPING_ERR("write page map table : bad block mange err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        LOG_BLK_TBL[nLogBlkPst].PhyBlk = NewBlk;
        goto rewrite;
    }

    LOG_BLK_TBL[nLogBlkPst].LastUsedPage = TablePage;
    PAGE_MAP_CACHE->ZoneNum = 0xff;
    PAGE_MAP_CACHE->LogBlkPst = 0xff;

	if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
		DBUG_MSG("[DBUG] _write_back_page_map_tbl end, lastusedpage: %x, write_index: %x\n", LOG_BLK_TBL[nLogBlkPst].LastUsedPage, LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex);

    return NAND_OP_TRUE;

}
Example No. 16
__s32 BMM_RleaseLogBlock(__u32 log_level)
{
	__u32 tmpZoneNum, ZoneCnt, tmpLogPos;
	__s32 result, release_logblk_cnt;
	__s32 tmpPst=-1, i, ValidLogblkCnt = 0;
    __u16 tmpLogAccessAge = 0xffff;

	 //check if need to release log block
	ValidLogblkCnt = 0;
    for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
    {
        if(LOG_BLK_TBL[i].LogicBlkNum != 0xffff)
        {
            ValidLogblkCnt++;
        }
    }

	//if the number of valid log blocks is not greater than log_level, no need to release a log block
	if(ValidLogblkCnt<=log_level)
        return 0;

	//PRINT("BMM_RleaseLogBlock\n");
	BMM_SetDirtyFlag();

	 //check if there is some full log block
    for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
    {
        if(LOG_BLK_TBL[i].LastUsedPage == PAGE_CNT_OF_SUPER_BLK-1)
        {
            tmpPst = i;
            break;
        }
    }

    if(tmpPst == -1)
    {
        //there is no full log block, look for an oldest log block to merge
        for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
        {
            if((LOG_ACCESS_AGE[i] < tmpLogAccessAge)&&(LOG_BLK_TBL[i].LogicBlkNum != 0xffff))
            {
                tmpLogAccessAge = LOG_ACCESS_AGE[i];
                tmpPst = i;
            }
        }
    }

	if(tmpPst == -1)
		return -1;

    //switch in the page mapping table for merging the log block
    result = PMM_SwitchMapTbl(tmpPst);
    if(result < 0)
    {
        MAPPING_ERR("[MAPPING_ERR] Switch page mapping table failed when create new log block! Err:0x%x\n", result);
        return -1;
    }

    //merge the log block in normal mode to make an empty item
    result = LML_MergeLogBlk(NORMAL_MERGE_MODE, LOG_BLK_TBL[tmpPst].LogicBlkNum);
    if(result < 0)
    {
        //merge log block failed, report error
        MAPPING_ERR("[MAPPING_ERR] Merge log block failed when create new log block! Err:0x%x\n", result);
        return -1;
    }

	return (ValidLogblkCnt-1);

}
Example No. 17
/* fetch block map table from flash */
static __s32 _read_block_map_tbl(__u8 nZone)
{
    __s32 TablePage;
    __u32 TableBlk;
    struct  __PhysicOpPara_t  param;

    /*set table block number and table page number*/
    TableBlk = NandDriverInfo.ZoneTblPstInfo[nZone].PhyBlkNum;
    TablePage = NandDriverInfo.ZoneTblPstInfo[nZone].TablePst;

    /*read data block and free block map tbl*/

    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = NULL;
    param.SectBitmap = 0xf;
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    if(LML_VirtualPageRead(&param) < 0)
    {
        MAPPING_ERR("_read_block_map_tbl :read block map table0 err\n");
        return NAND_OP_FALSE;
    }

    MEMCPY(DATA_BLK_TBL,LML_PROCESS_TBL_BUF,2048);

    TablePage++;
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    if( LML_VirtualPageRead(&param) < 0)
    {
        MAPPING_ERR("_read_block_map_tbl : read block map table1 err\n");
        return NAND_OP_FALSE;
    }

    MEMCPY(&DATA_BLK_TBL[512],LML_PROCESS_TBL_BUF,2048);
    if(((__u32 *)DATA_BLK_TBL)[1023] != \
            _GetTblCheckSum((__u32 *)DATA_BLK_TBL,(DATA_BLK_CNT_OF_ZONE+FREE_BLK_CNT_OF_ZONE)))
    {
        MAPPING_ERR("_read_block_map_tbl : read data block map table checksum err\n");
        dump((void*)DATA_BLK_TBL,1024*4,4,8);
        return NAND_OP_FALSE;
    }

    /*read log block table*/
    TablePage++;
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    if ( LML_VirtualPageRead(&param) < 0) {
        MAPPING_ERR("_read_block_map_tbl : read block map table2 err\n");
        return NAND_OP_FALSE;
    }
    if (((__u32 *)LML_PROCESS_TBL_BUF)[511] != \
            _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, LOG_BLK_CNT_OF_ZONE*sizeof(struct __LogBlkType_t)/sizeof(__u32)))
    {
        MAPPING_ERR("_read_block_map_tbl : read log block table checksum err\n");
        dump((void*)LML_PROCESS_TBL_BUF,512*8,2,8);
        return NAND_OP_FALSE;
    }
    MEMCPY(LOG_BLK_TBL,LML_PROCESS_TBL_BUF,LOG_BLK_CNT_OF_ZONE*sizeof(struct __LogBlkType_t));

    return NAND_OP_TRUE;
}
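The table readers and writers in these examples guard every table with a checksum stored in the last 32-bit word of the table buffer, but _GetTblCheckSum itself is not part of this listing. Assuming it is a simple additive word checksum, which is a common choice for tables like these, a hypothetical stand-in and the store/verify convention would look like this:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for _GetTblCheckSum: a plain 32-bit word sum over
 * the table contents (the driver's real formula is not shown here) */
static uint32_t tbl_checksum(const uint32_t *buf, uint32_t word_cnt)
{
    uint32_t i, sum = 0;

    for (i = 0; i < word_cnt; i++)
        sum += buf[i];
    return sum;
}

int main(void)
{
    uint32_t tbl[8] = { 1, 2, 3, 4, 5, 6, 7, 0 };

    /* the last word of the buffer holds the checksum of the table words,
     * mirroring how slot [1023] / [511] is used in the tables above */
    tbl[7] = tbl_checksum(tbl, 7);
    printf("stored 0x%08lx, recomputed 0x%08lx\n",
           (unsigned long)tbl[7], (unsigned long)tbl_checksum(tbl, 7));
    return 0;
}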
Example No. 18
/*write block map table to flash*/
static __s32 _write_back_block_map_tbl(__u8 nZone)
{
    __s32 TablePage;
    __u32 TableBlk;
    struct  __NandUserData_t  UserData[2];
    struct  __PhysicOpPara_t  param;
    struct __SuperPhyBlkType_t BadBlk,NewBlk;

    /*write back all page map table within this zone*/
    if (NAND_OP_TRUE != _write_back_all_page_map_tbl(nZone)) {
        MAPPING_ERR("write back all page map tbl err\n");
        return NAND_OP_FALSE;
    }

    /*set table block number and table page number*/
    TableBlk = NandDriverInfo.ZoneTblPstInfo[nZone].PhyBlkNum;
    TablePage = NandDriverInfo.ZoneTblPstInfo[nZone].TablePst;
    if(TablePage >= PAGE_CNT_OF_SUPER_BLK - 4)
    {
        if(NAND_OP_TRUE != LML_VirtualBlkErase(nZone, TableBlk))
        {
            BadBlk.PhyBlkNum = TableBlk;

            if(NAND_OP_TRUE != LML_BadBlkManage(&BadBlk,CUR_MAP_ZONE,0,&NewBlk))
            {
                MAPPING_ERR("write back block tbl : bad block manage err erase data block\n");
                return NAND_OP_FALSE;
            }

            TableBlk = NewBlk.PhyBlkNum;
        }
        TablePage = -4;     /*so that the increment below restarts writing at page 0 of the table block*/
    }

    TablePage += 4;

    //calculate checksum for data block table and free block table
    ((__u32 *)DATA_BLK_TBL)[1023] = \
                                    _GetTblCheckSum((__u32 *)DATA_BLK_TBL, (DATA_BLK_CNT_OF_ZONE + FREE_BLK_CNT_OF_ZONE));
    //clear full page data
    MEMSET(LML_PROCESS_TBL_BUF, 0xff, SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);

rewrite:
    /*write back data block and free block map table*/
    MEMSET((void *)&UserData,0xff,sizeof(struct __NandUserData_t) * 2);
    MEMCPY(LML_PROCESS_TBL_BUF,DATA_BLK_TBL,2048);
    /*write page 0, need set spare info*/
    if (TablePage == 0)
    {
        UserData[0].LogicInfo = (1<<14) | ((nZone % ZONE_CNT_OF_DIE) << 10) | 0xaa ;
    }
    UserData[0].PageStatus = 0x55;
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if (NAND_OP_TRUE !=  PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE)) {
        BadBlk.PhyBlkNum = TableBlk;
        if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk,nZone,0,&NewBlk)) {
            MAPPING_ERR("write blk map table : bad block mange err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        TablePage = 0;
        goto rewrite;
    }

    MEMCPY(LML_PROCESS_TBL_BUF, &DATA_BLK_TBL[512], 2048);
    TablePage ++;
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    MEMSET((void *)&UserData,0xff,sizeof(struct __NandUserData_t) * 2);
    UserData[0].PageStatus = 0x55;
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if(NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE))
    {
        BadBlk.PhyBlkNum = TableBlk;
        if(NAND_OP_TRUE != LML_BadBlkManage(&BadBlk,nZone,0,&NewBlk))
        {
            MAPPING_ERR("write blk map table : bad block mange err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        TablePage = 0;
        goto rewrite;
    }


    /*write back log block map table*/
    TablePage++;
    MEMSET(LML_PROCESS_TBL_BUF, 0xff, SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);
    MEMCPY(LML_PROCESS_TBL_BUF,LOG_BLK_TBL,LOG_BLK_CNT_OF_ZONE*sizeof(struct __LogBlkType_t));
    /*cal checksum*/
    ((__u32 *)LML_PROCESS_TBL_BUF)[511] = \
                                          _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, LOG_BLK_CNT_OF_ZONE*sizeof(struct __LogBlkType_t)/sizeof(__u32));
    LML_CalculatePhyOpPar(&param, nZone, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if(NAND_OP_TRUE !=  PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE))
    {
        BadBlk.PhyBlkNum = TableBlk;
        if(NAND_OP_TRUE != LML_BadBlkManage(&BadBlk,nZone,0,&NewBlk))
        {
            MAPPING_ERR("write blk map table : bad block mange err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        TablePage = 0;
        goto rewrite;
    }

    /*reset zone info*/
    NandDriverInfo.ZoneTblPstInfo[nZone].PhyBlkNum = TableBlk;
    NandDriverInfo.ZoneTblPstInfo[nZone].TablePst = TablePage - 2;

    return NAND_OP_TRUE;
}
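Each zone table update above writes a group of three pages (two pages of data/free block table plus one page of log block table) and advances the stored table position by four pages per update; once the next group would not fit, the table block is erased (or remapped if the erase fails) and writing restarts at page 0. A stand-alone sketch of just that slot arithmetic, using a hypothetical 16-page block for the demonstration:

#include <stdio.h>

/* given the first page of the previous table group, return the first page of
 * the next group; signal when the block must be erased and reused from page 0 */
static int next_table_slot(int cur_slot, int pages_per_blk, int *need_erase)
{
    *need_erase = 0;
    if (cur_slot >= pages_per_blk - 4) {    /* no room for another group */
        *need_erase = 1;
        return 0;                           /* restart at page 0 after the erase */
    }
    return cur_slot + 4;
}

int main(void)
{
    int prev = 0, slot, erase, i;

    for (i = 0; i < 5; i++) {
        slot = next_table_slot(prev, 16, &erase);
        printf("update %d -> pages %d..%d%s\n", i, slot, slot + 2,
               erase ? " (block erased first)" : "");
        prev = slot;
    }
    return 0;
}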
Example No. 19
static __s32 _write_back_page_map_tbl(__u32 nLogBlkPst)
{
    __u16 TablePage;
    __u32 TableBlk;
    struct  __NandUserData_t  UserData[2];
    struct  __PhysicOpPara_t  param;
    struct  __SuperPhyBlkType_t BadBlk,NewBlk;


    /*check page position, merge if no free page*/
    TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
    TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
    if (TablePage == PAGE_CNT_OF_SUPER_BLK) {
        /*block is full, need merge*/
        if (LML_MergeLogBlk(SPECIAL_MERGE_MODE,LOG_BLK_TBL[nLogBlkPst].LogicBlkNum)) {
            MAPPING_ERR("write back page tbl : merge err\n");
            return NAND_OP_FALSE;
        }

        if (PAGE_MAP_CACHE->ZoneNum != 0xff) {
            /*move merge*/
            TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
        }
        else
            return NAND_OP_TRUE;
    }

rewrite:
//PRINT("-------------------write back page tbl for blk %x\n",TableBlk);
    /*write page map table*/
    MEMSET((void *)&UserData,0xff,sizeof(struct __NandUserData_t) * 2);
    UserData[0].PageStatus = 0xaa;
    MEMSET(LML_PROCESS_TBL_BUF,0xff,SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);

    if(PAGE_CNT_OF_SUPER_BLK >= 512)
    {
        __u32 page;

        for(page = 0; page < PAGE_CNT_OF_SUPER_BLK; page++)
            *((__u16 *)LML_PROCESS_TBL_BUF + page) = PAGE_MAP_TBL[page].PhyPageNum;

        ((__u32 *)LML_PROCESS_TBL_BUF)[511] = \
                                              _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK*2/(sizeof (__u32)));
    }

    else
    {
        MEMCPY(LML_PROCESS_TBL_BUF, PAGE_MAP_TBL,PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t));
        ((__u32 *)LML_PROCESS_TBL_BUF)[511] = \
                                              _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t)/(sizeof (__u32)));
    }

    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;

//rewrite:
    LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if (NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE)) {
        BadBlk.PhyBlkNum = TableBlk;
        if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk,CUR_MAP_ZONE,TablePage,&NewBlk)) {
            MAPPING_ERR("write page map table : bad block mange err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        LOG_BLK_TBL[nLogBlkPst].PhyBlk = NewBlk;
        goto rewrite;
    }

    LOG_BLK_TBL[nLogBlkPst].LastUsedPage = TablePage;
    PAGE_MAP_CACHE->ZoneNum = 0xff;
    PAGE_MAP_CACHE->LogBlkPst = 0xff;

    return NAND_OP_TRUE;

}
Example No. 20
static __s32 _read_page_map_tbl(__u32 nLogBlkPst)
{
    __s32 ret;
    __u16 TablePage;
    __u32 TableBlk, checksum;
    __u16 logicpagenum;
    __u8  status;
    struct  __NandUserData_t  UserData[2];
    struct  __PhysicOpPara_t  param;


    /*get the position of the most recently written page map table page*/
    TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage;
    TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;

    if (TablePage == 0xffff) {
        /*log block is empty*/
        MEMSET(PAGE_MAP_TBL, 0xff,PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t) );
        return NAND_OP_TRUE;
    }

    /*read page map table*/
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = 0xf;

    LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk, TablePage);
    ret = LML_VirtualPageRead(&param);

    if(PAGE_CNT_OF_SUPER_BLK >= 512)
    {
        checksum = _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF,  \
                                   PAGE_CNT_OF_SUPER_BLK*2/sizeof(__u32));
    }
    else
    {
        checksum = _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF,  \
                                   PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t)/sizeof(__u32));
    }

    status = UserData[0].PageStatus;
    logicpagenum = UserData[0].LogicPageNum;

    if((ret < 0) || (status != 0xaa) || (logicpagenum != 0xffff) || (checksum != ((__u32 *)LML_PROCESS_TBL_BUF)[511]))
    {
        if(NAND_OP_TRUE != _rebuild_page_map_tbl(nLogBlkPst))
        {
            MAPPING_ERR("rebuild page map table err\n");
            return NAND_OP_FALSE;
        }
    }
    else
    {
        if(PAGE_CNT_OF_SUPER_BLK >= 512)
        {
            __u32 page;

            for(page = 0; page < PAGE_CNT_OF_SUPER_BLK; page++)
                PAGE_MAP_TBL[page].PhyPageNum = *((__u16 *)LML_PROCESS_TBL_BUF + page);
        }
        else
            MEMCPY(PAGE_MAP_TBL,LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK*sizeof(struct __PageMapTblItem_t));
    }

    return NAND_OP_TRUE;
}
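_read_page_map_tbl only trusts the table page it reads back when the read succeeded, the spare area marks it as a table page (PageStatus 0xaa, LogicPageNum 0xffff) and the stored checksum matches; otherwise it falls back to _rebuild_page_map_tbl. The sketch below isolates that decision; the checksum helper reuses the same hypothetical word-sum stand-in assumed earlier, and the parameter names are illustrative.

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for _GetTblCheckSum, as assumed above */
static uint32_t tbl_checksum(const uint32_t *buf, uint32_t word_cnt)
{
    uint32_t i, sum = 0;

    for (i = 0; i < word_cnt; i++)
        sum += buf[i];
    return sum;
}

/* decide whether a page map table page read back from the log block can be
 * trusted, mirroring the four checks performed in _read_page_map_tbl */
static int page_map_tbl_is_valid(int read_ret, uint8_t page_status,
                                 uint16_t logic_page, const uint32_t *buf,
                                 uint32_t tbl_words, uint32_t stored_sum)
{
    if (read_ret < 0)                                return 0;  /* flash read failed          */
    if (page_status != 0xaa)                         return 0;  /* not marked as a table page */
    if (logic_page != 0xffff)                        return 0;  /* a data page, not a table   */
    if (stored_sum != tbl_checksum(buf, tbl_words))  return 0;  /* table contents corrupted   */
    return 1;                                                   /* safe to copy into RAM      */
}

int main(void)
{
    uint32_t buf[4] = { 1, 2, 3, 4 };

    printf("%d\n", page_map_tbl_is_valid(0, 0xaa, 0xffff, buf, 4, 10));  /* prints 1 */
    printf("%d\n", page_map_tbl_is_valid(0, 0x55, 0xffff, buf, 4, 10));  /* prints 0 */
    return 0;
}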