static bool performanceTest() { static const int kTableMax = 100; IndirectRefTable irt; IndirectRef manyRefs[kTableMax]; ClassObject* clazz = dvmFindClass("Ljava/lang/Object;", NULL); Object* obj0 = dvmAllocObject(clazz, ALLOC_DONT_TRACK); const u4 cookie = IRT_FIRST_SEGMENT; const int kLoops = 100000; Stopwatch stopwatch; DBUG_MSG("+++ START performance\n"); if (!irt.init(kTableMax, kTableMax, kIndirectKindGlobal)) { return false; } stopwatch.reset(); for (int loop = 0; loop < kLoops; loop++) { for (int i = 0; i < kTableMax; i++) { manyRefs[i] = irt.add(cookie, obj0); } for (int i = 0; i < kTableMax; i++) { irt.remove(cookie, manyRefs[i]); } } DBUG_MSG("Add/remove %d objects FIFO order, %d iterations, %0.3fms / iteration", kTableMax, kLoops, stopwatch.elapsedSeconds() * 1000 / kLoops); stopwatch.reset(); for (int loop = 0; loop < kLoops; loop++) { for (int i = 0; i < kTableMax; i++) { manyRefs[i] = irt.add(cookie, obj0); } for (int i = kTableMax; i-- > 0; ) { irt.remove(cookie, manyRefs[i]); } } DBUG_MSG("Add/remove %d objects LIFO order, %d iterations, %0.3fms / iteration", kTableMax, kLoops, stopwatch.elapsedSeconds() * 1000 / kLoops); for (int i = 0; i < kTableMax; i++) { manyRefs[i] = irt.add(cookie, obj0); } stopwatch.reset(); for (int loop = 0; loop < kLoops; loop++) { for (int i = 0; i < kTableMax; i++) { irt.get(manyRefs[i]); } } DBUG_MSG("Get %d objects, %d iterations, %0.3fms / iteration", kTableMax, kLoops, stopwatch.elapsedSeconds() * 1000 / kLoops); for (int i = kTableMax; i-- > 0; ) { irt.remove(cookie, manyRefs[i]); } irt.destroy(); return true; }
/*
 * Collect page-mapping statistics for one logical block.
 *
 * nlogical   : logical block number to inspect.
 * InOrder    : out; set to 1 when every valid mapping entry points at its
 *              own page index, 0 otherwise.
 * nValidPage : out; count of mapping entries that are not 0xffff.
 */
void _get_page_map_tbl_info(__u32 nlogical, __u8 *InOrder, __u16 *nValidPage)
{
    struct __LogBlkType_t LogBlk;
    __u16 LastUsedPage, PhysicPage;
    __u32 page;

    *InOrder = 1;
    *nValidPage = 0;

    BMM_GetLogBlk(nlogical, &LogBlk);
    LastUsedPage = LogBlk.LastUsedPage;

    /* LSB-type log blocks are always treated as out of order. */
    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LogBlk.LogBlkType == LSB_TYPE)) {
        DBUG_MSG("[DBUG_MSG] _get_page_map_tbl_info, select bak log block\n");
        *InOrder = 0;
    }

    for (page = 0; page < PAGE_CNT_OF_SUPER_BLK; page++) {
        PhysicPage = PMM_GetCurMapPage(page);
        if (PhysicPage == 0xffff) {
            continue;   /* unmapped entry */
        }
        *nValidPage = *nValidPage + 1;
        if (PhysicPage != page) {
            *InOrder = 0;   /* mapped, but not to its own index */
        }
    }

    /* Fewer valid pages than pages used so far means there are holes. */
    if (*nValidPage < LastUsedPage + 1) {
        *InOrder = 0;
    }
}
/*
************************************************************************************************************************
*                       GET FREE BLOCK FROM FREE BLOCK TABLE
*
*Description: Get a free block from the free block table with highest erase counter or lowest
*             erase counter.
*
*Arguments  : nType     the type of the free block which need be got;
*             pFreeBlk  the pointer to the free block pointer for return.
*
*Return     : get free block result;
*               = 0     get free block successful;
*               =-1     get free block failed.
************************************************************************************************************************
*/
__s32 BMM_GetFreeBlk(__u32 nType, struct __SuperPhyBlkType_t *pFreeBlk)
{
    __s32 scan, bestPst = -1;
    __u16 slot = LAST_FREE_BLK_PST + 1;   /* resume scanning just past the last slot handed out */
    __u32 bestEraseCnt;

    /* Seed the comparison so the first valid entry always wins. */
    bestEraseCnt = (nType == LOWEST_EC_TYPE) ? 0xffff : 0x0000;

    for (scan = 0; scan < FREE_BLK_CNT_OF_ZONE; scan++, slot++) {
        if (slot >= FREE_BLK_CNT_OF_ZONE) {
            slot = 0;   /* the table is scanned as a ring */
        }

        if (FREE_BLK_TBL[slot].PhyBlkNum == 0xffff) {
            continue;   /* empty item */
        }

        /* Track the lowest (or highest) erase count seen so far. */
        if (((nType == LOWEST_EC_TYPE) && (FREE_BLK_TBL[slot].BlkEraseCnt <= bestEraseCnt))
            || ((nType != LOWEST_EC_TYPE) && (FREE_BLK_TBL[slot].BlkEraseCnt >= bestEraseCnt))) {
            bestEraseCnt = FREE_BLK_TBL[slot].BlkEraseCnt;
            bestPst = slot;
        }
    }

    if (bestPst < 0) {
        /* No valid entry anywhere in the table. */
        MAPPING_ERR("[MAPPING_ERR] There is none free block in the free block table!\n");
        pFreeBlk->PhyBlkNum = 0xffff;
        pFreeBlk->BlkEraseCnt = 0xffff;
        return -1;
    }

    pFreeBlk->PhyBlkNum = FREE_BLK_TBL[bestPst].PhyBlkNum;
    pFreeBlk->BlkEraseCnt = FREE_BLK_TBL[bestPst].BlkEraseCnt;
    LAST_FREE_BLK_PST = bestPst;

    /* Delete the free block item from the free block table. */
    FREE_BLK_TBL[bestPst].PhyBlkNum = 0xffff;
    FREE_BLK_TBL[bestPst].BlkEraseCnt = 0xffff;

    DBUG_MSG("[DBUG] BMM_GetFreeBlk, pos: %x\n", LAST_FREE_BLK_PST);

    return 0;
}
/* ************************************************************************************************************************ * SET FREE BLOCK TO FREE BLOCK TABLE * *Description: Fill a free block to the free block table. * *Arguments : pFreeBlk the pointer to the free block which need be fill free block table. * *Return : set free block result; * = 0 set free block successful; * =-1 set free block failed. ************************************************************************************************************************ */ __s32 BMM_SetFreeBlk(struct __SuperPhyBlkType_t *pFreeBlk) { __s32 i; for(i=0; i<FREE_BLK_CNT_OF_ZONE; i++) { //look for a empty free block item in the free block table to fill the free block if(FREE_BLK_TBL[i].PhyBlkNum == 0xffff) { FREE_BLK_TBL[i].PhyBlkNum = pFreeBlk->PhyBlkNum; FREE_BLK_TBL[i].BlkEraseCnt = pFreeBlk->BlkEraseCnt; DBUG_MSG("[DBUG] BMM_SetFreeBlk, pos: %x\n", i); return 0; } } return -1; }
/*
 * Compute the next usable log page at or after current_page.
 *
 * With SUPPORT_LOG_BLOCK_MANAGE enabled, scans the lsb_page[] flag table
 * forward for the next page marked 1; returns PAGE_CNT_OF_LOGIC_BLK when
 * the scan runs off the end, and 0 when current_page is the empty marker
 * 0xffff.  Without log-block management the mapping is the identity.
 */
__u32 PMM_CalNextLogPage(__u32 current_page)
{
    __u32 next_page = 0xffff;
    __u32 scan = current_page;

    if (SUPPORT_LOG_BLOCK_MANAGE) {
        if (current_page == 0xffff) {
            /* Empty log block: start from page 0. */
            next_page = 0;
        } else {
            /* Scan forward for the next page flagged as usable. */
            for ( ; scan < PAGE_CNT_OF_LOGIC_BLK; scan++) {
                if (lsb_page[scan] == 1) {
                    next_page = scan;
                    break;
                }
            }
            if (scan == PAGE_CNT_OF_LOGIC_BLK) {
                next_page = PAGE_CNT_OF_LOGIC_BLK;   /* ran off the end */
            }
            /* Sanity check; unreachable when the scan terminated normally. */
            if ((scan < PAGE_CNT_OF_LOGIC_BLK) && (lsb_page[scan] != 1)) {
                PRINT("PMM_CalNextLogPage error, current: %x, next: %x\n",
                        current_page, next_page);
            }
        }
    } else {
        /* Log-block management disabled: pages map one-to-one. */
        next_page = current_page;
    }

    DBUG_MSG("[DBUG] PMM_CalNextLogPage, current: %x, next: %x\n",
            current_page, next_page);

    return next_page;
}
/*
 * Basic add/get/delete tests in an unsegmented table.
 *
 * Exercises, in order: removal of a never-added reference, FIFO
 * add/remove, LIFO add/remove, out-of-order removal (with double-remove
 * and hole-lookup checks), hole filling, stale-reference detection after
 * slot reuse (different object, then same object), NULL lookup, stale
 * lookup, and table overflow with hole-heavy teardown.  Returns true only
 * if every check passes; any failure logs via ALOGE and jumps to bail.
 */
static bool basicTest() {
    static const int kTableMax = 20;
    IndirectRefTable irt;
    IndirectRef iref0, iref1, iref2, iref3;
    IndirectRef manyRefs[kTableMax];
    ClassObject* clazz = dvmFindClass("Ljava/lang/Object;", NULL);
    Object* obj0 = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
    Object* obj1 = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
    Object* obj2 = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
    Object* obj3 = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
    const u4 cookie = IRT_FIRST_SEGMENT;
    bool result = false;

    /* initial size is half the max so growth is exercised too */
    if (!irt.init(kTableMax/2, kTableMax, kIndirectKindGlobal)) {
        return false;
    }

    /* removing a reference that was never added must fail */
    iref0 = (IndirectRef) 0x11110;
    if (irt.remove(cookie, iref0)) {
        ALOGE("unexpectedly successful removal");
        goto bail;
    }

    /*
     * Add three, check, remove in the order in which they were added.
     */
    DBUG_MSG("+++ START fifo\n");
    iref0 = irt.add(cookie, obj0);
    iref1 = irt.add(cookie, obj1);
    iref2 = irt.add(cookie, obj2);
    if (iref0 == NULL || iref1 == NULL || iref2 == NULL) {
        ALOGE("trivial add1 failed");
        goto bail;
    }

    if (irt.get(iref0) != obj0 ||
            irt.get(iref1) != obj1 ||
            irt.get(iref2) != obj2) {
        ALOGE("objects don't match expected values %p %p %p vs. %p %p %p",
                irt.get(iref0), irt.get(iref1), irt.get(iref2),
                obj0, obj1, obj2);
        goto bail;
    } else {
        DBUG_MSG("+++ obj1=%p --> iref1=%p\n", obj1, iref1);
    }

    if (!irt.remove(cookie, iref0) ||
            !irt.remove(cookie, iref1) ||
            !irt.remove(cookie, iref2)) {
        ALOGE("fifo deletion failed");
        goto bail;
    }

    /* table should be empty now */
    if (irt.capacity() != 0) {
        ALOGE("fifo del not empty");
        goto bail;
    }

    /* get invalid entry (off the end of the list) */
    if (irt.get(iref0) != kInvalidIndirectRefObject) {
        ALOGE("stale entry get succeeded unexpectedly");
        goto bail;
    }

    /*
     * Add three, remove in the opposite order.
     */
    DBUG_MSG("+++ START lifo\n");
    iref0 = irt.add(cookie, obj0);
    iref1 = irt.add(cookie, obj1);
    iref2 = irt.add(cookie, obj2);
    if (iref0 == NULL || iref1 == NULL || iref2 == NULL) {
        ALOGE("trivial add2 failed");
        goto bail;
    }

    if (!irt.remove(cookie, iref2) ||
            !irt.remove(cookie, iref1) ||
            !irt.remove(cookie, iref0)) {
        ALOGE("lifo deletion failed");
        goto bail;
    }

    /* table should be empty now */
    if (irt.capacity() != 0) {
        ALOGE("lifo del not empty");
        goto bail;
    }

    /*
     * Add three, remove middle / middle / bottom / top.  (Second attempt
     * to remove middle should fail.)
     */
    DBUG_MSG("+++ START unorder\n");
    iref0 = irt.add(cookie, obj0);
    iref1 = irt.add(cookie, obj1);
    iref2 = irt.add(cookie, obj2);
    if (iref0 == NULL || iref1 == NULL || iref2 == NULL) {
        ALOGE("trivial add3 failed");
        goto bail;
    }

    if (irt.capacity() != 3) {
        ALOGE("expected 3 entries, found %d", irt.capacity());
        goto bail;
    }

    /* second remove of iref1 must fail (double-remove detection) */
    if (!irt.remove(cookie, iref1) || irt.remove(cookie, iref1)) {
        ALOGE("unorder deletion1 failed");
        goto bail;
    }

    /* get invalid entry (from hole) */
    if (irt.get(iref1) != kInvalidIndirectRefObject) {
        ALOGE("hole get succeeded unexpectedly");
        goto bail;
    }

    if (!irt.remove(cookie, iref2) || !irt.remove(cookie, iref0)) {
        ALOGE("unorder deletion2 failed");
        goto bail;
    }

    /* table should be empty now */
    if (irt.capacity() != 0) {
        ALOGE("unorder del not empty");
        goto bail;
    }

    /*
     * Add four entries.  Remove #1, add new entry, verify that table size
     * is still 4 (i.e. holes are getting filled).  Remove #1 and #3, verify
     * that we delete one and don't hole-compact the other.
     */
    DBUG_MSG("+++ START hole fill\n");
    iref0 = irt.add(cookie, obj0);
    iref1 = irt.add(cookie, obj1);
    iref2 = irt.add(cookie, obj2);
    iref3 = irt.add(cookie, obj3);
    if (iref0 == NULL || iref1 == NULL || iref2 == NULL || iref3 == NULL) {
        ALOGE("trivial add4 failed");
        goto bail;
    }
    if (!irt.remove(cookie, iref1)) {
        ALOGE("remove 1 of 4 failed");
        goto bail;
    }
    iref1 = irt.add(cookie, obj1);
    if (irt.capacity() != 4) {
        ALOGE("hole not filled");
        goto bail;
    }
    if (!irt.remove(cookie, iref1) || !irt.remove(cookie, iref3)) {
        ALOGE("remove 1/3 failed");
        goto bail;
    }
    /* iref3 was the top entry, so its slot is reclaimed; iref1 leaves a hole */
    if (irt.capacity() != 3) {
        ALOGE("should be 3 after two deletions");
        goto bail;
    }
    if (!irt.remove(cookie, iref2) || !irt.remove(cookie, iref0)) {
        ALOGE("remove 2/0 failed");
        goto bail;
    }
    if (irt.capacity() != 0) {
        ALOGE("not empty after split remove");
        goto bail;
    }

    /*
     * Add an entry, remove it, add a new entry, and try to use the original
     * iref.  They have the same slot number but are for different objects.
     * With the extended checks in place, this should fail.
     */
    DBUG_MSG("+++ START switched\n");
    iref0 = irt.add(cookie, obj0);
    irt.remove(cookie, iref0);
    iref1 = irt.add(cookie, obj1);
    if (irt.remove(cookie, iref0)) {
        ALOGE("mismatched del succeeded (%p vs %p)", iref0, iref1);
        goto bail;
    }
    if (!irt.remove(cookie, iref1)) {
        ALOGE("switched del failed");
        goto bail;
    }
    if (irt.capacity() != 0) {
        ALOGE("switching del not empty");
        goto bail;
    }

    /*
     * Same as above, but with the same object.  A more rigorous checker
     * (e.g. with slot serialization) will catch this.
     */
    DBUG_MSG("+++ START switched same object\n");
    iref0 = irt.add(cookie, obj0);
    irt.remove(cookie, iref0);
    iref1 = irt.add(cookie, obj0);
    if (iref0 != iref1) {
        /* try 0, should not work */
        if (irt.remove(cookie, iref0)) {
            ALOGE("temporal del succeeded (%p vs %p)", iref0, iref1);
            goto bail;
        }
    }
    if (!irt.remove(cookie, iref1)) {
        ALOGE("temporal cleanup failed");
        goto bail;
    }
    if (irt.capacity() != 0) {
        ALOGE("temporal del not empty");
        goto bail;
    }

    DBUG_MSG("+++ START null lookup\n");
    if (irt.get(NULL) != kInvalidIndirectRefObject) {
        ALOGE("null lookup succeeded");
        goto bail;
    }

    DBUG_MSG("+++ START stale lookup\n");
    iref0 = irt.add(cookie, obj0);
    irt.remove(cookie, iref0);
    if (irt.get(iref0) != kInvalidIndirectRefObject) {
        ALOGE("stale lookup succeeded");
        goto bail;
    }

    /*
     * Test table overflow.
     */
    DBUG_MSG("+++ START overflow\n");
    int i;
    for (i = 0; i < kTableMax; i++) {
        manyRefs[i] = irt.add(cookie, obj0);
        if (manyRefs[i] == NULL) {
            ALOGE("Failed adding %d of %d", i, kTableMax);
            goto bail;
        }
    }
    /* one past the limit must fail */
    if (irt.add(cookie, obj0) != NULL) {
        ALOGE("Table overflow succeeded");
        goto bail;
    }
    if (irt.capacity() != (size_t)kTableMax) {
        ALOGE("Expected %d entries, found %d", kTableMax, irt.capacity());
        goto bail;
    }
    irt.dump("table with 20 entries, all filled");

    /* remove all but the last so every removal leaves a hole */
    for (i = 0; i < kTableMax-1; i++) {
        if (!irt.remove(cookie, manyRefs[i])) {
            ALOGE("multi-remove failed at %d", i);
            goto bail;
        }
    }
    irt.dump("table with 20 entries, 19 of them holes");

    /* because of removal order, should have 20 entries, 19 of them holes */
    if (irt.capacity() != (size_t)kTableMax) {
        ALOGE("Expected %d entries (with holes), found %d",
                kTableMax, irt.capacity());
        goto bail;
    }

    /* removing the top entry collapses all the holes at once */
    if (!irt.remove(cookie, manyRefs[kTableMax-1])) {
        ALOGE("multi-remove final failed");
        goto bail;
    }

    if (irt.capacity() != 0) {
        ALOGE("multi-del not empty");
        goto bail;
    }

    /* Done */
    DBUG_MSG("+++ basic test complete\n");
    result = true;

bail:
    irt.destroy();
    return result;
}
/*
************************************************************************************************************************
*                       GET LOG PAGE PARAMETER
*
*Description: Get a page from log block for read or write.
*
*Arguments  : nBlk      the logical block number of the log block;
*             nPage     the number of the logical page, which page need log page;
*             nMode     the type of get log page, 'r' or 'w', others is invalid.
*
*Return     : the number of the log page;
*               != 0xffff   get log page successful, return page number;
*                = 0xffff   get log page failed.
*
*Note       : Scan the log block table to try to get the log block.
*             when the get type is 'r', if the log block is exsit and the logical
*             page contain a log page, return the number of the log page, else,
*             return 0xffff;
*             when the get type is 'w', if the log block is not exsit, need create
*             log block, then, if get log page failed, need merge the log block, and
*             try to get log page again, this mode should return a value page number
*             except there is no enough valid blocks.
************************************************************************************************************************
*/
__u32 PMM_GetLogPage(__u32 nBlk, __u32 nPage, __u8 nMode)
{
    __s32 result, tmpLogPst;
    __u16 tmpPage, PhyPageNum;

    if(nMode == 'r')
    {
        tmpLogPst = _GetLogBlkPst(nBlk);
        if(tmpLogPst < 0)
        {
            /* get log page by read mode, there is no log block, return invalid value */
            return INVALID_PAGE_NUM;
        }

        /* need swap the page mapping table to ram which is accessing currently */
        result = PMM_SwitchMapTbl(tmpLogPst);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Switch page mapping table failed when get log page! Err:0x%x\n", result);
            return INVALID_PAGE_NUM;
        }

        _CalLogAccessCnt(tmpLogPst);

        /*
         * For LSB-type log blocks bit15 of the mapping entry selects which of
         * the two physical log blocks holds the page; the block index is
         * folded into bit16 of the return value.
         */
        if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
        {
            PhyPageNum = PAGE_MAP_TBL[nPage].PhyPageNum;
            if((PhyPageNum&(0x1<<15))&&(PhyPageNum!= 0xffff))
            {
                LOG_BLK_TBL[tmpLogPst].ReadBlkIndex = 1;
                PhyPageNum &= 0x7fff;   /* strip the block-select bit */
            }
            else
                LOG_BLK_TBL[tmpLogPst].ReadBlkIndex = 0;

            return (PhyPageNum|LOG_BLK_TBL[tmpLogPst].ReadBlkIndex<<16);
        }
        else
        {
            LOG_BLK_TBL[tmpLogPst].ReadBlkIndex = 0;
            return PAGE_MAP_TBL[nPage].PhyPageNum;
        }
    }

    /* write mode: allocate a log page (creating the log block if needed) */
    result = _GetLogPageForWrite(nBlk, nPage, &tmpPage, (__u32 *)&tmpLogPst);
    if(result < 0)
    {
        /* get log page for write failed */
        MAPPING_ERR("[MAPPING_ERR] Get log page for write failed!\n");
        return INVALID_PAGE_NUM;
    }

    /* check if the log page is valid */
    if(!(tmpPage < PAGE_CNT_OF_SUPER_BLK))
    {
        /* the log page is not invalid, need to merge the log block, and get again */
        result = LML_MergeLogBlk(SPECIAL_MERGE_MODE, nBlk);
        if(result < 0)
        {
            /* merge log block failed, report error */
            MAPPING_ERR("[MAPPING_ERR] Merge log block failed when get log page! Err:0x%x\n", result);
            return INVALID_PAGE_NUM;
        }

        /* try to get log page for write again */
        result = _GetLogPageForWrite(nBlk, nPage, &tmpPage, (__u32 *)&tmpLogPst);
        if(result < 0)
        {
            /* get log page for write failed */
            MAPPING_ERR("[MAPPING_ERR] Get log page for write failed!\n");
            return INVALID_PAGE_NUM;
        }
    }

    /* check if the log page is valid (even after the merge-and-retry) */
    if(!(tmpPage < PAGE_CNT_OF_SUPER_BLK))
    {
        /* get log page for write failed */
        MAPPING_ERR("[MAPPING_ERR] Get log page for write failed!\n");
        return INVALID_PAGE_NUM;
    }
    else
    {
        LOG_BLK_TBL[tmpLogPst].LastUsedPage = tmpPage;
    }

    /* update the page mapping table item; LSB blocks record the write-block
       index in bit15 of the entry */
    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
    {
        DBUG_MSG("[DBUG_MSG] PMM_GetLogPage 2, select bak log block\n");
        PAGE_MAP_TBL[nPage].PhyPageNum = tmpPage|((LOG_BLK_TBL[tmpLogPst].WriteBlkIndex&0x1)<<15);
    }
    else
        PAGE_MAP_TBL[nPage].PhyPageNum = tmpPage;

    /* set the flag that mark need update the page mapping table */
    PAGE_MAP_CACHE->DirtyFlag = 1;

    _CalLogAccessCnt(tmpLogPst);

    /* LSB blocks return the write-block index in bit16 */
    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
        return (tmpPage|LOG_BLK_TBL[tmpLogPst].WriteBlkIndex<<16);
    else
        return tmpPage;
}
/*
************************************************************************************************************************
*                       GET LOG PAGE FOR WRITE
*
*Description: Get a log page for write.
*
*Arguments  : nBlk      the logical block number of the log block;
*             nPage     the number of the logical page, which page need log page;
*             pLogPage  the pointer to the log page number, for return value;
*             pLogPst   the pointer to the position of the log block in the log block table.
*
*Return     : get log page result.
*               = 0     get log page for write successful;
*               =-1     get log page for write failed.
************************************************************************************************************************
*/
static __s32 _GetLogPageForWrite(__u32 nBlk, __u32 nPage, __u16 *pLogPage, __u32 *pLogPst)
{
    __s32 result, tmpLogPst;
    __u16 tmpPage, tempBank;
    struct __PhysicOpPara_t tmpPhyPage;
    struct __NandUserData_t tmpSpare[2];

    tmpLogPst = _GetLogBlkPst(nBlk);
    if(tmpLogPst < 0)
    {
        /* get log block position failed, there is no such log block, need create a new one */
        result = _CreateNewLogBlk(nBlk, (__u32 *)&tmpLogPst);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Create new log block failed!\n");
            return -1;
        }
    }

    /* need swap the page mapping table to ram which is accessing currently */
    result = PMM_SwitchMapTbl(tmpLogPst);
    if(result < 0)
    {
        MAPPING_ERR("[MAPPING_ERR] Switch page mapping table failed when get log page! Err:0x%x\n", result);
        return -1;
    }

    /* need get log page by write mode */
    tmpPage = LOG_BLK_TBL[tmpLogPst].LastUsedPage;
    if(SUPPORT_ALIGN_NAND_BNK)
    {
        if(tmpPage == 0xffff)
        {
            /* the log block is empty, need get log page in the first page line */
            tmpPage = nPage % INTERLEAVE_BANK_CNT;
        }
        else
        {
            /* need bank align, the log page and the data page should be in the same bank */
            if((nPage % INTERLEAVE_BANK_CNT) > (tmpPage % INTERLEAVE_BANK_CNT))
            {
                /* get the log page in the same page line with last used page */
                tmpPage = tmpPage + ((nPage % INTERLEAVE_BANK_CNT) - (tmpPage % INTERLEAVE_BANK_CNT));
            }
            else
            {
                /* need get the log page in the next page line of the last used page */
                tmpPage = tmpPage + (nPage % INTERLEAVE_BANK_CNT) + (INTERLEAVE_BANK_CNT - (tmpPage % INTERLEAVE_BANK_CNT));
            }
        }
    }
    else
    {
        /* use the page which is the next of the last used page */
        tmpPage = tmpPage + 1;
    }

    /*
     * LSB-type log blocks only use the pages flagged in lsb_page[]; advance
     * tmpPage to the next usable page (keeping bank alignment if required).
     * When the first physical log block is exhausted, switch WriteBlkIndex
     * to the second block and restart the page numbering.
     */
    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
    {
        DBUG_MSG("[DBUG_MSG] _GetLogPageForWrite, select bak log block\n");

        if(SUPPORT_ALIGN_NAND_BNK)
        {
            tempBank = tmpPage%INTERLEAVE_BANK_CNT;
            tmpPage =PMM_CalNextLogPage(tmpPage);
            while(tmpPage%INTERLEAVE_BANK_CNT != tempBank)
            {
                tmpPage++;
                tmpPage =PMM_CalNextLogPage(tmpPage);
                if(tmpPage>=PAGE_CNT_OF_SUPER_BLK)
                    break;
            }
        }
        else
        {
            tmpPage =PMM_CalNextLogPage(tmpPage);
        }

        if((tmpPage >= PAGE_CNT_OF_SUPER_BLK)&&(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex == 0))
        {
            LOG_BLK_TBL[tmpLogPst].WriteBlkIndex = 1;
            tmpPage = tmpPage - PAGE_CNT_OF_SUPER_BLK;
        }

        if(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex == 1)
            DBUG_MSG("[DBUG_MSG] _GetLogPageForWrite, log block index: %x, log block num: %x, page: %x \n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex, LOG_BLK_TBL[tmpLogPst].PhyBlk1.PhyBlkNum, tmpPage);
        else
            DBUG_MSG("[DBUG_MSG] _GetLogPageForWrite, log block index: %x, log block num: %x, page: %x \n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex, LOG_BLK_TBL[tmpLogPst].PhyBlk.PhyBlkNum, tmpPage);
    }

__CHECK_WRITE_LOGICAL_INFO_OF_LOG_BLOCK:

    /* check if need write the logical information in the first page of the log block */
    if((LOG_BLK_TBL[tmpLogPst].LastUsedPage == 0xffff) && (tmpPage != 0))
    {
        /* get logical information from page 0 of the data block */
        LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, DATA_BLK_TBL[nBlk].PhyBlkNum, 0);
        tmpPhyPage.SectBitmap = 0x03;
        tmpPhyPage.MDataPtr = LML_TEMP_BUF;
        tmpPhyPage.SDataPtr = (void *)tmpSpare;
        LML_VirtualPageRead(&tmpPhyPage);

        //if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
        //{
        //    PRINT("_GetLogPageForWrite log %x page 0, data age: %x, log age: %x\n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex, tmpSpare[0].PageStatus, tmpSpare[0].PageStatus+1);
        //}

        /* build the spare-area logical information for the log block */
        tmpSpare[0].BadBlkFlag = 0xff;
        tmpSpare[1].BadBlkFlag = 0xff;
        tmpSpare[0].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
        tmpSpare[1].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
        tmpSpare[0].LogicPageNum = 0xffff;
        tmpSpare[1].LogicPageNum = 0xffff;
        /* bump the age relative to the data block's page 0 */
        tmpSpare[0].PageStatus = tmpSpare[0].PageStatus + 1;
        tmpSpare[1].PageStatus = tmpSpare[0].PageStatus;
        if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
        {
            tmpSpare[0].LogType = LSB_TYPE|(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex<<4);
            tmpSpare[1].LogType = LSB_TYPE|(LOG_BLK_TBL[tmpLogPst].WriteBlkIndex<<4);
        }
        else
        {
            tmpSpare[0].LogType = 0xff;
            tmpSpare[1].LogType = 0xff;
        }

        /* write the logical information to the spare area of the log block
           NOTE(review): both branches below pass PhyBlk.PhyBlkNum — the LSB
           branch looks like it may have been intended to select by
           WriteBlkIndex; confirm against the original driver */
        if((SUPPORT_LOG_BLOCK_MANAGE)&&(LOG_BLK_TBL[tmpLogPst].LogBlkType == LSB_TYPE))
        {
            DBUG_MSG("[DBUG] _GetLogPageForWrite, write the logical information to log page 0, writeblkindex: %x\n", LOG_BLK_TBL[tmpLogPst].WriteBlkIndex);
            LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, LOG_BLK_TBL[tmpLogPst].PhyBlk.PhyBlkNum, 0);
        }
        else
            LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, LOG_BLK_TBL[tmpLogPst].PhyBlk.PhyBlkNum, 0);
        tmpPhyPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
        result = LML_VirtualPageWrite(&tmpPhyPage);
        if(result < 0)
        {
            LOGICCTL_ERR("[MAPPING_ERR] Physical write module failed when write logical information, Err:0x%x!\n", result);
            return -1;
        }

        result = PHY_SynchBank(tmpPhyPage.BankNum, SYNC_CHIP_MODE);
        if(result < 0)
        {
            /* the last write operation on current bank is failed, the block is bad, need proccess it */
            LOGICCTL_DBG("[LOGICCTL_DBG] Find a bad block when write logical page! bank:0x%x, block:0x%x, page:0x%x\n",
                    tmpPhyPage.BankNum, tmpPhyPage.BlkNum, tmpPhyPage.PageNum);

            /* process the bad block */
            result = LML_BadBlkManage(&LOG_BLK_TBL[tmpLogPst].PhyBlk, CUR_MAP_ZONE, 0, &LOG_BLK_TBL[tmpLogPst].PhyBlk);
            if(result < 0)
            {
                LOGICCTL_ERR("[MAPPING_ERR] Bad block process failed when get log page for write, Err:0x%x!\n", result);
                return -1;
            }

            /* retry the logical-information write on the replacement block */
            goto __CHECK_WRITE_LOGICAL_INFO_OF_LOG_BLOCK;
        }
    }

    /* set the log page number for return */
    *pLogPage = tmpPage;
    *pLogPst = tmpLogPst;

    return 0;
}
/*
************************************************************************************************************************
*                       CREATE A NEW LOG BLOCK
*
*Description: Create a new log block.
*
*Arguments  : nBlk      the logical block number of the log block;
*             pLogPst   the pointer to the log block position in the log block table.
*
*Return     : create new log block result.
*               = 0     create new log block successful;
*               =-1     create new log block failed.
************************************************************************************************************************
*/
static __s32 _CreateNewLogBlk(__u32 nBlk, __u32 *pLogPst)
{
    __s32 i, result, LogBlkType,tmpPst=-1;
    __u16 tmpLogAccessAge = 0xffff;
    struct __SuperPhyBlkType_t tmpFreeBlk, tmpFreeBlk1;
    struct __PhysicOpPara_t tmpPhyPage;
    struct __NandUserData_t tmpSpare[2];

#if CFG_SUPPORT_WEAR_LEVELLING
    /* check if need do wear-levelling */
    if(BLK_ERASE_CNTER >= WEAR_LEVELLING_FREQUENCY)
    {
        LML_WearLevelling();
    }
#endif

    /* try to search an empty item in the log block table */
    for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
    {
        if(LOG_BLK_TBL[i].LogicBlkNum == 0xffff)
        {
            /* find a empty item */
            tmpPst = i;
            break;
        }
    }

    /* there is no empty item in the log block table, need merge a log block */
    if(tmpPst == -1)
    {
        /* prefer a full log block as the merge victim */
        for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
        {
            if(LOG_BLK_TBL[i].LastUsedPage == PAGE_CNT_OF_SUPER_BLK-1)
            {
                tmpPst = i;
                break;
            }
        }

        if(tmpPst == -1)
        {
            /* there is no full log block, look for an oldest log block to merge */
            for(i=0; i<LOG_BLK_CNT_OF_ZONE; i++)
            {
                if(LOG_ACCESS_AGE[i] < tmpLogAccessAge)
                {
                    tmpLogAccessAge = LOG_ACCESS_AGE[i];
                    tmpPst = i;
                }
            }
        }

        /* switch the page mapping table for merge the log block */
        result = PMM_SwitchMapTbl(tmpPst);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Switch page mapping table failed when create new log block! Err:0x%x\n", result);
            return -1;
        }

        /* merge the log block with normal type, to make an empty item */
        result = LML_MergeLogBlk(NORMAL_MERGE_MODE, LOG_BLK_TBL[tmpPst].LogicBlkNum);
        if(result < 0)
        {
            /* merge log block failed, report error */
            MAPPING_ERR("[MAPPING_ERR] Merge log block failed when create new log block! Err:0x%x\n", result);
            return -1;
        }
    }

    LogBlkType = BMM_CalLogBlkType(nBlk);

    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlkType == LSB_TYPE))
    {
        /* LSB-type log blocks are backed by TWO physical blocks */
        DBUG_MSG("[DBUG_MSG] _CreateNewLogBlk, select bak log block\n");

        /* get a free block to create a new log block */
        result = BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Get free block failed when create new log block!\n");
            return -1;
        }

        /* get a second free block for the backup log block */
        result = BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk1);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Get free block failed when create new log block!\n");
            return -1;
        }

        //DBUG_INF("[DBUG] _CreateNewLogBlk, logic: %x, logblk0: %x, logblk1:%x \n", nBlk, tmpFreeBlk.PhyBlkNum, tmpFreeBlk1.PhyBlkNum);

        /* make a new log item in the log block table */
        LOG_BLK_TBL[tmpPst].LogicBlkNum = nBlk;
        LOG_BLK_TBL[tmpPst].LastUsedPage = 0xffff;
        LOG_BLK_TBL[tmpPst].LogBlkType = LogBlkType;
        LOG_BLK_TBL[tmpPst].WriteBlkIndex = 0;
        LOG_BLK_TBL[tmpPst].ReadBlkIndex = 0;
        LOG_BLK_TBL[tmpPst].PhyBlk = tmpFreeBlk;
        LOG_BLK_TBL[tmpPst].PhyBlk1 = tmpFreeBlk1;

        /* set the return vaule of the log position */
        *pLogPst = tmpPst;
    }
    else
    {
        /* get a free block to create a new log block */
        result = BMM_GetFreeBlk(LOWEST_EC_TYPE, &tmpFreeBlk);
        if(result < 0)
        {
            MAPPING_ERR("[MAPPING_ERR] Get free block failed when create new log block!\n");
            return -1;
        }

        /* make a new log item in the log block table */
        LOG_BLK_TBL[tmpPst].LogicBlkNum = nBlk;
        LOG_BLK_TBL[tmpPst].LastUsedPage = 0xffff;
        LOG_BLK_TBL[tmpPst].LogBlkType = LogBlkType;
        LOG_BLK_TBL[tmpPst].WriteBlkIndex = 0;
        LOG_BLK_TBL[tmpPst].ReadBlkIndex = 0;
        LOG_BLK_TBL[tmpPst].PhyBlk = tmpFreeBlk;

        /* set the return vaule of the log position */
        *pLogPst = tmpPst;
    }

__CHECK_LOGICAL_INFO_OF_DATA_BLOCK:

    /* check if the data block is an empty block, if so, need update the logic information in the spare area */
    LML_CalculatePhyOpPar(&tmpPhyPage, CUR_MAP_ZONE, DATA_BLK_TBL[nBlk].PhyBlkNum, 0);
    tmpPhyPage.SectBitmap = 0x03;
    tmpPhyPage.MDataPtr = LML_TEMP_BUF;
    tmpPhyPage.SDataPtr = (void *)tmpSpare;
    LML_VirtualPageRead(&tmpPhyPage);

    if(tmpSpare[0].LogicInfo == 0xffff)
    {
        /* erased spare area: stamp the logical information into page 0 */
        tmpSpare[0].BadBlkFlag = 0xff;
        tmpSpare[1].BadBlkFlag = 0xff;
        tmpSpare[0].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
        tmpSpare[1].LogicInfo = ((CUR_MAP_ZONE % ZONE_CNT_OF_DIE)<<10) | nBlk;
        tmpSpare[0].LogicPageNum = 0xffff;
        tmpSpare[1].LogicPageNum = 0xffff;
        tmpSpare[0].PageStatus = 0xff;
        tmpSpare[1].PageStatus = 0xff;

        /* write the logical information to the spare area of the data block */
        tmpPhyPage.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
        result = LML_VirtualPageWrite(&tmpPhyPage);
        if(result < 0)
        {
            LOGICCTL_ERR("[MAPPING_ERR] Physical write module failed when write logical information, Err:0x%x!\n", result);
            return -1;
        }

        result = PHY_SynchBank(tmpPhyPage.BankNum, SYNC_CHIP_MODE);
        if(result < 0)
        {
            /* the last write operation on current bank is failed, the block is bad, need proccess it */
            LOGICCTL_DBG("[LOGICCTL_DBG] Find a bad block when write logical page! bank:0x%x, block:0x%x, page:0x%x\n",
                    tmpPhyPage.BankNum, tmpPhyPage.BlkNum, tmpPhyPage.PageNum);

            /* process the bad block */
            result = LML_BadBlkManage(&DATA_BLK_TBL[nBlk], CUR_MAP_ZONE, 0, &tmpFreeBlk);
            if(result < 0)
            {
                LOGICCTL_ERR("[MAPPING_ERR] Bad block process failed when create new log block, Err:0x%x!\n", result);
                return -1;
            }

            /* the data block was replaced; retry with the substitute */
            DATA_BLK_TBL[nBlk] = tmpFreeBlk;
            goto __CHECK_LOGICAL_INFO_OF_DATA_BLOCK;
        }
    }

    return 0;
}
/*!
*
* \par  Description:
*       This function move valuable data from log block to free block,then replace them.
*
* \param  [in]       LogNum,serial number within log block space
* \return      sucess or failed.
* \note         this function was called when log block is full, and valid pages is less than half of one block.
**/
__s32 _free2log_move_merge(__u32 nlogical)
{
    __u8 bank;
    __u16 LastUsedPage,SuperPage;
    __u16 SrcPage,DstPage, SrcBlock, DstBlock;
    struct __SuperPhyBlkType_t FreeBlk,FreeBlk1;
    struct __LogBlkType_t LogBlk;
    struct __PhysicOpPara_t SrcParam,DstParam;
    struct __NandUserData_t UserData[2];

    /*init info of log block , and get one free block */
    BMM_GetLogBlk(nlogical, &LogBlk);
    if (NAND_OP_TRUE != BMM_GetFreeBlk(LOWEST_EC_TYPE, &FreeBlk))
        return NAND_OP_FALSE;

    /* LSB-type log blocks occupy two physical blocks, so reserve a second
       free block as well */
    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE))
    {
        if (NAND_OP_TRUE != BMM_GetFreeBlk(LOWEST_EC_TYPE, &FreeBlk1))
            return NAND_OP_FALSE;
        //DBUG_INF("[DBUG] lsb move merge, new log0: %x, new log1: %x\n", FreeBlk.PhyBlkNum, FreeBlk1.PhyBlkNum);
    }

    SrcParam.MDataPtr = DstParam.MDataPtr = NULL;
    SrcParam.SDataPtr = DstParam.SDataPtr = NULL;
    SrcParam.SectBitmap = DstParam.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;

    if(SUPPORT_ALIGN_NAND_BNK)
    {
redo:
        /*copy data bank by bank, for copy-back using*/
        LastUsedPage = 0;
        for (bank = 0; bank < INTERLEAVE_BANK_CNT; bank++)
        {
            DstPage = bank;
            for (SuperPage = bank; SuperPage < PAGE_CNT_OF_SUPER_BLK; SuperPage+= INTERLEAVE_BANK_CNT)
            {
                SrcPage = PMM_GetCurMapPage(SuperPage);
                if (SrcPage != 0xffff)
                {
                    /*set source and destinate address*/
                    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE))
                    {
                        DBUG_MSG("[DBUG_MSG] _free2log_move_merge 2, select bak log block\n");

                        /* advance DstPage to the next usable LSB page in the same bank */
                        DstPage = PMM_CalNextLogPage(DstPage);
                        while((DstPage%INTERLEAVE_BANK_CNT)!=bank)
                        {
                            DstPage++;
                            DstPage = PMM_CalNextLogPage(DstPage);
                            if(DstPage>=PAGE_CNT_OF_SUPER_BLK)
                                break;
                        }

                        if(DstPage >= PAGE_CNT_OF_SUPER_BLK)
                        {
                            LOGICCTL_ERR("move merge : dst page cal error\n");
                            return NAND_OP_FALSE;
                        }

                        /* bit15 of the mapping entry selects which physical
                           log block holds the source page */
                        if(SrcPage&(0x1<<15))
                            SrcBlock = LogBlk.PhyBlk1.PhyBlkNum;
                        else
                            SrcBlock = LogBlk.PhyBlk.PhyBlkNum;
                        DstBlock = FreeBlk.PhyBlkNum;
                        SrcPage &= (~(0x1<<15));
                    }
                    else
                    {
                        SrcBlock = LogBlk.PhyBlk.PhyBlkNum;
                        DstBlock = FreeBlk.PhyBlkNum;
                    }

                    LML_CalculatePhyOpPar(&SrcParam,CUR_MAP_ZONE, SrcBlock, SrcPage);
                    LML_CalculatePhyOpPar(&DstParam,CUR_MAP_ZONE, DstBlock, DstPage);

                    if (DstPage == 0)
                    {
                        /* page 0 carries management info, copy it specially */
                        if ( NAND_OP_FALSE == _copy_page0(SrcBlock,SrcPage,FreeBlk.PhyBlkNum,0))
                        {
                            LOGICCTL_ERR("move merge : copy page 0 err1\n");
                            return NAND_OP_FALSE;
                        }
                    }
                    else
                    {
                        if (NAND_OP_TRUE != PHY_PageCopyback(&SrcParam,&DstParam))
                        {
                            LOGICCTL_ERR("move merge : copy back err\n");
                            return NAND_OP_FALSE;
                        }
                    }

                    if (NAND_OP_TRUE != PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE))
                    {
                        /* destination block went bad: replace it and restart
                           the whole copy from scratch */
                        struct __SuperPhyBlkType_t SubBlk;
                        if (NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk,CUR_MAP_ZONE,0,&SubBlk))
                        {
                            LOGICCTL_ERR("move merge : bad block manage err after copy back\n");
                            return NAND_OP_FALSE;
                        }
                        FreeBlk = SubBlk;
                        goto redo;
                    }

                    PMM_SetCurMapPage(SuperPage,DstPage);
                    DstPage += INTERLEAVE_BANK_CNT;
                }
            }

            /*if bank 0 is empty, need write mange info in page 0*/
            if ((bank == 0) && (DstPage == 0))
            {
                if ( NAND_OP_FALSE == _copy_page0(LogBlk.PhyBlk.PhyBlkNum,0,FreeBlk.PhyBlkNum,0))
                {
                    LOGICCTL_ERR("move merge : copy page 0 err2\n");
                    return NAND_OP_FALSE;
                }

                LML_CalculatePhyOpPar(&DstParam, CUR_MAP_ZONE, FreeBlk.PhyBlkNum, 0);
                if (NAND_OP_TRUE != PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE))
                {
                    struct __SuperPhyBlkType_t SubBlk;
                    if (NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk,CUR_MAP_ZONE,0,&SubBlk))
                    {
                        LOGICCTL_ERR("move merge : bad block manage err after copy back\n");
                        return NAND_OP_FALSE;
                    }
                    FreeBlk = SubBlk;
                    goto redo;
                }
            }

            /*reset LastUsedPage*/
            if ((DstPage - INTERLEAVE_BANK_CNT) > LastUsedPage)
            {
                LastUsedPage = DstPage - INTERLEAVE_BANK_CNT;
            }
        }
    }
    else
    {
        /*copy data page by page*/
        DstPage = 0;
        LastUsedPage = 0;
        for (SuperPage = 0; SuperPage < PAGE_CNT_OF_LOGIC_BLK; SuperPage++)
        {
            SrcPage = PMM_GetCurMapPage(SuperPage);
            if (SrcPage != 0xffff)
            {
                /*set source and destinate address*/
                if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE))
                {
                    DBUG_MSG("[DBUG_MSG] _free2log_move_merge 3, select bak log block\n");

                    DstPage = PMM_CalNextLogPage(DstPage);
                    if(DstPage >= PAGE_CNT_OF_SUPER_BLK)
                    {
                        LOGICCTL_ERR("move merge : dst page cal error\n");
                        return NAND_OP_FALSE;
                    }

                    if(SrcPage&(0x1<<15))
                        SrcBlock = LogBlk.PhyBlk1.PhyBlkNum;
                    else
                        SrcBlock = LogBlk.PhyBlk.PhyBlkNum;
                    DstBlock = FreeBlk.PhyBlkNum;
                    SrcPage &= 0x7fff;
                }
                else
                {
                    SrcBlock = LogBlk.PhyBlk.PhyBlkNum;
                    DstBlock = FreeBlk.PhyBlkNum;
                }

                LML_CalculatePhyOpPar(&SrcParam,CUR_MAP_ZONE, SrcBlock, SrcPage);
                LML_CalculatePhyOpPar(&DstParam,CUR_MAP_ZONE, DstBlock, DstPage);

                if (0 == DstPage)
                {
                    if ( NAND_OP_FALSE == _copy_page0(SrcBlock,SrcPage,FreeBlk.PhyBlkNum,0))
                    {
                        LOGICCTL_ERR("move merge : copy page 0 err1\n");
                        return NAND_OP_FALSE;
                    }
                }
                else
                {
                    /* no copy-back here: read/modify/write through the buffer */
                    SrcParam.MDataPtr = DstParam.MDataPtr = LML_TEMP_BUF;
                    SrcParam.SDataPtr = DstParam.SDataPtr = (void *)&UserData;
                    MEMSET((void *)&UserData,0xff,sizeof(struct __NandUserData_t) * 2);
                    SrcParam.SectBitmap = DstParam.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
                    if (LML_VirtualPageRead(&SrcParam) < 0){
                        LOGICCTL_ERR("move merge : read main data err\n");
                        return NAND_OP_FALSE;
                    }
                    if (NAND_OP_TRUE != LML_VirtualPageWrite(&DstParam)){
                        LOGICCTL_ERR("move merge : write err\n");
                        return NAND_OP_FALSE;
                    }
                }

                if (NAND_OP_TRUE != PHY_SynchBank(DstParam.BankNum, SYNC_BANK_MODE))
                {
                    /* destination went bad: substitute the block and step
                       SuperPage back so this page is retried.
                       NOTE(review): execution still falls through to
                       PMM_SetCurMapPage with the decremented SuperPage —
                       confirm this is the intended retry behavior */
                    struct __SuperPhyBlkType_t SubBlk;
                    if (NAND_OP_TRUE != LML_BadBlkManage(&FreeBlk,CUR_MAP_ZONE,LastUsedPage,&SubBlk))
                    {
                        LOGICCTL_ERR("move merge : bad block manage err after copy back\n");
                        return NAND_OP_FALSE;
                    }
                    FreeBlk = SubBlk;
                    SuperPage -= 1;
                }

                PMM_SetCurMapPage(SuperPage,DstPage);
                LastUsedPage = DstPage;
                DstPage++;
            }
        }
    }

    /*erase log block*/
    if(NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk.PhyBlkNum))
    {
        if(NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk,CUR_MAP_ZONE,0,NULL))
        {
            LOGICCTL_ERR("move merge : bad block manage err after erase log block\n");
            return NAND_OP_FALSE;
        }
    }

    /*move erased log block to free block*/
    if(LogBlk.PhyBlk.BlkEraseCnt < 0xffff)
    {
        LogBlk.PhyBlk.BlkEraseCnt ++;   /* saturate at 0xffff */
    }
    BMM_SetFreeBlk(&LogBlk.PhyBlk);

    /* LSB-type log blocks have a second physical block to recycle */
    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE))
    {
        //DBUG_INF("[DBUG] logic %x move merge: erase log block 0: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk.PhyBlkNum);
        if(NAND_OP_TRUE != LML_VirtualBlkErase(CUR_MAP_ZONE, LogBlk.PhyBlk1.PhyBlkNum))
        {
            if(NAND_OP_TRUE != LML_BadBlkManage(&LogBlk.PhyBlk1,CUR_MAP_ZONE,0,NULL))
            {
                LOGICCTL_ERR("move merge : bad block manage err after erase log block\n");
                return NAND_OP_FALSE;
            }
        }

        /*move erased log block to free block*/
        if(LogBlk.PhyBlk1.BlkEraseCnt < 0xffff)
        {
            LogBlk.PhyBlk1.BlkEraseCnt ++;
        }
        BMM_SetFreeBlk(&LogBlk.PhyBlk1);
        //DBUG_INF("[DBUG] logic %x move merge: erase log block 1: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk1.PhyBlkNum);
    }

    /*move free block to log block*/
    LogBlk.PhyBlk.PhyBlkNum= FreeBlk.PhyBlkNum;
    LogBlk.PhyBlk.BlkEraseCnt= FreeBlk.BlkEraseCnt;
    if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE))
    {
        DBUG_MSG("[DBUG_MSG] _free2log_move_merge 4, select bak log block\n");
        LogBlk.PhyBlk1.PhyBlkNum= FreeBlk1.PhyBlkNum;
        LogBlk.PhyBlk1.BlkEraseCnt= FreeBlk1.BlkEraseCnt;
        LogBlk.WriteBlkIndex = 0;
        LogBlk.ReadBlkIndex = 0;
        DBUG_MSG("[DBUG] move merge to new log block, logic block: %x, logblock0: %x, logblock1: %x\n", LogBlk.LogicBlkNum, LogBlk.PhyBlk.PhyBlkNum, LogBlk.PhyBlk1.PhyBlkNum);
    }
    else
    {
        LogBlk.LogBlkType = 0;
        LogBlk.WriteBlkIndex = 0;
        LogBlk.ReadBlkIndex = 0;
    }
    LogBlk.LastUsedPage = LastUsedPage;
    BMM_SetLogBlk(nlogical, &LogBlk);

    //if((SUPPORT_LOG_BLOCK_MANAGE)&&(LogBlk.LogBlkType == LSB_TYPE))
    //    DBUG_INF("logic %x move merge, lastusedpage: %x\n", LogBlk.LogicBlkNum, LogBlk.LastUsedPage);

    return NAND_OP_TRUE;
}
/*
 * Load the logical-to-physical page map table for the log block at
 * position nLogBlkPst into PAGE_MAP_TBL.
 *
 * The table is stored in the last used page of the log block (or of the
 * backup log block when LSB-type log-block management is active).  An
 * empty log block yields an all-0xff table.  If the stored table fails
 * validation (read error, wrong page status, unexpected logical page
 * number, or checksum mismatch) it is rebuilt by scanning the log block.
 *
 * Returns NAND_OP_TRUE on success, NAND_OP_FALSE if the rebuild fails.
 */
static __s32 _read_page_map_tbl(__u32 nLogBlkPst)
{
    __s32 ret;
    __u16 TablePage;
    __u32 TableBlk, checksum;
    __u16 logicpagenum;
    __u8 status;
    struct __NandUserData_t UserData[2];
    struct __PhysicOpPara_t param;

    /* Locate the page holding the most recent table copy. */
    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE)) {
        DBUG_MSG("[DBUG_MSG] _read_page_map_tbl, select bak log block\n");
        TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage;
        /* LSB-type log blocks span two physical blocks; read from the one
         * currently being written (WriteBlkIndex selects it). */
        if (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 1)
            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum;
        else
            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
    } else {
        TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage;
        TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
    }

    if (TablePage == 0xffff) {
        /* Log block is empty: no logical pages are mapped yet. */
        MEMSET(PAGE_MAP_TBL, 0xff, PAGE_CNT_OF_SUPER_BLK * sizeof(struct __PageMapTblItem_t));
        return NAND_OP_TRUE;
    }

    /* Read the stored page map table (first 4 sectors of the page). */
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = 0xf;
    LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk, TablePage);
    ret = LML_VirtualPageRead(&param);

    /* Large super blocks (>= 512 pages) store the table as packed __u16
     * physical page numbers instead of full table items. */
    if (PAGE_CNT_OF_SUPER_BLK >= 512) {
        checksum = _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF,
                PAGE_CNT_OF_SUPER_BLK * 2 / sizeof(__u32));
    } else {
        checksum = _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF,
                PAGE_CNT_OF_SUPER_BLK * sizeof(struct __PageMapTblItem_t) / sizeof(__u32));
    }

    status = UserData[0].PageStatus;
    logicpagenum = UserData[0].LogicPageNum;

    /* A valid table page carries page status 0xaa, no logical page number,
     * and a checksum matching the last word of the buffer. */
    if ((ret < 0) || (status != 0xaa) || (logicpagenum != 0xffff)
            || (checksum != ((__u32 *)LML_PROCESS_TBL_BUF)[511])) {
        if (NAND_OP_TRUE != _rebuild_page_map_tbl(nLogBlkPst)) {
            MAPPING_ERR("rebuild page map table err\n");
            return NAND_OP_FALSE;
        }
    } else {
        if (PAGE_CNT_OF_SUPER_BLK >= 512) {
            __u32 page;
            for (page = 0; page < PAGE_CNT_OF_SUPER_BLK; page++)
                PAGE_MAP_TBL[page].PhyPageNum = *((__u16 *)LML_PROCESS_TBL_BUF + page);
        } else {
            MEMCPY(PAGE_MAP_TBL, LML_PROCESS_TBL_BUF,
                    PAGE_CNT_OF_SUPER_BLK * sizeof(struct __PageMapTblItem_t));
        }
    }

    return NAND_OP_TRUE;
}
/*
 * Rebuild the page map table for the log block at position nLogBlkPst by
 * scanning every page of the log block and reading the logical page number
 * stored in its spare area.
 *
 * For LSB-type log blocks the backup physical block is scanned as well;
 * entries found there are tagged by setting bit 15 of the physical page
 * number so later lookups know which physical block to address.
 *
 * Marks the page map cache dirty on success.
 * Returns NAND_OP_TRUE on success, NAND_OP_FALSE on a read error.
 */
static __s32 _rebuild_page_map_tbl(__u32 nLogBlkPst)
{
    __s32 ret;
    __u16 TablePage;
    __u32 TableBlk, TableBlk1;
    __u16 logicpagenum;
    struct __NandUserData_t UserData[2];
    struct __PhysicOpPara_t param;

    /* Start from an empty (all-0xff) mapping. */
    MEMSET(PAGE_MAP_TBL, 0xff, PAGE_CNT_OF_SUPER_BLK * sizeof(struct __PageMapTblItem_t));
    TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
    TableBlk1 = LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum;
    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = 0x3;

    /* Scan the primary log block; later pages overwrite earlier entries,
     * so the newest copy of each logical page wins. */
    for (TablePage = 0; TablePage < PAGE_CNT_OF_SUPER_BLK; TablePage++) {
        LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk, TablePage);
        ret = LML_VirtualPageRead(&param);
        if (ret < 0) {
            MAPPING_ERR("rebuild logic block %x page map table : read err\n", LOG_BLK_TBL[nLogBlkPst].LogicBlkNum);
            return NAND_OP_FALSE;
        }
        logicpagenum = UserData[0].LogicPageNum;
        /* Legal page: spare area names a logical page inside this block. */
        if ((logicpagenum != 0xffff) && (logicpagenum < PAGE_CNT_OF_SUPER_BLK)) {
            PAGE_MAP_TBL[logicpagenum].PhyPageNum = TablePage;
        }
    }

    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE)) {
        DBUG_MSG("[DBUG_MSG] _rebuild_page_map_tbl, select bak log block\n");
        /* Scan the backup log block; bit 15 of the stored physical page
         * number marks entries that live in the backup block. */
        for (TablePage = 0; TablePage < PAGE_CNT_OF_SUPER_BLK; TablePage++) {
            LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk1, TablePage);
            ret = LML_VirtualPageRead(&param);
            if (ret < 0) {
                MAPPING_ERR("rebuild logic block %x page map table : read err\n", LOG_BLK_TBL[nLogBlkPst].LogicBlkNum);
                return NAND_OP_FALSE;
            }
            logicpagenum = UserData[0].LogicPageNum;
            if ((logicpagenum != 0xffff) && (logicpagenum < PAGE_CNT_OF_SUPER_BLK)) {
                PAGE_MAP_TBL[logicpagenum].PhyPageNum = TablePage | (0x1U << 15);
            }
        }
    }

    /* Rebuilt table exists only in RAM until written back. */
    PAGE_MAP_CACHE->DirtyFlag = 1;
    BMM_SetDirtyFlag();

    return NAND_OP_TRUE;
}
/*
 * Write the in-RAM page map table (PAGE_MAP_TBL) back into the next free
 * page of the log block at position nLogBlkPst.
 *
 * If the log block is full, it is merged first; after a "move merge" the
 * table is written to the fresh log block, otherwise the cache is already
 * clean and we return.  A failed program triggers bad-block management and
 * a retry (goto rewrite).  On success LastUsedPage is advanced and the
 * page map cache is invalidated.
 *
 * Returns NAND_OP_TRUE on success, NAND_OP_FALSE (or -ERR_PHYSIC) on error.
 */
static __s32 _write_back_page_map_tbl(__u32 nLogBlkPst)
{
    __u16 TablePage;
    __u32 TableBlk;
    struct __NandUserData_t UserData[2];
    struct __PhysicOpPara_t param, tmpPage0;
    struct __SuperPhyBlkType_t BadBlk, NewBlk;
    __s32 result;

    /* Pick the destination page; merge first if no free page is left. */
    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE)) {
        TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
        TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
        DBUG_MSG("[DBUG] _write_back_page_map_tbl, log block: %x, bak log block %x\n", LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);
        DBUG_MSG("[DBUG] _write_back_page_map_tbl, select bak log block\n");
        /* LSB-type blocks only program selected (low-page) offsets. */
        TablePage = PMM_CalNextLogPage(TablePage);
        if ((TablePage >= PAGE_CNT_OF_SUPER_BLK) && (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 0)) {
            /* Primary block exhausted: switch writes to the backup block. */
            DBUG_MSG("[DBUG] _write_back_page_map_tbl, change to log block 1, phyblock1: %x\n", LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum);
            LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex = 1;
            TablePage = TablePage - PAGE_CNT_OF_SUPER_BLK;
        }
        if (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 1)
            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum;
        else
            TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
        if (TablePage >= PAGE_CNT_OF_SUPER_BLK) {
            /* Both blocks are full: merge, then retry on the new log block. */
            if (LML_MergeLogBlk(SPECIAL_MERGE_MODE, LOG_BLK_TBL[nLogBlkPst].LogicBlkNum)) {
                MAPPING_ERR("write back page tbl : merge err\n");
                return NAND_OP_FALSE;
            }
            DBUG_MSG("[DBUG] _write_back_page_map_tbl, log block merge end\n");
            if (PAGE_MAP_CACHE->ZoneNum != 0xff) {
                /* Move merge kept a log block: write the table into it. */
                TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
                TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
                TablePage = PMM_CalNextLogPage(TablePage);
            } else {
                /* Normal merge already flushed everything. */
                return NAND_OP_TRUE;
            }
        }
    } else {
        TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
        TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
        if (TablePage == PAGE_CNT_OF_SUPER_BLK) {
            /* Block is full: merge before writing. */
            if (LML_MergeLogBlk(SPECIAL_MERGE_MODE, LOG_BLK_TBL[nLogBlkPst].LogicBlkNum)) {
                MAPPING_ERR("write back page tbl : merge err\n");
                return NAND_OP_FALSE;
            }
            if (PAGE_MAP_CACHE->ZoneNum != 0xff) {
                /* Move merge: target the fresh log block. */
                TablePage = LOG_BLK_TBL[nLogBlkPst].LastUsedPage + 1;
                TableBlk = LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum;
            } else {
                return NAND_OP_TRUE;
            }
        }
    }

rewrite:
    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE)) {
        if ((TablePage == 0) && (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex == 1)) {
            MEMSET((void *)(&UserData[0]), 0xff, sizeof(struct __NandUserData_t) * 2);
            /* Target is page 0 of the backup block: replicate page 0 of the
             * primary block there first, then skip past it. */
            LML_CalculatePhyOpPar(&tmpPage0, CUR_MAP_ZONE, LOG_BLK_TBL[nLogBlkPst].PhyBlk.PhyBlkNum, 0);
            tmpPage0.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
            tmpPage0.MDataPtr = LML_TEMP_BUF;
            tmpPage0.SDataPtr = (void *)UserData;
            result = LML_VirtualPageRead(&tmpPage0);
            if (result < 0) {
                LOGICCTL_ERR("[LOGICCTL_ERR] Get log age of data block failed when write logical page, Err:0x%x!\n", result);
                return -ERR_PHYSIC;
            }
            UserData[0].LogType = LSB_TYPE | (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex << 4);
            UserData[1].LogType = LSB_TYPE | (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex << 4);
            LML_CalculatePhyOpPar(&tmpPage0, CUR_MAP_ZONE, LOG_BLK_TBL[nLogBlkPst].PhyBlk1.PhyBlkNum, 0);
            tmpPage0.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
            tmpPage0.MDataPtr = LML_TEMP_BUF;
            tmpPage0.SDataPtr = (void *)UserData;
            /* NOTE(review): write result is not checked here (matches the
             * original behavior) — a failure surfaces via PHY_SynchBank on
             * the table write below. Confirm this is intentional. */
            result = LML_VirtualPageWrite(&tmpPage0);
            TablePage++;
        }
    }

    /* Tag the table page in the spare area: status 0xaa marks a table copy. */
    MEMSET((void *)(&UserData[0]), 0xff, sizeof(struct __NandUserData_t) * 2);
    UserData[0].PageStatus = 0xaa;
    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE)) {
        UserData[0].LogType = LSB_TYPE | (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex << 4);
        UserData[1].LogType = LSB_TYPE | (LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex << 4);
    } else {
        UserData[0].LogType = 0xff;
        UserData[1].LogType = 0xff;
    }

    /* Serialize PAGE_MAP_TBL into the write buffer and append a checksum
     * in the last word; large super blocks use packed __u16 entries. */
    MEMSET(LML_PROCESS_TBL_BUF, 0xff, SECTOR_CNT_OF_SUPER_PAGE * SECTOR_SIZE);
    if (PAGE_CNT_OF_SUPER_BLK >= 512) {
        __u32 page;
        for (page = 0; page < PAGE_CNT_OF_SUPER_BLK; page++)
            *((__u16 *)LML_PROCESS_TBL_BUF + page) = PAGE_MAP_TBL[page].PhyPageNum;
        ((__u32 *)LML_PROCESS_TBL_BUF)[511] =
                _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK * 2 / (sizeof(__u32)));
    } else {
        MEMCPY(LML_PROCESS_TBL_BUF, PAGE_MAP_TBL, PAGE_CNT_OF_SUPER_BLK * sizeof(struct __PageMapTblItem_t));
        ((__u32 *)LML_PROCESS_TBL_BUF)[511] =
                _GetTblCheckSum((__u32 *)LML_PROCESS_TBL_BUF, PAGE_CNT_OF_SUPER_BLK * sizeof(struct __PageMapTblItem_t) / (sizeof(__u32)));
    }

    param.MDataPtr = LML_PROCESS_TBL_BUF;
    param.SDataPtr = (void *)&UserData;
    param.SectBitmap = FULL_BITMAP_OF_SUPER_PAGE;
    LML_CalculatePhyOpPar(&param, CUR_MAP_ZONE, TableBlk, TablePage);
    LML_VirtualPageWrite(&param);
    if (NAND_OP_TRUE != PHY_SynchBank(param.BankNum, SYNC_CHIP_MODE)) {
        /* Program failed: replace the block and retry the write. */
        BadBlk.PhyBlkNum = TableBlk;
        if (NAND_OP_TRUE != LML_BadBlkManage(&BadBlk, CUR_MAP_ZONE, TablePage, &NewBlk)) {
            MAPPING_ERR("write page map table : bad block mange err after write\n");
            return NAND_OP_FALSE;
        }
        TableBlk = NewBlk.PhyBlkNum;
        /* NOTE(review): PhyBlk is replaced even when the failed block was
         * PhyBlk1 (WriteBlkIndex == 1) — matches the original code; verify
         * against the LSB-type bad-block handling elsewhere. */
        LOG_BLK_TBL[nLogBlkPst].PhyBlk = NewBlk;
        goto rewrite;
    }

    LOG_BLK_TBL[nLogBlkPst].LastUsedPage = TablePage;
    /* Invalidate the cache: the on-flash copy is now authoritative. */
    PAGE_MAP_CACHE->ZoneNum = 0xff;
    PAGE_MAP_CACHE->LogBlkPst = 0xff;
    if ((SUPPORT_LOG_BLOCK_MANAGE) && (LOG_BLK_TBL[nLogBlkPst].LogBlkType == LSB_TYPE))
        DBUG_MSG("[DBUG] _write_back_page_map_tbl end, lastusedpage: %x, write_index: %x\n", LOG_BLK_TBL[nLogBlkPst].LastUsedPage, LOG_BLK_TBL[nLogBlkPst].WriteBlkIndex);

    return NAND_OP_TRUE;
}
/*
 * Test operations on a segmented table.
 *
 * Exercises segment push/pop on an IndirectRefTable: adds in one segment
 * must not be removable through a later segment's cookie, and popping a
 * segment must discard the entries added under it.  Returns true when all
 * checks pass; the table is cleared on every exit path via "bail".
 */
static bool segmentTest(void)
{
    static const int kTableMax = 20;
    IndirectRefTable irt;
    IndirectRef iref0, iref1, iref2, iref3;
    ClassObject* clazz = dvmFindClass("Ljava/lang/Object;", NULL);
    Object* obj0 = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
    Object* obj1 = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
    Object* obj2 = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
    Object* obj3 = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
    u4 cookie;
    u4 segmentState[4];   /* cookie saved per segment depth */
    bool result = false;

    if (!dvmInitIndirectRefTable(&irt, kTableMax, kTableMax,
            kIndirectKindLocal)) {
        return false;
    }
    cookie = segmentState[0] = IRT_FIRST_SEGMENT;
    DBUG_MSG("+++ objs %p %p %p %p\n", obj0, obj1, obj2, obj3);

    /*
     * Push two, create new segment, push two more, try to get all four,
     * try to delete all 4.  All four should be accessible, but only the
     * last two should be deletable.
     */
    DBUG_MSG("+++ START basic segment\n");
    iref0 = dvmAddToIndirectRefTable(&irt, cookie, obj0);
    iref1 = dvmAddToIndirectRefTable(&irt, cookie, obj1);
    cookie = segmentState[1] = dvmPushIndirectRefTableSegment(&irt);
    DBUG_MSG("+++ pushed, cookie is 0x%08x\n", cookie);
    iref2 = dvmAddToIndirectRefTable(&irt, cookie, obj2);
    iref3 = dvmAddToIndirectRefTable(&irt, cookie, obj3);

    /* Entries from the earlier segment must be protected from removal. */
    if (dvmRemoveFromIndirectRefTable(&irt, cookie, iref0) ||
        dvmRemoveFromIndirectRefTable(&irt, cookie, iref1))
    {
        LOGE("removed values from earlier segment\n");
        goto bail;
    }
    if (!dvmRemoveFromIndirectRefTable(&irt, cookie, iref2) ||
        !dvmRemoveFromIndirectRefTable(&irt, cookie, iref3))
    {
        LOGE("unable to remove values from current segment\n");
        goto bail;
    }
    if (dvmIndirectRefTableEntries(&irt) != 2) {
        LOGE("wrong total entries\n");
        goto bail;
    }
    /* Pop back to the first segment; its entries become removable again. */
    dvmPopIndirectRefTableSegment(&irt, segmentState[1]);
    cookie = segmentState[0];
    if (!dvmRemoveFromIndirectRefTable(&irt, cookie, iref0) ||
        !dvmRemoveFromIndirectRefTable(&irt, cookie, iref1))
    {
        LOGE("unable to remove values from first segment\n");
        goto bail;
    }
    if (dvmIndirectRefTableEntries(&irt) != 0) {
        LOGE("basic push/pop not empty\n");
        goto bail;
    }

    /*
     * Push two, delete first, segment, push two more, pop segment, verify
     * the last two are no longer present and hole count is right.  The
     * adds after the segment pop should not be filling in the hole.
     */
    DBUG_MSG("+++ START segment pop\n");
    iref0 = dvmAddToIndirectRefTable(&irt, cookie, obj0);
    iref1 = dvmAddToIndirectRefTable(&irt, cookie, obj1);
    dvmRemoveFromIndirectRefTable(&irt, cookie, iref0);
    cookie = segmentState[1] = dvmPushIndirectRefTableSegment(&irt);
    iref2 = dvmAddToIndirectRefTable(&irt, cookie, obj2);
    iref3 = dvmAddToIndirectRefTable(&irt, cookie, obj3);
    dvmPopIndirectRefTableSegment(&irt, segmentState[1]);
    cookie = segmentState[0];
    /* iref2/iref3 were discarded by the pop; iref0's hole still counts. */
    if (dvmIndirectRefTableEntries(&irt) != 2) {
        LOGE("wrong total entries after pop\n");
        goto bail;
    }
    dvmRemoveFromIndirectRefTable(&irt, cookie, iref1);
    if (dvmIndirectRefTableEntries(&irt) != 0) {
        LOGE("not back to zero after pop + del\n");
        goto bail;
    }

    /*
     * Multiple segments, some empty.
     */
    /* NOTE(review): this section uses dvmAppendToIndirectRefTable while the
     * rest of the test uses dvmAddToIndirectRefTable — presumably equivalent
     * for this purpose; confirm against the table's API. */
    DBUG_MSG("+++ START multiseg\n");
    iref0 = dvmAppendToIndirectRefTable(&irt, cookie, obj0);
    iref1 = dvmAppendToIndirectRefTable(&irt, cookie, obj1);
    cookie = segmentState[1] = dvmPushIndirectRefTableSegment(&irt);
    cookie = segmentState[2] = dvmPushIndirectRefTableSegment(&irt);
    iref3 = dvmAppendToIndirectRefTable(&irt, cookie, obj3);
    iref2 = dvmAppendToIndirectRefTable(&irt, cookie, obj2);
    dvmRemoveFromIndirectRefTable(&irt, cookie, iref3);
    cookie = segmentState[3] = dvmPushIndirectRefTableSegment(&irt);
    iref3 = dvmAppendToIndirectRefTable(&irt, cookie, obj3);

    /* All live entries must be retrievable regardless of segment. */
    if (dvmGetFromIndirectRefTable(&irt, iref0) != obj0 ||
        dvmGetFromIndirectRefTable(&irt, iref1) != obj1 ||
        dvmGetFromIndirectRefTable(&irt, iref2) != obj2 ||
        dvmGetFromIndirectRefTable(&irt, iref3) != obj3)
    {
        LOGE("Unable to retrieve all multiseg objects\n");
        goto bail;
    }

    dvmDumpIndirectRefTable(&irt, "test");

    //int i;
    //for (i = 0; i < sizeof(segmentState) / sizeof(segmentState[0]); i++) {
    //    DBUG_MSG("+++ segment %d = 0x%08x\n", i, segmentState[i]);
    //}

    dvmRemoveFromIndirectRefTable(&irt, cookie, iref3);
    /* iref2 belongs to segment 2, not the current (3rd) segment. */
    if (dvmRemoveFromIndirectRefTable(&irt, cookie, iref2)) {
        LOGE("multiseg del2 worked\n");
        goto bail;
    }
    dvmPopIndirectRefTableSegment(&irt, segmentState[3]);
    cookie = segmentState[2];
    if (!dvmRemoveFromIndirectRefTable(&irt, cookie, iref2)) {
        LOGE("multiseg del2b failed (cookie=0x%08x ref=%p)\n", cookie, iref2);
        goto bail;
    }
    iref2 = dvmAddToIndirectRefTable(&irt, cookie, obj2);

    /* pop two off at once */
    dvmPopIndirectRefTableSegment(&irt, segmentState[1]);
    cookie = segmentState[0];

    if (dvmIndirectRefTableEntries(&irt) != 2) {
        LOGE("Unexpected entry count in multiseg\n");
        goto bail;
    }
    dvmRemoveFromIndirectRefTable(&irt, cookie, iref0);
    dvmRemoveFromIndirectRefTable(&irt, cookie, iref1);
    if (dvmIndirectRefTableEntries(&irt) != 0) {
        LOGE("Unexpected entry count at multiseg end\n");
        goto bail;
    }

    DBUG_MSG("+++ segment test complete\n");
    result = true;

bail:
    dvmClearIndirectRefTable(&irt);
    return result;
}
/*
 * Constructor for the OMX FLAC decoder component.
 *
 * Allocates the component's private data and its two audio ports (port 0:
 * FLAC input, port 1: PCM output), fills in default FLAC/PCM parameters,
 * validates the requested component name, and wires up the decoder
 * callbacks and the cdk core callback table.
 *
 * Returns OMX_ErrorNone on success, OMX_ErrorInsufficientResources on
 * allocation failure, or OMX_ErrorInvalidComponentName for an unknown
 * component name.
 */
OMX_ERRORTYPE omx_flacdec_component_Constructor(OMX_COMPONENTTYPE *openmaxStandComp, OMX_STRING cComponentName)
{
    OMX_ERRORTYPE err = OMX_ErrorNone;
    omx_flacdec_component_PrivateType* omx_flacdec_component_Private;
    omx_base_audio_PortType *inPort,*outPort;
    OMX_U32 i;

#ifdef HAVE_ANDROID_OS
    if (1)
#else
    if (!openmaxStandComp->pComponentPrivate)
#endif
    {
        openmaxStandComp->pComponentPrivate = TCC_calloc(1, sizeof(omx_flacdec_component_PrivateType));
        if (openmaxStandComp->pComponentPrivate == NULL) {
            return OMX_ErrorInsufficientResources;
        }
    } else {
        DBUG_MSG("In %s, Error Component %x Already Allocated\n", __func__, (int)openmaxStandComp->pComponentPrivate);
    }

    omx_flacdec_component_Private = openmaxStandComp->pComponentPrivate;
    omx_flacdec_component_Private->ports = NULL;

    /** we could create our own port structures here
     * fixme maybe the base class could use a "port factory" function pointer?
     */
    /* NOTE(review): err is not checked here (matches original behavior);
     * a base-constructor failure would be masked until later — confirm. */
    err = omx_base_filter_Constructor(openmaxStandComp, cComponentName);

    DBUG_MSG("constructor of FLAC decoder component is called\n");

    /* Domain specific section for the ports: allocate the two audio port
     * structures and run the port constructors. */
    omx_flacdec_component_Private->sPortTypesParam[OMX_PortDomainAudio].nStartPortNumber = 0;
    omx_flacdec_component_Private->sPortTypesParam[OMX_PortDomainAudio].nPorts = 2;

    if (omx_flacdec_component_Private->sPortTypesParam[OMX_PortDomainAudio].nPorts && !omx_flacdec_component_Private->ports) {
        omx_flacdec_component_Private->ports = TCC_calloc(omx_flacdec_component_Private->sPortTypesParam[OMX_PortDomainAudio].nPorts, sizeof(omx_base_PortType *));
        if (!omx_flacdec_component_Private->ports) {
            return OMX_ErrorInsufficientResources;
        }
        for (i = 0; i < omx_flacdec_component_Private->sPortTypesParam[OMX_PortDomainAudio].nPorts; i++) {
            omx_flacdec_component_Private->ports[i] = TCC_calloc(1, sizeof(omx_base_audio_PortType));
            if (!omx_flacdec_component_Private->ports[i]) {
                return OMX_ErrorInsufficientResources;
            }
        }
    }

    base_audio_port_Constructor(openmaxStandComp, &omx_flacdec_component_Private->ports[0], 0, OMX_TRUE);  // input
    base_audio_port_Constructor(openmaxStandComp, &omx_flacdec_component_Private->ports[1], 1, OMX_FALSE); // output

    /** parameters related to input port */
    inPort = (omx_base_audio_PortType *) omx_flacdec_component_Private->ports[OMX_BASE_FILTER_INPUTPORT_INDEX];
    inPort->sPortParam.nBufferSize = DEFAULT_IN_BUFFER_SIZE*2;
    strcpy(inPort->sPortParam.format.audio.cMIMEType, "audio/flac");
    inPort->sPortParam.format.audio.eEncoding = OMX_AUDIO_CodingFLAC;
    inPort->sAudioParam.eEncoding = OMX_AUDIO_CodingFLAC;

    /** parameters related to output port */
    outPort = (omx_base_audio_PortType *) omx_flacdec_component_Private->ports[OMX_BASE_FILTER_OUTPUTPORT_INDEX];
    outPort->sPortParam.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
    outPort->sPortParam.nBufferSize = AUDIO_DEC_OUT_BUFFER_SIZE;
    outPort->sAudioParam.eEncoding = OMX_AUDIO_CodingPCM;

    /* Default values for the FLAC audio param (input port). */
    setHeader(&omx_flacdec_component_Private->pAudioFlac, sizeof(OMX_AUDIO_PARAM_FLACTYPE));
    omx_flacdec_component_Private->pAudioFlac.nPortIndex = 0;
    omx_flacdec_component_Private->pAudioFlac.nChannels = 2;
    omx_flacdec_component_Private->pAudioFlac.nBitRate = 0;
    omx_flacdec_component_Private->pAudioFlac.nSampleRate = 44100;
    omx_flacdec_component_Private->pAudioFlac.eChannelMode = OMX_AUDIO_ChannelModeStereo;

    /** settings of output port audio format - pcm */
    setHeader(&omx_flacdec_component_Private->pAudioPcmMode, sizeof(OMX_AUDIO_PARAM_PCMMODETYPE));
    omx_flacdec_component_Private->pAudioPcmMode.nPortIndex = 1;
    omx_flacdec_component_Private->pAudioPcmMode.nChannels = 2;
    omx_flacdec_component_Private->pAudioPcmMode.eNumData = OMX_NumericalDataSigned;
    omx_flacdec_component_Private->pAudioPcmMode.eEndian = OMX_EndianLittle;
    omx_flacdec_component_Private->pAudioPcmMode.bInterleaved = OMX_TRUE;
    omx_flacdec_component_Private->pAudioPcmMode.nBitPerSample = 16;
    omx_flacdec_component_Private->pAudioPcmMode.nSamplingRate = 44100;
    omx_flacdec_component_Private->pAudioPcmMode.ePCMMode = OMX_AUDIO_PCMModeLinear;
    omx_flacdec_component_Private->pAudioPcmMode.eChannelMapping[0] = OMX_AUDIO_ChannelLF;
    omx_flacdec_component_Private->pAudioPcmMode.eChannelMapping[1] = OMX_AUDIO_ChannelRF;

    /** now it's time to know the audio coding type of the component */
    if (!strcmp(cComponentName, AUDIO_DEC_FLAC_NAME)) {
        omx_flacdec_component_Private->audio_coding_type = OMX_AUDIO_CodingFLAC;
    } else if (!strcmp(cComponentName, AUDIO_DEC_BASE_NAME)) {
        omx_flacdec_component_Private->audio_coding_type = OMX_AUDIO_CodingUnused;
    } else {
        // IL client specified an invalid component name
        LOGE("OMX_ErrorInvalidComponentName %s", cComponentName);
        return OMX_ErrorInvalidComponentName;
    }

    /* Wire up the decoder-specific handlers shared with the other
     * audio decoder components. */
    omx_flacdec_component_Private->BufferMgmtCallback = omx_audiodec_component_BufferMgmtCallback;
    omx_flacdec_component_Private->messageHandler = omx_audiodec_component_MessageHandler;
    omx_flacdec_component_Private->destructor = omx_audiodec_component_Destructor;
    openmaxStandComp->SetParameter = omx_audiodec_component_SetParameter;
    openmaxStandComp->GetParameter = omx_audiodec_component_GetParameter;
    openmaxStandComp->GetExtensionIndex = omx_audiodec_component_GetExtensionIndex;

    omx_flacdec_component_Private->decode_ready = OMX_FALSE;
    memset(&omx_flacdec_component_Private->cdk_core, 0x00, sizeof(cdk_core_t));
    memset(&omx_flacdec_component_Private->cdmx_info, 0x00, sizeof(cdmx_info_t));
    memset(&omx_flacdec_component_Private->cdmx_out, 0x00, sizeof(cdmx_output_t));

    omx_flacdec_component_Private->cdk_core.m_iAudioProcessMode = 2; /* decoded pcm mode */

    /* Install the C runtime memory helpers into the cdk callback table. */
    omx_flacdec_component_Private->cdk_core.m_psCallback = &(omx_flacdec_component_Private->callback_func);
    omx_flacdec_component_Private->cdk_core.m_psCallback->m_pfMalloc  = (void* (*) ( unsigned int ))malloc;
    omx_flacdec_component_Private->cdk_core.m_psCallback->m_pfRealloc = (void* (*) ( void*, unsigned int ))realloc;
    omx_flacdec_component_Private->cdk_core.m_psCallback->m_pfFree    = (void  (*) ( void* ))free;
    omx_flacdec_component_Private->cdk_core.m_psCallback->m_pfMemcpy  = (void* (*) ( void*, const void*, unsigned int ))memcpy;
    omx_flacdec_component_Private->cdk_core.m_psCallback->m_pfMemmove = (void* (*) ( void*, const void*, unsigned int ))memmove;
    omx_flacdec_component_Private->cdk_core.m_psCallback->m_pfMemset  = (void  (*) ( void*, int, unsigned int ))memset;

    omx_flacdec_component_Private->iAdecType = AUDIO_ID_FLAC;
    omx_flacdec_component_Private->iCtype = CONTAINER_TYPE_AUDIO;
    omx_flacdec_component_Private->cb_function = TCC_FLAC_DEC;

    if (omx_flacdec_component_Private->pRest == NULL) {
        omx_flacdec_component_Private->pRest = (OMX_U8*)malloc(DEFAULT_OUT_BUFFER_SIZE);
        /* Fix: the original left this allocation unchecked; fail like the
         * other allocation paths instead of keeping a NULL work buffer. */
        if (omx_flacdec_component_Private->pRest == NULL) {
            return OMX_ErrorInsufficientResources;
        }
    }

    DBUG_MSG("constructor of FLAC decoder component is completed ret = %d \n", err);

    return err;
}