/* Pops one free block from the memory segment's pool.
 *
 * h_Mem - handle to a t_MemorySegment (must not be NULL; asserted).
 * Returns a pointer to the block, or NULL when the pool is exhausted.
 *
 * The pop happens under the segment's interrupt-safe spinlock.
 */
void * MEM_Get(t_Handle h_Mem)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    uint8_t         *p_Block;
    uint32_t        intFlags;
#ifdef DEBUG_MEM_LEAKS
    uintptr_t       callerAddr = 0;

    GET_CALLER_ADDR;    /* capture the caller's address for leak reports */
#endif /* DEBUG_MEM_LEAKS */

    ASSERT_COND(h_Mem);

    intFlags = XX_LockIntrSpinlock(p_Mem->h_Spinlock);
    /* check if there is an available block */
    if ((p_Block = (uint8_t *)MemGet(p_Mem)) == NULL)
    {
        XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);
        return NULL;
    }

#ifdef DEBUG_MEM_LEAKS
    /* tag the block with its taker while still under the lock */
    DebugMemGet(p_Mem, p_Block, callerAddr);
#endif /* DEBUG_MEM_LEAKS */
    XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);

    return (void *)p_Block;
}
// Resizes the pool's backing storage to hold aSectorCount sectors,
// preserving the contents of the sectors that survive the resize.
//
// Fixes vs. previous revision:
//  - `#endif DEBUG` (tokens after #endif are ill-formed) -> `#endif // DEBUG`
//  - malloc() result is now checked; on failure the pool is left untouched
//  - memcpy() no longer runs with a null source on the very first resize (UB)
//  - `!(!x)` double negation simplified (behavior unchanged)
void BaseMemoryPool::Resize(SectorIndex aSectorCount)
{
#ifdef DEBUG
    // Debug-only guard: abort the resize if a sector that would be dropped
    // still has its flag set (presumably "in use" — TODO confirm the
    // polarity of FlagContainer::Get against its declaration).
    for (FlagContainer::FlagId i = aSectorCount; i < mFreeSector.GetCount(); ++i)
    {
        ASSERT_COND(!mFreeSector.IsFull(),
                    "Memory Pool have sectors that are currently use that will be lost in the resizing");
        if (mFreeSector.Get(i))
        {
            return;
        }
    }
#endif // DEBUG

    size_t originalMemoryBlockSize = mMemoryBlockSize;
    void*  originalMemoryBlock     = mMemoryBlock;

    // Allocate the new backing block.
    void* newMemoryBlock = malloc(mSectorSize * aSectorCount);
    if (newMemoryBlock == nullptr)
    {
        // Allocation failed: keep the existing block and size untouched.
        return;
    }

    mMemoryBlockSize = mSectorSize * aSectorCount;
    mMemoryBlock     = newMemoryBlock;

    // Copy the surviving prefix; skip entirely on the first resize, when
    // there is no previous block (memcpy from NULL is UB even for size 0).
    if (originalMemoryBlock != nullptr)
    {
        memcpy(mMemoryBlock, originalMemoryBlock,
               Math::Min(originalMemoryBlockSize, mMemoryBlockSize));
    }

    mFreeSector.Resize(aSectorCount);
    free(originalMemoryBlock);
}
/* Commits the RCR entry currently under the cursor using PVB
 * (producer-valid-bit) production mode.
 *
 * portal - portal whose release-command ring is being produced to
 * myverb - verb byte for the entry (the ring's current valid-bit is OR'ed in)
 *
 * Must be paired with a preceding bm_rcr_start() (rcr->busy asserted).
 */
void bm_rcr_pvb_commit(struct bm_portal *portal, uint8_t myverb)
{
    register struct bm_rcr *rcr = &portal->rcr;
    struct bm_rcr_entry *rcursor;

    ASSERT_COND(rcr->busy);
    ASSERT_COND(rcr->pmode == e_BmPortalPVB);
    /* order all prior stores into the entry before publishing the verb */
    lwsync();
    rcursor = rcr->cursor;
    /* writing the verb (with the valid-bit) is what hands the entry to hw */
    rcursor->__dont_write_directly__verb = (uint8_t)(myverb | rcr->vbit);
    /* flush the entry's cacheline so hardware observes the committed entry */
    dcbf_64(rcursor);
    RCR_INC(rcr);      /* advance cursor (presumably toggles vbit on ring
                        * wrap — confirm against RCR_INC's definition) */
    rcr->available--;
#ifdef BM_CHECKING
    rcr->busy = 0;
#endif /* BM_CHECKING */
}
/* Tears down the parser module for the master partition by detaching both
 * of its FM interrupt registrations (error and normal). The handlers were
 * registered unconditionally at init time, so both must be unregistered
 * here regardless of whether interrupts were ever enabled. */
void PrsFree(t_FmPcd *p_FmPcd )
{
    t_Handle h_Fm = p_FmPcd->h_Fm;

    ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);

    /* register even if no interrupts enabled, to allow future enablement */
    FmUnregisterIntr(h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_ERR);
    FmUnregisterIntr(h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_NORMAL);
}
/* Disables the parser by clearing the enable bit in the RPIMAC register.
 * Master-partition only (asserted); all other RPIMAC bits are preserved. */
void PrsDisable(t_FmPcd *p_FmPcd )
{
    t_FmPcdPrsRegs *p_Regs = p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
    uint32_t       rpimac;

    ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);

    /* read-modify-write: drop only the enable bit */
    rpimac = GET_UINT32(p_Regs->rpimac);
    rpimac &= ~FM_PCD_PRS_RPIMAC_EN;
    WRITE_UINT32(p_Regs->rpimac, rpimac);
}
// Hands out one available task, removing it from the available list.
// Precondition (asserted): at least one task is available after the
// refresh performed by UpdateAvailableTasks().
TaskFunctionNode& TaskTree::GetOne()
{
    UpdateAvailableTasks();
    ASSERT_COND(GetAvailableCount() > 0, "No tasks were available");

    TaskFunctionNode& task = *mAvailableTasks.back();
    mAvailableTasks.pop_back();
    return task;
}
/* Begins a management-command sequence on the portal: returns the CR
 * (command) entry for the caller to fill in. Must be completed with
 * bm_mc_commit() or abandoned with bm_mc_abort().
 */
struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
    register struct bm_mc *mc = &portal->mc;

    ASSERT_COND(mc->state == mc_idle);
#ifdef BM_CHECKING
    mc->state = mc_user;    /* state tracking exists only in checking builds */
#endif /* BM_CHECKING */
    /* zero the command cacheline before handing it to the caller */
    dcbz_64(mc->cr);
    return mc->cr;
}
/* Abandons a management command started with bm_mc_start() without
 * submitting it to hardware. */
void bm_mc_abort(struct bm_portal *portal)
{
    register struct bm_mc *mc = &portal->mc;

    ASSERT_COND(mc->state == mc_user);
#ifdef BM_CHECKING
    mc->state = mc_idle;
#else
    UNUSED(mc);             /* mc is only touched in checking builds */
#endif /* BM_CHECKING */
}
/* Re-reads the RCR consumer index from the cache-inhibited register and
 * credits the newly consumed entries back to 'available'.
 *
 * Returns the number of entries hardware consumed since the last update.
 * Only valid in CCI consumption mode (asserted).
 */
uint8_t bm_rcr_cci_update(struct bm_portal *portal)
{
    register struct bm_rcr *rcr = &portal->rcr;
    uint8_t diff, old_ci = rcr->ci;

    ASSERT_COND(rcr->cmode == e_BmPortalRcrCCI);
    /* mask to the ring size (the mask is only valid if BM_RCR_SIZE is a
     * power of two — presumably it is; confirm against its definition) */
    rcr->ci = (uint8_t)(bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1));
    diff = cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
    rcr->available += diff;
    return diff;
}
/* Abandons an RCR entry obtained from bm_rcr_start() without committing
 * it to hardware. */
void bm_rcr_abort(struct bm_portal *portal)
{
    register struct bm_rcr *rcr = &portal->rcr;

    ASSERT_COND(rcr->busy);
#ifdef BM_CHECKING
    rcr->busy = 0;
#else
    UNUSED(rcr);            /* rcr is only touched in checking builds */
#endif /* BM_CHECKING */
}
/* Returns the base address of the memory manager's first registered
 * memory block. Assumes at least one block was registered at MM init. */
uint64_t MM_GetBase(t_Handle h_MM)
{
    t_MM *p_MM = (t_MM*)h_MM;

    ASSERT_COND(p_MM);

    /* the pool's base is the base of the head of the mem-block list */
    return p_MM->memBlocks->base;
}
/* Finalizes the portal's management-command interface.
 *
 * Expects the interface to be idle (asserted); in checking builds a
 * still-in-flight command is additionally reported as lost.
 */
void bm_mc_finish(struct bm_portal *portal)
{
    register struct bm_mc *mc = &portal->mc;

    ASSERT_COND(mc->state == mc_idle);
#ifdef BM_CHECKING
    if (mc->state != mc_idle)
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("Losing incomplete MC command"));
#else
    UNUSED(mc);
#endif /* BM_CHECKING */
}
/* Submits the management command prepared since bm_mc_start().
 *
 * myverb - verb byte; the current valid-bit is OR'ed in before the write,
 *          which is what makes the command visible to hardware.
 */
void bm_mc_commit(struct bm_portal *portal, uint8_t myverb)
{
    register struct bm_mc *mc = &portal->mc;

    ASSERT_COND(mc->state == mc_user);
    /* order all prior stores into the command before publishing the verb */
    lwsync();
    mc->cr->__dont_write_directly__verb = (uint8_t)(myverb | mc->vbit);
    /* flush the command cacheline so hardware observes it */
    dcbf_64(mc->cr);
    /* touch the response slot that bm_mc_result() will poll */
    dcbit_ro(mc->rr + mc->rridx);
#ifdef BM_CHECKING
    mc->state = mc_hw;
#endif /* BM_CHECKING */
}
/* Sets (enable != 0) or clears the state-change-notification mask bit for
 * one buffer pool, leaving all other pools' bits untouched. */
void bm_isr_bscn_mask(struct bm_portal *portal, uint8_t bpid, int enable)
{
    uint32_t val;

    ASSERT_COND(bpid < BM_MAX_NUM_OF_POOLS);
    /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
    val = __bm_in(&portal->addr, SCN_REG(bpid));
    val = enable ? (val | SCN_BIT(bpid)) : (val & ~SCN_BIT(bpid));
    __bm_out(&portal->addr, SCN_REG(bpid), val);
}
/* Claims the next RCR entry for the caller to fill.
 *
 * Returns NULL when no entries are available (the caller should reclaim
 * consumed entries via an update routine and retry). On success the
 * entry's cacheline is zeroed; the entry must then be committed or
 * aborted before the next bm_rcr_start().
 */
struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
    register struct bm_rcr *rcr = &portal->rcr;

    ASSERT_COND(!rcr->busy);
    if (!rcr->available)
        return NULL;
#ifdef BM_CHECKING
    rcr->busy = 1;
#endif /* BM_CHECKING */
    dcbz_64(rcr->cursor);
    return rcr->cursor;
}
/* Registers an additional physical region [base, base+size) with the
 * memory manager and seeds the free lists with it.
 *
 * Returns E_OK on success, E_ALREADY_EXISTS when 'base' falls inside an
 * already-registered block, E_NO_MEMORY on allocation failure, or the
 * error propagated from AddFree().
 *
 * NOTE(review): the overlap test only checks whether 'base' lies inside an
 * existing block — a new region that fully *contains* an existing block
 * would pass; confirm callers never do that. Also, on the AddFree failure
 * path the rollback unlink happens after the spinlock is released —
 * confirm no concurrent walker can observe that window.
 */
t_Error MM_Add(t_Handle h_MM, uint64_t base, uint64_t size)
{
    t_MM        *p_MM = (t_MM *)h_MM;
    t_MemBlock  *p_MemB, *p_NewMemB;
    t_Error     errCode;
    uint32_t    intFlags;

    ASSERT_COND(p_MM);

    /* find a last block in the list of memory blocks to insert a new
     * memory block */
    intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
    p_MemB = p_MM->memBlocks;
    while ( p_MemB->p_Next )
    {
        if ( base >= p_MemB->base && base < p_MemB->end )
        {
            XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
            RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
        }
        p_MemB = p_MemB->p_Next;
    }
    /* check for a last memory block */
    if ( base >= p_MemB->base && base < p_MemB->end )
    {
        XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
        RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
    }

    /* create a new memory block */
    if ((p_NewMemB = CreateNewBlock(base, size)) == NULL)
    {
        XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
    }

    /* append a new memory block to the end of the list of memory blocks */
    p_MemB->p_Next = p_NewMemB;

    /* add a new free block to the free lists */
    errCode = AddFree(p_MM, base, base+size);
    if (errCode)
    {
        XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
        /* roll back: unlink and release the block we just appended */
        p_MemB->p_Next = 0;
        XX_Free(p_NewMemB);
        return ((t_Error)errCode);
    }
    XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);

    return (E_OK);
}
/* ISR for normal (non-error) parser events. The only event routed here is
 * the single-bit ECC correction (asserted below); it is acknowledged in
 * the event register and forwarded to the application's exception callback.
 */
static void PcdPrsException(t_Handle h_FmPcd)
{
    t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
    uint32_t event, force;

    ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
    /* pending events, masked by the enabled-events register */
    event = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pevr);
    event &= GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pever);

    ASSERT_COND(event & FM_PCD_PRS_SINGLE_ECC);

    DBG(TRACE, ("parser event - 0x%08x\n",event));

    /* clear the forced events */
    force = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pevfr);
    if(force & event)
        WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pevfr, force & ~event);

    /* acknowledge the event(s), then notify the application */
    WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pevr, event);

    p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC);
}
/* Finalizes the portal's release-command ring, warning (without failing)
 * if uncommitted entries, unconsumed completions, or a producer/consumer
 * mismatch would be dropped.
 */
void bm_rcr_finish(struct bm_portal *portal)
{
    register struct bm_rcr *rcr = &portal->rcr;
    /* hardware's view of the producer and consumer indices */
    uint8_t pi = (uint8_t)(bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1));
    uint8_t ci = (uint8_t)(bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1));

    ASSERT_COND(!rcr->busy);
    if (pi != RCR_PTR2IDX(rcr->cursor))
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("losing uncommitted RCR entries"));
    if (ci != rcr->ci)
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("missing existing RCR completions"));
    if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("RCR destroyed unquiesced"));
}
/* Force-allocates the exact range [base, base+size) from the memory
 * manager.
 *
 * The whole range must lie inside a single free block; on success it is
 * cut from the free lists, recorded as a busy block tagged with 'name',
 * and 'base' is returned. Returns ILLEGAL_BASE on any failure.
 *
 * Fix: the freshly created busy-block descriptor is now released when
 * CutFree() fails — it previously leaked.
 */
uint64_t MM_GetForce(t_Handle h_MM, uint64_t base, uint64_t size, char* name)
{
    t_MM        *p_MM = (t_MM *)h_MM;
    t_FreeBlock *p_FreeB;
    t_BusyBlock *p_NewBusyB;
    uint32_t    intFlags;
    bool        blockIsFree = FALSE;

    ASSERT_COND(p_MM);

    intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
    p_FreeB = p_MM->freeBlocks[0]; /* The biggest free blocks are in the
                                      free list with alignment 1 */

    while ( p_FreeB )
    {
        if ( base >= p_FreeB->base && (base+size) <= p_FreeB->end )
        {
            blockIsFree = TRUE;
            break;
        }
        else
            p_FreeB = p_FreeB->p_Next;
    }

    if ( !blockIsFree )
    {
        XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
        return (uint64_t)(ILLEGAL_BASE);
    }

    /* init a new busy block */
    if ((p_NewBusyB = CreateBusyBlock(base, size, name)) == NULL)
    {
        XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
        return (uint64_t)(ILLEGAL_BASE);
    }

    /* calls Update routine to update a lists of free blocks */
    if ( CutFree ( p_MM, base, base+size ) != E_OK )
    {
        XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
        XX_Free(p_NewBusyB);    /* don't leak the descriptor on failure */
        return (uint64_t)(ILLEGAL_BASE);
    }

    /* insert the new busy block into the list of busy blocks */
    AddBusy ( p_MM, p_NewBusyB );
    XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);

    return (base);
}
/* Reports whether 'addr' falls inside the memory manager's first
 * registered memory block ([base, end) half-open interval). */
bool MM_InRange(t_Handle h_MM, uint64_t addr)
{
    t_MM       *p_MM = (t_MM*)h_MM;
    t_MemBlock *p_Block;

    ASSERT_COND(p_MM);

    p_Block = p_MM->memBlocks;
    return ((addr >= p_Block->base) && (addr < p_Block->end)) ? TRUE : FALSE;
}
/* Looks up the sw-parser instruction offset previously attached for the
 * (hdr, indexPerHdr) pair in the parser labels table.
 *
 * Returns the instruction offset, or ILLEGAL_BASE (with an error report)
 * when no matching label exists. Master partition only (asserted).
 */
static uint32_t GetSwPrsOffset(t_Handle h_FmPcd, e_NetHeaderType hdr, uint8_t indexPerHdr)
{
    t_FmPcd                 *p_FmPcd = (t_FmPcd*)h_FmPcd;
    int                     i;
    t_FmPcdPrsLabelParams   *p_Label;

    SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, 0);
    SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE, 0);

    ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);
    ASSERT_COND(p_FmPcd->p_FmPcdPrs->currLabel < FM_PCD_PRS_NUM_OF_LABELS);

    /* linear scan over the currently attached labels */
    for (i=0; i < p_FmPcd->p_FmPcdPrs->currLabel; i++)
    {
        p_Label = &p_FmPcd->p_FmPcdPrs->labelsTable[i];
        if ((hdr == p_Label->hdr) && (indexPerHdr == p_Label->indexPerHdr))
            return p_Label->instructionOffset;
    }

    REPORT_ERROR(MAJOR, E_NOT_FOUND, ("Sw Parser attachment Not found"));
    return (uint32_t)ILLEGAL_BASE;
}
/* Returns the busy block that starts exactly at 'base' back to the free
 * lists.
 *
 * Returns the size of the released block, or 0 when no busy block with
 * that base exists or re-freeing it failed.
 */
uint64_t MM_Put(t_Handle h_MM, uint64_t base)
{
    t_MM        *p_MM = (t_MM *)h_MM;
    t_BusyBlock *p_BusyB, *p_PrevBusyB;
    uint64_t    size;
    uint32_t    intFlags;

    ASSERT_COND(p_MM);

    /* Look for a busy block that have the given base value.
     * That block will be returned back to the memory.
     */
    p_PrevBusyB = 0;
    intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
    p_BusyB = p_MM->busyBlocks;
    while ( p_BusyB && base != p_BusyB->base )
    {
        p_PrevBusyB = p_BusyB;
        p_BusyB = p_BusyB->p_Next;
    }

    if ( !p_BusyB )
    {
        XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
        return (uint64_t)(0);
    }

    /* NOTE(review): on AddFree failure the block stays in the busy list
     * and 0 is returned — callers cannot distinguish this from "not found" */
    if ( AddFree( p_MM, p_BusyB->base, p_BusyB->end ) != E_OK )
    {
        XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
        return (uint64_t)(0);
    }

    /* removes a busy block form the list of busy blocks */
    if ( p_PrevBusyB )
        p_PrevBusyB->p_Next = p_BusyB->p_Next;
    else
        p_MM->busyBlocks = p_BusyB->p_Next;

    size = p_BusyB->end - p_BusyB->base;
    XX_Free(p_BusyB);
    XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);

    return (size);
}
/* Releases every resource owned by a memory-manager instance: all busy,
 * free and memory-block descriptors, the spinlock, and finally the MM
 * object itself. The handle is invalid after this call. */
void MM_Free(t_Handle h_MM)
{
    t_MM        *p_MM = (t_MM *)h_MM;
    t_BusyBlock *p_Busy, *p_NextBusy;
    t_FreeBlock *p_FreeB, *p_NextFree;
    t_MemBlock  *p_MemB, *p_NextMem;
    int         i;

    ASSERT_COND(p_MM);

    /* release memory allocated for busy blocks */
    for (p_Busy = p_MM->busyBlocks; p_Busy; p_Busy = p_NextBusy)
    {
        p_NextBusy = p_Busy->p_Next;
        XX_Free(p_Busy);
    }

    /* release memory allocated for free blocks, one alignment list at a time */
    for (i = 0; i <= MM_MAX_ALIGNMENT; i++)
    {
        for (p_FreeB = p_MM->freeBlocks[i]; p_FreeB; p_FreeB = p_NextFree)
        {
            p_NextFree = p_FreeB->p_Next;
            XX_Free(p_FreeB);
        }
    }

    /* release memory allocated for memory blocks */
    for (p_MemB = p_MM->memBlocks; p_MemB; p_MemB = p_NextMem)
    {
        p_NextMem = p_MemB->p_Next;
        XX_Free(p_MemB);
    }

    if (p_MM->h_Spinlock)
        XX_FreeSpinlock(p_MM->h_Spinlock);

    /* release memory allocated for MM object itself */
    XX_Free(p_MM);
}
/* Returns the base address of the index-th registered memory block
 * (0-based), or ILLEGAL_BASE when fewer than index+1 blocks exist.
 *
 * Fix: the walk now stops at the end of the list. The previous version
 * dereferenced p_MemBlock->p_Next inside the loop without a NULL check,
 * crashing when 'index' exceeded the list length (the NULL test only
 * happened after the loop).
 */
uint64_t MM_GetMemBlock(t_Handle h_MM, int index)
{
    t_MM       *p_MM = (t_MM*)h_MM;
    t_MemBlock *p_MemBlock;
    int        i;

    ASSERT_COND(p_MM);

    p_MemBlock = p_MM->memBlocks;
    for (i=0; (i < index) && p_MemBlock; i++)
        p_MemBlock = p_MemBlock->p_Next;

    if ( p_MemBlock )
        return (p_MemBlock->base);
    else
        return (uint64_t)ILLEGAL_BASE;
}
/* Debug helper: prints every block of the segment that is still marked as
 * taken (ownerAddress != ILLEGAL_BASE) together with the address of the
 * code that took it. Only meaningful when DEBUG_MEM_LEAKS bookkeeping was
 * compiled in and populated p_MemDbg.
 */
void MEM_CheckLeaks(t_Handle h_Mem)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    t_MemDbg        *p_MemDbg = (t_MemDbg *)p_Mem->p_MemDbg;
    uint8_t         *p_Block;
    int             i;

    ASSERT_COND(h_Mem);

    if (p_Mem->consecutiveMem)
    {
        /* blocks live back-to-back in one allocation: address is computed
         * from the single base plus the block index */
        for (i=0; i < p_Mem->num; i++)
        {
            if (p_MemDbg[i].ownerAddress != ILLEGAL_BASE)
            {
                /* Find the block address */
                p_Block = ((p_Mem->p_Bases[0] + p_Mem->blockOffset) +
                           (i * p_Mem->blockSize));

                XX_Print("MEM leak: 0x%08x, Caller address: 0x%08x\n",
                         p_Block, p_MemDbg[i].ownerAddress);
            }
        }
    }
    else
    {
        /* each block has its own base: re-derive the aligned address the
         * user was handed (mirrors the allocation-time computation —
         * presumably; confirm against the segment's init code) */
        for (i=0; i < p_Mem->num; i++)
        {
            if (p_MemDbg[i].ownerAddress != ILLEGAL_BASE)
            {
                /* Find the block address */
                p_Block = p_Mem->p_Bases[i];
                ALIGN_BLOCK(p_Block, p_Mem->prefixSize, p_Mem->alignment);

                if (p_Block == p_Mem->p_Bases[i])
                    p_Block += p_Mem->alignment;

                XX_Print("MEM leak: 0x%08x, Caller address: 0x%08x\n",
                         p_Block, p_MemDbg[i].ownerAddress);
            }
        }
    }
}
/* Polls for the response to the last committed management command.
 *
 * Returns NULL while hardware has not yet completed the command;
 * otherwise returns the response and flips the response index and
 * valid-bit for the next command cycle.
 */
struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
    register struct bm_mc *mc = &portal->mc;
    struct bm_mc_result *rr = mc->rr + mc->rridx;

    ASSERT_COND(mc->state == mc_hw);
    /* The inactive response register's verb byte always returns zero until
     * its command is submitted and completed. This includes the valid-bit,
     * in case you were wondering... */
    if (!rr->verb) {
        dcbit_ro(rr);   /* re-touch the line so the next poll sees fresh data */
        return NULL;
    }
    mc->rridx ^= 1;
    mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef BM_CHECKING
    mc->state = mc_idle;
#endif /* BM_CHECKING */
    return rr;
}
/* Pops up to 'num' blocks from the segment into 'array'.
 *
 * Returns the number of blocks actually obtained — possibly fewer than
 * requested (down to 0) when the pool runs short. All pops happen under a
 * single spinlock acquisition.
 */
uint16_t MEM_GetN(t_Handle h_Mem, uint32_t num, void *array[])
{
    t_MemorySegment     *p_Mem = (t_MemorySegment *)h_Mem;
    uint32_t            availableBlocks;
    register uint32_t   i;
    uint32_t            intFlags;
#ifdef DEBUG_MEM_LEAKS
    uintptr_t           callerAddr = 0;

    GET_CALLER_ADDR;    /* capture the caller's address for leak reports */
#endif /* DEBUG_MEM_LEAKS */

    ASSERT_COND(h_Mem);

    intFlags = XX_LockIntrSpinlock(p_Mem->h_Spinlock);
    /* check how many blocks are available */
    availableBlocks = (uint32_t)(p_Mem->num - p_Mem->current);
    /* clamp the request to what the pool can actually supply */
    if (num > availableBlocks)
    {
        num = availableBlocks;
    }

    for (i=0; i < num; i++)
    {
        /* get pointer to block */
        if ((array[i] = MemGet(p_Mem)) == NULL)
        {
            break;
        }

#ifdef DEBUG_MEM_LEAKS
        DebugMemGet(p_Mem, array[i], callerAddr);
#endif /* DEBUG_MEM_LEAKS */
    }
    XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);

    return (uint16_t)i;
}
/* Returns a block to its memory segment's pool.
 *
 * h_Mem   - handle to the t_MemorySegment (must not be NULL; asserted)
 * p_Block - block previously obtained from this segment
 *
 * Returns E_OK, or the error from MemPut when the pool cannot take the
 * block back (e.g. the blocks stack is already full).
 */
t_Error MEM_Put(t_Handle h_Mem, void *p_Block)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    uint32_t        intFlags;
    t_Error         err;

    ASSERT_COND(h_Mem);

    intFlags = XX_LockIntrSpinlock(p_Mem->h_Spinlock);

    /* push the block back; fails if the blocks stack is full */
    err = MemPut(p_Mem, p_Block);
    if (err != E_OK)
    {
        XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);
        RETURN_ERROR(MAJOR, err, NO_MSG);
    }

#ifdef DEBUG_MEM_LEAKS
    /* clear the block's owner bookkeeping while still under the lock */
    DebugMemPut(p_Mem, p_Block);
#endif /* DEBUG_MEM_LEAKS */
    XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);

    return E_OK;
}
/* Force-releases the range [base, base+size): cuts it out of whatever
 * busy blocks cover it and returns it to the free lists.
 *
 * Returns 'size' on success, 0 if either step fails.
 */
uint64_t MM_PutForce(t_Handle h_MM, uint64_t base, uint64_t size)
{
    t_MM     *p_MM   = (t_MM *)h_MM;
    uint64_t end     = base + size;
    uint64_t result  = size;
    uint32_t intFlags;

    ASSERT_COND(p_MM);

    intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
    /* short-circuit: AddFree only runs after CutBusy succeeded */
    if ((CutBusy(p_MM, base, end) != E_OK) ||
        (AddFree(p_MM, base, end) != E_OK))
        result = 0;
    XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);

    return result;
}
/* One-time initialization of the parser module (master partition only;
 * guest partitions return E_OK immediately).
 *
 * Programs the parse-cycle limit, registers error/normal interrupt
 * handlers, acknowledges and (per the configured exceptions) enables the
 * single/double ECC events, enables per-port statistics, and applies
 * rev-1.0 silicon errata workarounds when the corresponding build flags
 * are set.
 *
 * Returns E_OK always.
 */
t_Error PrsInit(t_FmPcd *p_FmPcd)
{
    t_FmPcdDriverParam *p_Param = p_FmPcd->p_FmPcdDriverParam;
    t_FmPcdPrsRegs *p_Regs = p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs;
    uint32_t tmpReg;

    /* guests do not own the parser hardware; nothing to do */
    if(p_FmPcd->guestId != NCSW_MASTER_ID)
        return E_OK;

    ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID);

#ifdef FM_PRS_MEM_ERRATA_FMAN_SW003
    {
        uint32_t i;
        /* fixed offsets from the parser register block to the global
         * parser memory and the first port's memory window */
        uint32_t regsToGlobalOffset = 0x840;
        uint32_t firstPortToGlobalOffset = 0x45800;
        uint64_t globalAddr = PTR_TO_UINT(p_Regs) - regsToGlobalOffset;
        uint32_t firstPortAddr = (uint32_t)(globalAddr - (uint64_t)firstPortToGlobalOffset);
        uint32_t portSize = 0x1000;
        t_FmRevisionInfo revInfo;

        FM_GetRevision(p_FmPcd->h_Fm, &revInfo);

        /* workaround applies to rev 1.0 silicon only */
        if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0))
        {
            /* clear all parser memory */
            IOMemSet32(UINT_TO_PTR(globalAddr), 0x00000000, 0x800);
            for(i = 0;i<16;i++)
                IOMemSet32(UINT_TO_PTR(firstPortAddr+i*portSize), (uint8_t)0x00000000, (uint32_t)0x80);
        }
    }
#endif /* FM_PRS_MEM_ERRATA_FMAN_SW003 */

    /**********************RPCLIM******************/
    WRITE_UINT32(p_Regs->rpclim, (uint32_t)p_Param->prsMaxParseCycleLimit);
    /**********************FMPL_RPCLIM******************/

    /* register even if no interrupts enabled, to allow future enablement */
    FmRegisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_ERR, PcdPrsErrorException, p_FmPcd);

    /* register even if no interrupts enabled, to allow future enablement */
    FmRegisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_NORMAL, PcdPrsException, p_FmPcd);

    /**********************PEVR******************/
    /* acknowledge any stale events before enabling anything */
    WRITE_UINT32(p_Regs->pevr, (FM_PCD_PRS_SINGLE_ECC | FM_PCD_PRS_PORT_IDLE_STS) );
    /**********************PEVR******************/

    /**********************PEVER******************/
    if(p_FmPcd->exceptions & FM_PCD_EX_PRS_SINGLE_ECC)
    {
        FmEnableRamsEcc(p_FmPcd->h_Fm);
        WRITE_UINT32(p_Regs->pever, FM_PCD_PRS_SINGLE_ECC);
    }
    else
        WRITE_UINT32(p_Regs->pever, 0);
    /**********************PEVER******************/

    /**********************PERR******************/
    /* acknowledge any stale error events */
    WRITE_UINT32(p_Regs->perr, FM_PCD_PRS_DOUBLE_ECC);
    /**********************PERR******************/

    /**********************PERER******************/
    tmpReg = 0;
    if(p_FmPcd->exceptions & FM_PCD_EX_PRS_DOUBLE_ECC)
    {
        FmEnableRamsEcc(p_FmPcd->h_Fm);
        tmpReg |= FM_PCD_PRS_DOUBLE_ECC;
    }
    WRITE_UINT32(p_Regs->perer, tmpReg);
    /**********************PERER******************/

    /**********************PPCS******************/
    WRITE_UINT32(p_Regs->ppsc, p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics);
    /**********************PPCS******************/

#ifdef FM_PRS_L4_SHELL_ERRATA_FMANb
    {
        uint32_t i, j;
        t_FmRevisionInfo revInfo;
        uint8_t swPrsL4Patch[] = SW_PRS_L4_PATCH;

        FM_GetRevision(p_FmPcd->h_Fm, &revInfo);

        /* patch applies to rev 1.0 silicon only */
        if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0))
        {
            /* load sw parser L4 patch */
            for(i=0;i<sizeof(swPrsL4Patch)/4;i++)
            {
                /* pack 4 patch bytes into one big-endian 32-bit word */
                tmpReg = 0;
                for(j =0;j<4;j++)
                {
                    tmpReg <<= 8;
                    tmpReg |= swPrsL4Patch[i*4+j];
                }
                WRITE_UINT32(*(p_FmPcd->p_FmPcdPrs->p_SwPrsCode+ FM_PCD_PRS_SW_OFFSET/4 + i), tmpReg);
            }
            /* subsequent sw-parser loads start right after the patch */
            p_FmPcd->p_FmPcdPrs->p_CurrSwPrs = FM_PCD_PRS_SW_OFFSET/4 + p_FmPcd->p_FmPcdPrs->p_SwPrsCode+sizeof(swPrsL4Patch)/4;
        }
    }
#endif /* FM_PRS_L4_SHELL_ERRATA_FMANb */

    return E_OK;
}