/*
 * MEM_Free - destroy a memory segment and release all of its resources.
 *
 * h_Mem may be NULL, in which case the call is a no-op.
 *
 * Fix: the original called MEM_CheckLeaks(h_Mem) BEFORE the NULL guard,
 * passing a possibly-NULL handle to the leak checker; the call is now made
 * only for a valid handle.
 */
void MEM_Free(t_Handle h_Mem)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    uint32_t        num, i;

    if (!p_Mem)
        return;

    /* Check MEM leaks (valid handle only) */
    MEM_CheckLeaks(h_Mem);

    /* For consecutive memory a single base was allocated */
    num = p_Mem->consecutiveMem ? 1 : p_Mem->num;

    /* Release the data blocks only if this segment owns them */
    if (p_Mem->allocOwner == e_MEM_ALLOC_OWNER_LOCAL_SMART)
    {
        for (i = 0; i < num; i++)
        {
            if (p_Mem->p_Bases[i])
                XX_FreeSmart(p_Mem->p_Bases[i]);
        }
    }
    else if (p_Mem->allocOwner == e_MEM_ALLOC_OWNER_LOCAL)
    {
        for (i = 0; i < num; i++)
        {
            if (p_Mem->p_Bases[i])
                XX_Free(p_Mem->p_Bases[i]);
        }
    }

    if (p_Mem->h_Spinlock)
        XX_FreeSpinlock(p_Mem->h_Spinlock);

    if (p_Mem->p_Bases)
        XX_Free(p_Mem->p_Bases);

    if (p_Mem->p_BlocksStack)
        XX_Free(p_Mem->p_BlocksStack);

#ifdef DEBUG_MEM_LEAKS
    if (p_Mem->p_MemDbg)
        XX_Free(p_Mem->p_MemDbg);
#endif /* DEBUG_MEM_LEAKS */

    XX_Free(p_Mem);
}
/*
 * FM_VSP_Free - release a virtual storage profile entry.
 *
 * Fix: FM_VSP_Config() allocates p_FmVspEntryDriverParams and only a
 * *successful* FM_VSP_Init() frees it.  If the entry is freed between
 * Config and Init (or after a failed Init), the driver-params struct
 * leaked; free it here as well.
 */
t_Error FM_VSP_Free(t_Handle h_FmVsp)
{
    t_FmVspEntry *p_FmVspEntry = (t_FmVspEntry *)h_FmVsp;

    SANITY_CHECK_RETURN_ERROR(h_FmVsp, E_INVALID_HANDLE);

    if (p_FmVspEntry->p_FmVspEntryDriverParams)
        XX_Free(p_FmVspEntry->p_FmVspEntryDriverParams);

    XX_Free(p_FmVspEntry);

    return E_OK;
}
/*
 * MM_Free - destroy an MM object: drain the busy list, every free list,
 * the memory-block list, release the spinlock, then free the object itself.
 */
void MM_Free(t_Handle h_MM)
{
    t_MM        *p_MM = (t_MM *)h_MM;
    t_BusyBlock *p_Busy;
    t_FreeBlock *p_Free;
    t_MemBlock  *p_Mem;
    int          listIdx;

    ASSERT_COND(p_MM);

    /* drain the busy-blocks list */
    while ((p_Busy = p_MM->busyBlocks) != NULL)
    {
        p_MM->busyBlocks = p_Busy->p_Next;
        XX_Free(p_Busy);
    }

    /* drain each per-alignment free list */
    for (listIdx = 0; listIdx <= MM_MAX_ALIGNMENT; listIdx++)
    {
        while ((p_Free = p_MM->freeBlocks[listIdx]) != NULL)
        {
            p_MM->freeBlocks[listIdx] = p_Free->p_Next;
            XX_Free(p_Free);
        }
    }

    /* drain the memory-blocks list */
    while ((p_Mem = p_MM->memBlocks) != NULL)
    {
        p_MM->memBlocks = p_Mem->p_Next;
        XX_Free(p_Mem);
    }

    if (p_MM->h_Spinlock)
        XX_FreeSpinlock(p_MM->h_Spinlock);

    /* finally, release the MM object itself */
    XX_Free(p_MM);
}
/*
 * MM_Add - add a new memory region [base, base+size) to the MM object.
 *
 * Returns E_OK on success, E_ALREADY_EXISTS if the region overlaps an
 * existing one, E_NO_MEMORY on allocation failure.
 *
 * Fixes vs. original:
 *  - The overlap test only checked whether `base` fell inside an existing
 *    block, missing a new region that encloses or straddles an existing
 *    block's start; a full interval-overlap test is used now.
 *  - On the AddFree() failure path the list fixup (p_MemB->p_Next = 0)
 *    was performed AFTER the spinlock was released, racing with other
 *    users of the list; the fixup now happens under the lock.
 */
t_Error MM_Add(t_Handle h_MM, uint64_t base, uint64_t size)
{
    t_MM       *p_MM = (t_MM *)h_MM;
    t_MemBlock *p_MemB, *p_NewMemB;
    t_Error     errCode;
    uint32_t    intFlags;

    ASSERT_COND(p_MM);

    /* find the last block in the list of memory blocks, rejecting any
     * overlap with existing regions along the way */
    intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
    p_MemB = p_MM->memBlocks;
    while ( p_MemB->p_Next )
    {
        if ( (base < p_MemB->end) && ((base + size) > p_MemB->base) )
        {
            XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
            RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
        }
        p_MemB = p_MemB->p_Next;
    }
    /* check the last memory block as well */
    if ( (base < p_MemB->end) && ((base + size) > p_MemB->base) )
    {
        XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
        RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
    }

    /* create a new memory block */
    if ((p_NewMemB = CreateNewBlock(base, size)) == NULL)
    {
        XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
    }

    /* append the new memory block to the end of the list */
    p_MemB->p_Next = p_NewMemB;

    /* add a new free block to the free lists */
    errCode = AddFree(p_MM, base, base + size);
    if (errCode != E_OK)
    {
        /* undo the list linkage while still holding the lock */
        p_MemB->p_Next = 0;
        XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
        XX_Free(p_NewMemB);
        return errCode;
    }

    XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);

    return (E_OK);
}
/*
 * MM_Put - return a previously-obtained block (identified by its base
 * address) back to the memory manager.
 *
 * Returns the size of the released block, or 0 if no busy block with the
 * given base exists or the free lists could not be updated.
 */
uint64_t MM_Put(t_Handle h_MM, uint64_t base)
{
    t_MM        *p_MM = (t_MM *)h_MM;
    t_BusyBlock *p_Curr, *p_Prev;
    uint64_t     releasedSize;
    uint32_t     intFlags;

    ASSERT_COND(p_MM);

    /* Locate the busy block whose base matches the given address;
     * that block will be returned to the free pool. */
    intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock);
    for (p_Prev = 0, p_Curr = p_MM->busyBlocks;
         p_Curr && p_Curr->base != base;
         p_Prev = p_Curr, p_Curr = p_Curr->p_Next)
        ;

    if ( !p_Curr )
    {
        /* no such busy block */
        XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
        return (uint64_t)(0);
    }

    if ( AddFree(p_MM, p_Curr->base, p_Curr->end) != E_OK )
    {
        /* could not merge back into the free lists */
        XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);
        return (uint64_t)(0);
    }

    /* unlink the block from the busy list */
    if ( p_Prev )
        p_Prev->p_Next = p_Curr->p_Next;
    else
        p_MM->busyBlocks = p_Curr->p_Next;

    releasedSize = p_Curr->end - p_Curr->base;
    XX_Free(p_Curr);
    XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags);

    return (releasedSize);
}
/*
 * MM_Init - create and initialize an MM object covering [base, base+size).
 *
 * On success *h_MM receives the new object and E_OK is returned.
 *
 * Fix: the original leaked p_MM and its spinlock when CreateNewBlock()
 * failed, and leaked everything allocated so far when CreateFreeBlock()
 * failed mid-loop; both error paths now roll back fully.
 */
t_Error MM_Init(t_Handle *h_MM, uint64_t base, uint64_t size)
{
    t_MM     *p_MM;
    uint64_t  newBase, newSize;
    int       i, j;

    if (!size)
    {
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Size (should be positive)"));
    }

    /* Initializes a new MM object */
    p_MM = (t_MM *)XX_Malloc(sizeof(t_MM));
    if (!p_MM)
    {
        RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
    }

    p_MM->h_Spinlock = XX_InitSpinlock();
    if (!p_MM->h_Spinlock)
    {
        XX_Free(p_MM);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MM spinlock!"));
    }

    /* initializes a new memory block */
    if ((p_MM->memBlocks = CreateNewBlock(base, size)) == NULL)
    {
        XX_FreeSpinlock(p_MM->h_Spinlock);
        XX_Free(p_MM);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
    }

    /* The busy list is empty */
    p_MM->busyBlocks = 0;

    /* Initializes a new free block for each per-alignment free list */
    for (i = 0; i <= MM_MAX_ALIGNMENT; i++)
    {
        newBase = MAKE_ALIGNED( base, (0x1 << i) );
        newSize = size - (newBase - base);

        if ((p_MM->freeBlocks[i] = CreateFreeBlock(newBase, newSize)) == NULL)
        {
            /* roll back: each already-initialized list holds exactly one
             * block at this point */
            for (j = 0; j < i; j++)
                XX_Free(p_MM->freeBlocks[j]);
            XX_Free(p_MM->memBlocks);
            XX_FreeSpinlock(p_MM->h_Spinlock);
            XX_Free(p_MM);
            RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
        }
    }

    *h_MM = p_MM;

    return (E_OK);
}
/*
 * FM_VSP_Config - allocate a virtual-storage-profile entry, load the
 * driver defaults from fman_vsp_defconfig(), override selected fields
 * from the caller's parameters, and return the entry as an opaque handle.
 * Returns NULL on allocation failure.
 *
 * Ownership: the returned entry (and its embedded driver-params struct)
 * is released by FM_VSP_Init()/FM_VSP_Free().
 */
t_Handle FM_VSP_Config(t_FmVspParams *p_FmVspParams)
{
    t_FmVspEntry *p_FmVspEntry = NULL;
    struct fm_storage_profile_params fm_vsp_params;

    /* allocate and zero the profile entry itself */
    p_FmVspEntry = (t_FmVspEntry *)XX_Malloc(sizeof(t_FmVspEntry));
    if (!p_FmVspEntry)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_StorageProfile allocation failed"));
        return NULL;
    }
    memset(p_FmVspEntry, 0, sizeof(t_FmVspEntry));

    /* allocate the config-time-only driver parameters (freed at Init) */
    p_FmVspEntry->p_FmVspEntryDriverParams =
        (t_FmVspEntryDriverParams *)XX_Malloc(sizeof(t_FmVspEntryDriverParams));
    if (!p_FmVspEntry->p_FmVspEntryDriverParams)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_StorageProfile allocation failed"));
        XX_Free(p_FmVspEntry);
        return NULL;
    }
    memset(p_FmVspEntry->p_FmVspEntryDriverParams, 0, sizeof(t_FmVspEntryDriverParams));

    /* pull the low-level driver defaults, then copy them field-by-field */
    fman_vsp_defconfig(&fm_vsp_params);
    p_FmVspEntry->p_FmVspEntryDriverParams->dmaHeaderCacheAttr =
        fm_vsp_params.header_cache_attr;
    p_FmVspEntry->p_FmVspEntryDriverParams->dmaIntContextCacheAttr =
        fm_vsp_params.int_context_cache_attr;
    p_FmVspEntry->p_FmVspEntryDriverParams->dmaScatterGatherCacheAttr =
        fm_vsp_params.scatter_gather_cache_attr;
    p_FmVspEntry->p_FmVspEntryDriverParams->dmaSwapData =
        fm_vsp_params.dma_swap_data;
    p_FmVspEntry->p_FmVspEntryDriverParams->dmaWriteOptimize =
        fm_vsp_params.dma_write_optimize;
    p_FmVspEntry->p_FmVspEntryDriverParams->noScatherGather =
        fm_vsp_params.no_scather_gather;

    /* buffer-prefix defaults */
    p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.privDataSize =
        DEFAULT_FM_SP_bufferPrefixContent_privDataSize;
    p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.passPrsResult=
        DEFAULT_FM_SP_bufferPrefixContent_passPrsResult;
    p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.passTimeStamp=
        DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp;
    /* NOTE(review): passAllOtherPCDInfo is initialized from the
     * passTimeStamp default - looks like a copy-paste; confirm whether a
     * dedicated passAllOtherPCDInfo default was intended. */
    p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.passAllOtherPCDInfo =
        DEFAULT_FM_SP_bufferPrefixContent_passTimeStamp;
    p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent.dataAlign =
        DEFAULT_FM_SP_bufferPrefixContent_dataAlign;

    /* caller-supplied overrides */
    p_FmVspEntry->p_FmVspEntryDriverParams->liodnOffset =
        p_FmVspParams->liodnOffset;
    memcpy(&p_FmVspEntry->p_FmVspEntryDriverParams->extBufPools,
           &p_FmVspParams->extBufPools,
           sizeof(t_FmExtPools));
    p_FmVspEntry->h_Fm              = p_FmVspParams->h_Fm;
    p_FmVspEntry->portType          = p_FmVspParams->portParams.portType;
    p_FmVspEntry->portId            = p_FmVspParams->portParams.portId;
    p_FmVspEntry->relativeProfileId = p_FmVspParams->relativeProfileId;

    return p_FmVspEntry;
}
/****************************************************************
 * Routine: CutBusy
 *
 * Description:
 *      Cuts a block from base to end from the list of busy blocks.
 *      This is done by updating the list of busy blocks so it does
 *      not include the given block, which is going to be free. If
 *      the given block is part of some other busy block, that busy
 *      block is updated. If a number of busy blocks are included
 *      in the given block, all those blocks are removed from the
 *      busy list and the end blocks are updated.
 *      If the given block divides some block into two parts, a new
 *      busy block is added to the busy list.
 *
 * Arguments:
 *      p_MM  - pointer to the MM object
 *      base  - base address of a given busy block
 *      end   - end address of a given busy block
 *
 * Return value:
 *      E_OK on success, E_NO_MEMORY otherwise.
 *
 ****************************************************************/
static t_Error CutBusy(t_MM *p_MM, uint64_t base, uint64_t end)
{
    t_BusyBlock  *p_CurrB, *p_PrevB, *p_NewB;

    p_CurrB = p_MM->busyBlocks;
    p_PrevB = p_NewB = 0;

    /* walk the (base-ordered) busy list to the first block the cut touches */
    while ( p_CurrB )
    {
        if ( base < p_CurrB->end )
        {
            if ( end > p_CurrB->end )
            {
                /* the cut extends past the current block: drop every
                 * following block fully covered by [base, end) */
                t_BusyBlock *p_NextB;
                while ( p_CurrB->p_Next && end >= p_CurrB->p_Next->end )
                {
                    p_NextB = p_CurrB->p_Next;
                    p_CurrB->p_Next = p_CurrB->p_Next->p_Next;
                    XX_Free(p_NextB);
                }

                /* a partially covered follower only loses its front part */
                p_NextB = p_CurrB->p_Next;
                if ( p_NextB && end > p_NextB->base )
                {
                    p_NextB->base = end;
                }
            }

            if ( base <= p_CurrB->base )
            {
                if ( end < p_CurrB->end && end > p_CurrB->base )
                {
                    /* cut covers the front of this block: trim its base */
                    p_CurrB->base = end;
                }
                else if ( end >= p_CurrB->end )
                {
                    /* cut covers the whole block: unlink and free it */
                    if ( p_PrevB )
                        p_PrevB->p_Next = p_CurrB->p_Next;
                    else
                        p_MM->busyBlocks = p_CurrB->p_Next;
                    XX_Free(p_CurrB);
                }
            }
            else
            {
                if ( end < p_CurrB->end && end > p_CurrB->base )
                {
                    /* cut lies strictly inside this block: split it in
                     * two by creating a new block for the tail */
                    if ((p_NewB = CreateBusyBlock(end,
                                                  p_CurrB->end-end,
                                                  p_CurrB->name)) == NULL)
                        RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);
                    p_NewB->p_Next = p_CurrB->p_Next;
                    p_CurrB->p_Next = p_NewB;
                }
                /* in either case the current block ends where the cut starts */
                p_CurrB->end = base;
            }
            break;
        }
        else
        {
            p_PrevB = p_CurrB;
            p_CurrB = p_CurrB->p_Next;
        }
    }

    return (E_OK);
}
/****************************************************************
 * Routine: CutFree
 *
 * Description:
 *      Cuts a free block from holdBase to holdEnd from the free lists.
 *      That is, it updates all free lists of the MM object so they do
 *      not include a block of memory from holdBase to holdEnd.
 *      For each free list it seeks a free block that holds
 *      either holdBase or holdEnd. If such a block is found it is updated.
 *
 * Arguments:
 *      p_MM      - pointer to the MM object
 *      holdBase  - base address of the allocated block
 *      holdEnd   - end address of the allocated block
 *
 * Return value:
 *      E_OK is returned on success,
 *      otherwise returns an error code.
 *
 ****************************************************************/
static t_Error CutFree(t_MM *p_MM, uint64_t holdBase, uint64_t holdEnd)
{
    t_FreeBlock *p_PrevB, *p_CurrB, *p_NewB;
    uint64_t    alignBase, base, end;
    uint64_t    alignment;
    int         i;

    /* each free list tracks only blocks usable at alignment 2^i, so the
     * cut must be applied to every list independently */
    for (i=0; i <= MM_MAX_ALIGNMENT; i++)
    {
        p_PrevB = p_NewB = 0;
        p_CurrB = p_MM->freeBlocks[i];

        alignment = (uint64_t)(0x1 << i);
        /* first aligned address at-or-after the cut's end: the earliest
         * point at which this list could still use the remainder */
        alignBase = MAKE_ALIGNED(holdEnd, alignment);

        while ( p_CurrB )
        {
            base = p_CurrB->base;
            end = p_CurrB->end;

            if ( (holdBase <= base) && (holdEnd <= end) && (holdEnd > base) )
            {
                /* cut covers the block's front */
                if ( alignBase >= end ||
                     (alignBase < end && ((end-alignBase) < alignment)) )
                {
                    /* remainder too small (or none) for this alignment:
                     * remove the block from this list */
                    if (p_PrevB)
                        p_PrevB->p_Next = p_CurrB->p_Next;
                    else
                        p_MM->freeBlocks[i] = p_CurrB->p_Next;
                    XX_Free(p_CurrB);
                }
                else
                {
                    /* keep only the aligned tail */
                    p_CurrB->base = alignBase;
                }
                break;
            }
            else if ( (holdBase > base) && (holdEnd <= end) )
            {
                /* cut lies inside the block */
                if ( (holdBase-base) >= alignment )
                {
                    /* front remainder is usable at this alignment */
                    if ( (alignBase < end) && ((end-alignBase) >= alignment) )
                    {
                        /* tail remainder usable too: split into two blocks */
                        if ((p_NewB = CreateFreeBlock(alignBase, end-alignBase)) == NULL)
                            RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);

                        p_NewB->p_Next = p_CurrB->p_Next;
                        p_CurrB->p_Next = p_NewB;
                    }
                    p_CurrB->end = holdBase;
                }
                else if ( (alignBase < end) && ((end-alignBase) >= alignment) )
                {
                    /* only the tail remainder is usable */
                    p_CurrB->base = alignBase;
                }
                else
                {
                    /* neither remainder is usable at this alignment */
                    if (p_PrevB)
                        p_PrevB->p_Next = p_CurrB->p_Next;
                    else
                        p_MM->freeBlocks[i] = p_CurrB->p_Next;
                    XX_Free(p_CurrB);
                }
                break;
            }
            else
            {
                p_PrevB = p_CurrB;
                p_CurrB = p_CurrB->p_Next;
            }
        }
    }

    return (E_OK);
}
/****************************************************************
 * Routine: AddFree
 *
 * Description:
 *      Adds a new free block to the free lists. It updates each
 *      free list to include the new free block.
 *      Note that all free blocks in each free list are ordered
 *      by their base address.
 *
 * Arguments:
 *      p_MM  - pointer to the MM object
 *      base  - base address of a given free block
 *      end   - end address of a given free block
 *
 * Return value:
 *      E_OK on success, E_NO_MEMORY otherwise.
 *
 ****************************************************************/
static t_Error AddFree(t_MM *p_MM, uint64_t base, uint64_t end)
{
    t_FreeBlock *p_PrevB, *p_CurrB, *p_NewB;
    uint64_t    alignment;
    uint64_t    alignBase;
    int         i;

    /* Updates free lists to include a just released block */
    for (i=0; i <= MM_MAX_ALIGNMENT; i++)
    {
        p_PrevB = p_NewB = 0;
        p_CurrB = p_MM->freeBlocks[i];

        alignment = (uint64_t)(0x1 << i);
        /* first address in [base, end) usable at this alignment */
        alignBase = MAKE_ALIGNED(base, alignment);

        /* Goes to the next free list if there is no block to free */
        if (alignBase >= end)
            continue;

        /* Looks for a free block that should be updated */
        while ( p_CurrB )
        {
            if ( alignBase <= p_CurrB->end )
            {
                if ( end > p_CurrB->end )
                {
                    /* released range extends past the current block:
                     * swallow every following block it fully covers */
                    t_FreeBlock *p_NextB;
                    while ( p_CurrB->p_Next && end > p_CurrB->p_Next->end )
                    {
                        p_NextB = p_CurrB->p_Next;
                        p_CurrB->p_Next = p_CurrB->p_Next->p_Next;
                        XX_Free(p_NextB);
                    }

                    p_NextB = p_CurrB->p_Next;
                    if ( !p_NextB || (p_NextB && end < p_NextB->base) )
                    {
                        /* no adjacency with the follower: just extend */
                        p_CurrB->end = end;
                    }
                    else
                    {
                        /* released range bridges into the follower: merge */
                        p_CurrB->end = p_NextB->end;
                        p_CurrB->p_Next = p_NextB->p_Next;
                        XX_Free(p_NextB);
                    }
                }
                else if ( (end < p_CurrB->base) && ((end-alignBase) >= alignment) )
                {
                    /* released range lies wholly before the current block
                     * and is big enough: insert it as a new block */
                    if ((p_NewB = CreateFreeBlock(alignBase, end-alignBase)) == NULL)
                        RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);

                    p_NewB->p_Next = p_CurrB;
                    if (p_PrevB)
                        p_PrevB->p_Next = p_NewB;
                    else
                        p_MM->freeBlocks[i] = p_NewB;
                    break;
                }

                if ((alignBase < p_CurrB->base) && (end >= p_CurrB->base))
                {
                    /* released range extends the block's front */
                    p_CurrB->base = alignBase;
                }

                /* if size of the free block is less than alignment
                 * delete that free block from the free list. */
                if ( (p_CurrB->end - p_CurrB->base) < alignment)
                {
                    if ( p_PrevB )
                        p_PrevB->p_Next = p_CurrB->p_Next;
                    else
                        p_MM->freeBlocks[i] = p_CurrB->p_Next;
                    XX_Free(p_CurrB);
                }
                break;
            }
            else
            {
                p_PrevB = p_CurrB;
                p_CurrB = p_CurrB->p_Next;
            }
        }

        /* If no free block was found to update, insert a new free block
         * at the end of the free list. */
        if ( !p_CurrB && ((((uint64_t)(end-base)) & ((uint64_t)(alignment-1))) == 0) )
        {
            if ((p_NewB = CreateFreeBlock(alignBase, end-base)) == NULL)
                RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG);

            if (p_PrevB)
                p_PrevB->p_Next = p_NewB;
            else
                p_MM->freeBlocks[i] = p_NewB;
        }

        /* Update boundaries of the new free block (alignment-1 list is
         * authoritative, so widen [base, end) to the merged extent for
         * the remaining iterations) */
        if ((alignment == 1) && !p_NewB)
        {
            if ( p_CurrB && base > p_CurrB->base )
                base = p_CurrB->base;
            if ( p_CurrB && end < p_CurrB->end )
                end = p_CurrB->end;
        }
    }

    return (E_OK);
}
/*
 * MEM_InitByAddress - create a memory-segment object over caller-provided
 * (externally owned) memory, carving it into `num` fixed-size blocks of
 * `dataSize` bytes plus prefix/postfix padding, each data area aligned to
 * `alignment`.
 *
 * Fixes vs. original:
 *  - The descriptor was handed to MEM_Free() on error paths while most of
 *    its fields (h_Spinlock, p_Bases, allocOwner, ...) were still
 *    uninitialized - undefined behavior.  The struct is now zeroed (and
 *    allocOwner set) immediately after allocation.
 *  - strncpy() did not guarantee NUL-termination of p_Mem->name; an
 *    explicit terminator is written now.
 */
t_Error MEM_InitByAddress(char       name[],
                          t_Handle   *p_Handle,
                          uint32_t   num,
                          uint16_t   dataSize,
                          uint16_t   prefixSize,
                          uint16_t   postfixSize,
                          uint16_t   alignment,
                          uint8_t    *p_Memory)
{
    t_MemorySegment *p_Mem;
    uint32_t        i, blockSize;
    uint16_t        alignPad, endPad;
    uint8_t         *p_Blocks;

    /* prepare in case of error */
    *p_Handle = NULL;

    if (!p_Memory)
    {
        RETURN_ERROR(MAJOR, E_NULL_POINTER, ("Memory blocks"));
    }

    p_Blocks = p_Memory;

    /* make sure that the alignment is at least 4 and power of 2 */
    if (alignment < 4)
    {
        alignment = 4;
    }
    else if (!POWER_OF_2(alignment))
    {
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Alignment (should be power of 2)"));
    }

    /* first allocate the segment descriptor */
    p_Mem = (t_MemorySegment *)XX_Malloc(sizeof(t_MemorySegment));
    if (!p_Mem)
    {
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment structure"));
    }

    /* zero the descriptor right away so the MEM_Free() calls on the error
     * paths below never read uninitialized fields */
    memset(p_Mem, 0, sizeof(t_MemorySegment));
    p_Mem->allocOwner = e_MEM_ALLOC_OWNER_EXTERNAL;

    /* allocate the blocks stack */
    p_Mem->p_BlocksStack = (uint8_t **)XX_Malloc(num * sizeof(uint8_t*));
    if (!p_Mem->p_BlocksStack)
    {
        XX_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment block pointers stack"));
    }

    /* allocate the blocks bases array (single base - consecutive memory) */
    p_Mem->p_Bases = (uint8_t **)XX_Malloc(sizeof(uint8_t*));
    if (!p_Mem->p_Bases)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment base pointers array"));
    }
    memset(p_Mem->p_Bases, 0, sizeof(uint8_t*));

    /* store info about this segment */
    p_Mem->num = num;
    p_Mem->current = 0;
    p_Mem->dataSize = dataSize;
    p_Mem->p_Bases[0] = p_Blocks;
    p_Mem->getFailures = 0;
    p_Mem->allocOwner = e_MEM_ALLOC_OWNER_EXTERNAL;
    p_Mem->consecutiveMem = TRUE;
    p_Mem->prefixSize = prefixSize;
    p_Mem->postfixSize = postfixSize;
    p_Mem->alignment = alignment;

    /* store name; strncpy alone does not guarantee NUL-termination */
    strncpy(p_Mem->name, name, MEM_MAX_NAME_LENGTH-1);
    p_Mem->name[MEM_MAX_NAME_LENGTH-1] = '\0';

    p_Mem->h_Spinlock = XX_InitSpinlock();
    if (!p_Mem->h_Spinlock)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't create spinlock!"));
    }

    alignPad = (uint16_t)PAD_ALIGNMENT(4, prefixSize);
    /* Make sure the entire size is a multiple of alignment */
    endPad = (uint16_t)PAD_ALIGNMENT(alignment,
                                     (alignPad + prefixSize + dataSize + postfixSize));
    /* Place the data of block[0] at an aligned address; since blockSize is
       a multiple of the alignment, all following blocks' data will be
       aligned as well */
    ALIGN_BLOCK(p_Blocks, prefixSize, alignment);

    blockSize = (uint32_t)(alignPad + prefixSize + dataSize + postfixSize + endPad);

    /* initialize the blocks stack */
    for (i=0; i < num; i++)
    {
        p_Mem->p_BlocksStack[i] = p_Blocks;
        p_Blocks += blockSize;
    }

    /* return handle to caller */
    *p_Handle = (t_Handle)p_Mem;

#ifdef DEBUG_MEM_LEAKS
    {
        t_Error errCode = InitMemDebugDatabase(p_Mem);

        if (errCode != E_OK)
            RETURN_ERROR(MAJOR, errCode, NO_MSG);

        p_Mem->blockOffset = (uint32_t)(p_Mem->p_BlocksStack[0] - p_Mem->p_Bases[0]);
        p_Mem->blockSize = blockSize;
    }
#endif /* DEBUG_MEM_LEAKS */

    return E_OK;
}
/*
 * FM_VSP_Init - finalize a storage profile configured by FM_VSP_Config():
 * validate parameters, build the buffer layout, sort the external buffer
 * pools, translate the driver params into the low-level fman structure,
 * program the VSP registers, and release the config-time parameters.
 */
t_Error FM_VSP_Init(t_Handle h_FmVsp)
{
    t_FmVspEntry                *p_FmVspEntry = (t_FmVspEntry *)h_FmVsp;
    struct fm_storage_profile_params fm_vsp_params;
    uint8_t                     orderedArray[FM_PORT_MAX_NUM_OF_EXT_POOLS];
    uint16_t                    sizesArray[BM_MAX_NUM_OF_POOLS];
    t_Error                     err;
    uint16_t                    absoluteProfileId = 0;
    int                         i = 0;

    SANITY_CHECK_RETURN_ERROR(p_FmVspEntry, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_FmVspEntry->p_FmVspEntryDriverParams,E_INVALID_HANDLE);

    CHECK_INIT_PARAMETERS(p_FmVspEntry, CheckParams);

    memset(&orderedArray, 0, sizeof(uint8_t) * FM_PORT_MAX_NUM_OF_EXT_POOLS);
    memset(&sizesArray, 0, sizeof(uint16_t) * BM_MAX_NUM_OF_POOLS);

    /* derive internal-context / margin / offset layout from the requested
       buffer-prefix content */
    err = FmSpBuildBufferStructure(&p_FmVspEntry->intContext,
                                   &p_FmVspEntry->p_FmVspEntryDriverParams->bufferPrefixContent,
                                   &p_FmVspEntry->bufMargins,
                                   &p_FmVspEntry->bufferOffsets,
                                   &p_FmVspEntry->internalBufferOffset);
    if (err != E_OK)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    err = CheckParamsGeneratedInternally(p_FmVspEntry);
    if (err != E_OK)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    /* NOTE(review): RETURN_ERROR paths from here on leave
       p_FmVspEntryDriverParams allocated; it is only freed on success
       below - verify the caller's cleanup path releases it. */
    p_FmVspEntry->p_FmSpRegsBase =
        (struct fm_pcd_storage_profile_regs *)FmGetVSPBaseAddr(p_FmVspEntry->h_Fm);
    if (!p_FmVspEntry->p_FmSpRegsBase)
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("impossible to initialize SpRegsBase"));

    /* order external buffer pools in ascending order of buffer pools sizes */
    FmSpSetBufPoolsInAscOrderOfBufSizes(&(p_FmVspEntry->p_FmVspEntryDriverParams)->extBufPools,
                                        orderedArray,
                                        sizesArray);

    p_FmVspEntry->extBufPools.numOfPoolsUsed =
        p_FmVspEntry->p_FmVspEntryDriverParams->extBufPools.numOfPoolsUsed;
    for (i = 0; i < p_FmVspEntry->extBufPools.numOfPoolsUsed; i++)
    {
        p_FmVspEntry->extBufPools.extBufPool[i].id = orderedArray[i];
        p_FmVspEntry->extBufPools.extBufPool[i].size = sizesArray[orderedArray[i]];
    }

    /* on user responsibility to fill it according requirement */
    memset(&fm_vsp_params, 0, sizeof(struct fm_storage_profile_params));

    /* translate driver params into the low-level fman structure */
    fm_vsp_params.dma_swap_data =
        p_FmVspEntry->p_FmVspEntryDriverParams->dmaSwapData;
    fm_vsp_params.int_context_cache_attr =
        p_FmVspEntry->p_FmVspEntryDriverParams->dmaIntContextCacheAttr;
    fm_vsp_params.header_cache_attr =
        p_FmVspEntry->p_FmVspEntryDriverParams->dmaHeaderCacheAttr;
    fm_vsp_params.scatter_gather_cache_attr =
        p_FmVspEntry->p_FmVspEntryDriverParams->dmaScatterGatherCacheAttr;
    fm_vsp_params.dma_write_optimize =
        p_FmVspEntry->p_FmVspEntryDriverParams->dmaWriteOptimize;
    fm_vsp_params.liodn_offset =
        p_FmVspEntry->p_FmVspEntryDriverParams->liodnOffset;
    fm_vsp_params.no_scather_gather =
        p_FmVspEntry->p_FmVspEntryDriverParams->noScatherGather;

    if (p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion)
    {
        fm_vsp_params.buf_pool_depletion.buf_pool_depletion_enabled = TRUE;
        fm_vsp_params.buf_pool_depletion.pools_grp_mode_enable =
            p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->poolsGrpModeEnable;
        fm_vsp_params.buf_pool_depletion.num_pools =
            p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->numOfPools;
        fm_vsp_params.buf_pool_depletion.pools_to_consider =
            p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->poolsToConsider;
        fm_vsp_params.buf_pool_depletion.single_pool_mode_enable =
            p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->singlePoolModeEnable;
        fm_vsp_params.buf_pool_depletion.pools_to_consider_for_single_mode =
            p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->poolsToConsiderForSingleMode;
        fm_vsp_params.buf_pool_depletion.has_pfc_priorities = TRUE;
        fm_vsp_params.buf_pool_depletion.pfc_priorities_en =
            p_FmVspEntry->p_FmVspEntryDriverParams->p_BufPoolDepletion->pfcPrioritiesEn;
    }
    else
        fm_vsp_params.buf_pool_depletion.buf_pool_depletion_enabled = FALSE;

    if (p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools)
    {
        fm_vsp_params.backup_pools.num_backup_pools =
            p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools->numOfBackupPools;
        fm_vsp_params.backup_pools.pool_ids =
            p_FmVspEntry->p_FmVspEntryDriverParams->p_BackupBmPools->poolIds;
    }
    else
        fm_vsp_params.backup_pools.num_backup_pools = 0;

    fm_vsp_params.fm_ext_pools.num_pools_used = p_FmVspEntry->extBufPools.numOfPoolsUsed;
    fm_vsp_params.fm_ext_pools.ext_buf_pool =
        (struct fman_ext_pool_params*)&p_FmVspEntry->extBufPools.extBufPool;
    fm_vsp_params.buf_margins = (struct fman_sp_buf_margins*)&p_FmVspEntry->bufMargins;
    fm_vsp_params.int_context = (struct fman_sp_int_context_data_copy*)&p_FmVspEntry->intContext;

    /* no check on err - it was checked earlier */
    FmVSPGetAbsoluteProfileId(p_FmVspEntry->h_Fm,
                              p_FmVspEntry->portType,
                              p_FmVspEntry->portId,
                              p_FmVspEntry->relativeProfileId,
                              &absoluteProfileId);

    ASSERT_COND(p_FmVspEntry->p_FmSpRegsBase);
    ASSERT_COND(fm_vsp_params.int_context);
    ASSERT_COND(fm_vsp_params.buf_margins);
    /* NOTE(review): `<=` allows absoluteProfileId == FM_VSP_MAX_NUM_OF_ENTRIES;
       if the id is used as an index into an array of that size this is an
       off-by-one - confirm the intended bound. */
    ASSERT_COND((absoluteProfileId <= FM_VSP_MAX_NUM_OF_ENTRIES));

    /* Set all registers related to VSP */
    fman_vsp_init(p_FmVspEntry->p_FmSpRegsBase,
                  absoluteProfileId,
                  &fm_vsp_params,FM_PORT_MAX_NUM_OF_EXT_POOLS,
                  BM_MAX_NUM_OF_POOLS,
                  FM_MAX_NUM_OF_PFC_PRIORITIES);

    p_FmVspEntry->absoluteSpId = absoluteProfileId;

    /* config-time params are no longer needed once the registers are set */
    if (p_FmVspEntry->p_FmVspEntryDriverParams)
        XX_Free(p_FmVspEntry->p_FmVspEntryDriverParams);
    p_FmVspEntry->p_FmVspEntryDriverParams = NULL;

    return E_OK;
}