/*
 * Copies count bytes from src to dest;
 * returns a pointer to the dest memory zone.
 */
void *
ixOsalMemCopy (void *dest, void *src, UINT32 count)
{
    IX_OSAL_ASSERT (dest != NULL);
    IX_OSAL_ASSERT (src != NULL);
    return (memcpy (dest, src, count));
}
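A minimal usage sketch (illustrative only, not part of the original sources; the helper and buffer names are hypothetical), showing a call to ixOsalMemCopy with the signature above:

static void memCopyUsageExample (void)
{
    UINT8 srcBuf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    UINT8 dstBuf[8];

    /* copy 8 bytes from srcBuf into dstBuf; the return value is dstBuf */
    (void) ixOsalMemCopy (dstBuf, srcBuf, sizeof (dstBuf));
}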
Example #2
PRIVATE IX_OSAL_MBUF *
ixOsalBuffPoolMbufInit (UINT32 mbufSizeAligned,
                      UINT32 dataSizeAligned,
                      IX_OSAL_MBUF_POOL *poolPtr)
{
    UINT8 *dataPtr;
    IX_OSAL_MBUF *realMbufPtr;
    /* Allocate cache-aligned memory for mbuf header */
    realMbufPtr = (IX_OSAL_MBUF *) IX_OSAL_CACHE_DMA_MALLOC (mbufSizeAligned);
    IX_OSAL_ASSERT (realMbufPtr != NULL);
    memset (realMbufPtr, 0, mbufSizeAligned);

    /* Allocate cache-aligned memory for mbuf data */
    dataPtr = (UINT8 *) IX_OSAL_CACHE_DMA_MALLOC (dataSizeAligned);
    IX_OSAL_ASSERT (dataPtr != NULL);
    memset (dataPtr, 0, dataSizeAligned);

    /* Fill in mbuf header fields */
    IX_OSAL_MBUF_MDATA (realMbufPtr) = dataPtr;
    IX_OSAL_MBUF_ALLOCATED_BUFF_DATA (realMbufPtr) = (UINT32)dataPtr;

    IX_OSAL_MBUF_MLEN (realMbufPtr) = dataSizeAligned;
    IX_OSAL_MBUF_ALLOCATED_BUFF_LEN (realMbufPtr) = dataSizeAligned;

    IX_OSAL_MBUF_NET_POOL (realMbufPtr) = (IX_OSAL_MBUF_POOL *) poolPtr;

    IX_OSAL_MBUF_SYS_SIGNATURE_INIT(realMbufPtr);

    /* update some statistical information */
    poolPtr->mbufMemSize += mbufSizeAligned;
    poolPtr->dataMemSize += dataSizeAligned;

    return realMbufPtr;
}
Example #3
/*
 * Function definition: ixNpeDlNpeMgrInit
 */
void
ixNpeDlNpeMgrInit (void)
{
    /* Only map the memory once */
    if (!ixNpeDlMemInitialised)
    {
	UINT32 virtAddr;

	/* map the register memory for NPE-A */
	virtAddr = (UINT32) IX_OSAL_MEM_MAP (IX_NPEDL_NPEBASEADDRESS_NPEA,
					    IX_OSAL_IXP400_NPEA_MAP_SIZE); 
	IX_OSAL_ASSERT(virtAddr);
	ixNpeDlNpeInfo[IX_NPEDL_NPEID_NPEA].baseAddress = virtAddr;

	/* map the register memory for NPE-B */
	virtAddr = (UINT32) IX_OSAL_MEM_MAP (IX_NPEDL_NPEBASEADDRESS_NPEB,
					    IX_OSAL_IXP400_NPEB_MAP_SIZE); 
	IX_OSAL_ASSERT(virtAddr);
	ixNpeDlNpeInfo[IX_NPEDL_NPEID_NPEB].baseAddress = virtAddr;

	/* map the register memory for NPE-C */
	virtAddr = (UINT32) IX_OSAL_MEM_MAP (IX_NPEDL_NPEBASEADDRESS_NPEC,
					    IX_OSAL_IXP400_NPEC_MAP_SIZE); 
	IX_OSAL_ASSERT(virtAddr);
	ixNpeDlNpeInfo[IX_NPEDL_NPEID_NPEC].baseAddress = virtAddr;

	ixNpeDlMemInitialised = TRUE;
    }
}
Example #4
PRIVATE void
ixOamCellRxCallback (IxAtmLogicalPort port,
		     IxAtmdAccUserId userId,
		     IxAtmdAccPduStatus status,
		     IxAtmdAccClpStatus clp,
		     IX_OSAL_MBUF *rxMbuf)
{
    IX_OSAL_ASSERT(userId == oamRxUserId);
    
    /* Status should always be OAM Valid */
    IX_OSAL_ASSERT(status == IX_ATMDACC_OAM_VALID);

    /* queue the mbuf to an rx task */
    ixOamQueuePut( &ixOamSwQueue[port], rxMbuf );
    
}    
Example #5
PRIVATE IxOsalTimerRec *
evaluateTimerPriority (IxOsalTimerRec * first, IxOsalTimerRec * second)
{
    IX_OSAL_ASSERT (first != NULL || second != NULL);

    if (first == NULL)
    {
        return second;
    }

    if (second == NULL)
    {
        return first;
    }

    /*
     *  For now we just compare the values of the priority with lower
     *  values getting higher priority.
     *
     *  If someone needs to change priorities of different purposes without
     *  modifying the order of the enums, then more code will be required
     *  here.
     */

    if (first->priority <= second->priority)
    {
        return first;
    }

    return second;
}
Example #6
void
ixAtmmVcQueryCallbackRegister ( IxAtmmVcQueryCallback callback)
{
    /* NULL callback not supported */
    IX_OSAL_ASSERT(callback != NULL);

    vcQueryCallback = callback;
}
Example #7
PRIVATE void
ixOamRxFreeLowReplenishCallback (IxAtmdAccUserId userId)
{
    IX_OSAL_MBUF *mBuf;
    UINT32 numFreeEntries;
    UINT32 cnt;
    IX_STATUS retval;
   


    IX_OSAL_ASSERT(userId == oamRxUserId);
    
    retval = ixAtmdAccRxVcFreeEntriesQuery (oamRxConnId, &numFreeEntries);
    if (retval != IX_SUCCESS)
    {
	IX_OAM_CODELET_IRQ_SAFE_LOG_ERROR("Failed to query depth of Oam Rx Free Q");
	return;
    }

    /* Replenish Rx buffers  */
    for (cnt=0; cnt<numFreeEntries; cnt++)
    {
     
	ixAtmUtilsMbufGet(IX_ATM_OAM_CELL_SIZE_NO_HEC, &mBuf);

	if (mBuf == NULL)
	{
	    IX_OAM_CODELET_IRQ_SAFE_LOG_ERROR("Failed to get rx free buffer");
	    return;
	}
	
	/* 
	 * Set the number of bytes of data in this MBUF to 1 OAM cell
	 */
      
	IX_OSAL_MBUF_MLEN(mBuf) = IX_ATM_OAM_CELL_SIZE_NO_HEC;
	
        /* invalidate cache */
        ixOamRxInvalidate( mBuf );
        
	/* Send free buffers to NPE */
	retval = ixAtmdAccRxVcFreeReplenish (oamRxConnId, mBuf);
      
	if (retval != IX_SUCCESS)
	{
	    /* Free the allocated buffer */
	    ixAtmUtilsMbufFree(mBuf);
	    IX_OAM_CODELET_IRQ_SAFE_LOG_ERROR("Failed to pass Oam Rx free buffers to Atmd");
	    return;
	}	
    }
    replenishCallbackCount++;
}
Example #8
void
ixOsalMemFree (void *ptr)
{
    IX_OSAL_ASSERT (ptr != NULL);
    
    if(IS_VMALLOC_ADDR(ptr))
     {
        vfree(ptr);
        return;
    }
    
    kfree (ptr);
}
Example #9
PRIVATE void
ixAtmmTxLowHandle (IxAtmLogicalPort port, unsigned maxTxCells)
{
    IxAtmScheduleTable *retTable;
    IX_STATUS retval;
    UINT32 lockKey;

    /*
     * If we can get the PORT fast mutex it means the port was
     * UP, so we can initiate transmission.
     */
    if (IX_ATMM_PORT_MUTEX_TRY_LOCK(port) == IX_SUCCESS)
    {
        lockKey = ixOsalIrqLock();

        /*
         * Build a new schedule table
         * N.B. This is data path so IxAtmSch will validate parameters
         *      rather than do this twice.
         */
        retval = ixAtmSchTableUpdate (port,
    				  maxTxCells,
    				  &retTable);
    
        if (retval == IX_SUCCESS)
        {
    	    /*
    	     * Tell Atmd to send the cells in the schedule table.
    	     */
    	    retval = ixAtmdAccPortTxProcess (port, retTable);	
        }
        else
        {
    	    /*
    	     * Only other valid retval is QUEUE_EMPTY
    	     */
    	    IX_OSAL_ASSERT(retval == IX_ATMSCH_RET_QUEUE_EMPTY);

    	    /*
    	     * The schedule table is empty so the port has gone
    	     * IDLE. Free the BUSY mutex.
    	     */    	
	    IX_ATMM_TX_MUTEX_UNLOCK(port);
        }

        ixOsalIrqUnlock(lockKey); 
    	IX_ATMM_PORT_MUTEX_UNLOCK(port);
    }
}
Example #10
PUBLIC UINT32
ixOsalOsIxp400TimestampGet (void)
{
    IX_STATUS ixStatus;
    /*
     * ensure the register is I/O mapped 
     */
    if (IxOsalOemInitialized == FALSE)
    {
        /*
         * Assert if not success 
         */
        ixStatus = ixOsalOemInit ();
        IX_OSAL_ASSERT (ixStatus == IX_SUCCESS);
    }

    return IX_OSAL_READ_LONG (ixOsalOstsRegAddr);
}
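For illustration only (the helper name is hypothetical, not from the IXP400 sources): a sketch of measuring an elapsed interval in timer ticks by reading the timestamp twice; unsigned 32-bit subtraction still yields the correct delta if the counter wraps once between the reads.

static UINT32 timestampDeltaExample (void)
{
    UINT32 start = ixOsalOsIxp400TimestampGet ();
    UINT32 end;

    /* ... work to be measured goes here ... */

    end = ixOsalOsIxp400TimestampGet ();
    return (end - start);
}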
Example #11
/* ----------------------------------------------------------
Customized version of an assert handler
*/
void
ixAtmdAccAssert (BOOL condition,
                 char *fileName,
                 unsigned int line,
                 char *conditionString,
                 char *infoString)
{
    char *ptFname = fileName;

    /* test the condition status */
    if (!condition)
    {

        /* skip directory name from file name */
        while (*ptFname)
        {
            if (*ptFname == '/' || *ptFname == '\\')
            {
                fileName = ptFname + 1;
            }
            ptFname++;
        } /* end of while(ptFname) */

        /* remember the last assert strings */
        strcpy(fileNameCopy, fileName);
        strcpy(conditionStringCopy, conditionString);
        strcpy(infoStringCopy, infoString);
        lineCopy = line;

        /* display/log a message */
        ixOsalLog(IX_OSAL_LOG_LVL_ERROR, 
	    IX_OSAL_LOG_DEV_STDERR, 
            "\nATMDASSERT (%s:%u) : !(%s) : %s\n",
            (int)fileName,
            (int)line,
            (int)conditionString,
            (int)infoString,
            0,
            0);

        IX_OSAL_ASSERT (0);
    } /* end of if(condition) */
}
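As an illustration of how call-site information could reach this handler (a sketch under assumptions; the macro name below is hypothetical and is not the real IXP400 definition):

/* Hypothetical wrapper macro: forwards __FILE__, __LINE__ and the
 * stringized condition to the customized assert handler above.
 */
#define IX_ATMDACC_ASSERT_EXAMPLE(cond, info) \
    ixAtmdAccAssert ((cond), __FILE__, __LINE__, #cond, (info))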
Example #12
PRIVATE void 
ixOamCellRxTask( void )
{
    IxOamITU610Cell *rxCell;
    UINT32 pti;
    UINT32 vci;
    UINT32 vpi;
    UINT32 oamRxHdr=0x0;

    IX_OSAL_MBUF *rxMbuf;

    int port;

    while(1)
    {
        for( port=0; port<ixOamCodeletNumPorts; port++ )
        {
            if(!ixOamQueueEmptyQuery(&ixOamSwQueue[port]))
            {
                rxMbuf = ixOamMBufQueueGet(&ixOamSwQueue[port]);
                IX_OSAL_ASSERT( rxMbuf != NULL );
    
                /* invalidate cache */
                ixOamRxInvalidate( rxMbuf );
                
                rxCell = (IxOamITU610Cell *)(IX_OSAL_MBUF_MDATA(rxMbuf));
                
                /* Read the VCI & Payload Type */
		IX_OAM_HEADER_GET(rxCell, oamRxHdr);
		vci = IX_OAM_VCI_GET(oamRxHdr);
		vpi = IX_OAM_VPI_GET(oamRxHdr);
		pti = IX_OAM_PTI_GET(oamRxHdr);
                
                /* Ensure access layer delivered a correct OAM cell */
                IX_OSAL_ASSERT ((vci == IX_OAM_ITU610_F4_SEG_VCI) || (vci == IX_OAM_ITU610_F4_ETE_VCI) || 
                           (pti == IX_OAM_ITU610_F5_ETE_PTI) || (pti == IX_OAM_ITU610_F5_SEG_PTI));                
                
                /* Is it a loopback cell? */
                if ( (IX_OAM_TYPE_AND_FUNC_GET(rxCell)) == IX_OAM_ITU610_TYPE_FAULT_MAN_LB )
                {
                    /* Is Parent Loopback Indication field in cell set? */
                    if ( (IX_OAM_LOOPBACK_INDICATION_GET(rxCell)) == IX_OAM_ITU610_LB_INDICATION_PARENT )
                    {
                        ixOamParentLbCellRx (port, rxMbuf);
                    }
                    /* Otherwise child cell, i.e. response to loopback cell sent earlier */
                    else 
                    {
                        ixOamChildLbCellRx(port, rxMbuf);
                    }
                }
                else
                {
                    unsupportedOamRxCount[port]++;
                    /* free the buffer */
                    ixAtmUtilsMbufFree (rxMbuf);
                }
            }
        }
    
	/* share the CPU */
        ixOsalSleep(IX_OAM_RX_QUEUE_POLL_INTERVAL);
    }
}
Example #13
PRIVATE void 
ixOamQueuePut (IxOamCodeletSwMbufQ *s, IX_OSAL_MBUF *buf)
{
    IX_OSAL_ASSERT (s->head - s->tail != s->size);
    s->array[s->head++ & s->mask] = buf;
}
Example #14
PRIVATE IX_OSAL_MBUF * 
ixOamMBufQueueGet (IxOamCodeletSwMbufQ *s)
{
    IX_OSAL_ASSERT (s->head != s->tail);
    return (s->array[s->tail++ & s->mask]);
}
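The put/get pair above implements a software ring buffer: head and tail are free-running counters, so head - tail is the current occupancy, and masking with a power-of-two mask wraps the array index. A plausible shape for IxOamCodeletSwMbufQ, inferred from that code only (the real definition may differ), is:

/* Illustrative sketch; field names follow the code above, sizes are assumed.
 * The array length must be a power of two so that (index & mask) wraps
 * correctly, with mask == size - 1.
 */
typedef struct
{
    IX_OSAL_MBUF *array[16]; /* slot storage (power-of-two length assumed) */
    UINT32 size;             /* number of slots, e.g. 16 */
    UINT32 mask;             /* size - 1, used to wrap head/tail indices */
    UINT32 head;             /* total number of puts (free-running) */
    UINT32 tail;             /* total number of gets (free-running) */
} IxOamCodeletSwMbufQSketch;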
Example #15
void
ixQMgrDispatcherLoopRunA0 (IxQMgrDispatchGroup group)
{
    UINT32 intRegVal;                /* Interrupt reg val */
    UINT32 intRegValAfterWrite;      /* Interrupt reg val after writing back */
    UINT32 intRegCheckMask;          /* Mask for checking interrupt bits */
    UINT32 qStatusWordsB4Write[MAX_Q_STATUS_WORDS];  /* Status b4 interrupt write */
    UINT32 qStatusWordsAfterWrite[MAX_Q_STATUS_WORDS]; /* Status after interrupt write */
    IxQMgrQInfo *currDispatchQInfo;
    BOOL statusChangeFlag;

    int priorityTableIndex;/* Priority table index */
    int qIndex;            /* Current queue being processed */
    int endIndex;          /* Index of last queue to process */

#ifndef NDEBUG
    IX_OSAL_ASSERT((group == IX_QMGR_QUEUPP_GROUP) || 
	      (group == IX_QMGR_QUELOW_GROUP));
#endif

    /* Read Q status registers before interrupt status read/write */
    ixQMgrAqmIfQStatusRegsRead (group, qStatusWordsB4Write);

    /* Read the interrupt register */
    ixQMgrAqmIfQInterruptRegRead (group, &intRegVal);

    /* No bit set: nothing to process (the remainder of the algorithm
     * relies on the interrupt register value containing at least one
     * bit set).
     */
    if (intRegVal == 0) 
    {
#ifndef NDEBUG
	/* Update statistics */
	dispatcherStats.loopRunCnt++;
#endif

	/* Rebuild the priority table if needed */
	if (rebuildTable)
	{
	    ixQMgrDispatcherReBuildPriorityTable ();
	}

	return;
    }
   
    /* Write it back to clear the interrupt */
    ixQMgrAqmIfQInterruptRegWrite (group, intRegVal);

    /* Read Q status registers after interrupt status read/write */
    ixQMgrAqmIfQStatusRegsRead (group, qStatusWordsAfterWrite);
 
    /* get the first queue Id from the interrupt register value */
    qIndex = (BITS_PER_WORD - 1) - ixQMgrCountLeadingZeros(intRegVal);

    /* check if any change occurred during hw register modifications */
    if (IX_QMGR_QUELOW_GROUP == group)
    {
	statusChangeFlag = 
	    (qStatusWordsB4Write[0] != qStatusWordsAfterWrite[0]) ||
	    (qStatusWordsB4Write[1] != qStatusWordsAfterWrite[1]) ||
	    (qStatusWordsB4Write[2] != qStatusWordsAfterWrite[2]) ||
	    (qStatusWordsB4Write[3] != qStatusWordsAfterWrite[3]);
    }
    else
    {
	statusChangeFlag = 
	    (qStatusWordsB4Write[0] != qStatusWordsAfterWrite[0]);
	/* Set the queue range based on the queue group to process */
	qIndex += IX_QMGR_MIN_QUEUPP_QID;
    }

    if (statusChangeFlag == FALSE)
    {
	/* check if the interrupt register contains 
	 * only 1 bit set (happy day scenario)
	 */
	currDispatchQInfo = &dispatchQInfo[qIndex];
	if (intRegVal == currDispatchQInfo->intRegCheckMask)
	{
	    /* only 1 queue event triggered a notification.
	     * Call the callback function for this queue.
	     */
	    currDispatchQInfo->callback (qIndex,
					 currDispatchQInfo->callbackId);  
#ifndef NDEBUG
	    /* Update statistics */
	    dispatcherStats.queueStats[qIndex].callbackCnt++;
#endif
	}
	else 
	{
	    /* the event is triggered by more than 1 queue,
	     * so the queue search starts from the beginning
	     * or the middle of the priority table
	     *
	     * the search will end when all the bits of the interrupt
	     * register are cleared. There is no need to maintain
	     * a separate value and test it at each iteration.
	     */
	    if (IX_QMGR_QUELOW_GROUP == group)
	    {
		/* check if any bit related to queues in the first
		 * half of the priority table is set
		 */
		if (intRegVal & lowPriorityTableFirstHalfMask)
		{
		    priorityTableIndex = IX_QMGR_MIN_LOW_QUE_PRIORITY_TABLE_INDEX;
		}
		else
		{
		    priorityTableIndex = IX_QMGR_MID_LOW_QUE_PRIORITY_TABLE_INDEX;
		}
	    }
	    else 
	    {
		/* check if any bit related to queues in the first
		 * half of the priority table is set
		 */
		if (intRegVal & uppPriorityTableFirstHalfMask)
		{
		    priorityTableIndex = IX_QMGR_MIN_UPP_QUE_PRIORITY_TABLE_INDEX;
		}
		else
		{
		    priorityTableIndex = IX_QMGR_MID_UPP_QUE_PRIORITY_TABLE_INDEX;
		}
	    }
	    
	    /* iterate following the priority table until all the bits 
	     * of the interrupt register are cleared.
	     */
	    do
	    {
		qIndex = priorityTable[priorityTableIndex++];
		currDispatchQInfo = &dispatchQInfo[qIndex];
		intRegCheckMask = currDispatchQInfo->intRegCheckMask;
		
		/* If this queue caused this interrupt to be raised */
		if (intRegVal & intRegCheckMask)
		{
		    /* Call the callback function for this queue */
		    currDispatchQInfo->callback (qIndex,
						 currDispatchQInfo->callbackId);
#ifndef NDEBUG
		    /* Update statistics */
		    dispatcherStats.queueStats[qIndex].callbackCnt++;
#endif
		    
		    /* Clear the interrupt register bit */
		    intRegVal &= ~intRegCheckMask;
		}
	    }
	    while(intRegVal);
	}
    }
    else
    {
    /* A change in queue status occurred during the hw interrupt
     * register update. To maintain the interrupt consistency, it
     * is necessary to iterate through all queues of the queue group.
     */

    /* Read interrupt status again */
    ixQMgrAqmIfQInterruptRegRead (group, &intRegValAfterWrite);

    if (IX_QMGR_QUELOW_GROUP == group)
    {
	priorityTableIndex = IX_QMGR_MIN_LOW_QUE_PRIORITY_TABLE_INDEX;
	endIndex = IX_QMGR_MAX_LOW_QUE_PRIORITY_TABLE_INDEX;
    }
    else
    {
	priorityTableIndex = IX_QMGR_MIN_UPP_QUE_PRIORITY_TABLE_INDEX;
	endIndex = IX_QMGR_MAX_UPP_QUE_PRIORITY_TABLE_INDEX;
    }

    for ( ; priorityTableIndex<=endIndex; priorityTableIndex++)
    {
	qIndex = priorityTable[priorityTableIndex];
	currDispatchQInfo = &dispatchQInfo[qIndex];
	intRegCheckMask = currDispatchQInfo->intRegCheckMask;

	/* If this queue caused this interrupt to be raised */
	if (intRegVal & intRegCheckMask)
	{  
	    /* Call the callback function for this queue */
	    currDispatchQInfo->callback (qIndex,
					 currDispatchQInfo->callbackId);
#ifndef NDEBUG
	    /* Update statistics */
	    dispatcherStats.queueStats[qIndex].callbackCnt++;
#endif
	    
	} /* if (intRegVal .. */

	/* 
	 * If interrupt bit is set in intRegValAfterWrite don't
	 * proceed as this will be caught in next interrupt
	 */
	else if ((intRegValAfterWrite & intRegCheckMask) == 0)
	{
	    /* Check if an interrupt was lost for this Q */
	    if (ixQMgrAqmIfQStatusCheck(qStatusWordsB4Write,
					qStatusWordsAfterWrite,
					currDispatchQInfo->statusWordOffset,
					currDispatchQInfo->statusCheckValue,
					currDispatchQInfo->statusMask))
	    {
		/* Call the callback function for this queue */
		currDispatchQInfo->callback (qIndex, 
					     dispatchQInfo[qIndex].callbackId);                 
#ifndef NDEBUG
		/* Update statistics */
		dispatcherStats.queueStats[qIndex].callbackCnt++;
		dispatcherStats.queueStats[qIndex].intLostCallbackCnt++;
#endif
	    } /* if ixQMgrAqmIfQStatusCheck(.. */
	} /* else if ((intRegValAfterWrite ... */
    } /* for (priorityTableIndex=0 ... */
    }

    /* Rebuild the priority table if needed */
    if (rebuildTable)
    {
	ixQMgrDispatcherReBuildPriorityTable ();
    }

#ifndef NDEBUG
    /* Update statistics */
    dispatcherStats.loopRunCnt++;
#endif
}
/**
 * @fn ixEthAccQMgrQueuesConfig(void)
 *
 * @brief Set up all the queues and register all callbacks required
 * by this component with the QMgr
 *
 * The RxFree, Tx and TxDone queues are configured statically
 *
 * Rx queue configuration is driven by the QoS setup.
 * Many Rx queues may be required when QoS is enabled (this depends
 * on IxEthDB setup and the images being downloaded). The configuration
 * of the rxQueues is done in many steps as follows:
 *
 * @li select all Rx queues as configured by ethDB for all ports
 * @li sort the queues by traffic class
 * @li build the priority dependency for all queues
 * @li fill the configuration for all rx queues
 * @li configure all statically configured queues
 * @li configure all dynamically configured queues
 *
 * @param none
 *
 * @return IxEthAccStatus
 *
 * @internal
 */
IX_ETH_ACC_PUBLIC
IxEthAccStatus ixEthAccQMgrQueuesConfig(void)
{
    struct
    {
	int npeCount;
	UINT32 npeId;
	IxQMgrQId qId;
	IxEthDBProperty trafficClass;
    } rxQueues[IX_ETHACC_MAX_RX_QUEUES];

    UINT32 rxQueue = 0;
    UINT32 rxQueueCount = 0;
    IxQMgrQId ixQId =IX_QMGR_MAX_NUM_QUEUES;
    IxEthDBStatus ixEthDBStatus = IX_ETH_DB_SUCCESS;
    IxEthDBPortId ixEthDbPortId = 0;
    IxEthAccPortId ixEthAccPortId = 0;
    UINT32 ixNpeId = 0;
    UINT32 ixHighestNpeId = 0;
    UINT32 sortIterations = 0;
    IxEthAccStatus ret = IX_ETH_ACC_SUCCESS;
    IxEthAccQregInfo *qInfoDes = NULL;
    IxEthDBProperty ixEthDBTrafficClass = IX_ETH_DB_QOS_TRAFFIC_CLASS_0_RX_QUEUE_PROPERTY;
    IxEthDBPropertyType ixEthDBPropertyType = IX_ETH_DB_INTEGER_PROPERTY;
    UINT32 ixEthDBParameter = 0;
    BOOL completelySorted = FALSE;

    /* Fill the correspondence between ports and queues
     * This defines the mapping from port to queue Ids.
     */

    ixEthAccPortData[IX_ETH_PORT_1].ixEthAccRxData.rxFreeQueue
	= IX_ETH_ACC_RX_FREE_BUFF_ENET0_Q;
    ixEthAccPortData[IX_ETH_PORT_2].ixEthAccRxData.rxFreeQueue
	= IX_ETH_ACC_RX_FREE_BUFF_ENET1_Q;
#ifdef __ixp46X
    ixEthAccPortData[IX_ETH_PORT_3].ixEthAccRxData.rxFreeQueue
	= IX_ETH_ACC_RX_FREE_BUFF_ENET2_Q;
#endif
    ixEthAccPortData[IX_ETH_PORT_1].ixEthAccTxData.txQueue
	= IX_ETH_ACC_TX_FRAME_ENET0_Q;
    ixEthAccPortData[IX_ETH_PORT_2].ixEthAccTxData.txQueue
	= IX_ETH_ACC_TX_FRAME_ENET1_Q;
#ifdef __ixp46X
    ixEthAccPortData[IX_ETH_PORT_3].ixEthAccTxData.txQueue
	= IX_ETH_ACC_TX_FRAME_ENET2_Q;
#endif
    /* Fill the correspondence between ports and NPEs
     * This defines the mapping from port to npeIds.
     */

    ixEthAccPortData[IX_ETH_PORT_1].npeId = IX_NPEMH_NPEID_NPEB;
    ixEthAccPortData[IX_ETH_PORT_2].npeId = IX_NPEMH_NPEID_NPEC;
#ifdef __ixp46X
    ixEthAccPortData[IX_ETH_PORT_3].npeId = IX_NPEMH_NPEID_NPEA;
#endif
    /* set the default rx scheduling discipline */
    ixEthAccDataInfo.schDiscipline = FIFO_NO_PRIORITY;

    /*
     * Queue Selection step:
     *
     * The following code selects all the queues and builds
     * a temporary array which contains, for each queue:
     * - the queue Id,
     * - the highest traffic class (in case of many
     * priorities configured for the same queue on different
     * ports)
     * - the number of different Npes which are
     * configured to write to this queue.
     *
     * The output of this loop is a temporary array of RX queues
     * in any order.
     *
     */
#ifdef CONFIG_IXP425_COMPONENT_ETHDB
    for (ixEthAccPortId = 0;
	 (ixEthAccPortId < IX_ETH_ACC_NUMBER_OF_PORTS)
	     && (ret == IX_ETH_ACC_SUCCESS);
	 ixEthAccPortId++)
    {
	/* map between ethDb and ethAcc port Ids */
	ixEthDbPortId = (IxEthDBPortId)ixEthAccPortId;

	/* map between npeId and ethAcc port Ids */
	ixNpeId = IX_ETH_ACC_PORT_TO_NPE_ID(ixEthAccPortId);

	/* Iterate thru the different priorities */
	for (ixEthDBTrafficClass = IX_ETH_DB_QOS_TRAFFIC_CLASS_0_RX_QUEUE_PROPERTY;
	     ixEthDBTrafficClass <= IX_ETH_DB_QOS_TRAFFIC_CLASS_7_RX_QUEUE_PROPERTY;
	     ixEthDBTrafficClass++)
	{
	    ixEthDBStatus = ixEthDBFeaturePropertyGet(
	      ixEthDbPortId,
	      IX_ETH_DB_VLAN_QOS,
	      ixEthDBTrafficClass,
	      &ixEthDBPropertyType,
	      (void *)&ixEthDBParameter);

	    if (ixEthDBStatus == IX_ETH_DB_SUCCESS)
	    {
		/* This port and QoS class are mapped to
		 * a RX queue.
		 */
		if (ixEthDBPropertyType == IX_ETH_DB_INTEGER_PROPERTY)
		{
		    /* remember the highest npe Id supporting ethernet */
		    if (ixNpeId > ixHighestNpeId)
		    {
			ixHighestNpeId = ixNpeId;
		    }

		    /* search the queue in the list of queues
		     * already used by another port or QoS
		     */
		    for (rxQueue = 0;
			 rxQueue < rxQueueCount;
			 rxQueue++)
		    {
			if (rxQueues[rxQueue].qId == (IxQMgrQId)ixEthDBParameter)
			{
			    /* found an existing entry; update the NPE
			     * count for this queue if the port maps to
			     * a different NPE.
			     */
			    if (rxQueues[rxQueue].npeId != ixNpeId)
			    {
				rxQueues[rxQueue].npeCount++;
				rxQueues[rxQueue].npeId = ixNpeId;
			    }
			    /* get the highest traffic class for this queue */
			    if (rxQueues[rxQueue].trafficClass > ixEthDBTrafficClass)
			    {
				rxQueues[rxQueue].trafficClass = ixEthDBTrafficClass;
			    }
			    break;
			}
		    }
		    if (rxQueue == rxQueueCount)
		    {
			/* new queue not found in the current list,
			 * add a new entry.
			 */
			IX_OSAL_ASSERT(rxQueueCount < IX_ETHACC_MAX_RX_QUEUES);
			rxQueues[rxQueueCount].qId = ixEthDBParameter;
			rxQueues[rxQueueCount].npeCount = 1;
			rxQueues[rxQueueCount].npeId = ixNpeId;
			rxQueues[rxQueueCount].trafficClass = ixEthDBTrafficClass;
			rxQueueCount++;
		    }
		}
		else
		{
		    /* unexpected property type (not Integer) */
		    ret = IX_ETH_ACC_FAIL;

                    IX_ETH_ACC_WARNING_LOG("ixEthAccQMgrQueuesConfig: unexpected property type returned by EthDB\n", 0, 0, 0, 0, 0, 0);

		    /* no point to continue to iterate */
		    break;
		}
	    }
	    else
	    {
		/* No Rx queue configured for this port
		 * and this traffic class. Do nothing.
		 */
	    }
	}

        /* notify EthDB that queue initialization is complete and traffic class allocation is frozen */
        ixEthDBFeaturePropertySet(ixEthDbPortId,
            IX_ETH_DB_VLAN_QOS,
            IX_ETH_DB_QOS_QUEUE_CONFIGURATION_COMPLETE,
            NULL /* ignored */);
    }

#else

    ixNpeId = IX_ETH_ACC_PORT_TO_NPE_ID(ixEthAccPortId);
    rxQueues[0].qId = 4;
    rxQueues[0].npeCount = 1;
    rxQueues[0].npeId = ixNpeId;
    rxQueues[0].trafficClass = IX_ETH_DB_QOS_TRAFFIC_CLASS_0_RX_QUEUE_PROPERTY;
    rxQueueCount++;

#endif

    /* check there is at least 1 rx queue: there is no point
     * in continuing if there is no rx queue configured
     */
    if ((rxQueueCount == 0) || (ret == IX_ETH_ACC_FAIL))
    {
        IX_ETH_ACC_WARNING_LOG("ixEthAccQMgrQueuesConfig: no queues configured, bailing out\n", 0, 0, 0, 0, 0, 0);
	return (IX_ETH_ACC_FAIL);
    }

    /* Queue sort step:
     *
     * Re-order the array of queues by decreasing traffic class
     * using a bubble sort. (trafficClass 0 is the lowest
     * priority traffic, trafficClass 7 is the highest priority traffic)
     *
     * Primary sort order is traffic class
     * Secondary sort order is npeId
     *
     * Note that a bubble sort algorithm is not very efficient when
     * the number of queues grows. However, it is not a bad choice
     * considering the very small number of entries to sort. Also, bubble
     * sort is extremely fast when the list is already sorted.
     *
     * The output of this loop is a sorted array of queues.
     *
     */
    sortIterations = 0;
    do
    {
	sortIterations++;
	completelySorted = TRUE;
	for (rxQueue = 0;
	     rxQueue < rxQueueCount - sortIterations;
	     rxQueue++)
	{
	    /* compare adjacent elements */
	    if ((rxQueues[rxQueue].trafficClass <
		rxQueues[rxQueue+1].trafficClass)
		|| ((rxQueues[rxQueue].trafficClass ==
		     rxQueues[rxQueue+1].trafficClass)
		    &&(rxQueues[rxQueue].npeId <
		       rxQueues[rxQueue+1].npeId)))
	    {
		/* swap adjacent elements */
		int npeCount = rxQueues[rxQueue].npeCount;
		UINT32 npeId = rxQueues[rxQueue].npeId;
		IxQMgrQId qId = rxQueues[rxQueue].qId;
		IxEthDBProperty trafficClass = rxQueues[rxQueue].trafficClass;
		rxQueues[rxQueue].npeCount = rxQueues[rxQueue+1].npeCount;
		rxQueues[rxQueue].npeId = rxQueues[rxQueue+1].npeId;
		rxQueues[rxQueue].qId = rxQueues[rxQueue+1].qId;
		rxQueues[rxQueue].trafficClass = rxQueues[rxQueue+1].trafficClass;
		rxQueues[rxQueue+1].npeCount = npeCount;
		rxQueues[rxQueue+1].npeId = npeId;
		rxQueues[rxQueue+1].qId = qId;
		rxQueues[rxQueue+1].trafficClass = trafficClass;
		completelySorted = FALSE;
	    }
	}
    }
    while (!completelySorted);

    /* Queue traffic class list:
     *
     * Fill an array of rx queues linked by ascending traffic classes.
     *
     * If the queues are configured as follows
     *   qId 6 -> traffic class 0 (lowest)
     *   qId 7 -> traffic class 0
     *   qId 8 -> traffic class 6
     *   qId 12 -> traffic class 7 (highest)
     *
     * Then the output of this loop will be
     *
     * higherPriorityQueue[6] = 8
     * higherPriorityQueue[7] = 8
     * higherPriorityQueue[8] = 12
     * higherPriorityQueue[12] = Invalid queueId
     * higherPriorityQueue[...] = Invalid queueId
     *
     * Note that this queue ordering does not handle all possibilities
     * that could result from different rules associated with different
     * ports, and inconsistencies in the rules. In all cases, the
     * output of this algorithm is a simple linked list of queues,
     * without closed circuits.
     *
     * This list is implemented as an array with invalid values initialized
     * with an "invalid" queue id which is the maximum number of queues.
     *
     */

    /*
     * Initialise the rx queue list.
     */
    for (rxQueue = 0; rxQueue < IX_QMGR_MAX_NUM_QUEUES; rxQueue++)
    {
	ixEthAccDataInfo.higherPriorityQueue[rxQueue] = IX_QMGR_MAX_NUM_QUEUES;
    }

    /* build the linked list for this NPE.
     */
    for (ixNpeId = 0;
	 ixNpeId <= ixHighestNpeId;
	 ixNpeId++)
    {
	/* iterate thru the sorted list of queues
	 */
	ixQId = IX_QMGR_MAX_NUM_QUEUES;
	for (rxQueue = 0;
	     rxQueue < rxQueueCount;
	     rxQueue++)
	{
	    if (rxQueues[rxQueue].npeId == ixNpeId)
	    {
		ixEthAccDataInfo.higherPriorityQueue[rxQueues[rxQueue].qId] = ixQId;
		/* iterate thru queues with the same traffic class
		 * as the current queue. (queues are ordered by descending
		 * traffic classes and npeIds).
		 */
		while ((rxQueue < rxQueueCount - 1)
		       && (rxQueues[rxQueue].trafficClass
			   == rxQueues[rxQueue+1].trafficClass)
		       && (ixNpeId == rxQueues[rxQueue].npeId))
		{
		    rxQueue++;
		    ixEthAccDataInfo.higherPriorityQueue[rxQueues[rxQueue].qId] = ixQId;
		}
		ixQId = rxQueues[rxQueue].qId;
	    }
	}
    }

    /* point on the first dynamic queue description */
    qInfoDes = ixEthAccQmgrRxQueuesInfo;

    /* update the list of queues with the rx queues */
    for (rxQueue = 0;
	 (rxQueue < rxQueueCount) && (ret == IX_ETH_ACC_SUCCESS);
	 rxQueue++)
    {
	/* Don't utilize more than IX_ETHACC_MAX_LARGE_RX_QUEUES queues
	 * with the full 128 entries.  For the lower priority queues, use
	 * a smaller number of entries.  This ensures queue resources
	 * remain available for other components.
	 */
	if( (rxQueueCount > IX_ETHACC_MAX_LARGE_RX_QUEUES) &&
	    (rxQueue < rxQueueCount - IX_ETHACC_MAX_LARGE_RX_QUEUES) )
	{
	    /* add the small RX Queue setup template to the list of queues */
	    memcpy(qInfoDes, &ixEthAccQmgrRxSmallTemplate, sizeof(*qInfoDes));
	} else {
	    /* add the default RX Queue setup template to the list of queues */
	    memcpy(qInfoDes, &ixEthAccQmgrRxDefaultTemplate, sizeof(*qInfoDes));
	}

	/* setup the RxQueue ID */
	qInfoDes->qId = rxQueues[rxQueue].qId;

	/* setup the RxQueue watermark level
	 *
	 * Each queue can be filled by many NPEs. To prevent the
	 * NPEs from writing to a full queue, the high watermark
	 * level must be set for the nearly-full condition.
	 * (the high watermark levels are powers of 2,
	 * counted from the top of the queue)
	 *
	 * Number of     watermark
	 *   ports        level
	 *    1             0
	 *    2             1
	 *    3             2
	 *    4             4
	 *    5             4
	 *    6             8
	 *    n          approx. 2**ceil(log2(n))
	 */
	if (rxQueues[rxQueue].npeCount == 1)
	{
	    qInfoDes->AlmostFullThreshold = IX_QMGR_Q_WM_LEVEL0;
	}
	else if (rxQueues[rxQueue].npeCount == 2)
	{
	    qInfoDes->AlmostFullThreshold = IX_QMGR_Q_WM_LEVEL1;
	}
	else if (rxQueues[rxQueue].npeCount == 3)
	{
	    qInfoDes->AlmostFullThreshold = IX_QMGR_Q_WM_LEVEL2;
	}
	else
	{
	    /* reached the maximum number of NPEs per queue for CSR 2.0 */
            IX_ETH_ACC_WARNING_LOG("ixEthAccQMgrQueuesConfig: maximum number of NPEs per queue reached, bailing out\n", 0, 0, 0, 0, 0, 0);
	    ret = IX_ETH_ACC_FAIL;
	    break;
	}

	/* move to next queue entry */
	++qInfoDes;
    }

    /* configure the static list (RxFree, Tx and TxDone queues) */
    for (qInfoDes = ixEthAccQmgrStaticInfo;
	 (qInfoDes->qCallback != (IxQMgrCallback) NULL )
	     && (ret == IX_ETH_ACC_SUCCESS);
	 ++qInfoDes)
    {
	ret = ixEthAccQMgrQueueSetup(qInfoDes);
    }

    /* configure the dynamic list (Rx queues) */
    for (qInfoDes = ixEthAccQmgrRxQueuesInfo;
	 (qInfoDes->qCallback != (IxQMgrCallback) NULL )
	     && (ret == IX_ETH_ACC_SUCCESS);
	 ++qInfoDes)
    {
	ret = ixEthAccQMgrQueueSetup(qInfoDes);
    }

    return(ret);
}
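As a small illustration of the higherPriorityQueue array built above (a sketch, not part of the original sources; the helper name is hypothetical and assumes ixEthAccDataInfo is visible): with the example mapping from the comment (6 -> 8, 7 -> 8, 8 -> 12, 12 -> invalid), a consumer could walk the chain of higher-priority queues until the invalid marker is reached.

static void ixEthAccHigherPriorityChainWalkExample (IxQMgrQId startQId)
{
    IxQMgrQId qId = startQId;

    /* follow the chain until the invalid marker IX_QMGR_MAX_NUM_QUEUES */
    while (qId != IX_QMGR_MAX_NUM_QUEUES)
    {
        /* inspect or service queue qId here */
        qId = ixEthAccDataInfo.higherPriorityQueue[qId];
    }
}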
Example #17
PRIVATE IX_STATUS
ixAtmmVcDemandUpdate (IxAtmLogicalPort port, IxAtmSchedulerVcId vcId, unsigned numberOfCells)
{
    IX_STATUS retval = IX_FAIL;
    IxAtmScheduleTable *retTable;
    UINT32 lockKey;
    
    /*
     * N.B. Running from a task or interrupt level.
     * From the task level we could be interrupted
     * so need to protect VcQueueUpdate, SchTableUpdate
     * and PortTxProcess
     */
    lockKey = ixOsalIrqLock();

    /*
     * If we can get the PORT fast mutex it means the port was
     * UP, so we can initiate transmission.
     */
    if (IX_ATMM_PORT_MUTEX_TRY_LOCK(port) == IX_SUCCESS)
    {

        /*
         * Inform Scheduler of the demand
         * N.B. This is data path so IxAtmSch will validate parameters
         * rather than do this twice.
         */
        retval = ixAtmSchVcQueueUpdate (port, vcId, numberOfCells);
        if( retval == IX_SUCCESS )
        {
            /*
             * If we can get the BUSY fast mutex it means the port was
             * IDLE so we need to initiate transmission.
             */
            if (IX_ATMM_TX_MUTEX_TRY_LOCK(port) == IX_SUCCESS)
            {
                /* 
                 * Build a new schedule table, N.B. system was idle so can potentially
                 * generate a table with ixAtmmTxQSize cells
                 */
                retval = ixAtmSchTableUpdate (port,
            				  ixAtmmTxQSize[port],
            				  &retTable);
                
                /*
                 * VcQueueUpdate above placed cells in the scheduler
                 * so the tableUpdate should succeed
                 */
                IX_OSAL_ASSERT(retval == IX_SUCCESS);
                
                /*
                 * Tell Atmd to send the cells in the schedule table.
                 */
                retval = ixAtmdAccPortTxProcess (port, retTable);
                

                if ( (retval != IX_SUCCESS) && (retval != IX_ATMDACC_WARNING))
                {
            	    retval = IX_FAIL;
    	        }
            }
        } 
	IX_ATMM_PORT_MUTEX_UNLOCK(port);
    }

    ixOsalIrqUnlock (lockKey);
    return retval;
}
/*
 * Fills a memory zone with a given constant byte,
 * returns pointer to the memory zone.
 */
void *
ixOsalMemSet (void *ptr, UINT8 filler, UINT32 count)
{
    IX_OSAL_ASSERT (ptr != NULL);
    return (memset (ptr, filler, count));
}
Example #19
void
ixQMgrDispatcherLoopRunB0LLP (IxQMgrDispatchGroup group)
{
    UINT32 intRegVal =0;                /* Interrupt reg val */
    UINT32 intRegCheckMask;          /* Mask for checking interrupt bits */
    IxQMgrQInfo *currDispatchQInfo;

    int priorityTableIndex; /* Priority table index */
    int qIndex;             /* Current queue being processed */

    UINT32 intRegValCopy = 0;
    UINT32 intEnableRegVal = 0;
    UINT8 i = 0;

#ifndef NDEBUG
    IX_OSAL_ASSERT((group == IX_QMGR_QUEUPP_GROUP) ||
              (group == IX_QMGR_QUELOW_GROUP));
#endif

    /* Read the interrupt register */
    ixQMgrAqmIfQInterruptRegRead (group, &intRegVal);

    /* 
     * mask any interrupts that are not enabled 
     */
    ixQMgrAqmIfQInterruptEnableRegRead (group, &intEnableRegVal);
    intRegVal &= intEnableRegVal;

    /* Proceed only if at least one enabled queue interrupt is set */
    if (intRegVal != 0)
    {
        if (IX_QMGR_QUELOW_GROUP == group)
        {
            /*
             * As the sticky bit is set, the interrupt register will
             * not clear if written back at this point, because the
             * condition has not yet been cleared. Take a copy and write
             * it back later, after the condition has been cleared.
             */
            intRegValCopy = intRegVal;
        }
        else
        {
            /* no sticky for upper Q's, so write back now */
            ixQMgrAqmIfQInterruptRegWrite (group, intRegVal);
        }

        /* get the first queue Id from the interrupt register value */
        qIndex = (BITS_PER_WORD - 1) - ixQMgrCountLeadingZeros(intRegVal);

        if (IX_QMGR_QUEUPP_GROUP == group)
        {
            /* Set the queue range based on the queue group to process */
            qIndex += IX_QMGR_MIN_QUEUPP_QID;
        }

        /* check if the interrupt register contains
        * only 1 bit set
        * For example:
        *                                        intRegVal = 0x0010
        *               currDispatchQInfo->intRegCheckMask = 0x0010
        *    intRegVal == currDispatchQInfo->intRegCheckMask is TRUE.
        */
        currDispatchQInfo = &dispatchQInfo[qIndex];
        if (intRegVal == currDispatchQInfo->intRegCheckMask)
        {

            /* 
             * check if the Q type is periodic - only lower queues can
             * have their type set to periodic
             */
            if (IX_QMGR_TYPE_REALTIME_PERIODIC == ixQMgrQTypes[qIndex])
            {
                /* 
                 * Disable the notifications on any sporadics 
                 */
                for (i=0; i <= IX_QMGR_MAX_LOW_QUE_TABLE_INDEX; i++)
                {
                    if (IX_QMGR_TYPE_REALTIME_SPORADIC == ixQMgrQTypes[i])
                    {
                        ixQMgrNotificationDisable(i);
#ifndef NDEBUG
                        /* Update statistics */
                        dispatcherStats.queueStats[i].disableCount++;
#endif
                    }
                }
            }

            currDispatchQInfo->callback (qIndex,
                                         currDispatchQInfo->callbackId);
#ifndef NDEBUG
            /* Update statistics */
            dispatcherStats.queueStats[qIndex].callbackCnt++;
#endif
        }
        else
        {
            /* the event is triggered by more than 1 queue,
            * so the queue search starts from the beginning
            * or the middle of the priority table
            *
            * the search will end when all the bits of the interrupt
            * register are cleared. There is no need to maintain
            * a separate value and test it at each iteration.
            */
            if (IX_QMGR_QUELOW_GROUP == group)
            {
                /* check if any bit related to queues in the first
                 * half of the priority table is set
                 */
                if (intRegVal & lowPriorityTableFirstHalfMask)
                {
                    priorityTableIndex =
                                       IX_QMGR_MIN_LOW_QUE_PRIORITY_TABLE_INDEX;
                }
                else
                {
                    priorityTableIndex =
                                       IX_QMGR_MID_LOW_QUE_PRIORITY_TABLE_INDEX;
                }
            }
            else
            {
                /* check if any bit related to queues in the first
                 * half of the priority table is set
                 */
                if (intRegVal & uppPriorityTableFirstHalfMask)
                {
                    priorityTableIndex =
                                       IX_QMGR_MIN_UPP_QUE_PRIORITY_TABLE_INDEX;
                }
                else
                {
                    priorityTableIndex =
                                       IX_QMGR_MID_UPP_QUE_PRIORITY_TABLE_INDEX;
                }
            }

            /* iterate following the priority table until all the bits
             * of the interrupt register are cleared.
             */
            do
            {
                qIndex = priorityTable[priorityTableIndex++];
                currDispatchQInfo = &dispatchQInfo[qIndex];
                intRegCheckMask = currDispatchQInfo->intRegCheckMask;

                /* If this queue caused this interrupt to be raised */
                if (intRegVal & intRegCheckMask)
                {
                    /* 
                     * check if the Q type is periodic - only lower queues can
                     * have their type set to periodic. There can only be one
                     * periodic queue, so the sporadics are only disabled once.
                     */
                    if (IX_QMGR_TYPE_REALTIME_PERIODIC == ixQMgrQTypes[qIndex])
                    {
                        /* 
                         * Disable the notifications on any sporadics 
                         */
                        for (i=0; i <= IX_QMGR_MAX_LOW_QUE_TABLE_INDEX; i++)
                        {
                            if (IX_QMGR_TYPE_REALTIME_SPORADIC == 
                                    ixQMgrQTypes[i])
                            {
                                ixQMgrNotificationDisable(i);
                                /* 
                                 * remove from intRegVal as we don't want 
                                 * to service any sporadics now
                                 */
                                intRegVal &= ~dispatchQInfo[i].intRegCheckMask;
#ifndef NDEBUG
                                /* Update statistics */
                                dispatcherStats.queueStats[i].disableCount++;
#endif
                            }
                        }
                    }

                    currDispatchQInfo->callback (qIndex,
                                                 currDispatchQInfo->callbackId);
#ifndef NDEBUG
                    /* Update statistics */
                    dispatcherStats.queueStats[qIndex].callbackCnt++;
#endif
                    /* Clear the interrupt register bit */
                    intRegVal &= ~intRegCheckMask;
                }
            }
            while(intRegVal);
        } /*End of intRegVal == currDispatchQInfo->intRegCheckMask */
    } /* End of intRegVal != 0 */

#ifndef NDEBUG
    /* Update statistics */
    dispatcherStats.loopRunCnt++;
#endif

    if ((intRegValCopy != 0) && (IX_QMGR_QUELOW_GROUP == group))
    {
        /* 
         * lower groups (therefore sticky) AND at least one enabled interrupt
         * Write back to clear the interrupt 
         */
        ixQMgrAqmIfQInterruptRegWrite (IX_QMGR_QUELOW_GROUP, intRegValCopy);
    }

    /* Rebuild the priority table if needed */
    if (rebuildTable)
    {
        ixQMgrDispatcherReBuildPriorityTable ();
    }
}
Example #20
void
ixQMgrDispatcherLoopRunB0 (IxQMgrDispatchGroup group)
{
    UINT32 intRegVal;                /* Interrupt reg val */
    UINT32 intRegCheckMask;          /* Mask for checking interrupt bits */
    IxQMgrQInfo *currDispatchQInfo;


    int priorityTableIndex; /* Priority table index */
    int qIndex;             /* Current queue being processed */

#ifndef NDEBUG
    IX_OSAL_ASSERT((group == IX_QMGR_QUEUPP_GROUP) ||
              (group == IX_QMGR_QUELOW_GROUP));
#endif

    /* Read the interrupt register */
    ixQMgrAqmIfQInterruptRegRead (group, &intRegVal);


    /* Proceed only if at least one queue interrupt is set */
    if (intRegVal != 0)
    {

            /* Write it back to clear the interrupt */
            ixQMgrAqmIfQInterruptRegWrite (group, intRegVal);

            /* get the first queue Id from the interrupt register value */
            qIndex = (BITS_PER_WORD - 1) - ixQMgrCountLeadingZeros(intRegVal);

            if (IX_QMGR_QUEUPP_GROUP == group)
            {
                /* Set the queue range based on the queue group to process */
                qIndex += IX_QMGR_MIN_QUEUPP_QID;
            }

            /* check if the interrupt register contains
             * only 1 bit set
             * For example:
             *                                        intRegVal = 0x0010
             *               currDispatchQInfo->intRegCheckMask = 0x0010
             *    intRegVal == currDispatchQInfo->intRegCheckMask is TRUE.
             */
             currDispatchQInfo = &dispatchQInfo[qIndex];
             if (intRegVal == currDispatchQInfo->intRegCheckMask)
             {
                /* only 1 queue event triggered a notification.
                 * Call the callback function for this queue.
                 */
                currDispatchQInfo->callback (qIndex,
                                     currDispatchQInfo->callbackId);
#ifndef NDEBUG
                /* Update statistics */
                dispatcherStats.queueStats[qIndex].callbackCnt++;
#endif
             }
             else
             {
                 /* the event is triggered by more than 1 queue,
                  * so the queue search starts from the beginning
                  * or the middle of the priority table
                  *
                  * the search will end when all the bits of the interrupt
                  * register are cleared. There is no need to maintain
                  * a separate value and test it at each iteration.
                  */
                 if (IX_QMGR_QUELOW_GROUP == group)
                 {
                     /* check if any bit related to queues in the first
                      * half of the priority table is set
                      */
                     if (intRegVal & lowPriorityTableFirstHalfMask)
                     {
                         priorityTableIndex = IX_QMGR_MIN_LOW_QUE_PRIORITY_TABLE_INDEX;
                     }
                     else
                     {
                         priorityTableIndex = IX_QMGR_MID_LOW_QUE_PRIORITY_TABLE_INDEX;
                     }
                 }
                else
                 {
                     /* check if any bit related to queues in the first
                      * half of the priority table is set
                      */
                     if (intRegVal & uppPriorityTableFirstHalfMask)
                     {
                         priorityTableIndex = IX_QMGR_MIN_UPP_QUE_PRIORITY_TABLE_INDEX;
                     }
                     else
                     {
                         priorityTableIndex = IX_QMGR_MID_UPP_QUE_PRIORITY_TABLE_INDEX;
                     }
                 }

                 /* iterate following the priority table until all the bits
                  * of the interrupt register are cleared.
                  */
                 do
                 {
                     qIndex = priorityTable[priorityTableIndex++];
                     currDispatchQInfo = &dispatchQInfo[qIndex];
                     intRegCheckMask = currDispatchQInfo->intRegCheckMask;

                     /* If this queue caused this interrupt to be raised */
                     if (intRegVal & intRegCheckMask)
                     {
                         /* Call the callback function for this queue */
                         currDispatchQInfo->callback (qIndex,
                                              currDispatchQInfo->callbackId);
#ifndef NDEBUG
                         /* Update statistics */
                         dispatcherStats.queueStats[qIndex].callbackCnt++;
#endif

                         /* Clear the interrupt register bit */
                         intRegVal &= ~intRegCheckMask;
                     }
                  }
                  while(intRegVal);
             } /*End of intRegVal == currDispatchQInfo->intRegCheckMask */
     } /* End of intRegVal != 0 */

#ifndef NDEBUG
    /* Update statistics */
    dispatcherStats.loopRunCnt++;
#endif

    /* Rebuild the priority table if needed */
    if (rebuildTable)
    {
        ixQMgrDispatcherReBuildPriorityTable ();
    }
}
Example #21
/*
 * Function definition: ixNpeDlNpeMgrBaseAddressGet
 */
PRIVATE UINT32
ixNpeDlNpeMgrBaseAddressGet (IxNpeDlNpeId npeId)
{
    IX_OSAL_ASSERT (ixNpeDlMemInitialised);
    return ixNpeDlNpeInfo[npeId].baseAddress;
}
Example #22
PUBLIC IX_OSAL_MBUF_POOL *
ixOsalPoolInit (UINT32 count, UINT32 size, const char *name)
{

    /* These variables are only used if IX_OSAL_BUFFER_ALLOC_SEPARATELY
     * is defined.
     */
#ifdef IX_OSAL_BUFFER_ALLOC_SEPARATELY
    UINT32 i, mbufSizeAligned, dataSizeAligned;
    IX_OSAL_MBUF *currentMbufPtr = NULL;
#else
    void *poolBufPtr;
    void *poolDataPtr;
    int mbufMemSize;
    int dataMemSize;
#endif

    IX_OSAL_MBUF_POOL *poolPtr = NULL;
    
    if (count == 0)
    {
        ixOsalLog (IX_OSAL_LOG_LVL_ERROR,
            IX_OSAL_LOG_DEV_STDOUT,
            "ixOsalPoolInit(): " "count = 0 \n", 0, 0, 0, 0, 0, 0);
        return NULL;        
    }

    if (name == NULL)
    {
        ixOsalLog (IX_OSAL_LOG_LVL_ERROR,
            IX_OSAL_LOG_DEV_STDOUT,
            "ixOsalPoolInit(): " "NULL name \n", 0, 0, 0, 0, 0, 0);
        return NULL;        
    }
    
    if (strlen (name) > IX_OSAL_MBUF_POOL_NAME_LEN)
    {
        ixOsalLog (IX_OSAL_LOG_LVL_ERROR,
            IX_OSAL_LOG_DEV_STDOUT,
            "ixOsalPoolInit(): "
            "ERROR - name length should be no greater than %d  \n",
            IX_OSAL_MBUF_POOL_NAME_LEN, 0, 0, 0, 0, 0);
        return NULL;
    }

/* The OS can choose whether to allocate all buffers together (if it
 * can handle a single huge alloc request), or to allocate buffers
 * separately by defining IX_OSAL_BUFFER_ALLOC_SEPARATELY.
 */
#ifdef IX_OSAL_BUFFER_ALLOC_SEPARATELY
    /* Get a pool Ptr */
    poolPtr = ixOsalPoolAlloc ();

    if (poolPtr == NULL)
    {
        ixOsalLog (IX_OSAL_LOG_LVL_ERROR,
            IX_OSAL_LOG_DEV_STDOUT,
            "ixOsalPoolInit(): " "Fail to Get PoolPtr \n", 0, 0, 0, 0, 0, 0);    
        return NULL;
    }

    mbufSizeAligned = IX_OSAL_MBUF_POOL_SIZE_ALIGN (sizeof (IX_OSAL_MBUF));
    dataSizeAligned = IX_OSAL_MBUF_POOL_SIZE_ALIGN(size);

    poolPtr->nextFreeBuf = NULL;    
    poolPtr->mbufMemPtr = NULL;    
    poolPtr->dataMemPtr = NULL;
    poolPtr->bufDataSize = dataSizeAligned;
    poolPtr->totalBufsInPool = count;
    poolPtr->poolAllocType = IX_OSAL_MBUF_POOL_TYPE_SYS_ALLOC;
    strcpy (poolPtr->name, name);


    for (i = 0; i < count; i++)
    {
	    /* create an mbuf */
	    currentMbufPtr = ixOsalBuffPoolMbufInit (mbufSizeAligned,
					         dataSizeAligned,
					         poolPtr);

#ifdef IX_OSAL_BUFFER_FREE_PROTECTION
	    /* Set the buffer USED flag; otherwise ixOsalMbufFree will fail.
	     * ixOsalMbufFree is used here in a special way: it is used to
	     * add the MBUF to the pool. By specification, ixOsalMbufFree
	     * deallocates an allocated MBUF back to its pool.
	     */
	    IX_OSAL_MBUF_SET_USED_FLAG(currentMbufPtr);
#endif
	    /* Add it to the pool */
	    ixOsalMbufFree (currentMbufPtr);

	    /* flush the pool information to RAM */
	    IX_OSAL_CACHE_FLUSH (currentMbufPtr, mbufSizeAligned);
    }
    
    /*
     * update the number of free buffers in the pool 
     */
    poolPtr->freeBufsInPool = count;

#else 
/* Otherwise allocate the buffers as one contiguous block */
    poolBufPtr = IX_OSAL_MBUF_POOL_MBUF_AREA_ALLOC (count, mbufMemSize);
    IX_OSAL_ASSERT (poolBufPtr != NULL);
    poolDataPtr =
        IX_OSAL_MBUF_POOL_DATA_AREA_ALLOC (count, size, dataMemSize);
    IX_OSAL_ASSERT (poolDataPtr != NULL);

    poolPtr = ixOsalNoAllocPoolInit (poolBufPtr, poolDataPtr,
        count, size, name);
    if (poolPtr == NULL)
    {
        ixOsalLog (IX_OSAL_LOG_LVL_ERROR,
            IX_OSAL_LOG_DEV_STDOUT,
            "ixOsalPoolInit(): " "Fail to get pool ptr \n", 0, 0, 0, 0, 0, 0);
        return NULL;
    }

    poolPtr->poolAllocType = IX_OSAL_MBUF_POOL_TYPE_SYS_ALLOC;

#endif /* IX_OSAL_BUFFER_ALLOC_SEPARATELY */
    return poolPtr;
}
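A minimal usage sketch (illustrative only; the helper name, counts and pool name are hypothetical): creating a pool with the (count, size, name) signature shown above and checking the result.

static IX_OSAL_MBUF_POOL *poolInitUsageExample (void)
{
    IX_OSAL_MBUF_POOL *poolPtr;

    /* request 64 buffers of 2048 data bytes each */
    poolPtr = ixOsalPoolInit (64, 2048, "examplePool");
    if (poolPtr == NULL)
    {
        /* pool creation failed (bad parameters or out of memory) */
    }
    return poolPtr;
}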
Example #23
IX_STATUS
ixQMgrQConfig (char *qName,
	      IxQMgrQId qId,
	      IxQMgrQSizeInWords qSizeInWords,
	      IxQMgrQEntrySizeInWords qEntrySizeInWords)
{
    UINT32 aqmLocalBaseAddress;

    if (!cfgInitialized)
    {
        return IX_FAIL;
    }
    
    if (!IX_QMGR_QID_IS_VALID(qId))
    {
	return IX_QMGR_INVALID_Q_ID;
    }
    
    else if (NULL == qName)
    {
	return IX_QMGR_PARAMETER_ERROR;
    }
    
    else if (strlen (qName) > IX_QMGR_MAX_QNAME_LEN)
    {
	return IX_QMGR_PARAMETER_ERROR;
    }

    else if (!qSizeInWordsIsOk (qSizeInWords))
    {
	return IX_QMGR_INVALID_QSIZE;
    }

    else if (!qEntrySizeInWordsIsOk (qEntrySizeInWords))
    {
	return IX_QMGR_INVALID_Q_ENTRY_SIZE;
    }
    
    else if (cfgQueueInfo[qId].isConfigured)
    {
	return IX_QMGR_Q_ALREADY_CONFIGURED;
    }
   
    ixOsalMutexLock(&ixQMgrQCfgMutex, IX_OSAL_WAIT_FOREVER);

    /* Write the config register */
    ixQMgrAqmIfQueCfgWrite (qId,
			   qSizeInWords,
			   qEntrySizeInWords,
			   freeSramAddress);


    strcpy (cfgQueueInfo[qId].qName, qName);
    cfgQueueInfo[qId].qSizeInWords = qSizeInWords;
    cfgQueueInfo[qId].qEntrySizeInWords = qEntrySizeInWords;

    /* store pre-computed information in the same cache line
     * to facilitate inlining of QRead and QWrite functions 
     * in IxQMgr.h
     */
    ixQMgrQInlinedReadWriteInfo[qId].qReadCount = 0;
    ixQMgrQInlinedReadWriteInfo[qId].qWriteCount = 0;
    ixQMgrQInlinedReadWriteInfo[qId].qEntrySizeInWords = qEntrySizeInWords;
    ixQMgrQInlinedReadWriteInfo[qId].qSizeInEntries = 
		(UINT32)qSizeInWords / (UINT32)qEntrySizeInWords;

    /* Calculate the new freeSramAddress from the size of the queue
     * currently being configured.
     */
    freeSramAddress += (qSizeInWords * IX_QMGR_NUM_BYTES_PER_WORD);

    /* Get the virtual SRAM address */
    ixQMgrAqmIfBaseAddressGet (&aqmLocalBaseAddress);

    IX_OSAL_ASSERT((freeSramAddress - (aqmLocalBaseAddress + (IX_QMGR_QUEBUFFER_SPACE_OFFSET))) <=
	      IX_QMGR_QUE_BUFFER_SPACE_SIZE);

    /* The queue is now configured */
    cfgQueueInfo[qId].isConfigured = TRUE;

    ixOsalMutexUnlock(&ixQMgrQCfgMutex);

#ifndef NDEBUG
    /* Update statistics */
    stats.qStats[qId].isConfigured = TRUE;
    stats.qStats[qId].qName = cfgQueueInfo[qId].qName;
#endif
    return IX_SUCCESS;
}
Example #24
/**
 * @fn IxCryptoAccStatus ixCryptoPkeInit
 * @brief   Initialize RNG, HASH, and EAU units.
 */
IxCryptoAccStatus
ixCryptoPkeInit (void)
{
    if (initDone)        /* Check if PKE has been initialized */
    {
        return IX_CRYPTO_ACC_STATUS_FAIL;
    } /* end of if (initDone) */
    
    /* Mapping EAU base address */
    pkeBaseAddress[IX_CRYPTO_PKE_EAU] = (UINT32) IX_CRYPTO_ACC_MEM_MAP (
        IX_CRYPTO_PKE_EAU_BASE_ADDR, 
        IX_CRYPTO_PKE_EAU_SIZE);
        
    /* Assert if NULL */
    IX_OSAL_ASSERT (pkeBaseAddress[IX_CRYPTO_PKE_EAU]);
    
    /* Set flag to TRUE to indicate mem map success */
    pkeBaseAddrMappedFlag[IX_CRYPTO_PKE_EAU] = TRUE;
    
    /* Mapping RNG and HASH engine base address */
    pkeBaseAddress[IX_CRYPTO_PKE_RNG] = (UINT32) IX_CRYPTO_ACC_MEM_MAP (
        IX_CRYPTO_PKE_RNG_HASH_BASE_ADDR, 
        IX_CRYPTO_PKE_RNG_HASH_SIZE);
    
    /* Assert if NULL */
    IX_OSAL_ASSERT (pkeBaseAddress[IX_CRYPTO_PKE_RNG]);
   
    /* Set flag to TRUE to indicate mem map success */
    pkeBaseAddrMappedFlag[IX_CRYPTO_PKE_RNG] = TRUE;
    
    /* Mapping Hash engine base address */
    pkeBaseAddress[IX_CRYPTO_PKE_HASH] = pkeBaseAddress[IX_CRYPTO_PKE_RNG] 
        + IX_CRYPTO_PKE_HASH_BASE_ADDR_OFFSET;

    /* Initialize fast mutex for EAU */
    if (IX_CRYPTO_ACC_STATUS_SUCCESS != 
        IX_CRYPTO_ACC_FAST_MUTEX_INIT (&pkeFastMutex[IX_CRYPTO_PKE_EAU]))
    {
        /* Log error message in debugging mode */
        IX_CRYPTO_ACC_LOG (
            IX_OSAL_LOG_LVL_ERROR, 
            IX_OSAL_LOG_DEV_STDOUT,
            "Cannot init EAU fast mutex.\n", 
            0, 0, 0, 0, 0, 0);

        /* Release all resources which have been allocated.
         * Note: the return status of this function is ignored; whether it
         * succeeds or fails, we still return fail since the init failed.
         */
        ixCryptoPkeResourcesRelease ();
       
        return IX_CRYPTO_ACC_STATUS_FAIL;
    } /* end of if (IX_CRYPTO_ACC_FAST_MUTEX_INIT) */
    
    /* Set flag to TRUE to indicate mutex initialization success */   
    pkeFastMutexInitFlag[IX_CRYPTO_PKE_EAU] = TRUE;
    
    /* Initialize fast mutex for RNG */
    if (IX_CRYPTO_ACC_STATUS_SUCCESS != 
        IX_CRYPTO_ACC_FAST_MUTEX_INIT (&(pkeFastMutex[IX_CRYPTO_PKE_RNG])))
    {
        /* Log error message in debugging mode */
        IX_CRYPTO_ACC_LOG (
            IX_OSAL_LOG_LVL_ERROR, 
            IX_OSAL_LOG_DEV_STDOUT,
            "Cannot init RNG fast mutex.\n", 
            0, 0, 0, 0, 0, 0);
        
        /* Release all resources which have been allocated.
         * Note: the return status of this function is ignored; whether it
         * succeeds or fails, we still return fail since the init failed.
         */
        ixCryptoPkeResourcesRelease ();
                
        return IX_CRYPTO_ACC_STATUS_FAIL;
    } /* end of if (IX_CRYPTO_ACC_FAST_MUTEX_INIT) */

    /* Set flag to TRUE to indicate mutex initialization success */   
    pkeFastMutexInitFlag[IX_CRYPTO_PKE_RNG] = TRUE;
   
    /* Initialize fast mutex for HASH engine */
    if (IX_CRYPTO_ACC_STATUS_SUCCESS != 
        IX_CRYPTO_ACC_FAST_MUTEX_INIT (&(pkeFastMutex[IX_CRYPTO_PKE_HASH])))
    {
        /* Log error message in debugging mode */
        IX_CRYPTO_ACC_LOG (
            IX_OSAL_LOG_LVL_ERROR, 
            IX_OSAL_LOG_DEV_STDOUT,
            "Cannot init Hash fast mutex.\n", 
            0, 0, 0, 0, 0, 0);
        
        /* Release all resources which have been allocated.
         * Note: the return status of this function is ignored; whether it
         * succeeds or fails, we still return fail since the init failed.
         */
        ixCryptoPkeResourcesRelease ();
                
        return IX_CRYPTO_ACC_STATUS_FAIL;
    } /* end of if (IX_CRYPTO_ACC_FAST_MUTEX_INIT) */

    /* Set flag to TRUE to indicate mutex initialization success */   
    pkeFastMutexInitFlag[IX_CRYPTO_PKE_HASH] = TRUE;
    
    /* Initialize PKE EAU sub-module */
    if (IX_CRYPTO_ACC_STATUS_SUCCESS != ixCryptoPkeEauInit())
    {
        /* Log error message in debugging mode */
        IX_CRYPTO_ACC_LOG (
            IX_OSAL_LOG_LVL_ERROR, 
            IX_OSAL_LOG_DEV_STDOUT,
            "Cannot initialize EAU module.\n", 
            0, 0, 0, 0, 0, 0);

        /* Release all resources which have been allocated.
         * Note: the return status of this function is ignored; whether it
         * succeeds or fails, we still return fail since the init failed.
         */
        ixCryptoPkeResourcesRelease ();
                    
        return IX_CRYPTO_ACC_STATUS_FAIL;    
    } /* end of if(ixCryptoPkeEauInit()) */
    
    /* Initialize PKE HASH sub-module */
    if (IX_CRYPTO_ACC_STATUS_SUCCESS != ixCryptoPkeHashInit())
    {
        /* Log error message in debugging mode */
        IX_CRYPTO_ACC_LOG (
            IX_OSAL_LOG_LVL_ERROR, 
            IX_OSAL_LOG_DEV_STDOUT,
            "Cannot initialize HASH module.\n", 
            0, 0, 0, 0, 0, 0);
            
        /* Uninit PKE EAU sub-module if failed */
        ixCryptoPkeEauUninit ();

        /* Release all resources which have been allocated.
         * Note: the return status of this function is ignored; whether it
         * succeeds or fails, we still return fail since the init failed.
         */
        ixCryptoPkeResourcesRelease ();
        
        return IX_CRYPTO_ACC_STATUS_FAIL;    
    } /* end of if (ixCryptoPkeHashInit ()) */
    
    initDone = TRUE;
   
    return IX_CRYPTO_ACC_STATUS_SUCCESS;
} /* end of ixCryptoPkeInit() function */