/** ============================================================================ * @n@b Setup_Tx * * @b Description * @n This API sets up all relevant data structures and configuration required * for sending data to PASS/Ethernet. It sets up a Tx free descriptor queue, * PASS Tx queues required for send. * * @param[in] * @n None * * @return Int32 * -1 - Error * 0 - Success * ============================================================================= */ Int32 Setup_Tx (Void) { UInt8 isAllocated; Qmss_Queue qInfo; Ptr pCppiDesc; UInt32 i; /* Open all Transmit (Tx) queues. * * These queues are used to send data to PA PDSP/CPSW. */ for (i = 0; i < NUM_PA_TX_QUEUES; i ++) { if ((gPaTxQHnd[i] = Qmss_queueOpen (Qmss_QueueType_PASS_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated)) < 0) { uart_write ("Error opening PA Tx queue \n"); return -1; } else { //platform_write("opened TX queue for PA %d\n",gPaTxQHnd[i]); } } /* Open a Tx Free Descriptor Queue (Tx FDQ). * * This queue will be used to hold Tx free decriptors that can be filled * later with data buffers for transmission onto wire. */ if ((gTxFreeQHnd = Qmss_queueOpen (Qmss_QueueType_STARVATION_COUNTER_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated)) < 0) { uart_write ("Error opening Tx Free descriptor queue \n"); return -1; } else { //platform_write("opened TX Free queue for PA %d\n",gTxFreeQHnd); } qInfo = Qmss_getQueueNumber (gTxFreeQHnd); /* Attach some free descriptors to the Tx free queue we just opened. */ for (i = 0; i < NUM_TX_DESC; i++) { /* Get a free descriptor from the global free queue we setup * during initialization. */ if ((pCppiDesc = Qmss_queuePop (gGlobalFreeQHnd)) == NULL) { break; } /* The descriptor address returned from the hardware has the * descriptor size appended to the address in the last 4 bits. * * To get the true descriptor size, always mask off the last * 4 bits of the address. 
*/ pCppiDesc = (Ptr) ((UInt32) pCppiDesc & 0xFFFFFFF0); /* Setup the Completion queue: * * Setup the return policy for this desc to return to the free q we just * setup instead of the global free queue. */ Cppi_setReturnQueue ((Cppi_DescType) Cppi_DescType_HOST, pCppiDesc, qInfo); /* Push descriptor to Tx free queue */ Qmss_queuePushDescSize (gTxFreeQHnd, pCppiDesc, SIZE_CPSW_HOST_DESC); } if (i != NUM_TX_DESC) { uart_write ("Error allocating Tx free descriptors. only %d queues allotted \n",Qmss_getQueueEntryCount(gTxFreeQHnd)); return -1; } //count=Qmss_getQueueEntryCount(gTxFreeQHnd); //platform_write("Total %d found entries in queue %d\n",count,gTxFreeQHnd); /* All done with Rx configuration. Return success. */ return 0; }
/** ============================================================================
 *  @n@b Setup_Rx
 *
 *  @b Description
 *  @n This API sets up all relevant data structures and configuration required
 *     for receiving data from PASS/Ethernet. It sets up a Rx free descriptor
 *     queue with some empty pre-allocated buffers to receive data, and an Rx
 *     queue to which the Rxed data is streamed for the example application.
 *     This API also sets up the QM high priority accumulation interrupts
 *     required to receive data from the Rx queue.
 *
 *  @param[in]
 *  @n pThis        Ethernet instance; its Rx event handle, data pointers and
 *                  interrupt object are initialized here.
 *
 *  @return     Int32
 *              -1      -   Error
 *              0       -   Success
 * =============================================================================
 */
Int32 Setup_Rx (Ethernet *pThis)
{
    Int32               result;
    UInt8               isAllocated, accChannelNum;
    UInt16              numAccEntries, intThreshold, i;
    Qmss_Queue          rxFreeQInfo, rxQInfo;
    Ptr                 pCppiDesc;
    Qmss_AccCmdCfg      accCfg;
    Cppi_RxFlowCfg      rxFlowCfg;
    Ptr                 pDataBuffer;
    Error_Block         eb;
    Uint32              mySWInfo[] = {0x11112222, 0x33334444};

    /* Open a Receive (Rx) queue.
     *
     * This queue will be used to hold all the packets received by PASS/CPSW
     *
     * Open the next available High Priority Accumulation queue for Rx.
     */
    if ((gRxQHnd = Qmss_queueOpen (Qmss_QueueType_HIGH_PRIORITY_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated)) < 0)
    {
        uart_write ("Error opening a High Priority Accumulation Rx queue \n");
        return -1;
    }
    rxQInfo = Qmss_getQueueNumber (gRxQHnd);
    uart_write ("Opened RX queue Number %d \n",rxQInfo.qNum);

    /* Setup high priority accumulation interrupts on the Rx queue.
     *
     * Let's configure the accumulator with the following settings:
     *      (1) Interrupt pacing disabled.
     *      (2) Interrupt on every received packet
     */
    intThreshold    =   RX_INT_THRESHOLD;
    /* Ping/pong list: two pages of (threshold + 1) entries each. */
    numAccEntries   =   (intThreshold + 1) * 2;
    accChannelNum   =   PA_ACC_CHANNEL_NUM;

    /* Initialize the accumulator list memory (entries are 4 bytes each,
     * Qmss_AccEntrySize_REG_D). */
    memset ((Void *) gHiPriAccumList, 0, numAccEntries * 4);

    /* Ensure that the accumulator channel we are programming is not
     * in use currently.
     */
    result = Qmss_disableAccumulator (Qmss_PdspId_PDSP1, accChannelNum);
    if (result != QMSS_ACC_SOK && result != QMSS_ACC_CHANNEL_NOT_ACTIVE)
    {
        uart_write ("Error Disabling high priority accumulator for channel : %d error code: %d\n",
                      accChannelNum, result);
        return -1;
    }

    /* Setup the accumulator settings */
    accCfg.channel             =   accChannelNum;
    accCfg.command             =   Qmss_AccCmd_ENABLE_CHANNEL;
    accCfg.queueEnMask         =   0;
    accCfg.listAddress         =   Convert_CoreLocal2GlobalAddr((Uint32) gHiPriAccumList);
    accCfg.queMgrIndex         =   gRxQHnd;
    accCfg.maxPageEntries      =   (intThreshold + 1); /* Add an extra entry for holding the entry count */
    accCfg.timerLoadCount      =   0;
    accCfg.interruptPacingMode =   Qmss_AccPacingMode_LAST_INTERRUPT;
    accCfg.listEntrySize       =   Qmss_AccEntrySize_REG_D;
    accCfg.listCountMode       =   Qmss_AccCountMode_ENTRY_COUNT;
    accCfg.multiQueueMode      =   Qmss_AccQueueMode_SINGLE_QUEUE;

    /* Program the accumulator */
    if ((result = Qmss_programAccumulator (Qmss_PdspId_PDSP1, &accCfg)) != QMSS_ACC_SOK)
    {
        uart_write ("Error Programming high priority accumulator for channel : %d queue : %d error code : %d\n",
                        accCfg.channel, accCfg.queMgrIndex, result);
        return -1;
    }

    /* Create the event the Rx task blocks on; posted from the Rx ISR. */
    Error_init(&eb);
    pThis->RxEventHandle = Event_create(NULL,&eb);
    if (pThis->RxEventHandle == NULL)
    {
        uart_write("Event create failed");
        return -1;
    }

    /* Reset the Rx data ring bookkeeping and hook up the Rx interrupt. */
    memset(pThis->pRxDataPtr,0,sizeof(pThis->pRxDataPtr));
    pThis->RxDataHead = 0;
    pThis->RxDataTail = 0;

    Intr_Init(&pThis->oEthIntr, INTR_ITEM_ETH_RX,(Intr_Handler)Cpsw_RxISR, (void*)pThis);

    /* Open a Rx Free Descriptor Queue (Rx FDQ).
     *
     * This queue will hold all the Rx free decriptors. These descriptors will be
     * used by the PASS CPDMA to hold data received via CPSW.
     */
    if ((gRxFreeQHnd = Qmss_queueOpen (Qmss_QueueType_STARVATION_COUNTER_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated)) < 0)
    {
        uart_write ("Error opening Rx Free descriptor queue \n");
        return -1;
    }
    rxFreeQInfo = Qmss_getQueueNumber (gRxFreeQHnd);
    uart_write("Opened RX Free queue Number %d\n",gRxFreeQHnd);

    /* Attach some free descriptors to the Rx free queue we just opened. */
    for (i = 0; i < NUM_RX_DESC; i++)
    {
        /* Get a free descriptor from the global free queue we setup
         * during initialization.
         */
        if ((pCppiDesc = Qmss_queuePop (gGlobalFreeQHnd)) == NULL)
        {
            break;
        }

        /* The descriptor address returned from the hardware has the
         * descriptor size appended to the address in the last 4 bits.
         *
         * To get the true descriptor size, always mask off the last
         * 4 bits of the address.
         */
        pCppiDesc = (Ptr) ((UInt32) pCppiDesc & 0xFFFFFFF0);

        if ((pDataBuffer = (Ptr) Memory_alloc((IHeap_Handle)heap2, ETHER_MAX_SIZE, 0, NULL)) == NULL)
        {
            uart_write ("Error allocating memory for Rx data buffer \n");
            /* FIX: return the popped descriptor to the global free queue so
             * it is not leaked when buffer allocation fails. */
            Qmss_queuePushDescSize (gGlobalFreeQHnd, pCppiDesc, SIZE_CPSW_HOST_DESC);
            break;
        }

        /* Populate the Rx free descriptor with the buffer we just allocated. */
        Cppi_setData (Cppi_DescType_HOST, pCppiDesc, (UInt8 *)Convert_CoreLocal2GlobalAddr((UInt32)pDataBuffer), ETHER_MAX_SIZE);

        /* Save original buffer information */
        Cppi_setOriginalBufInfo (Cppi_DescType_HOST, pCppiDesc, (UInt8 *)Convert_CoreLocal2GlobalAddr((UInt32)pDataBuffer), ETHER_MAX_SIZE);

        /* Setup the Completion queue:
         *
         * Setup the return policy for this desc to return to the free q we just
         * setup instead of the global free queue.
         */
        Cppi_setReturnQueue (Cppi_DescType_HOST, pCppiDesc, rxFreeQInfo);

        Cppi_setSoftwareInfo (Cppi_DescType_HOST, pCppiDesc, (UInt8 *) mySWInfo);

        Cppi_setPacketLen    (Cppi_DescType_HOST, pCppiDesc, ETHER_MAX_SIZE);

        /* Push descriptor to Rx free queue */
        Qmss_queuePushDescSize (gRxFreeQHnd, pCppiDesc, SIZE_CPSW_HOST_DESC);
    }
    if (i != NUM_RX_DESC)
    {
        uart_write ("Error allocating Rx free descriptors \n");
        return -1;
    }

    /* Setup a Rx Flow.
     *
     * A Rx flow encapsulates all relevant data properties that CPDMA would
     * have to know in order to succefully receive data.
     */
    /* Initialize the flow configuration */
    memset (&rxFlowCfg, 0, sizeof(Cppi_RxFlowCfg));

    /* Let CPPI pick the next available flow */
    rxFlowCfg.flowIdNum             =   CPPI_PARAM_NOT_SPECIFIED;

    rxFlowCfg.rx_dest_qmgr          =   rxQInfo.qMgr;
    rxFlowCfg.rx_dest_qnum          =   rxQInfo.qNum;
    rxFlowCfg.rx_desc_type          =   Cppi_DescType_HOST;

    rxFlowCfg.rx_ps_location        =   Cppi_PSLoc_PS_IN_DESC;
    rxFlowCfg.rx_psinfo_present     =   1;    /* Enable PS info */

    rxFlowCfg.rx_error_handling     =   0;    /* Drop the packet, do not retry on starvation by default */
    rxFlowCfg.rx_einfo_present      =   1;    /* EPIB info present */

    rxFlowCfg.rx_dest_tag_lo_sel    =   0;    /* Disable tagging */
    rxFlowCfg.rx_dest_tag_hi_sel    =   0;
    rxFlowCfg.rx_src_tag_lo_sel     =   0;
    rxFlowCfg.rx_src_tag_hi_sel     =   0;

    rxFlowCfg.rx_size_thresh0_en    =   0;    /* By default, we disable Rx Thresholds */
    rxFlowCfg.rx_size_thresh1_en    =   0;    /* By default, we disable Rx Thresholds */
    rxFlowCfg.rx_size_thresh2_en    =   0;    /* By default, we disable Rx Thresholds */
    rxFlowCfg.rx_size_thresh0       =   0x0;
    rxFlowCfg.rx_size_thresh1       =   0x0;
    rxFlowCfg.rx_size_thresh2       =   0x0;

    rxFlowCfg.rx_fdq0_sz0_qmgr      =   rxFreeQInfo.qMgr; /* Setup the Receive free queue for the flow */
    rxFlowCfg.rx_fdq0_sz0_qnum      =   rxFreeQInfo.qNum;
    rxFlowCfg.rx_fdq0_sz1_qnum      =   0x0;
    rxFlowCfg.rx_fdq0_sz1_qmgr      =   0x0;
    rxFlowCfg.rx_fdq0_sz2_qnum      =   0x0;
    rxFlowCfg.rx_fdq0_sz2_qmgr      =   0x0;
    rxFlowCfg.rx_fdq0_sz3_qnum      =   0x0;
    rxFlowCfg.rx_fdq0_sz3_qmgr      =   0x0;

    rxFlowCfg.rx_fdq1_qnum          =   rxFreeQInfo.qNum; /* Use the Rx Queue to pick descriptors */
    rxFlowCfg.rx_fdq1_qmgr          =   rxFreeQInfo.qMgr;
    rxFlowCfg.rx_fdq2_qnum          =   rxFreeQInfo.qNum; /* Use the Rx Queue to pick descriptors */
    rxFlowCfg.rx_fdq2_qmgr          =   rxFreeQInfo.qMgr;
    rxFlowCfg.rx_fdq3_qnum          =   rxFreeQInfo.qNum; /* Use the Rx Queue to pick descriptors */
    rxFlowCfg.rx_fdq3_qmgr          =   rxFreeQInfo.qMgr;

    /* Configure the Rx flow */
    if ((gRxFlowHnd = Cppi_configureRxFlow (gCpdmaHnd, &rxFlowCfg, &isAllocated)) == NULL)
    {
        uart_write ("Error configuring Rx flow \n");
        return -1;
    }

    /* All done with Rx configuration. Return success. */
    return 0;
}
/** * Init Queue Manager SUbSystem (QMSS) * - Configure QMSS Driver * - Define Memory regions * - */ void init_qmss(int useMsmc){ int i, result; Qmss_InitCfg qmss_initCfg; Cppi_CpDmaInitCfg cpdmaCfg; Qmss_GlobalConfigParams qmss_globalCfg; /* Descriptor base addresses */ void* data_desc_base = (void*)align((int)msmc_mem_base); void* ctrl_desc_base = (void*)align((int)data_desc_base + DATA_DESC_NUM*DATA_DESC_SIZE); void* trace_desc_base = (void*)align((int)ctrl_desc_base + CTRL_DESC_NUM*CTRL_DESC_SIZE); void* fftc_desc_base = (void*)align((int)trace_desc_base + TRACE_DESC_NUM*TRACE_DESC_SIZE); if(useMsmc){ data_mem_base = align((int)fftc_desc_base + FFTC_DESC_NUM*FFTC_DESC_SIZE); }else{ data_mem_base = align((int)ddr_mem_base); } /* Initialize QMSS Driver */ memset (&qmss_initCfg, 0, sizeof (Qmss_InitCfg)); /* Use internal linking RAM */ qmss_initCfg.linkingRAM0Base = 0; qmss_initCfg.linkingRAM0Size = 0; qmss_initCfg.linkingRAM1Base = 0; qmss_initCfg.maxDescNum = DATA_DESC_NUM + CTRL_DESC_NUM + TRACE_DESC_NUM + FFTC_DESC_NUM; qmss_initCfg.pdspFirmware[0].pdspId = Qmss_PdspId_PDSP1; qmss_initCfg.pdspFirmware[0].firmware = &acc48_le; qmss_initCfg.pdspFirmware[0].size = sizeof (acc48_le); /* Bypass hardware initialization as it is done within Kernel */ qmss_initCfg.qmssHwStatus = QMSS_HW_INIT_COMPLETE; qmss_globalCfg = qmssGblCfgParams; /* Convert address to Virtual address */ for(i=0;i < (int)qmss_globalCfg.maxQueMgrGroups;i++){ TranslateAddress(qmss_globalCfg.groupRegs[i].qmConfigReg, qmss_cfg_regs-CSL_QMSS_CFG_BASE, CSL_Qm_configRegs*); TranslateAddress(qmss_globalCfg.groupRegs[i].qmDescReg, qmss_cfg_regs-CSL_QMSS_CFG_BASE, CSL_Qm_descriptor_region_configRegs*); TranslateAddress(qmss_globalCfg.groupRegs[i].qmQueMgmtReg, qmss_cfg_regs-CSL_QMSS_CFG_BASE, CSL_Qm_queue_managementRegs*); TranslateAddress(qmss_globalCfg.groupRegs[i].qmQueMgmtProxyReg, qmss_cfg_regs-CSL_QMSS_CFG_BASE, CSL_Qm_queue_managementRegs*); TranslateAddress(qmss_globalCfg.groupRegs[i].qmQueStatReg, 
qmss_cfg_regs-CSL_QMSS_CFG_BASE, CSL_Qm_queue_status_configRegs*); TranslateAddress(qmss_globalCfg.groupRegs[i].qmStatusRAM, qmss_cfg_regs-CSL_QMSS_CFG_BASE, CSL_Qm_Queue_Status*); TranslateAddress(qmss_globalCfg.groupRegs[i].qmQueMgmtDataReg, qmss_cfg_regs-CSL_QMSS_DATA_BASE, CSL_Qm_queue_managementRegs*); /* not supported on k2 hardware, and not used by lld */ qmss_globalCfg.groupRegs[i].qmQueMgmtProxyDataReg = 0; } for(i=0;i < QMSS_MAX_INTD;i++) TranslateAddress(qmss_globalCfg.regs.qmQueIntdReg[i], qmss_cfg_regs-CSL_QMSS_CFG_BASE, CSL_Qm_intdRegs*); for(i=0;i < QMSS_MAX_PDSP;i++){ TranslateAddress(qmss_globalCfg.regs.qmPdspCmdReg[i], qmss_cfg_regs-CSL_QMSS_CFG_BASE, volatile uint32_t*); TranslateAddress(qmss_globalCfg.regs.qmPdspCtrlReg[i], qmss_cfg_regs-CSL_QMSS_CFG_BASE, CSL_PdspRegs*); TranslateAddress(qmss_globalCfg.regs.qmPdspIRamReg[i], qmss_cfg_regs-CSL_QMSS_CFG_BASE, volatile uint32_t*); } TranslateAddress(qmss_globalCfg.regs.qmLinkingRAMReg, qmss_cfg_regs-CSL_QMSS_CFG_BASE, volatile uint32_t*); TranslateAddress(qmss_globalCfg.regs.qmBaseAddr, qmss_cfg_regs-CSL_QMSS_CFG_BASE, void*); if ((result = Qmss_init (&qmss_initCfg, &qmss_globalCfg)) != QMSS_SOK){ printf ("initQmss: Error initializing Queue Manager SubSystem, Error code : %d\n", result); abort(); } if ((result = Qmss_start ()) != QMSS_SOK){ printf ("initQmss: Error starting Queue Manager SubSystem, Error code : %d\n", result); abort(); } Cppi_GlobalCPDMAConfigParams translatedCppiGblCpdmaCfgParams[Cppi_CpDma_LAST+1]; Cppi_GlobalConfigParams translatedCppiGblCfgParams = cppiGblCfgParams; translatedCppiGblCfgParams.cpDmaCfgs = translatedCppiGblCpdmaCfgParams; #define translateCpdma(reg, type) (translatedCppiGblCfgParams.reg = (type)(((int) cppiGblCfgParams.reg ) + cppi_regs - CPPI_BASE_REG)) Cppi_CpDma cpdma; for(cpdma = Cppi_CpDma_SRIO_CPDMA; cpdma <= Cppi_CpDma_LAST; cpdma++){ translatedCppiGblCfgParams.cpDmaCfgs[cpdma] = cppiGblCfgParams.cpDmaCfgs[cpdma]; 
translateCpdma(cpDmaCfgs[cpdma].gblCfgRegs, CSL_Cppidma_global_configRegs*); translateCpdma(cpDmaCfgs[cpdma].txChRegs, CSL_Cppidma_tx_channel_configRegs*); translateCpdma(cpDmaCfgs[cpdma].rxChRegs, CSL_Cppidma_rx_channel_configRegs*); translateCpdma(cpDmaCfgs[cpdma].txSchedRegs,CSL_Cppidma_tx_scheduler_configRegs*); translateCpdma(cpDmaCfgs[cpdma].rxFlowRegs, CSL_Cppidma_rx_flow_configRegs*); } if ((result = Cppi_init (&translatedCppiGblCfgParams)) != CPPI_SOK){ printf ("Error initializing CPPI LLD, Error code : %d\n", result); abort(); } /* Setup memory regions */ /* Setup DATA region */ result = setup_region( data_desc_base, DATA_DESC_SIZE, DATA_DESC_NUM, 0, DATA_REG_NUM); if (result) abort(); /* Setup CTRL region */ result = setup_region( ctrl_desc_base, CTRL_DESC_SIZE, CTRL_DESC_NUM, DATA_DESC_NUM, CTRL_REG_NUM); if (result) abort(); /* Setup TRACE region */ result = setup_region( trace_desc_base, TRACE_DESC_SIZE, TRACE_DESC_NUM, DATA_DESC_NUM+CTRL_DESC_NUM, TRACE_REG_NUM); if (result) abort(); /* Setup FFTC region */ result = setup_region( fftc_desc_base, FFTC_DESC_SIZE, FFTC_DESC_NUM, DATA_DESC_NUM+CTRL_DESC_NUM+TRACE_DESC_NUM, FFTC_REG_NUM); if (result) abort(); /* Setup the driver for this FFTC peripheral instance number. 
*/ /* Set up the FFTC CPDMA configuration */ memset (&cpdmaCfg, 0, sizeof (Cppi_CpDmaInitCfg)); cpdmaCfg.dmaNum = Cppi_CpDma_FFTC_A_CPDMA; /* Initialize FFTC CPDMA */ if ((hCppi[0] = Cppi_open (&cpdmaCfg)) == NULL){ printf ("Error initializing CPPI for FFTC CPDMA %d\n", cpdmaCfg.dmaNum); abort(); } /* Disable FFTC CDMA loopback */ if (Cppi_setCpdmaLoopback (hCppi[0], 0) != CPPI_SOK){ printf ("Error disabling loopback for FFTC CPDMA %d\n", cpdmaCfg.dmaNum); abort(); } memset (&cpdmaCfg, 0, sizeof (Cppi_CpDmaInitCfg)); cpdmaCfg.dmaNum = Cppi_CpDma_FFTC_B_CPDMA; if ((hCppi[1] = Cppi_open (&cpdmaCfg)) == NULL){ printf ("Error initializing CPPI for FFTC CPDMA %d\n", cpdmaCfg.dmaNum); abort(); } /* Disable FFTC CDMA loopback */ if (Cppi_setCpdmaLoopback (hCppi[1], 0) != CPPI_SOK){ printf ("Error disabling loopback for FFTC CPDMA %d\n", cpdmaCfg.dmaNum); abort(); } fftc_a_cfg_regs->CONFIG = 0; fftc_b_cfg_regs->CONFIG = 0; // CSL_FMK (FFTC_CONFIG_Q3_FLOWID_OVERWRITE, 0) | // CSL_FMK (FFTC_CONFIG_Q2_FLOWID_OVERWRITE, 0) | // CSL_FMK (FFTC_CONFIG_Q1_FLOWID_OVERWRITE, 0) | // CSL_FMK (FFTC_CONFIG_Q0_FLOWID_OVERWRITE, 0) | // CSL_FMK (FFTC_CONFIG_STARVATION_PERIOD, 0) | // CSL_FMK (FFTC_CONFIG_QUEUE_3_PRIORITY, 0) | // CSL_FMK (FFTC_CONFIG_QUEUE_2_PRIORITY, 0) | // CSL_FMK (FFTC_CONFIG_QUEUE_1_PRIORITY, 0) | // CSL_FMK (FFTC_CONFIG_QUEUE_0_PRIORITY, 0) | // CSL_FMK (FFTC_CONFIG_FFT_DISABLE, 0); /* Emptying Queues */ /* Tx FFTC */ Qmss_queueEmpty(QMSS_FFTC_A_QUEUE_BASE); for(i=QUEUE_FIRST; i<=QUEUE_LAST; i++){ Qmss_queueEmpty(i); } /* Populate free queues */ for(i=0; i<DATA_DESC_NUM; i++){ Cppi_Desc* mono_pkt = (Cppi_Desc *) ((int)data_desc_base + i*DATA_DESC_SIZE); Osal_DescBeginMemAccess(mono_pkt, DATA_DESC_SIZE); Qmss_Queue freeQueue = {0, QUEUE_FREE_DATA}; Cppi_setDescType( mono_pkt, Cppi_DescType_MONOLITHIC); Cppi_setDataOffset( Cppi_DescType_MONOLITHIC, mono_pkt, PACKET_HEADER); Cppi_setPacketLen( Cppi_DescType_MONOLITHIC, mono_pkt, DATA_DESC_SIZE); 
Cppi_setReturnQueue(Cppi_DescType_MONOLITHIC, mono_pkt, freeQueue); /* Sync Descriptor */ Osal_DescEndMemAccess(mono_pkt, DATA_DESC_SIZE); Qmss_queuePushDescSize(QUEUE_FREE_DATA, mono_pkt, DATA_DESC_SIZE); } for(i=0; i<CTRL_DESC_NUM; i++){ Cppi_Desc* mono_pkt = (Cppi_Desc *) ((int)ctrl_desc_base + i*CTRL_DESC_SIZE); Osal_DescBeginMemAccess(mono_pkt, CTRL_DESC_SIZE); Qmss_Queue freeQueue = {0, QUEUE_FREE_DATA}; Cppi_setDescType( mono_pkt, Cppi_DescType_MONOLITHIC); Cppi_setDataOffset( Cppi_DescType_MONOLITHIC, mono_pkt, PACKET_HEADER); Cppi_setPacketLen( Cppi_DescType_MONOLITHIC, mono_pkt, CTRL_DESC_SIZE); Cppi_setReturnQueue(Cppi_DescType_MONOLITHIC, mono_pkt, freeQueue); /* Sync Descriptor */ Osal_DescEndMemAccess(mono_pkt, CTRL_DESC_SIZE); Qmss_queuePushDescSize(QUEUE_FREE_CTRL, mono_pkt, CTRL_DESC_SIZE); } for(i=0; i<TRACE_DESC_NUM; i++){ Cppi_Desc* mono_pkt = (Cppi_Desc *) ((int)trace_desc_base + i*TRACE_DESC_SIZE); Osal_DescBeginMemAccess(mono_pkt, TRACE_DESC_SIZE); Qmss_Queue freeQueue = {0, QUEUE_FREE_DATA}; Cppi_setDescType( mono_pkt, Cppi_DescType_MONOLITHIC); Cppi_setDataOffset( Cppi_DescType_MONOLITHIC, mono_pkt, PACKET_HEADER); Cppi_setPacketLen( Cppi_DescType_MONOLITHIC, mono_pkt, TRACE_DESC_SIZE); Cppi_setReturnQueue(Cppi_DescType_MONOLITHIC, mono_pkt, freeQueue); /* Sync Descriptor */ Osal_DescEndMemAccess(mono_pkt, TRACE_DESC_SIZE); Qmss_queuePushDescSize(QUEUE_FREE_TRACE, mono_pkt, TRACE_DESC_SIZE); } for(i=0; i<FFTC_DESC_NUM; i++){ Cppi_Desc * host_pkt = (Cppi_Desc *) ((int)fftc_desc_base + i*FFTC_DESC_SIZE); Osal_DescBeginMemAccess(host_pkt, FFTC_DESC_SIZE); memset(host_pkt, 0, FFTC_DESC_SIZE); Qmss_Queue queue = {0, QUEUE_FREE_FFTC}; Cppi_setDescType( host_pkt, Cppi_DescType_HOST); Cppi_setReturnPolicy( Cppi_DescType_HOST, host_pkt, Cppi_ReturnPolicy_RETURN_BUFFER); Cppi_setReturnPushPolicy( Cppi_DescType_HOST, host_pkt, Qmss_Location_TAIL); Cppi_setPSLocation( Cppi_DescType_HOST, host_pkt, Cppi_PSLoc_PS_IN_DESC); Cppi_setReturnQueue( 
Cppi_DescType_HOST, host_pkt, queue); ((Cppi_HostDesc*)host_pkt)->nextBDPtr = 0; /* Sync Descriptor */ Osal_DescEndMemAccess(host_pkt, FFTC_DESC_SIZE); Qmss_queuePushDescSize(QUEUE_FREE_FFTC,host_pkt,FFTC_DESC_SIZE); } configureRxFlow(0); configureRxFlow(1); configureTxChan(0); configureTxChan(1); configureRxChan(0); configureRxChan(1); /* Finally, enable the Tx channel so that we can start sending * data blocks to FFTC engine. */ Cppi_channelEnable (hCppiTxChan[0]); Cppi_channelEnable (hCppiTxChan[1]); Cppi_channelEnable (hCppiRxChan[0]); Cppi_channelEnable (hCppiRxChan[1]); configureFFTRegs(fftc_a_cfg_regs); configureFFTRegs(fftc_b_cfg_regs); }