/**
 * \fn     tmr_Init
 * \brief  Init required handles
 *
 * Init required handles and module variables, create the init-queue and
 * operational-queue, and register as the context-engine client.
 *
 * \note
 * \param  hTimerModule - The queue object
 * \param  hOs          - Handle to Os Abstraction Layer
 * \param  hReport      - Handle to report module
 * \param  hContext     - Handle to context module
 * \return void
 * \sa
 */
void tmr_Init (TI_HANDLE hTimerModule, TI_HANDLE hOs, TI_HANDLE hReport, TI_HANDLE hContext)
{
    TTimerModule *pTmr = (TTimerModule *)hTimerModule;
    /* Offset of the queue-node-header inside a timer entry (required by the queue module) */
    TI_UINT32     uQueNodeOffset = TI_FIELD_OFFSET(TTimerInfo, tQueNodeHdr);

    /* Save handles of the modules this one uses */
    pTmr->hOs      = hOs;
    pTmr->hReport  = hReport;
    pTmr->hContext = hContext;

    /* Reset module state: not operational yet, no timers, no TWD init cycles counted */
    pTmr->bOperState    = TI_FALSE;
    pTmr->uTimersCount  = 0;
    pTmr->uTwdInitCount = 0;

    /* Create the Init and Operational queues used for timer-expiry events */
    pTmr->hInitQueue = que_Create (pTmr->hOs, pTmr->hReport, EXPIRY_QUE_SIZE, uQueNodeOffset);
    pTmr->hOperQueue = que_Create (pTmr->hOs, pTmr->hReport, EXPIRY_QUE_SIZE, uQueNodeOffset);

    /* Register to the context engine; expiry events are dispatched to tmr_HandleExpiry */
    pTmr->uContextId = context_RegisterClient (pTmr->hContext,
                                               tmr_HandleExpiry,
                                               hTimerModule,
                                               TI_TRUE,
                                               "TIMER",
                                               sizeof("TIMER"));
}
/*
 * Create and start one PLC scan thread.
 * Initializes the thread descriptor from its configuration object, sets up
 * the scan-time bookkeeping and handshake queues, spawns the thread, and
 * waits for it to report completion of its init phase. On any error the
 * function logs and returns without starting the thread.
 */
static void create_thread ( plc_sThread *tp, plc_sProctbl *ptp, plc_sProcess *pp )
{
  pwr_tStatus sts;
  long int phase;

  /* Bind the descriptor to its thread configuration object */
  tp->aref.Objid = ptp->thread;
  tp->init = ptp->init;
  tp->exec = ptp->exec;
  tp->first_scan = 1;
  tp->PlcThread = pwrb_PlcThread_Init(&sts, tp);
  tp->csup_lh = csup_Init(&sts, ptp->thread, tp->f_scan_time);
  /* Scan time: seconds (float) -> integer milliseconds, rounded to nearest */
  tp->i_scan_time = tp->f_scan_time * 1000.0 + 0.5;
  time_FloatToD(&tp->scan_time, tp->f_scan_time);
  tp->pp = pp;
  plc_inittimer(tp);
  tp->exit = FALSE;
  link_io_copy_areas(tp);
  /* Handshake queues: q_in carries commands to the thread, q_out its replies */
  que_Create(&sts, &tp->q_in);
  que_Create(&sts, &tp->q_out);
  sts = gdh_ObjidToName(ptp->thread, tp->name, sizeof(tp->name), cdh_mNName);
  if (EVEN(sts)) {
    errh_Error("Get name of thread object %s, %m", cdh_ObjidToString(NULL, ptp->thread, 1), sts);
    return;
  }
  /* NOTE(review): this overwrites tp->PlcThread assigned by pwrb_PlcThread_Init
     above with a direct pointer to the thread object — confirm that is intended */
  sts = gdh_ObjidToPointer(ptp->thread, (void *)&tp->PlcThread);
  if (EVEN(sts)) {
    errh_Error("Direct link to thread object \"%s\", %m", tp->name, sts);
    return;
  }
#if defined OS_LYNX && USE_RT_TIMER
  /* RT-timer builds: scan is paced by a semaphore posted every ScanMultiple ticks */
  sem_init(&tp->ScanSem, 0, 0);
  tp->ScanMultiple = tp->i_scan_time / (CLK_TCK/1000);
  tp->IntervalCount = tp->ScanMultiple;
#endif
  sts = thread_Create(&tp->tid, tp->name, (void *(*)())&plc_thread, tp);
  if (EVEN(sts)) {
    errh_Error("Creating thread \"%s\", %m", tp->name, sts);
    return;
  }

  /* Wait for thread to initialize: blocks until the thread posts phase 1 on q_out. */
  phase = (long int)que_Get(&sts, &tp->q_out, NULL, NULL);
  pwr_Assert(phase == 1);
}
/**
 * \fn     cmdHndlr_Init
 * \brief  Init required handles and registries
 *
 * Init required handles and module variables, create the commands-queue and
 * register as the context-engine client.
 *
 * \note
 * \param  pStadHandles - The driver modules handles
 * \return void
 * \sa
 */
void cmdHndlr_Init (TStadHandlesList *pStadHandles)
{
    TCmdHndlrObj *pCmdHndlr = (TCmdHndlrObj *)(pStadHandles->hCmdHndlr);
    TI_UINT32 uNodeHeaderOffset;

    /* Save handles of the other modules this one uses */
    pCmdHndlr->hReport = pStadHandles->hReport;
    pCmdHndlr->hContext = pStadHandles->hContext;

    /* Init the command-interpreter sub-module with the same handle list */
    cmdInterpret_Init (pCmdHndlr->hCmdInterpret, pStadHandles);

    /* The offset of the queue-node-header from the commands structure entry is needed by the queue */
    uNodeHeaderOffset = TI_FIELD_OFFSET(TConfigCommand, tQueNodeHdr);

    /* Create and initialize the commands queue.
       NOTE(review): hOs is read here but is not assigned in this function —
       presumably set earlier in cmdHndlr_Create; verify. */
    pCmdHndlr->hCmdQueue = que_Create (pCmdHndlr->hOs, pCmdHndlr->hReport, COMMANDS_QUE_SIZE, uNodeHeaderOffset);

    /* Register to the context engine and get the client ID.
       TI_FALSE: commands are handled in the external (user) context, not the driver task. */
    pCmdHndlr->uContextId = context_RegisterClient (pCmdHndlr->hContext,
                                                    cmdHndlr_HandleCommands,
                                                    (TI_HANDLE)pCmdHndlr,
                                                    TI_FALSE,
                                                    "COMMAND",
                                                    sizeof("COMMAND"));

    /* If reporting is available, route debug output away from the logger by default */
    if(pCmdHndlr->hReport != NULL)
    {
        os_setDebugOutputToLogger(TI_FALSE);
    }
}
/** * \fn txnQ_Open * \brief Register functional driver to TxnQ * * Called by each functional driver using the TxnQ. * Save driver's info and create its queues. * Perform in critical section to prevent preemption from TxnDone. * * \note * \param hTxnQ - The module's object * \param uFuncId - The calling functional driver * \param uNumPrios - The number of queues/priorities * \param fTxnQueueDoneCb - The callback to call upon full transaction completion. * \param hCbHandle - The callback handle * \return RES_OK / RES_ERROR * \sa txnQ_Close */ EMcpfRes txnQ_Open (handle_t hTxnQ, const McpU32 uFuncId, const McpU32 uNumPrios, const TTxnQueueDoneCb fTxnQueueDoneCb, const handle_t hCbHandle) { TTxnQObj *pTxnQ = (TTxnQObj*) hTxnQ; McpU32 uNodeHeaderOffset; McpU32 i; if (uFuncId > TXN_MAX_FUNCTIONS || uNumPrios > TXN_MAX_PRIORITY) { MCPF_REPORT_ERROR(pTxnQ->hMcpf, QUEUE_MODULE_LOG, ("%s: Invalid Params! uFuncId = %d, uNumPrios = %d\n", __FUNCTION__, uFuncId, uNumPrios)); return RES_ERROR; } MCPF_ENTER_CRIT_SEC (pTxnQ->hMcpf); /* Save functional driver info */ pTxnQ->aFuncInfo[uFuncId].uNumPrios = uNumPrios; pTxnQ->aFuncInfo[uFuncId].fTxnQueueDoneCb = fTxnQueueDoneCb; pTxnQ->aFuncInfo[uFuncId].hCbHandle = hCbHandle; /* Set state as running, since the chip init state is awake. */ pTxnQ->aFuncInfo[uFuncId].eState = FUNC_STATE_RUNNING; /* Create the functional driver's queues. 
*/ uNodeHeaderOffset = MCPF_FIELD_OFFSET(TTxnStruct, tTxnQNode); for (i = 0; i < uNumPrios; i++) { pTxnQ->aTxnQueues[uFuncId][i] = que_Create (pTxnQ->hMcpf, TXN_QUE_SIZE, uNodeHeaderOffset); if (pTxnQ->aTxnQueues[uFuncId][i] == NULL) { MCPF_REPORT_ERROR(pTxnQ->hMcpf, QUEUE_MODULE_LOG, ("%s: Queues creation failed!\n", __FUNCTION__)); return RES_ERROR; } } /* Update functions actual range (to optimize Txn selection loops - see txnQ_SelectTxn) */ if (uFuncId < pTxnQ->uMinFuncId) { pTxnQ->uMinFuncId = uFuncId; } if (uFuncId > pTxnQ->uMaxFuncId) { pTxnQ->uMaxFuncId = uFuncId; } MCPF_EXIT_CRIT_SEC (pTxnQ->hMcpf); MCPF_REPORT_INFORMATION(pTxnQ->hMcpf, QUEUE_MODULE_LOG, ("%s: Function %d registered successfully, uNumPrios = %d\n", __FUNCTION__, uFuncId, uNumPrios)); return RES_OK; }
/*
 * txnQ_Open: register a functional driver (uFuncId) with the TxnQ and create
 * its per-priority transaction queues. Runs inside a critical section to
 * prevent preemption from TxnDone. Returns TI_OK, or TI_NOK on bad params or
 * queue-creation failure (the critical section is released on all paths).
 */
TI_STATUS txnQ_Open (TI_HANDLE       hTxnQ,
                     TI_UINT32       uFuncId,
                     TI_UINT32       uNumPrios,
                     TTxnQueueDoneCb fTxnQueueDoneCb,
                     TI_HANDLE       hCbHandle)
{
    TTxnQObj     *pTxnQ = (TTxnQObj*) hTxnQ;
    TI_UINT32     uNodeHeaderOffset;
    TI_UINT32     i;

    /* uFuncId indexes aFuncInfo[]/aTxnQueues[] so it must be < MAX_FUNCTIONS */
    if (uFuncId >= MAX_FUNCTIONS || uNumPrios > MAX_PRIORITY)
    {
        TRACE2(pTxnQ->hReport, REPORT_SEVERITY_ERROR, ": Invalid Params! uFuncId = %d, uNumPrios = %d\n", uFuncId, uNumPrios);
        return TI_NOK;
    }

    context_EnterCriticalSection (pTxnQ->hContext);

    /* Save functional driver info */
    pTxnQ->aFuncInfo[uFuncId].uNumPrios       = uNumPrios;
    pTxnQ->aFuncInfo[uFuncId].fTxnQueueDoneCb = fTxnQueueDoneCb;
    pTxnQ->aFuncInfo[uFuncId].hCbHandle       = hCbHandle;
    pTxnQ->aFuncInfo[uFuncId].eState          = FUNC_STATE_STOPPED;

    /* Create the functional driver's queues (one per priority). */
    uNodeHeaderOffset = TI_FIELD_OFFSET(TTxnStruct, tTxnQNode);
    for (i = 0; i < uNumPrios; i++)
    {
        pTxnQ->aTxnQueues[uFuncId][i] = que_Create (pTxnQ->hOs, pTxnQ->hReport, TXN_QUE_SIZE, uNodeHeaderOffset);
        if (pTxnQ->aTxnQueues[uFuncId][i] == NULL)
        {
            TRACE0(pTxnQ->hReport, REPORT_SEVERITY_ERROR, ": Queues creation failed!\n");
            /* Release the lock before the error return */
            context_LeaveCriticalSection (pTxnQ->hContext);
            return TI_NOK;
        }
    }

    /* Update functions actual range (to optimize Txn selection loops - see txnQ_SelectTxn) */
    if (uFuncId < pTxnQ->uMinFuncId)
    {
        pTxnQ->uMinFuncId = uFuncId;
    }
    if (uFuncId > pTxnQ->uMaxFuncId)
    {
        pTxnQ->uMaxFuncId = uFuncId;
    }

    context_LeaveCriticalSection (pTxnQ->hContext);

    TRACE2(pTxnQ->hReport, REPORT_SEVERITY_INFORMATION, ": Function %d registered successfully, uNumPrios = %d\n", uFuncId, uNumPrios);

    return TI_OK;
}
/**
 * \fn     txMgmtQ_Init
 * \brief  Configure module with default settings
 *
 * Get other modules handles.
 * Init the Tx Mgmt queues.
 * Register as the context-engine client.
 *
 * \note
 * \param  pStadHandles - The driver modules handles
 * \return void
 * \sa
 */
void txMgmtQ_Init (TStadHandlesList *pStadHandles)
{
    TTxMgmtQ  *pTxMgmtQ       = (TTxMgmtQ *)(pStadHandles->hTxMgmtQ);
    TI_UINT32  uNodeHeaderOffset = TI_FIELD_OFFSET(TTxnStruct, tTxnQNode);
    int        iQue;

    /* Keep handles of the modules this one interacts with */
    pTxMgmtQ->hOs      = pStadHandles->hOs;
    pTxMgmtQ->hReport  = pStadHandles->hReport;
    pTxMgmtQ->hTxCtrl  = pStadHandles->hTxCtrl;
    pTxMgmtQ->hTxPort  = pStadHandles->hTxPort;
    pTxMgmtQ->hContext = pStadHandles->hContext;
    pTxMgmtQ->hTWD     = pStadHandles->hTWD;

    /* Defaults: mgmt port open (data queues disabled), SM and Tx-connection closed */
    pTxMgmtQ->bMgmtPortEnable = TI_TRUE;
    pTxMgmtQ->eSmState        = SM_STATE_CLOSE;
    pTxMgmtQ->eTxConnState    = TX_CONN_STATE_CLOSE;

    /* Build every mgmt queue; each starts idle and disabled by the SM */
    for (iQue = 0; iQue < NUM_OF_MGMT_QUEUES; iQue++)
    {
        pTxMgmtQ->aQueues[iQue] = que_Create (pTxMgmtQ->hOs,
                                              pTxMgmtQ->hReport,
                                              MGMT_QUEUES_DEPTH,
                                              uNodeHeaderOffset);
        if (pTxMgmtQ->aQueues[iQue] == NULL)
        {
            /* Allocation failed: report, release the module and abort init */
            TRACE0(pTxMgmtQ->hReport, REPORT_SEVERITY_CONSOLE , "Failed to create queue\n");
            WLAN_OS_REPORT(("Failed to create queue\n"));
            os_memoryFree (pTxMgmtQ->hOs, pTxMgmtQ, sizeof(TTxMgmtQ));
            return;
        }
        pTxMgmtQ->aQueueBusy[iQue]        = TI_FALSE;
        pTxMgmtQ->aQueueEnabledBySM[iQue] = TI_FALSE;
    }

    /* Register to the context engine and get the client ID */
    pTxMgmtQ->uContextId = context_RegisterClient (pTxMgmtQ->hContext,
                                                   txMgmtQ_QueuesNotEmpty,
                                                   (TI_HANDLE)pTxMgmtQ,
                                                   TI_TRUE,
                                                   "TX_MGMT",
                                                   sizeof("TX_MGMT"));

    TRACE0(pTxMgmtQ->hReport, REPORT_SEVERITY_INIT, ".....Tx Mgmt Queue configured successfully\n");
}
/*
 * txnQ_Init: save handles, create the TxnDone queue and init the bus driver.
 */
void txnQ_Init (TI_HANDLE hTxnQ, TI_HANDLE hOs, TI_HANDLE hReport, TI_HANDLE hContext)
{
    TTxnQObj  *pTxnQ = (TTxnQObj*)hTxnQ;
    TI_UINT32  uNodeHeaderOffset;

    pTxnQ->hOs      = hOs;
    pTxnQ->hReport  = hReport;
    pTxnQ->hContext = hContext;

    /* Create the TxnDone queue. */
    uNodeHeaderOffset = TI_FIELD_OFFSET(TTxnStruct, tTxnQNode);
    /* NOTE(review): unlike the sibling variant of this function, the que_Create
       result is not checked for NULL here — consider adding the same check */
    pTxnQ->hTxnDoneQueue = que_Create (pTxnQ->hOs, pTxnQ->hReport, TXN_DONE_QUE_SIZE, uNodeHeaderOffset);

    busDrv_Init (pTxnQ->hBusDrv, hReport);
}
/**
 * \fn     txnQ_Init
 * \brief  Init module
 *
 * Init required handles and module variables, and create the TxnDone-queue.
 *
 * \note
 * \param  hTxnQ - The module's object
 * \param  hMcpf - Handle to Os framework
 * \return void
 * \sa
 */
void txnQ_Init (handle_t hTxnQ, const handle_t hMcpf)
{
    TTxnQObj *pTxnQ = (TTxnQObj*)hTxnQ;
    /* Queue-node offset inside a transaction struct, needed by the queue module */
    McpU32    uTxnNodeOffset = MCPF_FIELD_OFFSET(TTxnStruct, tTxnQNode);

    pTxnQ->hMcpf = hMcpf;

    /* Create the queue that collects completed (TxnDone) transactions */
    pTxnQ->hTxnDoneQueue = que_Create (pTxnQ->hMcpf, TXN_DONE_QUE_SIZE, uTxnNodeOffset);
    if (pTxnQ->hTxnDoneQueue == NULL)
    {
        MCPF_REPORT_ERROR(pTxnQ->hMcpf, QUEUE_MODULE_LOG,
            ("%s: TxnDone queue creation failed!\n", __FUNCTION__));
    }

    busDrv_Init (pTxnQ->hBusDrv);
}
/*
 * txnQ_Init: store the OS/report/context handles, create the queue that holds
 * completed transactions (TxnDone), and initialize the bus driver.
 */
void txnQ_Init (TI_HANDLE hTxnQ, TI_HANDLE hOs, TI_HANDLE hReport, TI_HANDLE hContext)
{
    TTxnQObj  *pTxnQ = (TTxnQObj*)hTxnQ;
    /* Queue-node offset inside a transaction struct, needed by the queue module */
    TI_UINT32  uTxnNodeOffset = TI_FIELD_OFFSET(TTxnStruct, tTxnQNode);

    pTxnQ->hOs      = hOs;
    pTxnQ->hReport  = hReport;
    pTxnQ->hContext = hContext;

    /* Create the TxnDone queue; report (but don't abort) on failure */
    pTxnQ->hTxnDoneQueue = que_Create (pTxnQ->hOs, pTxnQ->hReport, TXN_DONE_QUE_SIZE, uTxnNodeOffset);
    if (pTxnQ->hTxnDoneQueue == NULL)
    {
        TRACE0(pTxnQ->hReport, REPORT_SEVERITY_ERROR, ": TxnDone queue creation failed!\n");
    }

    busDrv_Init (pTxnQ->hBusDrv, hReport);
}
/**
 * \fn     txDataQ_Init
 * \brief  Save required modules handles
 *
 * Save other modules handles, set module defaults, create the per-AC data
 * queues and the send-pace timer, and register as a context-engine client.
 *
 * \note
 * \param  pStadHandles - The driver modules handles
 * \return void
 * \sa
 */
void txDataQ_Init (TStadHandlesList *pStadHandles)
{
    TTxDataQ  *pTxDataQ = (TTxDataQ *)(pStadHandles->hTxDataQ);
    TI_UINT32  uNodeHeaderOffset = TI_FIELD_OFFSET(TTxnStruct, tTxnQNode);
    TI_UINT8   uQueId;

    /* save modules handles */
    pTxDataQ->hContext = pStadHandles->hContext;
    pTxDataQ->hTxCtrl  = pStadHandles->hTxCtrl;
    pTxDataQ->hOs      = pStadHandles->hOs;
    pTxDataQ->hReport  = pStadHandles->hReport;
    pTxDataQ->hTxMgmtQ = pStadHandles->hTxMgmtQ;
    pTxDataQ->hTWD     = pStadHandles->hTWD;

    /* Configures the Port Default status to Close */
    pTxDataQ->bDataPortEnable = TI_FALSE;

    /* Configures the LastQueId to zero => scheduler will start from Queue 1 */
    pTxDataQ->uLastQueId = 0;

    /* init the number of the Data queues to be used (one per access category) */
    pTxDataQ->uNumQueues = MAX_NUM_OF_AC;

    /* init the max size of the Data queues, per access category */
    pTxDataQ->aQueueMaxSize[QOS_AC_BE] = DATA_QUEUE_DEPTH_BE;
    pTxDataQ->aQueueMaxSize[QOS_AC_BK] = DATA_QUEUE_DEPTH_BK;
    pTxDataQ->aQueueMaxSize[QOS_AC_VI] = DATA_QUEUE_DEPTH_VI;
    pTxDataQ->aQueueMaxSize[QOS_AC_VO] = DATA_QUEUE_DEPTH_VO;

    /* Create the tx data queues */
    for (uQueId = 0; uQueId < pTxDataQ->uNumQueues; uQueId++)
    {
        pTxDataQ->aQueues[uQueId] = que_Create (pTxDataQ->hOs,
                                                pTxDataQ->hReport,
                                                pTxDataQ->aQueueMaxSize[uQueId],
                                                uNodeHeaderOffset);

        /* If any Queues' allocation failed, print error, free TxDataQueue module and exit */
        if (pTxDataQ->aQueues[uQueId] == NULL)
        {
            TRACE0(pTxDataQ->hReport, REPORT_SEVERITY_CONSOLE , "Failed to create queue\n");
            WLAN_OS_REPORT(("Failed to create queue\n"));
            os_memoryFree (pTxDataQ->hOs, pTxDataQ, sizeof(TTxDataQ));
            return;
        }

        /* Configure the Queues default values */
        pTxDataQ->aQueueBusy[uQueId]             = TI_FALSE;
        pTxDataQ->aNetStackQueueStopped[uQueId]  = TI_FALSE;
        pTxDataQ->aTxSendPaceThresh[uQueId]      = 1;
    }

    /* Timer used to pace Tx bursts to the firmware */
    pTxDataQ->hTxSendPaceTimer = tmr_CreateTimer (pStadHandles->hTimer);
    if (pTxDataQ->hTxSendPaceTimer == NULL)
    {
        TRACE0(pTxDataQ->hReport, REPORT_SEVERITY_ERROR, "txDataQ_Init(): Failed to create hTxSendPaceTimer!\n");
        return;
    }

    /* Register to the context engine and get the client ID */
    pTxDataQ->uContextId = context_RegisterClient (pTxDataQ->hContext,
                                                   txDataQ_RunScheduler,
                                                   (TI_HANDLE)pTxDataQ,
                                                   TI_TRUE,
                                                   "TX_DATA",
                                                   sizeof("TX_DATA"));
}
/**
 * \fn     twIf_Init
 * \brief  Init module
 *
 * - Init required handles and module variables
 * - Prepare the ELP sleep/awake register-write transactions
 * - Create the TxnDone-queue
 * - Register to TxnQ
 * - Register to context module
 *
 * \note
 * \param  hTwIf       - The module's object
 * \param  hXxx        - Handles to other modules
 * \param  fRecoveryCb - Callback function for recovery completed after TxnDone
 * \param  hRecoveryCb - Handle for fRecoveryCb
 * \return void
 * \sa
 */
void twIf_Init(TI_HANDLE hTwIf, TI_HANDLE hReport, TI_HANDLE hContext, TI_HANDLE hTimer, TI_HANDLE hTxnQ, TRecoveryCb fRecoveryCb, TI_HANDLE hRecoveryCb)
{
	TTwIfObj *pTwIf = (TTwIfObj *) hTwIf;
	TI_UINT32 uNodeHeaderOffset;
	TTxnStruct *pTxnHdr;	/* The ELP transactions header (as used in the TxnQ API) */

	/* Save handles of the other modules this one uses */
	pTwIf->hReport = hReport;
	pTwIf->hContext = hContext;
	pTwIf->hTimer = hTimer;
	pTwIf->hTxnQ = hTxnQ;
	pTwIf->fRecoveryCb = fRecoveryCb;
	pTwIf->hRecoveryCb = hRecoveryCb;

	/* Prepare ELP sleep transaction: a single-byte write of the SLEEP value
	   to the ELP control register */
	pTwIf->tElpTxnSleep.uElpData = ELP_CTRL_REG_SLEEP;
	pTxnHdr = &(pTwIf->tElpTxnSleep.tHdr);
	TXN_PARAM_SET(pTxnHdr, TXN_LOW_PRIORITY, TXN_FUNC_ID_WLAN, TXN_DIRECTION_WRITE, TXN_INC_ADDR)
	TXN_PARAM_SET_MORE(pTxnHdr, 0);	/* Sleep is the last transaction! */
	/* NOTE: Function id for single step will be replaced to 0 by the bus driver */
	TXN_PARAM_SET_SINGLE_STEP(pTxnHdr, 1);	/* ELP write is always single step (TxnQ is topped)! */
	BUILD_TTxnStruct(pTxnHdr, ELP_CTRL_REG_ADDR, &(pTwIf->tElpTxnSleep.uElpData), sizeof(TI_UINT8), NULL, NULL)

	/* Prepare ELP awake transaction: same register, AWAKE value, more
	   transactions may follow (MORE = 1) */
	pTwIf->tElpTxnAwake.uElpData = ELP_CTRL_REG_AWAKE;
	pTxnHdr = &(pTwIf->tElpTxnAwake.tHdr);
	TXN_PARAM_SET(pTxnHdr, TXN_LOW_PRIORITY, TXN_FUNC_ID_WLAN, TXN_DIRECTION_WRITE, TXN_INC_ADDR)
	TXN_PARAM_SET_MORE(pTxnHdr, 1);
	/* NOTE: Function id for single step will be replaced to 0 by the bus driver */
	TXN_PARAM_SET_SINGLE_STEP(pTxnHdr, 1);	/* ELP write is always single step (TxnQ is topped)! */
	BUILD_TTxnStruct(pTxnHdr, ELP_CTRL_REG_ADDR, &(pTwIf->tElpTxnAwake.uElpData), sizeof(TI_UINT8), NULL, NULL)

	/* Create the TxnDone queue.
	   NOTE(review): hOs is read here but not assigned in this function —
	   presumably set in twIf_Create; verify. */
	uNodeHeaderOffset = TI_FIELD_OFFSET(TTxnStruct, tTxnQNode);
	pTwIf->hTxnDoneQueue = que_Create(pTwIf->hOs, pTwIf->hReport, TXN_DONE_QUE_SIZE, uNodeHeaderOffset);
	if (pTwIf->hTxnDoneQueue == NULL) {
		TRACE0(pTwIf->hReport, REPORT_SEVERITY_ERROR, "twIf_Init: TxnDone queue creation failed!\n");
	}

	/* Register to the context engine and get the client ID */
	pTwIf->uContextId = context_RegisterClient(pTwIf->hContext, twIf_HandleTxnDone, hTwIf, TI_TRUE, "TWIF", sizeof("TWIF"));

	/* Allocate timer */
	pTwIf->hPendRestartTimer = tmr_CreateTimer(hTimer);
	if (pTwIf->hPendRestartTimer == NULL) {
		TRACE0(pTwIf->hReport, REPORT_SEVERITY_ERROR, "twIf_Init: Failed to create PendRestartTimer!\n");
		return;
	}
	pTwIf->bPendRestartTimerRunning = TI_FALSE;

	/* Register to TxnQ */
	txnQ_Open(pTwIf->hTxnQ, TXN_FUNC_ID_WLAN, TXN_NUM_PRIORITYS, (TTxnQueueDoneCb) twIf_TxnDoneCb, hTwIf);

	/* Restart TwIf and TxnQ modules */
	twIf_Restart(hTwIf);
}
/**
 * \fn     txMgmtQ_Init
 * \brief  Configure module with default settings
 *
 * Get other modules handles.
 * Init the Tx Mgmt queues (per link, per mgmt queue).
 * Register as the context-engine client.
 *
 * \note
 * \param  pStadHandles - The driver modules handles
 * \return void
 * \sa
 */
void txMgmtQ_Init (TStadHandlesList *pStadHandles)
{
    TTxMgmtQ *pTxMgmtQ = (TTxMgmtQ *)(pStadHandles->hTxMgmtQ);
    TI_UINT32 uNodeHeaderOffset = TI_FIELD_OFFSET(TTxnStruct, tTxnQNode);
    int uQueId;
    TMgmtLinkQ *pLinkQ;
    TI_UINT32 uHlid;

    /* configure modules handles */
    pTxMgmtQ->hOs      = pStadHandles->hOs;
    pTxMgmtQ->hReport  = pStadHandles->hReport;
    pTxMgmtQ->hTxCtrl  = pStadHandles->hTxCtrl;
    pTxMgmtQ->hTxDataQ = pStadHandles->hTxDataQ;
    pTxMgmtQ->hTxPort  = pStadHandles->hTxPort;
    pTxMgmtQ->hContext = pStadHandles->hContext;
    pTxMgmtQ->hTWD     = pStadHandles->hTWD;

    pTxMgmtQ->bMgmtPortEnable = TI_TRUE;  /* Port Default status is open (data-queues are disabled). */
    pTxMgmtQ->aMgmtAcBusy     = TI_FALSE; /* Init busy flag for the Mgmt access category (same for MGMT and EAPOL) */

    /*
     * init all queues in all links
     */
    for (uHlid = 0; uHlid < WLANLINKS_MAX_LINKS; uHlid++)
    {
        pLinkQ = &pTxMgmtQ->aMgmtLinkQ[uHlid]; /* Link queues */

        /* Per-link defaults: connection closed, SM closed, no pending event,
           not busy, not enabled */
        pLinkQ->eTxConnState = TX_CONN_STATE_CLOSE;
        pLinkQ->eState = SM_STATE_CLOSE; /* SM default state is CLOSE. */
        pLinkQ->bSendEvent_NotEmpty = TI_FALSE;
        pLinkQ->bBusy = TI_FALSE;    /* default is not busy */
        pLinkQ->bEnabled = TI_FALSE; /* default is not enabled */

        for (uQueId = 0; uQueId < NUM_OF_MGMT_QUEUES; uQueId++)
        {
            pLinkQ->aQueues[uQueId] = que_Create (pTxMgmtQ->hOs,
                                                  pTxMgmtQ->hReport,
                                                  LINK_MGMT_QUEUES_DEPTH,
                                                  uNodeHeaderOffset);

            /* If any Queues' allocation failed, print error, free TxMgmtQueue module and exit */
            if (pLinkQ->aQueues[uQueId] == NULL)
            {
                TRACE1(pTxMgmtQ->hReport, REPORT_SEVERITY_CONSOLE , "Failed to create queue for link %d\n", uHlid);
                WLAN_OS_REPORT(("Failed to create queue for link %d\n", uHlid));
                os_memoryFree (pTxMgmtQ->hOs, pTxMgmtQ, sizeof(TTxMgmtQ));
                return;
            }

            pLinkQ->aQenabled[uQueId] = TI_FALSE; /* Queue is disabled */
        }
    }
    pTxMgmtQ->uLastHlid = 0; /* scheduler starts from first link */

    /* Register to the context engine and get the client ID */
    pTxMgmtQ->uContextId = context_RegisterClient (pTxMgmtQ->hContext,
                                                   txMgmtQ_QueuesNotEmpty,
                                                   (TI_HANDLE)pTxMgmtQ,
                                                   TI_TRUE,
                                                   "TX_MGMT",
                                                   sizeof("TX_MGMT"));

    TRACE0(pTxMgmtQ->hReport, REPORT_SEVERITY_INIT, ".....Tx Mgmt Queue configured successfully\n");
}
/**
 * \fn     txDataQ_Init
 * \brief  Save required modules handles
 *
 * Save other modules handles, set module defaults, create the per-link
 * per-AC data queues and the send-pace timer, and register as a
 * context-engine client.
 *
 * \note
 * \param  pStadHandles - The driver modules handles
 * \return void
 * \sa
 */
void txDataQ_Init (TStadHandlesList *pStadHandles)
{
    TTxDataQ  *pTxDataQ = (TTxDataQ *)(pStadHandles->hTxDataQ);
    TI_UINT32  uNodeHeaderOffset = TI_FIELD_OFFSET(TTxnStruct, tTxnQNode);
    TI_UINT8   uQueId;
    TDataLinkQ *pLinkQ;
    TI_UINT32  uHlid;

    /* save modules handles */
    pTxDataQ->hContext = pStadHandles->hContext;
    pTxDataQ->hTxCtrl  = pStadHandles->hTxCtrl;
    pTxDataQ->hOs      = pStadHandles->hOs;
    pTxDataQ->hReport  = pStadHandles->hReport;
    pTxDataQ->hTxMgmtQ = pStadHandles->hTxMgmtQ;
    pTxDataQ->hTWD     = pStadHandles->hTWD;

    /* Configures the Port Default status to Close */
    pTxDataQ->bDataPortEnable = TI_FALSE;

    /* Configures the NextQueId to zero => scheduler will start from Queue 1 */
    pTxDataQ->uNextQueId = 0;
    pTxDataQ->uNextHlid = 0;

    /* init the number of the Data queues to be used (one per access category) */
    pTxDataQ->uNumQueues = MAX_NUM_OF_AC;

    /* init the max size of the Data queues, per access category */
    pTxDataQ->aQueueMaxSize[QOS_AC_BE] = DATA_QUEUE_DEPTH_BE;
    pTxDataQ->aQueueMaxSize[QOS_AC_BK] = DATA_QUEUE_DEPTH_BK;
    pTxDataQ->aQueueMaxSize[QOS_AC_VI] = DATA_QUEUE_DEPTH_VI;
    pTxDataQ->aQueueMaxSize[QOS_AC_VO] = DATA_QUEUE_DEPTH_VO;

    for (uQueId = 0; uQueId < pTxDataQ->uNumQueues; uQueId++)
    {
        pTxDataQ->aTxSendPaceThresh[uQueId] = 1;
    }

    /*
     * init all queues in all links
     */
    for (uHlid = 0; uHlid < WLANLINKS_MAX_LINKS; uHlid++)
    {
        pLinkQ = &pTxDataQ->aDataLinkQ[uHlid]; /* Link queues */

        pLinkQ->bBusy = TI_FALSE;    /* default is not busy */
        pLinkQ->bEnabled = TI_FALSE; /* default is not enabled */

        /* Create the tx data queues */
        for (uQueId = 0; uQueId < pTxDataQ->uNumQueues; uQueId++)
        {
            pLinkQ->aQueues[uQueId] = que_Create (pTxDataQ->hOs,
                                                  pTxDataQ->hReport,
                                                  pTxDataQ->aQueueMaxSize[uQueId],
                                                  uNodeHeaderOffset);

            /* If any Queues' allocation failed, print error, free TxDataQueue module and exit */
            if (pLinkQ->aQueues[uQueId] == NULL)
            {
                WLAN_OS_REPORT(("Failed to create queue\n"));
                os_memoryFree (pTxDataQ->hOs, pTxDataQ, sizeof(TTxDataQ));
                return;
            }

            /* Configure the Queues default values */
            pLinkQ->aNetStackQueueStopped[uQueId] = TI_FALSE;
        }
    }

    /* Init busy flag per AC (not also per link) */
    for (uQueId = 0; uQueId < pTxDataQ->uNumQueues; uQueId++)
    {
        pTxDataQ->aQueueBusy[uQueId] = TI_FALSE;
    }

    /* Timer used to pace Tx bursts to the firmware */
    pTxDataQ->hTxSendPaceTimer = tmr_CreateTimer (pStadHandles->hTimer);
    if (pTxDataQ->hTxSendPaceTimer == NULL)
    {
        return;
    }

    /* Register to the context engine and get the client ID */
    pTxDataQ->uContextId = context_RegisterClient (pTxDataQ->hContext,
                                                   txDataQ_RunScheduler,
                                                   (TI_HANDLE)pTxDataQ,
                                                   TI_TRUE,
                                                   "TX_DATA",
                                                   sizeof("TX_DATA"));
}