//---------------------------------------------------------------------------
//
// Function:    EplEventkRxSignalHandlerCb
//
// Description: callback of the shared-buffer signal handler; fetches one
//              complete event (tEplEvent header plus optional argument)
//              from the circular buffer into a stack buffer and hands it
//              to EplEventkProcess().
//
// Parameters:  pShbRxInstance_p = shared-buffer instance to read from
//              ulDataSize_p     = size hint from the caller; reused as the
//                                 out-parameter of ShbCirReadDataBlock()
//
// Returns:     void
//
//---------------------------------------------------------------------------

static void EplEventkRxSignalHandlerCb (
    tShbInstance pShbRxInstance_p,
    unsigned long ulDataSize_p)
{
tEplEvent  *pEvent;
tShbError   ShbError;
// holds the complete tEplEvent structure immediately followed by the
// (optional) event argument
BYTE        abDataBuffer[sizeof(tEplEvent) + EPL_MAX_EVENT_ARG_SIZE];

    BENCHMARK_MOD_27_RESET(0);

    // fetch the next event block from the circular buffer;
    // ulDataSize_p receives the number of bytes actually read
    ShbError = ShbCirReadDataBlock(pShbRxInstance_p,
                                   &abDataBuffer[0],
                                   sizeof(abDataBuffer),
                                   &ulDataSize_p);
    if (ShbError != kShbOk)
    {   // read failed -> drop this invocation
        goto Exit;
    }

    // the buffer begins with the event structure itself
    pEvent = (tEplEvent *) abDataBuffer;

    // whatever follows the header is the event argument
    pEvent->m_uiSize = (ulDataSize_p - sizeof(tEplEvent));
    pEvent->m_pArg = (pEvent->m_uiSize > 0) ?
                        &abDataBuffer[sizeof(tEplEvent)] : NULL;

    BENCHMARK_MOD_27_SET(0);

    // dispatch the event to its sink
    EplEventkProcess(pEvent);

Exit:
    return;
}
//---------------------------------------------------------------------------
//
// Function:    EplEventkRxSignalHandlerCb
//
// Description: callback of the shared-buffer signal handler; fetches one
//              complete event (tEplEvent header plus optional argument)
//              from the circular buffer into the preallocated instance
//              Rx buffer and hands it to EplEventkProcess(). A failed
//              read is reported via EplEventkPostError().
//
// Parameters:  pShbRxInstance_p = shared-buffer instance to read from
//              ulDataSize_p     = size hint from the caller; reused as the
//                                 out-parameter of ShbCirReadDataBlock()
//
// Returns:     void
//
//---------------------------------------------------------------------------

static void EplEventkRxSignalHandlerCb (
    tShbInstance pShbRxInstance_p,
    unsigned long ulDataSize_p)
{
tEplEvent  *pEvent;
tShbError   ShbError;
BYTE       *pbBuffer;

    TGT_DBG_SIGNAL_TRACE_POINT(20);

    // events are received into the preallocated instance buffer
    pbBuffer = &EplEventkInstance_g.m_abRxBuffer[0];

    BENCHMARK_MOD_27_RESET(0);

    // fetch the next event block from the circular buffer;
    // ulDataSize_p receives the number of bytes actually read
    ShbError = ShbCirReadDataBlock(pShbRxInstance_p,
                                   pbBuffer,
                                   sizeof(EplEventkInstance_g.m_abRxBuffer),
                                   &ulDataSize_p);
    if (ShbError != kShbOk)
    {   // report the failed read to the application, then drop the event
        EplEventkPostError(kEplEventSourceEventk,
                           kEplEventReadError,
                           sizeof (ShbError),
                           &ShbError);
        goto Exit;
    }

    // the buffer begins with the event structure itself
    pEvent = (tEplEvent *) pbBuffer;

    // whatever follows the header is the event argument
    pEvent->m_uiSize = (ulDataSize_p - sizeof(tEplEvent));
    pEvent->m_pArg = (pEvent->m_uiSize > 0) ?
                        &pbBuffer[sizeof(tEplEvent)] : NULL;

    BENCHMARK_MOD_27_SET(0);

    // dispatch the event to its sink
    EplEventkProcess(pEvent);

Exit:
    return;
}
//---------------------------------------------------------------------------
//
// Function:    EplEventkPost
//
// Description: posts an event to the queue selected by m_EventSink:
//              kernel-space sinks go to the internal kernel queue (or are
//              processed directly when no kernel queue is configured),
//              user-space sinks go to the kernel-to-user queue (or are
//              processed directly when shared buffers are disabled).
//              Event header and argument are written as separate chunks
//              so the argument data - not just its pointer - ends up in
//              the queue.
//
// Parameters:  pEvent_p = event to post; if m_pArg != NULL, m_uiSize bytes
//                         of argument data are copied behind the header
//
// Returns:     tEplKernel = kEplSuccessful, kEplEventPostError or
//                           kEplEventUnknownSink
//
//---------------------------------------------------------------------------

tEplKernel PUBLIC EplEventkPost(tEplEvent * pEvent_p)
{
tEplKernel Ret;
#if EPL_USE_SHAREDBUFF != FALSE
tShbError ShbError;
tShbCirChunk ShbCirChunk;
unsigned long ulDataSize;
unsigned int fBufferCompleted;
#endif

    Ret = kEplSuccessful;

    // the event must be posted via the queue buffer, because the argument
    // has to be copied into the buffer as well and not only its pointer

#if EPL_USE_SHAREDBUFF != FALSE
    // 2006/08/03 d.k.: Event and argument are posted as separate chunks to the event queue.
    // total block size = header + argument (argument only if one is attached)
    ulDataSize = sizeof(tEplEvent)
                 + ((pEvent_p->m_pArg != NULL) ? pEvent_p->m_uiSize : 0);
#endif

    // decide into which buffer the event has to be written
    switch(pEvent_p->m_EventSink)
    {
        // kernelspace modules
        case kEplEventSinkSync:
        case kEplEventSinkNmtk:
        case kEplEventSinkDllk:
        case kEplEventSinkDllkCal:
        case kEplEventSinkPdok:
        case kEplEventSinkPdokCal:
        case kEplEventSinkErrk:
        {
            BENCHMARK_MOD_27_SET(2);
#if (EPL_USE_SHAREDBUFF != FALSE) \
    && (EPL_EVENT_USE_KERNEL_QUEUE != FALSE)
            // post message: reserve a block of ulDataSize bytes in the
            // kernel-internal queue
            ShbError = ShbCirAllocDataBlock (EplEventkInstance_g.m_pShbKernelInternalInstance,
                                             &ShbCirChunk,
                                             ulDataSize);
            switch (ShbError)
            {
                case kShbOk:
                    break;

                case kShbBufferFull:
                {
                    // queue overrun is counted here and handled later in
                    // EplEventkProcess() (triggers kEplNmtEventInternComError)
                    EplEventkInstance_g.m_uiUserToKernelFullCount++;
                    Ret = kEplEventPostError;
                    goto Exit;
                }

                default:
                {
                    EPL_DBGLVL_EVENTK_TRACE("EplEventkPost(): ShbCirAllocDataBlock(U2K) -> 0x%X\n", ShbError);
                    Ret = kEplEventPostError;
                    goto Exit;
                }
            }
            // first chunk: the event header
            ShbError = ShbCirWriteDataChunk (EplEventkInstance_g.m_pShbKernelInternalInstance,
                                             &ShbCirChunk,
                                             pEvent_p,
                                             sizeof (tEplEvent),
                                             &fBufferCompleted);
            if (ShbError != kShbOk)
            {
                EPL_DBGLVL_EVENTK_TRACE("EplEventkPost(): ShbCirWriteDataChunk(U2K) -> 0x%X\n", ShbError);
                Ret = kEplEventPostError;
                goto Exit;
            }
            // second chunk: the argument data (only if the block is not
            // already complete, i.e. an argument was reserved above)
            if (fBufferCompleted == FALSE)
            {
                ShbError = ShbCirWriteDataChunk (EplEventkInstance_g.m_pShbKernelInternalInstance,
                                                 &ShbCirChunk,
                                                 pEvent_p->m_pArg,
                                                 (unsigned long) pEvent_p->m_uiSize,
                                                 &fBufferCompleted);
                if ((ShbError != kShbOk)
                    || (fBufferCompleted == FALSE))
                {
                    EPL_DBGLVL_EVENTK_TRACE("EplEventkPost(): ShbCirWriteDataChunk2(U2K) -> 0x%X\n", ShbError);
                    Ret = kEplEventPostError;
                    goto Exit;
                }
            }
#else
            // no kernel queue configured: process the event synchronously;
            // interrupts are disabled to guard against reentrant processing
#if EPL_EVENT_USE_KERNEL_QUEUE == FALSE
            EplTgtEnableGlobalInterrupt(FALSE);
#endif
            Ret = EplEventkProcess(pEvent_p);
#if EPL_EVENT_USE_KERNEL_QUEUE == FALSE
            EplTgtEnableGlobalInterrupt(TRUE);
#endif
#endif
            BENCHMARK_MOD_27_RESET(2);
            break;
        }

        // userspace modules
        case kEplEventSinkNmtu:
        case kEplEventSinkNmtMnu:
        case kEplEventSinkSdoAsySeq:
        case kEplEventSinkApi:
        case kEplEventSinkDlluCal:
        case kEplEventSinkErru:
        {
#if EPL_USE_SHAREDBUFF != FALSE
            // post message to the kernel-to-user queue
//            BENCHMARK_MOD_27_SET(3);    // 74 µs until reset
            ShbError = ShbCirAllocDataBlock (EplEventkInstance_g.m_pShbKernelToUserInstance,
                                             &ShbCirChunk,
                                             ulDataSize);
            if(ShbError != kShbOk)
            {
                EPL_DBGLVL_EVENTK_TRACE("EplEventkPost(): ShbCirAllocDataBlock(K2U) -> 0x%X\n", ShbError);
                Ret = kEplEventPostError;
                goto Exit;
            }
            // first chunk: the event header
            ShbError = ShbCirWriteDataChunk (EplEventkInstance_g.m_pShbKernelToUserInstance,
                                             &ShbCirChunk,
                                             pEvent_p,
                                             sizeof (tEplEvent),
                                             &fBufferCompleted);
            if(ShbError != kShbOk)
            {
                EPL_DBGLVL_EVENTK_TRACE("EplEventkPost(): ShbCirWriteDataChunk(K2U) -> 0x%X\n", ShbError);
                Ret = kEplEventPostError;
                goto Exit;
            }
            // second chunk: the argument data
            if (fBufferCompleted == FALSE)
            {
                ShbError = ShbCirWriteDataChunk (EplEventkInstance_g.m_pShbKernelToUserInstance,
                                                 &ShbCirChunk,
                                                 pEvent_p->m_pArg,
                                                 (unsigned long) pEvent_p->m_uiSize,
                                                 &fBufferCompleted);
                if ((ShbError != kShbOk)
                    || (fBufferCompleted == FALSE))
                {
                    EPL_DBGLVL_EVENTK_TRACE("EplEventkPost(): ShbCirWriteDataChunk2(K2U) -> 0x%X\n", ShbError);
                    Ret = kEplEventPostError;
                    goto Exit;
                }
            }
//            BENCHMARK_MOD_27_RESET(3);  // 82 µs until ShbCirGetReadDataSize() in EplEventu
#else
            // shared buffers disabled: call the user event handler directly
            Ret = EplEventuProcess(pEvent_p);
#endif
            break;
        }

        default:
        {
            Ret = kEplEventUnknownSink;
        }

    }// end of switch(pEvent_p->m_EventSink)

#if EPL_USE_SHAREDBUFF != FALSE
Exit:
#endif
    return Ret;
}
//---------------------------------------------------------------------------
//
// Function:    EplEventkProcess
//
// Description: dispatches one event to the kernel module selected by its
//              m_EventSink. Before dispatching it checks whether the
//              user-to-kernel queue has overrun; if so, it injects
//              kEplNmtEventInternComError into the NMT state machine,
//              resets the queue and discards the current event.
//              Processing errors of the sink modules are reported to the
//              API layer via EplEventkPostError().
//
// Parameters:  pEvent_p = event to process
//
// Returns:     tEplKernel = result of the sink's process function, or
//                           kEplEventUnknownSink for an unhandled sink
//
//---------------------------------------------------------------------------

tEplKernel PUBLIC EplEventkProcess(tEplEvent* pEvent_p)
{
tEplKernel Ret;
tEplEventSource EventSource;

    Ret = kEplSuccessful;

#if (EPL_USE_SHAREDBUFF != FALSE) \
    && (EPL_EVENT_USE_KERNEL_QUEUE != FALSE)
    // error handling if event queue is full
    if (EplEventkInstance_g.m_uiUserToKernelFullCount > 0)
    {   // UserToKernel event queue has run out of space -> kEplNmtEventInternComError
#if(((EPL_MODULE_INTEGRATION) & (EPL_MODULE_NMTK)) != 0)
    tEplEvent Event;
    tEplNmtEvent NmtEvent;
#endif

        // directly call NMTk process function, because event queue is full
#if(((EPL_MODULE_INTEGRATION) & (EPL_MODULE_NMTK)) != 0)
        // build a local kEplNmtEventInternComError event on the stack
        NmtEvent = kEplNmtEventInternComError;
        Event.m_EventSink = kEplEventSinkNmtk;
        Event.m_NetTime.m_dwNanoSec = 0;
        Event.m_NetTime.m_dwSec = 0;
        Event.m_EventType = kEplEventTypeNmtEvent;
        Event.m_pArg = &NmtEvent;
        Event.m_uiSize = sizeof(NmtEvent);
        Ret = EplNmtkProcess(&Event);
#endif

        // NMT state machine changed to reset (i.e. NMT_GS_RESET_COMMUNICATION)
        // now, it is safe to reset the counter and empty the event queue
        ShbCirResetBuffer (EplEventkInstance_g.m_pShbUserToKernelInstance, 1000, NULL);

        EplEventkInstance_g.m_uiUserToKernelFullCount = 0;
        TGT_DBG_SIGNAL_TRACE_POINT(22);

        // also discard the current event (it doesn't matter if we lose another event)
        goto Exit;
    }
#endif

    // check m_EventSink
    switch(pEvent_p->m_EventSink)
    {
        // NMT-Kernel-Modul
        case kEplEventSinkNmtk:
        {
#if(((EPL_MODULE_INTEGRATION) & (EPL_MODULE_NMTK)) != 0)
            Ret = EplNmtkProcess(pEvent_p);
            if ((Ret != kEplSuccessful) && (Ret != kEplShutdown))
            {
                EventSource = kEplEventSourceNmtk;

                // Error event for API layer
                EplEventkPostError(kEplEventSourceEventk,
                                   Ret,
                                   sizeof(EventSource),
                                   &EventSource);
            }
#endif
            BENCHMARK_MOD_27_RESET(0);
#if(((EPL_MODULE_INTEGRATION) & (EPL_MODULE_DLLK)) != 0)
            if ((pEvent_p->m_EventType == kEplEventTypeNmtEvent)
                && (*((tEplNmtEvent*)pEvent_p->m_pArg) == kEplNmtEventDllCeSoa))
            {
                BENCHMARK_MOD_27_SET(0);
                // NOTE(review): this inner #if repeats the enclosing DLLK
                // guard and is therefore redundant - presumably a leftover;
                // kept as-is.
#if(((EPL_MODULE_INTEGRATION) & (EPL_MODULE_DLLK)) != 0)
                // forward SoA event to DLLk module for cycle preprocessing
                Ret = EplDllkProcess(pEvent_p);
                if ((Ret != kEplSuccessful) && (Ret != kEplShutdown))
                {
                    EventSource = kEplEventSourceDllk;

                    // Error event for API layer
                    EplEventkPostError(kEplEventSourceEventk,
                                       Ret,
                                       sizeof(EventSource),
                                       &EventSource);
                }
#endif
                BENCHMARK_MOD_27_RESET(0);
            }
#endif
            break;
        }

        // events for Dllk module
        case kEplEventSinkDllk:
        {
#if(((EPL_MODULE_INTEGRATION) & (EPL_MODULE_DLLK)) != 0)
            Ret = EplDllkProcess(pEvent_p);
            if ((Ret != kEplSuccessful) && (Ret != kEplShutdown))
            {
                EventSource = kEplEventSourceDllk;

                // Error event for API layer
                EplEventkPostError(kEplEventSourceEventk,
                                   Ret,
                                   sizeof(EventSource),
                                   &EventSource);
            }
#endif
            break;
        }

        // events for DllkCal module
        case kEplEventSinkDllkCal:
        {
#if(((EPL_MODULE_INTEGRATION) & (EPL_MODULE_DLLK)) != 0)
            Ret = EplDllkCalProcess(pEvent_p);
            if ((Ret != kEplSuccessful) && (Ret != kEplShutdown))
            {
                EventSource = kEplEventSourceDllk;

                // Error event for API layer
                EplEventkPostError(kEplEventSourceEventk,
                                   Ret,
                                   sizeof(EventSource),
                                   &EventSource);
            }
#endif
            break;
        }

#if (((EPL_MODULE_INTEGRATION) & (EPL_MODULE_PDOK)) != 0)
        // events for PDO CAL module
        case kEplEventSinkPdokCal:
        {
            Ret = EplPdokCalProcess(pEvent_p);
            if ((Ret != kEplSuccessful) && (Ret != kEplShutdown))
            {
                EventSource = kEplEventSourcePdok;

                // Error event for API layer
                EplEventkPostError(kEplEventSourceEventk,
                                   Ret,
                                   sizeof(EventSource),
                                   &EventSource);
            }
            break;
        }
#endif

#if(((EPL_MODULE_INTEGRATION) & (EPL_MODULE_DLLK)) != 0)
        // events for Error handler module
        case kEplEventSinkErrk:
        {
            // only call error handler if DLL is present
            Ret = EplErrorHandlerkProcess(pEvent_p);
            if ((Ret != kEplSuccessful) && (Ret != kEplShutdown))
            {
                EventSource = kEplEventSourceErrk;

                // Error event for API layer
                EplEventkPostError(kEplEventSourceEventk,
                                   Ret,
                                   sizeof(EventSource),
                                   &EventSource);
            }
            break;
        }
#endif

        // unknown sink
        default:
        {
            Ret = kEplEventUnknownSink;

            // Error event for API layer
            EplEventkPostError(kEplEventSourceEventk,
                               Ret,
                               sizeof(pEvent_p->m_EventSink),
                               &pEvent_p->m_EventSink);
        }

    } // end of switch(pEvent_p->m_EventSink)

#if (EPL_USE_SHAREDBUFF != FALSE) \
    && (EPL_EVENT_USE_KERNEL_QUEUE != FALSE)
Exit:
#endif
    return Ret;
}