//------------------------------------------------------------------------------
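/**
\brief  Event handler thread function

Waits on the kernel data semaphore with a 50 ms timeout and processes pending
events. Kernel-internal events are handled before user-to-kernel events.

\param[in,out]  arg                 Thread argument; pointer to the kernel
                                    event CAL instance (tEventkCalInstance).

\return The function returns the thread exit value (always NULL).
*/
//------------------------------------------------------------------------------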
static void * eventThread(void *arg)
{
    struct timespec         curTime, timeout;
    tEventkCalInstance*     pInstance = (tEventkCalInstance*)arg;

    while (!pInstance->fStopThread)
    {
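        // Build an absolute timeout 50 ms in the future so that the stop flag
        // is re-checked periodically even when no event is posted.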
        clock_gettime(CLOCK_REALTIME, &curTime);
        timeout.tv_sec = 0;
        timeout.tv_nsec = 50000 * 1000;
        TIMESPECADD(&timeout, &curTime);

        if (sem_timedwait(pInstance->semKernelData, &timeout) == 0)
        {
            /* Handle kernel-internal events first; they have higher priority. */
            if (eventkcal_getEventCountCircbuf(kEventQueueKInt) > 0)
            {
                eventkcal_processEventCircbuf(kEventQueueKInt);
            }
            else
            {
                if (eventkcal_getEventCountCircbuf(kEventQueueU2K) > 0)
                {
                    eventkcal_processEventCircbuf(kEventQueueU2K);
                }
            }
        }
    }

    // Acknowledge the stop request by resetting the flag
    pInstance->fStopThread = FALSE;

    return NULL;
}
//------------------------------------------------------------------------------
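/**
\brief  Process kernel event CAL queues

Checks the kernel-internal queue first and, only if it is empty, the
user-to-kernel queue. Events read from the kernel-internal queue are processed
directly if they address a kernel event sink; otherwise they are forwarded to
the kernel-to-user queue.

\ingroup module_eventkcal
*/
//------------------------------------------------------------------------------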
void eventkcal_process(void)
{
    tOplkError  ret;
    tEvent*     pEvent;
    size_t      readSize = sizeof(aRxBuffer_l);

    if (instance_l.fInitialized == FALSE)
        return;

    // Kernel-internal events are checked first because they have higher priority.
    if (eventkcal_getEventCountCircbuf(kEventQueueKInt) > 0)
    {
        ret = eventkcal_getEventCircbuf(kEventQueueKInt, aRxBuffer_l, &readSize);
        if (ret == kErrorOk)
        {
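            // The buffer contains the event structure followed by its argument
            // data; restore the argument size and pointer after reading.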
            pEvent = (tEvent*)aRxBuffer_l;
            pEvent->eventArgSize = (readSize - sizeof(tEvent));

            if (pEvent->eventArgSize > 0)
                pEvent->eventArg.pEventArg = &aRxBuffer_l[sizeof(tEvent)];
            else
                pEvent->eventArg.pEventArg = NULL;

            if (!eventSinkIsKernel(pEvent->eventSink))
            {
                // Events from the kernel-internal queue that are destined for
                // the user layer must be forwarded via the kernel-to-user queue.
                ret = eventkcal_postEventCircbuf(kEventQueueK2U, pEvent);
                if (ret != kErrorOk)
                {
                    tEventQueue eventQueue = kEventQueueK2U;
                    // Forward error to API
                    eventk_postError(kEventSourceEventk,
                                     ret,
                                     sizeof(eventQueue),
                                     &eventQueue);
                }
            }
            else
            {
                // Events from the kernel-internal queue to kernel event sinks
                // can be processed directly.
                ret = eventk_process(pEvent);
            }
        }
    }
    else
    {
        if (eventkcal_getEventCountCircbuf(kEventQueueU2K) > 0)
        {
            ret = eventkcal_processEventCircbuf(kEventQueueU2K);
        }
    }
}
//------------------------------------------------------------------------------
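/**
\brief  Get an event for the user layer

Waits up to 500 ms for the user wait event and copies the next pending event
into the provided buffer. The kernel-to-user queue is read with higher
priority than the user-internal queue.

\param[out]     pEvent_p            Buffer to receive the event.
\param[in,out]  pSize_p             Pointer to the size of the event buffer;
                                    updated with the size of the read event.

\ingroup module_eventkcal
*/
//------------------------------------------------------------------------------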
void eventkcal_getEventForUser(void* pEvent_p, size_t* pSize_p)
{
    tOplkError    error;
    BOOL          fRet;
    UINT32        timeout = 500;            // Wait timeout in milliseconds

    // Check parameter validity
    ASSERT(pEvent_p != NULL);
    ASSERT(pSize_p != NULL);

    if (!instance_l.fInitialized)
        return;

    fRet = NdisWaitEvent(&instance_l.userWaitEvent, timeout);

    // Nothing to fetch: no user event is pending, so reset the event and return.
    if (fRet && (instance_l.userEventCount == 0))
    {
        NdisResetEvent(&instance_l.userWaitEvent);
        return;
    }

    NdisResetEvent(&instance_l.userWaitEvent);

    // Kernel-to-user queue is processed with higher priority.
    if (eventkcal_getEventCountCircbuf(kEventQueueK2U) > 0)
    {
        NdisInterlockedDecrement(&instance_l.userEventCount);

        error = eventkcal_getEventCircbuf(kEventQueueK2U, pEvent_p, pSize_p);
        if ((error != kErrorOk) || (pEvent_p == NULL))
        {
            DEBUG_LVL_ERROR_TRACE("%s() Error reading K2U events %d!\n", __func__, error);
        }

        return;
    }
    else if (eventkcal_getEventCountCircbuf(kEventQueueUInt) > 0)
    {
        NdisInterlockedDecrement(&instance_l.userEventCount);

        error = eventkcal_getEventCircbuf(kEventQueueUInt, pEvent_p, pSize_p);
        if (error != kErrorOk)
        {
            DEBUG_LVL_ERROR_TRACE("%s() Error reading UINT events %d!\n", __func__, error);
            return;
        }
    }
}
//------------------------------------------------------------------------------
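/**
\brief  Event handler thread function (NDIS)

Runs at high priority and waits on the kernel wait event with a 50 ms timeout.
All pending kernel-internal events are drained first; afterwards at most one
user-to-kernel event is processed before the kernel-internal queue is checked
again.

\param[in,out]  pArg                Thread argument. Not used!
*/
//------------------------------------------------------------------------------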
static void eventThread(void* pArg)
{
    INT         timeout = 50;               // Wait timeout in milliseconds
    PKTHREAD    thread;
    BOOL        fRet = FALSE;

    UNUSED_PARAMETER(pArg);

    // Increase the priority of the event handler thread
    thread = KeGetCurrentThread();
    KeSetPriorityThread(thread, HIGH_PRIORITY);

    instance_l.fThreadIsRunning = TRUE;

    while (!instance_l.fStopThread)
    {
        fRet = NdisWaitEvent(&instance_l.kernelWaitEvent, timeout);
        if (fRet && (instance_l.kernelEventCount == 0))
        {
            // The event was signalled but no kernel event is pending;
            // reset it and keep waiting.
            NdisResetEvent(&instance_l.kernelWaitEvent);
            continue;
        }

        NdisResetEvent(&instance_l.kernelWaitEvent);

        /* Handle all pending kernel-internal events first; they have higher priority. */
        while (eventkcal_getEventCountCircbuf(kEventQueueKInt) > 0)
        {
            eventkcal_processEventCircbuf(kEventQueueKInt);
            NdisInterlockedDecrement(&instance_l.kernelEventCount);
        }

        // At most one user-to-kernel event is handled per loop iteration, so the
        // kernel-internal queue is checked again before the next one.
        if (eventkcal_getEventCountCircbuf(kEventQueueU2K) > 0)
        {
            eventkcal_processEventCircbuf(kEventQueueU2K);
            NdisInterlockedDecrement(&instance_l.kernelEventCount);
        }
    }

    instance_l.fThreadIsRunning = FALSE;
}