void drawUserMessage()
{
    const float fMessageTypeColors[NUM_DRAW_MESSAGE_TYPES][3] =
    {
        { 0, 1, 0 }, /*NOTIFICATION_MESSAGE*/
        { 1, 1, 0 }, /*WARNING_MESSAGE*/
        { 1, 0, 0 }, /*ERROR_MESSAGE*/
        { 1, 0, 0 }, /*FATAL_MESSAGE*/
    };

    if (isInKeyboardInputMode())
    {
        drawCenteredMessage(GLUT_BITMAP_TIMES_ROMAN_24, WIN_SIZE_Y * 4 / 5, getCurrentKeyboardInputMessage(), 0, 1, 0);
    }

    static XnUInt64 nStartShowMessage = 0;
    if (g_DrawConfig.bShowMessage)
    {
        g_DrawConfig.bShowMessage = false;
        xnOSGetTimeStamp(&nStartShowMessage);
    }

    XnUInt64 nNow;
    xnOSGetTimeStamp(&nNow);

    if (nNow - nStartShowMessage < 3000)
    {
        drawCenteredMessage(GLUT_BITMAP_TIMES_ROMAN_24, WIN_SIZE_Y * 4 / 5, g_csUserMessage,
            fMessageTypeColors[g_DrawConfig.messageType][0],
            fMessageTypeColors[g_DrawConfig.messageType][1],
            fMessageTypeColors[g_DrawConfig.messageType][2]);
    }
}
void XnAudioProcessor::ProcessWholePacket(const XnSensorProtocolResponseHeader* pHeader, const XnUChar* pData)
{
    xnOSEnterCriticalSection(&m_pDevicePrivateData->hAudioBufferCriticalSection);

    // take write packet
    XnUChar* pWritePacket = m_pDevicePrivateData->pAudioBuffer + (m_pDevicePrivateData->nAudioWriteIndex * m_pDevicePrivateData->nAudioPacketSize);

    if (m_bDeleteChannel)
    {
        XnUInt16* pSamples = (XnUInt16*)pData;
        XnUInt16* pSamplesEnd = (XnUInt16*)(pData + pHeader->nBufSize);
        XnUInt16* pOutput = (XnUInt16*)pWritePacket;

        while (pSamples < pSamplesEnd)
        {
            *pOutput = *pSamples;
            pOutput++;
            // skip a sample
            pSamples += 2;
        }
    }
    else
    {
        // copy data
        xnOSMemCopy(pWritePacket, pData, pHeader->nBufSize);
    }

    // mark timestamp
    m_pDevicePrivateData->pAudioPacketsTimestamps[m_pDevicePrivateData->nAudioWriteIndex] = GetTimeStamp(pHeader->nTimeStamp);

    if (m_nLastPacketID % 10 == 0)
    {
        XnUInt64 nSysTime;
        xnOSGetTimeStamp(&nSysTime);

        xnDumpFileWriteString(m_pDevicePrivateData->BandwidthDump, "%llu,%s,%d,%d\n", nSysTime, "Audio", -1, m_nBytesReceived);

        m_nBytesReceived = 0;
    }

    // move write index forward
    m_pDevicePrivateData->nAudioWriteIndex = (m_pDevicePrivateData->nAudioWriteIndex + 1) % m_pDevicePrivateData->nAudioBufferNumOfPackets;

    // if write index got to read index (end of buffer), move read index forward (and lose a packet)
    if (m_pDevicePrivateData->nAudioWriteIndex == m_pDevicePrivateData->nAudioReadIndex)
    {
        m_pDevicePrivateData->nAudioReadIndex = (m_pDevicePrivateData->nAudioReadIndex + 1) % m_pDevicePrivateData->nAudioBufferNumOfPackets;
    }

    xnOSLeaveCriticalSection(&m_pDevicePrivateData->hAudioBufferCriticalSection);

    xnDumpFileWriteBuffer(m_AudioInDump, pData, pHeader->nBufSize);

    if (m_pDevicePrivateData->pAudioCallback != NULL)
    {
        m_pDevicePrivateData->pAudioCallback(m_pDevicePrivateData->pAudioCallbackCookie);
    }
}
void XnSensorsManager::CleanUp()
{
    // Go over the sensors list. Each sensor that is not open by any session, and whose timeout
    // has passed, should be closed and removed.
    XnAutoCSLocker locker(m_hLock);

    XnUInt64 nNow;
    xnOSGetTimeStamp(&nNow);

    XnSensorsHash::Iterator it = m_sensors.Begin();
    while (it != m_sensors.End())
    {
        XnSensorsHash::Iterator curr = it;
        ++it;

        ReferencedSensor& sensor = curr->Value();
        if (sensor.nRefCount == 0)
        {
            // if the timeout has passed, or the device was disconnected, remove this sensor
            XnUInt64 nErrorState = XN_STATUS_OK;
            sensor.pInvoker->GetIntProperty(XN_MODULE_NAME_DEVICE, XN_MODULE_PROPERTY_ERROR_STATE, &nErrorState);

            if (nErrorState == XN_STATUS_DEVICE_NOT_CONNECTED ||
                (nNow - sensor.nNoClientsTime) > m_noClientTimeout.GetValue())
            {
                xnLogInfo(XN_MASK_SENSOR_SERVER, "No session is holding sensor '%s'. Shutting down the sensor...", curr->Key());
                XN_DELETE(sensor.pInvoker);
                m_sensors.Remove(curr);
            }
        }
    }
}
void getCaptureMessage(char* pMessage)
{
    switch (g_Capture.State)
    {
    case SHOULD_CAPTURE:
        {
            XnUInt64 nNow;
            xnOSGetTimeStamp(&nNow);
            nNow /= 1000;

            sprintf(pMessage, "Capturing will start in %u seconds...", g_Capture.nStartOn - (XnUInt32)nNow);
        }
        break;
    case CAPTURING:
        {
            int nChars = sprintf(pMessage, "* Recording! Press any key or use menu to stop *\nRecorded Frames: ");
            for (int i = 0; i < CAPTURE_STREAM_COUNT; ++i)
            {
                if (g_Capture.streams[i].bRecording)
                {
                    nChars += sprintf(pMessage + nChars, "%s-%d ", g_Capture.streams[i].name,
                        g_Capture.streams[i].getFrameFunc().getFrameIndex() - g_Capture.streams[i].startFrame);
                }
            }
        }
        break;
    default:
        pMessage[0] = 0;
    }
}
XnStatus XnSensorServer::RemoveSession(XnSessionsList::ConstIterator it)
{
    XnStatus nRetVal = XN_STATUS_OK;

    XnServerSession* pSession = *it;
    XnUInt32 nID = pSession->ID();

    xnLogVerbose(XN_MASK_SENSOR_SERVER, "Removing client %u...", nID);

    {
        XnAutoCSLocker locker(m_hSessionsLock);
        nRetVal = m_sessions.Remove(it);
        XN_IS_STATUS_OK(nRetVal);

        if (m_sessions.IsEmpty())
        {
            xnOSGetTimeStamp(&m_nLastSessionActivity);
        }
    }

    pSession->Free();
    XN_DELETE(pSession);

    xnLogVerbose(XN_MASK_SENSOR_SERVER, "Client %u removed", nID);

    return (XN_STATUS_OK);
}
void captureStart(int nDelay)
{
    if ('\0' == g_Capture.csFileName[0])
    {
        captureBrowse(0);

        // On some platforms the user can cancel capturing from the file dialog.
        // In that case csFileName remains empty.
        if ('\0' == g_Capture.csFileName[0])
        {
            return;
        }
    }

    openni::Status rc = g_Capture.recorder.create(g_Capture.csFileName);
    if (rc != openni::STATUS_OK)
    {
        displayError("Failed to create recorder!");
        return;
    }

    XnUInt64 nNow;
    xnOSGetTimeStamp(&nNow);
    nNow /= 1000;

    g_Capture.nStartOn = (XnUInt32)nNow + nDelay;
    g_Capture.State = SHOULD_CAPTURE;
}
void XnFrameStreamProcessor::OnEndOfFrame(const XnSensorProtocolResponseHeader* pHeader)
{
    // write dump
    XnBuffer* pCurWriteBuffer = m_pTripleBuffer->GetWriteBuffer();
    xnDumpWriteBuffer(m_InternalDump, pCurWriteBuffer->GetData(), pCurWriteBuffer->GetSize());
    xnDumpClose(&m_InternalDump);
    xnDumpClose(&m_InDump);

    if (!m_bFrameCorrupted)
    {
        // mark the buffer as stable
        XnUInt64 nTimestamp = GetTimeStamp(pHeader->nTimeStamp);
        XnUInt32 nFrameID;
        m_pTripleBuffer->MarkWriteBufferAsStable(nTimestamp, &nFrameID);

        // let inheriting classes do their stuff
        OnFrameReady(nFrameID, nTimestamp);
    }
    else
    {
        // restart
        m_pTripleBuffer->GetWriteBuffer()->Reset();
    }

    // log bandwidth
    XnUInt64 nSysTime;
    xnOSGetTimeStamp(&nSysTime);
    xnDumpWriteString(m_pDevicePrivateData->BandwidthDump, "%llu,%s,%d,%d\n", nSysTime, m_csName, GetCurrentFrameID(), m_nBytesReceived);

    // re-init dumps
    xnDumpInit(&m_InDump, m_csInDumpMask, NULL, "%s_%d.raw", m_csInDumpMask, GetCurrentFrameID());
    xnDumpInit(&m_InternalDump, m_csInternalDumpMask, NULL, "%s_%d.raw", m_csInternalDumpMask, GetCurrentFrameID());

    m_nBytesReceived = 0;
}
XnStatus xnOSWaitForCondition(const XN_EVENT_HANDLE EventHandle, XnUInt32 nMilliseconds, XnConditionFunc pConditionFunc, void* pConditionData)
{
    XnStatus nRetVal = XN_STATUS_OK;

    // take the start time (for timeout purposes)
    XnUInt64 nStarted;
    nRetVal = xnOSGetTimeStamp(&nStarted);
    XN_IS_STATUS_OK(nRetVal);

    XnBool bTimeout = FALSE;

    // as long as the condition isn't met
    while (!pConditionFunc(pConditionData))
    {
        // check if a timeout occurred
        XnUInt64 nNow;
        nRetVal = xnOSGetTimeStamp(&nNow);
        XN_IS_STATUS_OK(nRetVal);

        if (nNow - nStarted > nMilliseconds)
        {
            bTimeout = TRUE;
        }
        else
        {
            // not yet. Wait for the event to be set
            nRetVal = xnOSWaitEvent(EventHandle, (XnUInt32)(nMilliseconds - (nNow - nStarted)));
            if (nRetVal == XN_STATUS_OS_EVENT_TIMEOUT)
            {
                bTimeout = TRUE;
            }
            else if (nRetVal != XN_STATUS_OK)
            {
                xnLogWarning(XN_MASK_OS, "Failed waiting on event for condition...");
            }
        }

        if (bTimeout)
        {
            return (XN_STATUS_OS_EVENT_TIMEOUT);
        }
    }

    // condition was met
    return (XN_STATUS_OK);
}
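/*
 * Usage sketch, not taken from the original sources: one plausible way to call
 * xnOSWaitForCondition() above. The HasPendingFrames() callback, the event handle and
 * the pending-frames counter are hypothetical names introduced for illustration, and
 * XnConditionFunc is assumed to be a plain XnBool (*)(void*) callback, as the loop
 * above implies.
 */
static XnBool HasPendingFrames(void* pConditionData)
{
    // the condition data is an application-owned counter in this sketch
    volatile XnUInt32* pnPendingFrames = (volatile XnUInt32*)pConditionData;
    return (*pnPendingFrames > 0);
}

static XnStatus WaitForFrames(XN_EVENT_HANDLE hNewFrameEvent, volatile XnUInt32* pnPendingFrames)
{
    // wait up to 2 seconds, re-checking the condition every time the event is set
    return xnOSWaitForCondition(hNewFrameEvent, 2000, HasPendingFrames, (void*)pnPendingFrames);
}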
void drawUserMessage()
{
    static XnUInt64 nStartShowMessage = 0;
    if (g_DrawConfig.bShowMessage)
    {
        g_DrawConfig.bShowMessage = false;
        xnOSGetTimeStamp(&nStartShowMessage);
    }

    XnUInt64 nNow;
    xnOSGetTimeStamp(&nNow);

    if (nNow - nStartShowMessage < 3000)
    {
        drawCenteredMessage(GLUT_BITMAP_TIMES_ROMAN_24, WIN_SIZE_Y * 4 / 5, g_csUserMessage, 0, 1, 0);
    }
}
XnBool XnSensorServer::CanShutdown()
{
    XnUInt64 nNow;
    xnOSGetTimeStamp(&nNow);

    XnAutoCSLocker locker(m_hSessionsLock);
    return (!m_sensorsManager.HasOpenSensors() &&
        m_sessions.IsEmpty() &&
        (nNow - m_nLastSessionActivity) > m_sensorsManager.GetTimeout());
}
XN_C_API XnStatus xnOSGetHighResTimeStamp(XnUInt64* nTimeStamp)
{
    XnStatus nRetVal = XN_STATUS_OK;

    nRetVal = xnOSGetTimeStamp(nTimeStamp);
    XN_IS_STATUS_OK(nRetVal);

    // promote the millisecond timestamp to microsecond units
    *nTimeStamp *= 1000;

    return (XN_STATUS_OK);
}
XN_C_API XnStatus xnOSSleep(XnUInt32 nMilliseconds)
{
    XnStatus nRetVal = XN_STATUS_OK;

    // there is no OS sleep, so just busy-wait until the requested time has passed
    XnUInt64 nTill;
    nRetVal = xnOSGetTimeStamp(&nTill);
    XN_IS_STATUS_OK(nRetVal);

    nTill += nMilliseconds;

    XnUInt64 nNow;
    do
    {
        nRetVal = xnOSGetTimeStamp(&nNow);
        XN_IS_STATUS_OK(nRetVal);
    } while (nNow < nTill);

    // All is good...
    return (XN_STATUS_OK);
}
XN_C_API XnStatus xnSchedulerAddTask(XnScheduler* pScheduler, XnUInt64 nInterval, XnTaskCallbackFuncPtr pCallback, void* pCallbackArg, XnScheduledTask** ppTask)
{
    XnStatus nRetVal = XN_STATUS_OK;

    XN_VALIDATE_INPUT_PTR(pScheduler);
    XN_VALIDATE_INPUT_PTR(pCallback);
    XN_VALIDATE_OUTPUT_PTR(ppTask);

    // create node
    XnScheduledTask* pTask;
    XN_VALIDATE_ALLOC(pTask, XnScheduledTask);

    pTask->nInterval = nInterval;
    pTask->pCallback = pCallback;
    pTask->pCallbackArg = pCallbackArg;

    // calculate next execution
    XnUInt64 nNow;
    xnOSGetTimeStamp(&nNow);
    pTask->nNextTime = nNow + nInterval;
    pTask->pNextTask = NULL;

    // enter critical section
    nRetVal = xnOSEnterCriticalSection(&pScheduler->hCriticalSection);
    if (nRetVal != XN_STATUS_OK)
    {
        xnOSFree(pTask);
        return (nRetVal);
    }

    xnSchedulerAddTaskInternal(pScheduler, pTask);

    // leave critical section
    nRetVal = xnOSLeaveCriticalSection(&pScheduler->hCriticalSection);
    if (nRetVal != XN_STATUS_OK)
    {
        xnOSFree(pTask);
        return (nRetVal);
    }

    // notify that the list has changed
    nRetVal = xnOSSetEvent(pScheduler->hWakeThreadEvent);
    if (nRetVal != XN_STATUS_OK)
    {
        xnLogWarning(XN_MASK_SCHEDULER, "Failed setting event when adding task: %s", xnGetStatusString(nRetVal));
    }

    *ppTask = pTask;

    return (XN_STATUS_OK);
}
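/*
 * Usage sketch, not taken from the original sources: registers a periodic task through
 * xnSchedulerAddTask() above. KeepAliveTask(), the "keep-alive" cookie and the 1000 ms
 * interval are hypothetical; the scheduler pointer is assumed to have been created
 * elsewhere, and the callback is assumed to be a plain void (*)(void*) function, as the
 * scheduler thread's pCallback(pCallbackArg) call implies.
 */
static void KeepAliveTask(void* pCallbackArg)
{
    // in this sketch the callback argument is just a tag string to log
    xnLogVerbose(XN_MASK_SCHEDULER, "Periodic task fired: %s", (const XnChar*)pCallbackArg);
}

static XnStatus RegisterKeepAlive(XnScheduler* pScheduler, XnScheduledTask** ppTask)
{
    // run KeepAliveTask roughly once a second, starting one interval from now
    return xnSchedulerAddTask(pScheduler, 1000, KeepAliveTask, (void*)"keep-alive", ppTask);
}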
void XnFrameStreamProcessor::OnEndOfFrame(const XnSensorProtocolResponseHeader* pHeader)
{
    // write dump
    XnBuffer* pCurWriteBuffer = m_pTripleBuffer->GetWriteBuffer();
    xnDumpFileWriteBuffer(m_InternalDump, pCurWriteBuffer->GetData(), pCurWriteBuffer->GetSize());
    xnDumpFileClose(m_InternalDump);
    xnDumpFileClose(m_InDump);

    if (!m_bFrameCorrupted)
    {
        // mark the buffer as stable
        XnUInt64 nTimestamp;
        if (m_pDevicePrivateData->pSensor->ShouldUseHostTimestamps())
        {
            // use the host timestamp of the first packet
            nTimestamp = m_nFirstPacketTimestamp;
        }
        else
        {
            // use timestamp in last packet
            nTimestamp = CreateTimestampFromDevice(pHeader->nTimeStamp);
        }

        OniFrame* pFrame = m_pTripleBuffer->GetWriteFrame();
        pFrame->timestamp = nTimestamp;

        XnUInt32 nFrameID;
        m_pTripleBuffer->MarkWriteBufferAsStable(&nFrameID);

        // let inheriting classes do their stuff
        OnFrameReady(nFrameID, nTimestamp);
    }
    else
    {
        // restart
        m_pTripleBuffer->GetWriteBuffer()->Reset();
    }

    // log bandwidth
    XnUInt64 nSysTime;
    xnOSGetTimeStamp(&nSysTime);
    xnDumpFileWriteString(m_pDevicePrivateData->BandwidthDump, "%llu,%s,%d,%d\n", nSysTime, m_csName, GetCurrentFrameID(), m_nBytesReceived);

    // re-init dumps
    m_InDump = xnDumpFileOpen(m_csInDumpMask, "%s_%d.raw", m_csInDumpMask, GetCurrentFrameID());
    m_InternalDump = xnDumpFileOpen(m_csInternalDumpMask, "%s_%d.raw", m_csInternalDumpMask, GetCurrentFrameID());

    m_nBytesReceived = 0;
}
void XnFrameStreamProcessor::ProcessPacketChunk(const XnSensorProtocolResponseHeader* pHeader, const XnUChar* pData, XnUInt32 nDataOffset, XnUInt32 nDataSize)
{
    XN_PROFILING_START_SECTION("XnFrameStreamProcessor::ProcessPacketChunk");

    // if first data from SOF packet
    if (pHeader->nType == m_nTypeSOF && nDataOffset == 0)
    {
        if (!m_bAllowDoubleSOF || pHeader->nPacketID != (m_nLastSOFPacketID + 1))
        {
            XnUInt64 currOSTime;
            xnOSGetTimeStamp(&currOSTime);

            static const XnUInt32 halfSensorPeriod = 33 / 2; // in milliseconds

            // only process this frame if at least one soft-video-mode frame period
            // (minus half a sensor period of slack) has passed since the last SOF
            if (currOSTime - m_nLastSOFTimestamp > 1000 / XnSensor::ms_SoftVideoMode.fps - halfSensorPeriod)
            {
                xnLogVerbose(XN_MASK_SENSOR_PROTOCOL, "%s: Processing frame %d, t = %d (dt = %d)",
                    m_csName, pHeader->nPacketID, int(currOSTime), int(currOSTime - m_nLastSOFTimestamp));

                m_nLastSOFPacketID = pHeader->nPacketID;
                OnStartOfFrame(pHeader);
                m_nLastSOFTimestamp = currOSTime;
                m_bProcessNextFrame = TRUE;
            }
            else
            {
                m_bProcessNextFrame = FALSE;
            }
        }
    }

    if (!m_bProcessNextFrame)
        return;

    if (!m_bFrameCorrupted)
    {
        xnDumpFileWriteBuffer(m_InDump, pData, nDataSize);
        ProcessFramePacketChunk(pHeader, pData, nDataOffset, nDataSize);
    }

    // if last data from EOF packet
    if (pHeader->nType == m_nTypeEOF && (nDataOffset + nDataSize) == pHeader->nBufSize)
    {
        OnEndOfFrame(pHeader);
    }

    XN_PROFILING_END_SECTION
}
void getCaptureMessage(char* pMessage)
{
    switch (g_Capture.State)
    {
    case SHOULD_CAPTURE:
        {
            XnUInt64 nNow;
            xnOSGetTimeStamp(&nNow);
            nNow /= 1000;

            sprintf(pMessage, "Capturing will start in %u seconds...", g_Capture.nStartOn - (XnUInt32)nNow);
        }
        break;
    case CAPTURING:
        sprintf(pMessage, "* Recording! * | Frames: %d | Press any key or use menu to stop...", g_Capture.nCapturedFrames);
        break;
    default:
        pMessage[0] = 0;
    }
}
void captureRun()
{
    XnStatus nRetVal = XN_STATUS_OK;

    if (g_Capture.State != SHOULD_CAPTURE)
    {
        return;
    }

    XnUInt64 nNow;
    xnOSGetTimeStamp(&nNow);
    nNow /= 1000;

    // check if time has arrived
    if ((XnInt64)nNow >= g_Capture.nStartOn)
    {
        // check if we need to discard first frame
        if (g_Capture.bSkipFirstFrame)
        {
            g_Capture.bSkipFirstFrame = false;
        }
        else
        {
            // start recording
            for (int i = 0; i < CAPTURE_STREAM_COUNT; ++i)
            {
                g_Capture.streams[i].bRecording = false;

                if (g_Capture.streams[i].isStreamOn() && g_Capture.streams[i].captureType != STREAM_DONT_CAPTURE)
                {
                    nRetVal = g_Capture.recorder.attach(g_Capture.streams[i].getStream(), g_Capture.streams[i].captureType == STREAM_CAPTURE_LOSSY);
                    START_CAPTURE_CHECK_RC(nRetVal, "add stream");
                    g_Capture.streams[i].bRecording = TRUE;
                    g_Capture.streams[i].startFrame = g_Capture.streams[i].getFrameFunc().getFrameIndex();
                }
            }

            nRetVal = g_Capture.recorder.start();
            START_CAPTURE_CHECK_RC(nRetVal, "start recording");
            g_Capture.State = CAPTURING;
        }
    }
}
void XnSensorsManager::CleanUp()
{
    // Go over the sensors list. Each sensor that is not open by any session, and whose timeout
    // has passed, should be closed and removed.
    XnAutoCSLocker locker(m_hLock);

    XnUInt64 nNow;
    xnOSGetTimeStamp(&nNow);

    XnSensorsHash::Iterator it = m_sensors.Begin();
    while (it != m_sensors.End())
    {
        XnSensorsHash::Iterator curr = it;
        ++it;

        ReferencedSensor& sensor = curr->Value();
        if (sensor.nRefCount == 0 && (nNow - sensor.nNoClientsTime) > m_noClientTimeout.GetValue())
        {
            xnLogInfo(XN_MASK_SENSOR_SERVER, "No session holding sensor '%s' for %u ms. Shutting down...", curr->Key(), m_noClientTimeout.GetValue());
            XN_DELETE(sensor.pInvoker);
            m_sensors.Remove(curr);
        }
    }
}
XN_C_API XnStatus xnSchedulerRescheduleTask(XnScheduler* pScheduler, XnScheduledTask* pTask, XnUInt64 nInterval)
{
    XnStatus nRetVal = XN_STATUS_OK;

    XN_VALIDATE_INPUT_PTR(pScheduler);
    XN_VALIDATE_INPUT_PTR(pTask);

    // enter critical section
    nRetVal = xnOSEnterCriticalSection(&pScheduler->hCriticalSection);
    XN_IS_STATUS_OK(nRetVal);

    // remove it from the list
    xnSchedulerRemoveTaskInternal(pScheduler, pTask);

    pTask->nInterval = nInterval;

    // update its next execution
    XnUInt64 nNow;
    xnOSGetTimeStamp(&nNow);
    pTask->nNextTime = nNow + nInterval;

    // and add it back to the queue
    xnSchedulerAddTaskInternal(pScheduler, pTask);

    // leave critical section
    nRetVal = xnOSLeaveCriticalSection(&pScheduler->hCriticalSection);
    XN_IS_STATUS_OK(nRetVal);

    // notify that the list has changed
    nRetVal = xnOSSetEvent(pScheduler->hWakeThreadEvent);
    if (nRetVal != XN_STATUS_OK)
    {
        xnLogWarning(XN_MASK_SCHEDULER, "Failed setting event when rescheduling task: %s", xnGetStatusString(nRetVal));
    }

    return (XN_STATUS_OK);
}
void captureStart(int nDelay)
{
    if (g_Capture.csFileName[0] == 0)
    {
        captureBrowse(0);
    }

    if (g_Capture.csFileName[0] == 0)
        return;

    if (g_Capture.pRecorder == NULL)
    {
        if (!captureOpenWriteDevice())
            return;
    }

    XnUInt64 nNow;
    xnOSGetTimeStamp(&nNow);
    nNow /= 1000;

    g_Capture.nStartOn = (XnUInt32)nNow + nDelay;
    g_Capture.State = SHOULD_CAPTURE;
}
void XnSensorsManager::ReleaseSensor(XnServerSensorInvoker* pInvoker)
{
    XnAutoCSLocker locker(m_hLock);

    ReferencedSensor* pSensor;
    XnStatus nRetVal = m_sensors.Get(pInvoker->GetDevicePath(), pSensor);
    if (nRetVal != XN_STATUS_OK)
    {
        xnLogError(XN_MASK_SENSOR_SERVER, "Trying to release a sensor that is not in the map!");
        return;
    }

    --pSensor->nRefCount;
    xnLogVerbose(XN_MASK_SENSOR_SERVER, "Sensor '%s' now has %u sessions", pInvoker->GetDevicePath(), pSensor->nRefCount);

    if (pSensor->nRefCount == 0)
    {
        // Store the current time. Later, in CleanUp(), the sensor will be closed once the timeout has passed.
        xnOSGetTimeStamp(&pSensor->nNoClientsTime);

        // Do some clean-up (so that the next client will behave as if it started the server).
        // This is a bit ugly, but we need to manually set DEVICE properties back to their defaults
        // (we know there aren't any streams and clients, but the Device module always remains).
        nRetVal = pSensor->pInvoker->SetIntProperty(XN_MODULE_NAME_DEVICE, XN_MODULE_PROPERTY_FRAME_SYNC, (XnUInt64)FALSE);
        if (nRetVal != XN_STATUS_OK)
        {
            xnLogError(XN_MASK_SENSOR_SERVER, "Failed resetting FrameSync: %s", xnGetStatusString(nRetVal));
        }

        nRetVal = pSensor->pInvoker->ConfigureModuleFromGlobalFile(XN_MODULE_NAME_DEVICE);
        if (nRetVal != XN_STATUS_OK)
        {
            xnLogError(XN_MASK_SENSOR_SERVER, "Failed configuring device from global config file: %s", xnGetStatusString(nRetVal));
        }
    }
}
XnStatus captureFrame()
{
    XnStatus nRetVal = XN_STATUS_OK;

    if (g_Capture.State == SHOULD_CAPTURE)
    {
        XnUInt64 nNow;
        xnOSGetTimeStamp(&nNow);
        nNow /= 1000;

        if (nNow >= g_Capture.nStartOn)
        {
            g_Capture.nCapturedFrames = 0;
            g_Capture.State = CAPTURING;
        }
    }

    if (g_Capture.State == CAPTURING)
    {
        nRetVal = g_Capture.pRecorder->Record();
        XN_IS_STATUS_OK(nRetVal);
        g_Capture.nCapturedFrames++;
    }

    return XN_STATUS_OK;
}
/* This is the actual scheduler function. It runs in its own thread. */
XN_THREAD_PROC xnSchedulerThreadFunc(XN_THREAD_PARAM pThreadParam)
{
    XnScheduler* pScheduler = (XnScheduler*)pThreadParam;

    XnUInt64 nNow;

    while (!pScheduler->bStopThread)
    {
        // check when the next task should be executed
        XnUInt64 nWait = XN_WAIT_INFINITE;
        XnScheduledTask* pTask = NULL;
        XnTaskCallbackFuncPtr pCallback = NULL;
        void* pCallbackArg = NULL;

        // check if something is in the list
        if (pScheduler->pFirst != NULL)
        {
            // enter critical section
            xnOSEnterCriticalSection(&pScheduler->hCriticalSection);

            pTask = pScheduler->pFirst;
            if (pTask != NULL)
            {
                xnOSGetTimeStamp(&nNow);
                if (pTask->nNextTime < nNow)
                {
                    // task should be executed
                    pCallback = pTask->pCallback;
                    pCallbackArg = pTask->pCallbackArg;

                    // remove it from the list
                    pScheduler->pFirst = pTask->pNextTask;

                    // calculate next time (relative to the previous due time, so the cadence doesn't drift)
                    pTask->nNextTime += pTask->nInterval;

                    // add it to the list again
                    xnSchedulerAddTaskInternal(pScheduler, pTask);
                }
                else
                {
                    nWait = pTask->nNextTime - nNow;
                }
            }

            // leave critical section
            xnOSLeaveCriticalSection(&pScheduler->hCriticalSection);

            if (pCallback != NULL)
            {
                // execute task (outside the critical section)
                pCallback(pCallbackArg);

                // no need to wait (we don't know how long the callback took)
                nWait = 0;
            }
        }

        // wait for a change of the list, or until the next task is due
        xnOSWaitEvent(pScheduler->hWakeThreadEvent, (XnUInt32)nWait);
    }

    XN_THREAD_PROC_RETURN(XN_STATUS_OK);
}
XnStatus XnSensorServer::InitServer()
{
    XnStatus nRetVal = XN_STATUS_OK;

    XnBool bEnableMultiUsers = FALSE;

    XnUInt32 nValue;
    if (XN_STATUS_OK == xnOSReadIntFromINI(m_strConfigFile, XN_SENSOR_SERVER_CONFIG_FILE_SECTION, XN_MODULE_PROPERTY_ENABLE_MULTI_USERS, &nValue))
    {
        bEnableMultiUsers = (nValue == TRUE);
    }

    nRetVal = xnOSCreateNamedMutexEx(&m_hServerRunningMutex, XN_SENSOR_SERVER_RUNNING_MUTEX_NAME, bEnableMultiUsers);
    XN_IS_STATUS_OK(nRetVal);

    XnAutoMutexLocker serverRunningLock(m_hServerRunningMutex, XN_SENSOR_SERVER_RUNNING_MUTEX_TIMEOUT);
    nRetVal = serverRunningLock.GetStatus();
    if (nRetVal != XN_STATUS_OK)
    {
        // This could mean another server/client is frozen and is jamming the mutex...
        xnLogError(XN_MASK_SENSOR_SERVER, "Failed to lock server mutex: %s - exiting.", xnGetStatusString(nRetVal));
        XN_ASSERT(FALSE);
        return XN_STATUS_OS_MUTEX_TIMEOUT;
    }

    // From now on we're protected by m_hServerRunningMutex until we return from this function.

    /* Create the Server Running event. This is created as a manual-reset event, because only
       the server resets it when it's shutting down. */
    nRetVal = xnOSOpenNamedEventEx(&m_hServerRunningEvent, XN_SENSOR_SERVER_RUNNING_EVENT_NAME, bEnableMultiUsers);
    if (nRetVal != XN_STATUS_OK)
    {
        nRetVal = xnOSCreateNamedEventEx(&m_hServerRunningEvent, XN_SENSOR_SERVER_RUNNING_EVENT_NAME, TRUE, bEnableMultiUsers);
        XN_IS_STATUS_OK(nRetVal);
    }

    if (IsServerRunning())
    {
        // Another server is already running.
        xnLogInfo(XN_MASK_SENSOR_SERVER, "Detected another server running - exiting.");
        xnOSCloseEvent(&m_hServerRunningEvent);
        m_hServerRunningEvent = NULL;
        return XN_STATUS_DEVICE_SERVER_ALREADY_RUNNING;
    }

    nRetVal = m_sensorsManager.Init();
    XN_IS_STATUS_OK(nRetVal);

    // init network
    nRetVal = xnOSInitNetwork();
    XN_IS_STATUS_OK(nRetVal);

    // create lock
    nRetVal = xnOSCreateCriticalSection(&m_hSessionsLock);
    XN_IS_STATUS_OK(nRetVal);

    // create the listen socket
    nRetVal = xnOSCreateSocket(XN_OS_TCP_SOCKET, XN_SENSOR_SERVER_IP_ADDRESS, XN_SENSOR_SERVER_PORT, &m_hListenSocket);
    XN_IS_STATUS_OK(nRetVal);

    // bind it
    nRetVal = xnOSBindSocket(m_hListenSocket);
    XN_IS_STATUS_OK(nRetVal);

    // start listening
    nRetVal = xnOSListenSocket(m_hListenSocket);
    XN_IS_STATUS_OK(nRetVal);

    xnLogVerbose(XN_MASK_SENSOR_SERVER, "Server is now listening");

    /* Set the event to signal that the server is ready for requests. We do this AFTER we start
       listening so the clients can wait on the event and then connect to the server socket. */
    nRetVal = xnOSSetEvent(m_hServerRunningEvent);
    XN_IS_STATUS_OK(nRetVal);

    xnOSGetTimeStamp(&m_nLastSessionActivity);

    return (XN_STATUS_OK);
}
XnStatus XnSensorClient::CreateIOStreamImpl(const XnChar* strConnectionString, XnIOStream*& pStream)
{
    XnStatus nRetVal = XN_STATUS_OK;

    nRetVal = xnOSCreateSocket(XN_OS_TCP_SOCKET, XN_SENSOR_SERVER_IP_ADDRESS, XN_SENSOR_SERVER_PORT, &m_hSocket);
    XN_IS_STATUS_OK(nRetVal);

    // connect to server
    XnUInt64 nStart;
    xnOSGetTimeStamp(&nStart);

    nRetVal = XN_STATUS_OS_NETWORK_TIMEOUT;
    for (XnUInt32 nRetries = 0; (nRetries < XN_SENSOR_CLIENT_CONNECT_RETRIES) && (nRetVal != XN_STATUS_OK); nRetries++)
    {
        nRetVal = xnOSConnectSocket(m_hSocket, XN_SENSOR_CLIENT_WAIT_FOR_SERVER);
    }

    if (nRetVal == XN_STATUS_OS_NETWORK_TIMEOUT)
    {
        xnLogError(XN_MASK_SENSOR_CLIENT, "Got timeout waiting for server");
        return nRetVal;
    }
    else if (nRetVal != XN_STATUS_OK)
    {
        xnLogError(XN_MASK_SENSOR_CLIENT, "Got an error trying to connect to server socket: %s", xnGetStatusString(nRetVal));
        return nRetVal;
    }

    XnIONetworkStream* pNetworkStream = XN_NEW(XnIONetworkStream, m_hSocket);
    if (pNetworkStream == NULL)
    {
        xnOSCloseSocket(m_hSocket);
        return XN_STATUS_ALLOC_FAILED;
    }

    pNetworkStream->SetReadTimeout(XN_SENSOR_CLIENT_READ_TIMEOUT);
    pStream = pNetworkStream;

    // create outgoing data packer (incoming is created by base class)
    m_pOutgoingPacker = XN_NEW(XnDataPacker, pNetworkStream, XN_SENSOR_SERVER_CONFIG_PACKER_SIZE);
    if (m_pOutgoingPacker == NULL)
    {
        XN_DELETE(pNetworkStream);
        xnOSCloseSocket(m_hSocket);
        return XN_STATUS_ALLOC_FAILED;
    }

    nRetVal = m_pOutgoingPacker->Init();
    if (nRetVal != XN_STATUS_OK)
    {
        XN_DELETE(pNetworkStream);
        XN_DELETE(m_pOutgoingPacker);
        xnOSCloseSocket(m_hSocket);
        return nRetVal;
    }

    // send server a request to open the sensor
    nRetVal = m_pOutgoingPacker->WriteCustomData(XN_SENSOR_SERVER_MESSAGE_OPEN_SENSOR, strConnectionString, strlen(strConnectionString) + 1);
    if (nRetVal != XN_STATUS_OK)
    {
        XN_DELETE(pNetworkStream);
        XN_DELETE(m_pOutgoingPacker);
        xnOSCloseSocket(m_hSocket);
        return nRetVal;
    }

    return (XN_STATUS_OK);
}
void XnPSCompressedDepthProcessor::ProcessFramePacketChunk(const XnSensorProtocolResponseHeader* pHeader, const XnUChar* pData, XnUInt32 nDataOffset, XnUInt32 nDataSize)
{
    XN_PROFILING_START_SECTION("XnPSCompressedDepthProcessor::ProcessFramePacketChunk")

    XnBuffer* pWriteBuffer = GetWriteBuffer();

    const XnUChar* pBuf = NULL;
    XnUInt32 nBufSize = 0;

    // check if we have bytes stored from previous calls
    if (m_RawData.GetSize() > 0)
    {
        // we have no choice. We need to append current buffer to previous bytes
        if (m_RawData.GetFreeSpaceInBuffer() < nDataSize)
        {
            xnLogWarning(XN_MASK_SENSOR_PROTOCOL_DEPTH, "Bad overflow depth! %d", m_RawData.GetSize());
            FrameIsCorrupted();
        }
        else
        {
            m_RawData.UnsafeWrite(pData, nDataSize);
        }

        pBuf = m_RawData.GetData();
        nBufSize = m_RawData.GetSize();
    }
    else
    {
        // we can process the data directly
        pBuf = pData;
        nBufSize = nDataSize;
    }

    XnUInt32 nOutputSize = pWriteBuffer->GetFreeSpaceInBuffer();
    XnUInt32 nWrittenOutput = nOutputSize;
    XnUInt32 nActualRead = 0;
    XnBool bLastPart = pHeader->nType == XN_SENSOR_PROTOCOL_RESPONSE_DEPTH_END && (nDataOffset + nDataSize) == pHeader->nBufSize;

    XnStatus nRetVal = UncompressDepthPS(pBuf, nBufSize, (XnUInt16*)pWriteBuffer->GetUnsafeWritePointer(), &nWrittenOutput, &nActualRead, bLastPart);
    if (nRetVal != XN_STATUS_OK)
    {
        FrameIsCorrupted();

        static XnUInt64 nLastPrinted = 0;

        XnUInt64 nCurrTime;
        xnOSGetTimeStamp(&nCurrTime);

        if (nOutputSize != 0 || (nCurrTime - nLastPrinted) > 1000)
        {
            xnLogWarning(XN_MASK_SENSOR_PROTOCOL_DEPTH, "Uncompress depth failed: %s. Input Size: %u, Output Space: %u, Last Part: %d.",
                xnGetStatusString(nRetVal), nBufSize, nOutputSize, bLastPart);
            xnOSGetTimeStamp(&nLastPrinted);
        }
    }

    pWriteBuffer->UnsafeUpdateSize(nWrittenOutput);

    nBufSize -= nActualRead;
    m_RawData.Reset();

    // if we have any bytes left, keep them for next time
    if (nBufSize > 0)
    {
        pBuf += nActualRead;
        m_RawData.UnsafeWrite(pBuf, nBufSize);
    }

    XN_PROFILING_END_SECTION
}