// this function is called each frame
void glutDisplay (void)
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Setup the OpenGL viewpoint
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();

    xn::SceneMetaData sceneMD;
    xn::DepthMetaData depthMD;
    g_DepthGenerator.GetMetaData(depthMD);
    glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);

    glDisable(GL_TEXTURE_2D);

    if (!g_bPause)
    {
        // Read next available data
        g_Context.WaitAndUpdateAll();
    }

    // Process the data
    g_DepthGenerator.GetMetaData(depthMD);
    g_UserGenerator.GetUserPixels(0, sceneMD);
    DrawDepthMap(depthMD, sceneMD);

    glutSwapBuffers();
}
void StartCapture()
{
    char recordFile[256] = {0};
    time_t rawtime;
    struct tm *timeinfo;
    time(&rawtime);
    timeinfo = localtime(&rawtime);
    XnUInt32 size;
    xnOSStrFormat(recordFile, sizeof(recordFile)-1, &size,
                  "%d_%02d_%02d[%02d_%02d_%02d].oni",
                  timeinfo->tm_year + 1900, timeinfo->tm_mon + 1, timeinfo->tm_mday,
                  timeinfo->tm_hour, timeinfo->tm_min, timeinfo->tm_sec);

    if (g_pRecorder != NULL)
    {
        StopCapture();
    }

    XnStatus nRetVal = XN_STATUS_OK;
    g_pRecorder = new xn::Recorder;

    nRetVal = g_Context.CreateAnyProductionTree(XN_NODE_TYPE_RECORDER, NULL, *g_pRecorder);
    START_CAPTURE_CHECK_RC(nRetVal, "Create recorder");

    nRetVal = g_pRecorder->SetDestination(XN_RECORD_MEDIUM_FILE, recordFile);
    START_CAPTURE_CHECK_RC(nRetVal, "set destination");

    nRetVal = g_pRecorder->AddNodeToRecording(g_DepthGenerator, XN_CODEC_16Z_EMB_TABLES);
    START_CAPTURE_CHECK_RC(nRetVal, "add node");

    g_bRecord = true;
}
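// StartCapture() above relies on a StopCapture() counterpart and a
// START_CAPTURE_CHECK_RC error macro that are not shown in this snippet.
// A minimal sketch of both, modelled on the stock OpenNI capture samples;
// the exact messages and cleanup behaviour here are assumptions, not the
// original code:
#define START_CAPTURE_CHECK_RC(rc, what)                            \
    if (rc != XN_STATUS_OK)                                         \
    {                                                               \
        printf("Failed to %s: %s\n", what, xnGetStatusString(rc));  \
        delete g_pRecorder;                                         \
        g_pRecorder = NULL;                                         \
        return;                                                     \
    }

void StopCapture()
{
    if (g_pRecorder != NULL)
    {
        g_pRecorder->Release();
        delete g_pRecorder;
        g_pRecorder = NULL;
    }
    g_bRecord = false;
}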
// this function is called each frame
void glutDisplay (void)
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Setup the OpenGL viewpoint
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();

    XnMapOutputMode mode;
    g_DepthGenerator.GetMapOutputMode(mode);
#ifdef USE_GLUT
    glOrtho(0, mode.nXRes, mode.nYRes, 0, -1.0, 1.0);
#elif defined(USE_GLES)
    glOrthof(0, mode.nXRes, mode.nYRes, 0, -1.0, 1.0);
#endif

    glDisable(GL_TEXTURE_2D);

    if (!g_bPause)
    {
        // Read next available data
        g_Context.WaitOneUpdateAll(g_DepthGenerator);
        // Update NITE tree
        g_pSessionManager->Update(&g_Context);
#ifdef USE_GLUT
        PrintSessionState(g_SessionState);
#endif
    }

#ifdef USE_GLUT
    glutSwapBuffers();
#endif
}
// this function is called each frame
void glutDisplay (void)
{
    // Read next available data
    g_Context.WaitAnyUpdateAll();
    // Process the data
    g_pSessionManager->Update(&g_Context);

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Setup the OpenGL viewpoint
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();
#ifdef USE_GLUT
    glOrtho(0, GL_WIN_SIZE_X, 0, GL_WIN_SIZE_Y, -1.0, 1.0);
#else
    glOrthof(0, GL_WIN_SIZE_X, 0, GL_WIN_SIZE_Y, -1.0, 1.0);
#endif

    glDisable(GL_TEXTURE_2D);

    // Draw the TrackPad
    DrowTrackPad();

#ifdef USE_GLUT
    glutSwapBuffers();
#endif
}
void Loop(void)
{
    XnStatus nRetVal = XN_STATUS_OK;

    while (g_notDone)
    {
        if ((nRetVal = g_context.WaitOneUpdateAll(g_depth)) != XN_STATUS_OK)
        //if ((nRetVal = g_context.WaitAndUpdateAll()) != XN_STATUS_OK)
        {
            fprintf(stderr, "Could not update data: %s\n", xnGetStatusString(nRetVal));
            continue;
        }

        if (g_haveDepth)
        {
            const XnDepthPixel* pDepthMap = g_depth.GetDepthMap();
            ProcessDepthFrame(pDepthMap, g_depthWidth, g_depthHeight);
            FindFingertip();
        }

        if (g_haveImage)
        {
            const XnRGB24Pixel* pImageMap = g_image.GetRGB24ImageMap();
            ProcessImageFrame(pImageMap, g_depthWidth, g_depthHeight);
        }

        ShowFrame();
        CheckKeys();
    }
}
bool SetupIR(xn::Context& g_context)
{
    XnStatus nRetVal = XN_STATUS_OK;

    /*
    if ((nRetVal = g_ir.Create(g_context)) != XN_STATUS_OK)
    {
        printf("Could not create ir generator: %s\n", xnGetStatusString(nRetVal));
        return FALSE;
    }
    */

    if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_IR, g_ir)) != XN_STATUS_OK)
    {
        fprintf(stderr, "Could not find ir sensor: %s\n", xnGetStatusString(nRetVal));
        return FALSE;
    }

    XnMapOutputMode mapMode;
    mapMode.nXRes = XN_VGA_X_RES;
    mapMode.nYRes = XN_VGA_Y_RES;
    mapMode.nFPS = 30;
    if ((nRetVal = g_ir.SetMapOutputMode(mapMode)) != XN_STATUS_OK)
    {
        fprintf(stderr, "Could not set ir mode: %s\n", xnGetStatusString(nRetVal));
        return FALSE;
    }

    g_ir.GetMetaData(g_irMD);

    return TRUE;
}
bool SetupImage(xn::Context& g_context)
{
    XnStatus nRetVal = XN_STATUS_OK;

    fprintf(stderr, "Setting up the image generator\n");

    if ((nRetVal = g_image.Create(g_context)) != XN_STATUS_OK)
    {
        printf("Could not create image generator: %s\n", xnGetStatusString(nRetVal));
        return FALSE;
    }

    if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image)) != XN_STATUS_OK)
    {
        fprintf(stderr, "Could not find image sensor: %s\n", xnGetStatusString(nRetVal));
        return FALSE;
    }

    XnMapOutputMode mapMode;
    mapMode.nXRes = XN_VGA_X_RES;
    mapMode.nYRes = XN_VGA_Y_RES;
    mapMode.nFPS = 30;
    if ((nRetVal = g_image.SetMapOutputMode(mapMode)) != XN_STATUS_OK)
    {
        fprintf(stderr, "Could not set image mode: %s\n", xnGetStatusString(nRetVal));
        return FALSE;
    }

    return TRUE;
}
int main(int argc, char **argv)
{
    XnStatus rc = XN_STATUS_OK;
    xn::EnumerationErrors errors;

    // Pass &errors so CHECK_ERRORS actually has enumeration errors to report
    rc = g_Context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
    CHECK_ERRORS(rc, errors, "InitFromXmlFile");
    CHECK_RC(rc, "InitFromXml");

    rc = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
    CHECK_RC(rc, "Find depth generator");

    rc = g_Context.FindExistingNode(XN_NODE_TYPE_SCENE, g_SceneAnalyzer);
    CHECK_RC(rc, "Find scene analyzer");

    rc = g_Context.StartGeneratingAll();
    CHECK_RC(rc, "StartGenerating");

#ifdef USE_GLUT
    glInit(&argc, argv);
    glutMainLoop();
#elif defined(USE_GLES)
    if (!opengles_init(GL_WIN_SIZE_X, GL_WIN_SIZE_Y, &display, &surface, &context))
    {
        printf("Error initing opengles\n");
        CleanupExit();
    }

    glDisable(GL_DEPTH_TEST);
    // glEnable(GL_TEXTURE_2D);
    glEnableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_COLOR_ARRAY);

    while ((!_kbhit()) && (!g_bQuit))
    {
        glutDisplay();
        eglSwapBuffers(display, surface);
    }

    opengles_shutdown(display, surface, context);
    CleanupExit();
#endif
}
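// Several of the snippets here use the CHECK_RC and CHECK_ERRORS helper macros
// from the OpenNI sample code without defining them. A sketch of typical
// definitions (assumed for illustration; the exact form varies between samples):
#define CHECK_RC(rc, what)                                          \
    if (rc != XN_STATUS_OK)                                         \
    {                                                               \
        printf("%s failed: %s\n", what, xnGetStatusString(rc));     \
        return rc;                                                  \
    }

#define CHECK_ERRORS(rc, errors, what)                              \
    if (rc == XN_STATUS_NO_NODE_PRESENT)                            \
    {                                                               \
        XnChar strError[1024];                                      \
        errors.ToString(strError, 1024);                            \
        printf("%s\n", strError);                                   \
        return rc;                                                  \
    }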
bool open(void)
{
    // Should clean up properly if initialization fails partway through
    XnStatus status = context_.InitFromXmlFile(Config_file);
    if (status != XN_STATUS_OK) {
        fprintf(stderr, "InitFromXmlFile() failed.\n");
        return false;
    }

    status = context_.FindExistingNode(XN_NODE_TYPE_DEPTH, depth_);
    if (status != XN_STATUS_OK) {
        fprintf(stderr, "FindExistingNode() failed.\n");
        return false;
    }

    return true;
}
void Context_Shutdown_wrapped(xn::Context& self)
{
#ifdef _DEBUG
    PyCout << "Shutting down OpenNI.." << std::endl;
#endif
    self.Shutdown();
}
int initOpenNI(const XnChar* fname)
{
    XnStatus nRetVal = XN_STATUS_OK;

    // initialize context
    nRetVal = xnContext.InitFromXmlFile(fname);
    CHECK_RC(nRetVal, "InitFromXmlFile");

    // initialize depth generator
    nRetVal = xnContext.FindExistingNode(XN_NODE_TYPE_DEPTH, xnDepthGenerator);
    CHECK_RC(nRetVal, "FindExistingNode(XN_NODE_TYPE_DEPTH)");

    // initialize image generator
    nRetVal = xnContext.FindExistingNode(XN_NODE_TYPE_IMAGE, xnImgeGenertor);
    CHECK_RC(nRetVal, "FindExistingNode(XN_NODE_TYPE_IMAGE)");

    return nRetVal;
}
bool kinect_reader2::init()
{
    nRetVal = XN_STATUS_OK;

    nRetVal = g_Context.Init();
    if (nRetVal != XN_STATUS_OK)
    {
        printf("Creating context failed: %s\n", xnGetStatusString(nRetVal));
        return false;
    }

    nRetVal = g_DepthGenerator.Create(g_Context);
    if (nRetVal != XN_STATUS_OK)
    {
        printf("Creating depth generator failed: %s\n", xnGetStatusString(nRetVal));
        return false;
    }
    g_DepthGenerator.GetMirrorCap().SetMirror(true);

    nRetVal = g_UserGenerator.Create(g_Context);
    if (nRetVal != XN_STATUS_OK)
    {
        printf("Creating user generator failed: %s\n", xnGetStatusString(nRetVal));
        return false;
    }

    XnCallbackHandle hUserCallbacks, hCalibrationStart, hCalibrationComplete, hPoseDetected;
    if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
    {
        printf("Supplied user generator doesn't support skeleton\n");
        return false;
    }
    g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
    g_UserGenerator.GetSkeletonCap().RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart);
    g_UserGenerator.GetSkeletonCap().RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete);

    if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
    {
        printf("Pose required, but not supported\n");
        return false;
    }
    g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
    g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

    g_Context.StartGeneratingAll();

    return true;
}
void CleanupExit()
{
    g_scriptNode.Release();
    g_DepthGenerator.Release();
    g_UserGenerator.Release();
    g_Player.Release();
    g_Context.Release();

    exit(1);
}
void CleanupExit()
{
    g_ScriptNode.Release();
    g_DepthGenerator.Release();
    g_GestureGenerator.Release();
    g_Context.Release();

    exit(1);
}
DeviceONI::DeviceONI(xn::Context& context, const std::string& file_name, bool repeat, bool streaming) throw (OpenNIException)
    : OpenNIDevice(context)
    , streaming_(streaming)
    , depth_stream_running_(false)
    , image_stream_running_(false)
    , ir_stream_running_(false)
{
    XnStatus status;

    status = context_.OpenFileRecording(file_name.c_str());
    if (status != XN_STATUS_OK)
        THROW_OPENNI_EXCEPTION("Could not open ONI file. Reason: %s", xnGetStatusString(status));

    status = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth_generator_);
    if (status != XN_STATUS_OK)
        THROW_OPENNI_EXCEPTION("could not find depth stream in file %s. Reason: %s", file_name.c_str(), xnGetStatusString(status));
    else
    {
        available_depth_modes_.push_back(getDepthOutputMode());
        depth_generator_.RegisterToNewDataAvailable((xn::StateChangedHandler)NewONIDepthDataAvailable, this, depth_callback_handle_);
    }

    status = context.FindExistingNode(XN_NODE_TYPE_IMAGE, image_generator_);
    if (status == XN_STATUS_OK)
    {
        available_image_modes_.push_back(getImageOutputMode());
        image_generator_.RegisterToNewDataAvailable((xn::StateChangedHandler)NewONIImageDataAvailable, this, image_callback_handle_);
    }

    status = context.FindExistingNode(XN_NODE_TYPE_IR, ir_generator_);
    if (status == XN_STATUS_OK)
        ir_generator_.RegisterToNewDataAvailable((xn::StateChangedHandler)NewONIIRDataAvailable, this, ir_callback_handle_);

    status = context.FindExistingNode(XN_NODE_TYPE_PLAYER, player_);
    if (status != XN_STATUS_OK)
        THROW_OPENNI_EXCEPTION("Failed to find player node: %s\n", xnGetStatusString(status));

    device_node_info_ = player_.GetInfo();

    Init();

    player_.SetRepeat(repeat);
    if (streaming_)
        player_thread_ = boost::thread(&DeviceONI::PlayerThreadFunction, this);
}
void xn_init(char *argv)
{
    xn_call_and_check(gContext.InitFromXmlFile("../conf/SamplesConfig.xml"), "init from xml");

    xn::ImageGenerator image;
    xn_call_and_check(gContext.FindExistingNode(XN_NODE_TYPE_IMAGE, image), "find image node");
    xn_call_and_check(gContext.FindExistingNode(XN_NODE_TYPE_DEPTH, gDepthGenerator), "find depth node");
    xn_call_and_check(gContext.FindExistingNode(XN_NODE_TYPE_USER, gUserGenerator), "find user node");

    XnCallbackHandle userCB, poseCB, calibrationCB;
    gUserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
    gUserGenerator.RegisterUserCallbacks(xn_onFoundUser, xn_onLostUser, NULL, userCB);
    gUserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(NULL, xn_onCalibrated, NULL, calibrationCB);
    gUserGenerator.GetSkeletonCap().GetCalibrationPose(gPose);
    gUserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(xn_onPoseDetected, NULL, NULL, poseCB);

    gImageDrawer = new ImageDrawer(image);
    gImageDrawer->setDrawRegion(WIN_SIZE_X/15, WIN_SIZE_Y/15, WIN_SIZE_X/3, WIN_SIZE_Y/3);

    gDepthDrawer = new DepthDrawer(gDepthGenerator, gUserGenerator);
    gDepthDrawer->setDrawRegion(0, 0, WIN_SIZE_X, WIN_SIZE_Y);

    gSkeletonDrawer = new SkeletonDrawer(gUserGenerator, gDepthGenerator);
    gSkeletonDrawer->setDrawRegion(0, 0, WIN_SIZE_X, WIN_SIZE_Y);

    capture = new Capture(&gContext);
    capture->setFileName(argv);

    XnSkeletonJoint joints[] = {
        XN_SKEL_HEAD, XN_SKEL_NECK, XN_SKEL_TORSO,
        XN_SKEL_LEFT_SHOULDER, XN_SKEL_LEFT_ELBOW, XN_SKEL_LEFT_HAND,
        XN_SKEL_RIGHT_SHOULDER, XN_SKEL_RIGHT_ELBOW, XN_SKEL_RIGHT_HAND,
        XN_SKEL_LEFT_HIP, XN_SKEL_LEFT_KNEE, XN_SKEL_LEFT_FOOT,
        XN_SKEL_RIGHT_HIP, XN_SKEL_RIGHT_KNEE, XN_SKEL_RIGHT_FOOT,
    };
    for (int i = 0; i < sizeof(joints)/sizeof(XnSkeletonJoint); i++) {
        capture->addSkeletonJoint(joints[i]);
    }
}
void CleanupExit()
{
    if (g_pRecorder)
        g_pRecorder->RemoveNodeFromRecording(g_DepthGenerator);
    StopCapture();

    g_Context.Shutdown();

    exit(1);
}
void xn_init(const char *filename)
{
    xn_call_and_check(gContext.Init(), "init context");
    xn_call_and_check(gContext.OpenFileRecording(filename), "open file recording");
    gContext.FindExistingNode(XN_NODE_TYPE_PLAYER, gPlayer);
    gPlayer.SetRepeat(false);

    XnStatus rc = XN_STATUS_OK;

    xn::ImageGenerator image;
    rc = gContext.FindExistingNode(XN_NODE_TYPE_IMAGE, image);
    gImageDrawer = (rc == XN_STATUS_OK) ? new ImageDrawer(image) : NULL;

    xn::DepthGenerator depth;
    rc = gContext.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
    gDepthDrawer = (rc == XN_STATUS_OK) ? new DepthDrawer(depth) : NULL;

    setMainWindow(IMAGE_WINDOW);
}
void Loop(int sock)
{
    XnStatus nRetVal = XN_STATUS_OK;
    struct timespec last, now;
    double nowtime, starttime;

    clock_gettime(CLOCK_REALTIME, &last);
    double lasttime = (double)(last.tv_sec) + ((double)(last.tv_nsec)) / 1000000000;
    starttime = lasttime; // start the stabilization timer from the first frame
    int frames = 0;

    while (g_notDone)
    {
        if ((nRetVal = g_context.WaitAndUpdateAll()) != XN_STATUS_OK)
        {
            fprintf(stderr, "Could not update ir: %s\n", xnGetStatusString(nRetVal));
            continue;
        }

        const XnDepthPixel* pDepthMap = g_depth.GetDepthMap();
        const XnRGB24Pixel* pImage = NULL; //g_image.GetRGB24ImageMap();
        const XnIRPixel* pIR = NULL; //g_ir.GetIRMap();

        ProcessDepthFrame(pDepthMap, g_depthWidth, g_depthHeight);
        FindFingertip();

        frames++;
        clock_gettime(CLOCK_REALTIME, &now);
        nowtime = (double)(now.tv_sec) + ((double)(now.tv_nsec)) / 1000000000;

        if (g_stabalize) // If we are still stabilizing then don't do anything
        {
            if ((nowtime - starttime) >= STABILIZATION_TIME_SECS)
            {
                g_stabalize = FALSE;
                g_set_background = TRUE;
            }
        }
        else if (g_calibrate) // Do we need to calibrate?
            Calibrate(sock);
        else
            SendFingers(sock); // otherwise just send the touches

        /*
        if (nowtime - lasttime >= 1.0)
        {
            printf("%d FPS\n", (int)(frames/(nowtime-lasttime)));
            lasttime = nowtime;
            frames = 0;
            if (sock >= 0 && pDepthMap != 0) // && pImage != 0 )//&& pIR != 0)
                SendImage(sock, pDepthMap, pImage, pIR, g_depthWidth, g_depthHeight);
        }
        */
    }
}
bool DataCapture::startDataCapture()
{
    XnStatus rc = context_.StartGeneratingAll();
    if (rc != XN_STATUS_OK)
    {
        std::cout << "StartGeneratingAll: " << xnGetStatusString(rc) << std::endl;
        return false;
    }
    return true;
}
/*
DWORD WINAPI EEThreadProc(LPVOID lpThreadParameter)
{
    {
        g_Context.WaitAndUpdateAll();
        if (g_bRecord)
        {
            g_pRecorder->Record();
        }
    }
    return 0;
}
*/

int EEThreadProc(void *lpThreadParameter)
{
    {
        g_Context.WaitAndUpdateAll();
        if (g_bRecord)
        {
            g_pRecorder->Record();
        }
    }
    return 0;
}
/* The matlab mex function */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    XnUInt64 *MXadress;
    if (nrhs == 0)
    {
        printf("Close failed: Give Pointer to Kinect as input\n");
        mexErrMsgTxt("Kinect Error");
    }
    MXadress = (XnUInt64*)mxGetData(prhs[0]);
    if (MXadress[0] > 0)
    {
        g_Context = ((xn::Context*) MXadress[0])[0];
    }
    g_Context.Shutdown();
}
/***********************************************************************************
 * Main routine - additions here also. Session manager and Point control objects
 * will be initialized and the callbacks assigned
 ***********************************************************************************/
int main(int argc, char *argv[])
{
    //------------------ Context initializations go in here ------------------
    xn::EnumerationErrors errors;
    rc = cxt.InitFromXmlFile(SAMPLE_XML_PATH, &errors);            // initialize the context from the xml file
    if (rc != XN_STATUS_OK) {
        printf("Failed to open:%s", xnGetStatusString(rc));        // handle error in reading from XML
        return rc;
    }

    rc = cxt.FindExistingNode(XN_NODE_TYPE_DEPTH, depthGen);       // try to find the depth node from the context
    if (rc != XN_STATUS_OK) {                                      // handle error if node is not found
        printf("Failed to open Depth node!");
        return rc;
    }

    //-------------------- Init Nite objects -----------------------
    sessionMgr = new XnVSessionManager();                          // session manager is created
    rc = sessionMgr->Initialize(&cxt, "Click,Wave", "RaiseHand");  // session manager is initialized
    if (rc != XN_STATUS_OK) {                                      // check if this init operation was good
        printf("Session manager couldn't be initialized");
        return rc;
    }

    sessionMgr->RegisterSession(&cxt, sessionStart, sessionEnd);   // register the callbacks for the session manager

    pointCtrl = new XnVPointControl("Point Tracker");              // create the point control object
    pointCtrl->RegisterPrimaryPointCreate(&cxt, pointCreate);      // register the primary point created handler
    pointCtrl->RegisterPrimaryPointUpdate(&cxt, pointUpdate);      // register the primary point updated handler
    pointCtrl->RegisterPrimaryPointDestroy(&cxt, pointDestroy);    // register the primary point destroyed handler
    sessionMgr->AddListener(pointCtrl);                            // make the session manager listen to the point control object

    nullifyHandPoint();                                            // initialize the global variable to track hand points

    //----------------- GL initializations go in here --------------
    initGraphics(argc, argv);                                      // init GL routine
    glutMainLoop();                                                // start the main loop for openGL

    return 0;
}
int configKinect()
{
    XnStatus rc = XN_STATUS_OK;
    xn::EnumerationErrors errors;

    // Initialize OpenNI
    rc = g_Context.InitFromXmlFile(SAMPLE_XML_PATH, g_ScriptNode, &errors);
    CHECK_ERRORS(rc, errors, "InitFromXmlFile");
    CHECK_RC(rc, "InitFromXmlFile");

    rc = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
    CHECK_RC(rc, "Find depth generator");

    rc = g_Context.FindExistingNode(XN_NODE_TYPE_GESTURE, g_GestureGenerator);
    CHECK_RC(rc, "Find gesture generator");

    XnCallbackHandle hGestureIntermediateStageCompleted, hGestureProgress, hGestureReadyForNextIntermediateStage;
    g_GestureGenerator.RegisterToGestureIntermediateStageCompleted(GestureIntermediateStageCompletedHandler, NULL, hGestureIntermediateStageCompleted);
    g_GestureGenerator.RegisterToGestureReadyForNextIntermediateStage(GestureReadyForNextIntermediateStageHandler, NULL, hGestureReadyForNextIntermediateStage);
    g_GestureGenerator.RegisterGestureCallbacks(NULL, GestureProgressHandler, NULL, hGestureProgress);

    // Create NITE objects
    g_pSessionManager = new XnVSessionManager;
    rc = g_pSessionManager->Initialize(&g_Context, "Click,Wave", "RaiseHand");
    CHECK_RC(rc, "SessionManager::Initialize");

    g_pSessionManager->RegisterSession(NULL, SessionStarting, SessionEnding, FocusProgress);

    pointHandler = new PointHandler(20, g_DepthGenerator);
    g_pFlowRouter = new XnVFlowRouter;
    g_pFlowRouter->SetActive(pointHandler);
    g_pSessionManager->AddListener(g_pFlowRouter);

    pointHandler->RegisterNoPoints(NULL, NoHands);

    // Initialization done. Start generating
    rc = g_Context.StartGeneratingAll();
    CHECK_RC(rc, "StartGenerating");

    return rc;
}
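// configKinect() above registers SessionStarting, SessionEnding, FocusProgress
// and NoHands without showing them. A sketch of the handler signatures expected
// by XnVSessionManager and XnVPointControl, following the NITE sample
// conventions (the bodies here are placeholder assumptions):
void XN_CALLBACK_TYPE SessionStarting(const XnPoint3D& ptPosition, void* UserCxt)
{
    printf("Session started at (%f,%f,%f)\n", ptPosition.X, ptPosition.Y, ptPosition.Z);
}

void XN_CALLBACK_TYPE SessionEnding(void* UserCxt)
{
    printf("Session ended\n");
}

void XN_CALLBACK_TYPE FocusProgress(const XnChar* strFocus, const XnPoint3D& ptPosition, XnFloat fProgress, void* UserCxt)
{
    // Progress of the focus gesture ("Click" or "Wave") before the session starts
}

void XN_CALLBACK_TYPE NoHands(void* UserCxt)
{
    printf("No hands are being tracked\n");
}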
int main(int argc, char *argv[])
{
    if (argc == 2) {
        gl_init(&argc, argv);
        xn_init(argv[1]);
        gContext.StartGeneratingAll();
        glutMainLoop();
    }
    return 0;
}
/**
 * @fn waitKinect
 * Waits on the Kinect.
 * This function implements the Kinect's standby state: it blocks until new
 * depth data is available and keeps the NITE session on hold until a focus
 * gesture starts it.
 */
bool waitKinect()
{
    if (g_init_kinect == true)
    {
        // Read next available data
        g_Context.WaitOneUpdateAll(g_DepthGenerator);
        // Process the data
        g_pSessionManager->Update(&g_Context);
    }
    return g_session_started;
}
void gl_onKeyboard(unsigned char key, int x, int y)
{
    switch (key) {
    case 27:
        gContext.Shutdown();
        exit(1);
        break;
    case 't':
        setMainWindow((winMode == DEPTH_WINDOW) ? IMAGE_WINDOW : DEPTH_WINDOW);
        break;
    }
}
void CleanupExit()
{
    g_Context.Shutdown();

    if (gPhysicsSDK != NULL)
    {
        if (gScene != NULL)
            gPhysicsSDK->releaseScene(*gScene);
        gScene = NULL;
        NxReleasePhysicsSDK(gPhysicsSDK);
        gPhysicsSDK = NULL;
    }

    exit(1);
}
int main(int argc, char **argv)
{
    XnStatus nRetVal = XN_STATUS_OK;

    if (argc > 1)
    {
        nRetVal = g_Context.Init();
        CHECK_RC(nRetVal, "Init");
        nRetVal = g_Context.OpenFileRecording(argv[1]);
        if (nRetVal != XN_STATUS_OK)
        {
            printf("Can't open recording %s: %s\n", argv[1], xnGetStatusString(nRetVal));
            return 1;
        }
    }
    else
    {
        nRetVal = g_Context.InitFromXmlFile(SAMPLE_XML_PATH);
        CHECK_RC(nRetVal, "InitFromXml");
    }

    g_tunnel = new Tunnel();

    nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
    CHECK_RC(nRetVal, "Find depth generator");

    nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
    if (nRetVal != XN_STATUS_OK)
    {
        nRetVal = g_UserGenerator.Create(g_Context);
        CHECK_RC(nRetVal, "Find user generator");
    }

    XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
    if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
    {
        printf("Supplied user generator doesn't support skeleton\n");
        return 1;
    }
    g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
    g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);

    if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
    {
        g_bNeedPose = TRUE;
        if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
        {
            printf("Pose required, but not supported\n");
            return 1;
        }
        g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);
        g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
    }

    g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

    nRetVal = g_Context.StartGeneratingAll();
    CHECK_RC(nRetVal, "StartGenerating");

    glInit(&argc, argv);
    glutMainLoop();
}
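// The user-tracking snippets above register User_NewUser, User_LostUser,
// calibration and pose callbacks that are not shown here. A minimal sketch of
// the usual handlers, based on the standard OpenNI user tracker samples;
// g_bNeedPose and g_strPose are the globals used in the snippet above, and the
// bodies are illustrative assumptions:
void XN_CALLBACK_TYPE User_NewUser(xn::UserGenerator& generator, XnUserId nId, void* pCookie)
{
    printf("New User %d\n", nId);
    if (g_bNeedPose)
        g_UserGenerator.GetPoseDetectionCap().StartPoseDetection(g_strPose, nId);
    else
        g_UserGenerator.GetSkeletonCap().RequestCalibration(nId, TRUE);
}

void XN_CALLBACK_TYPE User_LostUser(xn::UserGenerator& generator, XnUserId nId, void* pCookie)
{
    printf("Lost user %d\n", nId);
}

void XN_CALLBACK_TYPE UserPose_PoseDetected(xn::PoseDetectionCapability& capability, const XnChar* strPose, XnUserId nId, void* pCookie)
{
    g_UserGenerator.GetPoseDetectionCap().StopPoseDetection(nId);
    g_UserGenerator.GetSkeletonCap().RequestCalibration(nId, TRUE);
}

void XN_CALLBACK_TYPE UserCalibration_CalibrationStart(xn::SkeletonCapability& capability, XnUserId nId, void* pCookie)
{
    printf("Calibration started for user %d\n", nId);
}

void XN_CALLBACK_TYPE UserCalibration_CalibrationEnd(xn::SkeletonCapability& capability, XnUserId nId, XnBool bSuccess, void* pCookie)
{
    if (bSuccess)
        g_UserGenerator.GetSkeletonCap().StartTracking(nId);
    else if (g_bNeedPose)
        g_UserGenerator.GetPoseDetectionCap().StartPoseDetection(g_strPose, nId);
    else
        g_UserGenerator.GetSkeletonCap().RequestCalibration(nId, TRUE);
}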
// Main loop
void run()
{
    int nAudioNextBuffer = 0;

    printf("Press any key to exit...\n");

    // Discard the data that is already pending
    audio.WaitAndUpdateData();

    while (!xnOSWasKeyboardHit())
    {
        // Update the data
        XnStatus nRetVal = context.WaitAndUpdateAll();
        if (nRetVal != XN_STATUS_OK) {
            throw std::runtime_error(xnGetStatusString(nRetVal));
        }

        // Get the next buffer
        WAVEHDR* pHeader = &AudioBuffers[nAudioNextBuffer];
        if ((pHeader->dwFlags & WHDR_DONE) == 0) {
            printf("No audio buffer is available! Audio buffer will be lost!\n");
            continue;
        }

        // Clean up the WAVE header
        MMRESULT mmRes = waveOutUnprepareHeader(hWaveOut, pHeader, sizeof(WAVEHDR));
        if (mmRes != MMSYSERR_NOERROR) {
            OutputErrorText(mmRes);
        }

        // Copy the WAVE data
        pHeader->dwBufferLength = audio.GetDataSize();
        pHeader->dwFlags = 0;
        xnOSMemCopy(pHeader->lpData, audio.GetAudioBuffer(), pHeader->dwBufferLength);

        // Prepare the WAVE header
        mmRes = waveOutPrepareHeader(hWaveOut, pHeader, sizeof(WAVEHDR));
        if (mmRes != MMSYSERR_NOERROR) {
            OutputErrorText(mmRes);
            continue;
        }

        // Put the WAVE data into the output queue
        mmRes = waveOutWrite(hWaveOut, pHeader, sizeof(WAVEHDR));
        if (mmRes != MMSYSERR_NOERROR) {
            OutputErrorText(mmRes);
            continue;
        }

        // Advance to the next buffer index
        nAudioNextBuffer = (nAudioNextBuffer + 1) % NUMBER_OF_AUDIO_BUFFERS;
    }
}