void glutKeyboard(unsigned char key, int x, int y)
{
    switch (key)
    {
    case 27:    // ESC: quit
        exit(1);
    case '1':   // overlay mode: register the depth map to the image viewpoint
        g_nViewState = DISPLAY_MODE_OVERLAY;
        g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);
        break;
    case '2':   // depth-only mode: restore the unregistered depth viewpoint
        g_nViewState = DISPLAY_MODE_DEPTH;
        g_depth.GetAlternativeViewPointCap().ResetViewPoint();
        break;
    case '3':   // image-only mode
        g_nViewState = DISPLAY_MODE_IMAGE;
        g_depth.GetAlternativeViewPointCap().ResetViewPoint();
        break;
    case 's':   // save a snapshot
        takePhoto();
        break;
    case 'm':   // toggle horizontal mirroring for all generators
        g_context.SetGlobalMirror(!g_context.GetGlobalMirror());
        break;
    }
}
void changeRegistration(int nValue)
{
    if (!g_Depth.IsValid() || !g_Depth.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
    {
        return;
    }

    if (!nValue)
    {
        g_Depth.GetAlternativeViewPointCap().ResetViewPoint();
    }
    else if (g_Image.IsValid())
    {
        g_Depth.GetAlternativeViewPointCap().SetViewPoint(g_Image);
    }
}
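// A complementary query helper (a sketch, not from the original source):
// OpenNI's AlternativeViewPoint capability can also report whether the depth
// stream is currently registered to the image viewpoint via IsViewPointAs().
// Assumes the same g_Depth/g_Image globals as changeRegistration() above.
bool isRegisteredToImage()
{
    if (!g_Depth.IsValid() || !g_Image.IsValid() ||
        !g_Depth.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
        return false;
    return g_Depth.GetAlternativeViewPointCap().IsViewPointAs(g_Image) == TRUE;
}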
// -----------------------------------------------------------------------------------------------------
//  connect
// -----------------------------------------------------------------------------------------------------
bool CameraDevice::connect()
{
    // Connect to the Kinect
    printf("Connecting to Kinect... ");
    fflush(stdout);
    XnStatus nRetVal = XN_STATUS_OK;
    EnumerationErrors errors;
    ScriptNode script;
    nRetVal = g_context.InitFromXmlFile(Config::_PathKinectXmlFile.c_str(), script, &errors);
    if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
    {
        XnChar strError[1024];
        errors.ToString(strError, 1024);
        printf("%s\n", strError);
        return false;
    }
    else if (nRetVal != XN_STATUS_OK)
    {
        printf("Open failed: %s\n", xnGetStatusString(nRetVal));
        return false;
    }
    printf("OK\n");

    // Allocate the point cloud buffer
    g_cloudPointSave.width = NBPIXELS_WIDTH;
    g_cloudPointSave.height = NBPIXELS_HEIGHT;
    g_cloudPointSave.points.resize(NBPIXELS_WIDTH * NBPIXELS_HEIGHT);

    nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
    CHECK_RC(nRetVal, "Find depth generator");

    nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
    CHECK_RC(nRetVal, "Find image generator");

    nRetVal = xnFPSInit(&g_xnFPS, 180);
    CHECK_RC(nRetVal, "FPS Init");

    g_context.SetGlobalMirror(false);  // disable horizontal mirroring

    // Register the depth map to the RGB camera viewpoint
    g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);

    if (g_depth.GetIntProperty("ShadowValue", g_shadowValue) != XN_STATUS_OK)
        printf("[OpenNIDriver] Could not read shadow value!\n");

    if (g_depth.GetIntProperty("NoSampleValue", g_noSampleValue) != XN_STATUS_OK)
        printf("[OpenNIDriver] Could not read no sample value!\n");

    return (nRetVal == XN_STATUS_OK);
}
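// connect() relies on a CHECK_RC macro that is not shown in this listing.
// The stock OpenNI samples define it roughly as below (a sketch; note that a
// function returning bool, like connect() above, should return false rather
// than the raw status code).
#define CHECK_RC(rc, what)                                          \
    if ((rc) != XN_STATUS_OK)                                       \
    {                                                               \
        printf("%s failed: %s\n", what, xnGetStatusString(rc));     \
        return false;                                               \
    }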
bool getImageCoordinatesForDepthPixel(int x, int y, int& imageX, int& imageY)
{
    if (!g_Depth.IsValid())
        return false;   // no depth
    if (!g_Image.IsValid())
        return false;   // no image
    if (!g_Depth.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
        return false;

    XnUInt32 altX;
    XnUInt32 altY;
    if (XN_STATUS_OK != g_Depth.GetAlternativeViewPointCap().GetPixelCoordinatesInViewPoint(g_Image, x, y, altX, altY))
        return false;

    imageX = (int)altX;
    imageY = (int)altY;
    return true;
}
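// Usage sketch (not from the original source): once a depth pixel has been
// mapped into image coordinates, the matching RGB value can be read straight
// from the image map. The 640-pixel row stride is an assumption for VGA mode.
bool getColorForDepthPixel(int x, int y, XnRGB24Pixel& color)
{
    int imageX, imageY;
    if (!getImageCoordinatesForDepthPixel(x, y, imageX, imageY))
        return false;
    const XnRGB24Pixel* pImage = g_Image.GetRGB24ImageMap();
    color = pImage[imageY * 640 + imageX];
    return true;
}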
//----------------------------------------------------
//  Keyboard handling
//----------------------------------------------------
void glutKeyboard(unsigned char key, int x, int y)
{
    switch (key)
    {
    case '1':   // overlay mode
        g_nViewState = DISPLAY_MODE_OVERLAY;
        g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);  // remove the offset between image and depth
        break;
    case '2':   // depth mode
        g_nViewState = DISPLAY_MODE_DEPTH;
        g_depth.GetAlternativeViewPointCap().ResetViewPoint();       // restore the original depth viewpoint(?)
        break;
    case '3':   // image mode
        g_nViewState = DISPLAY_MODE_IMAGE;
        g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);  // remove the offset between image and depth
        break;
    case '4':   // chroma-key mode
        g_nViewState = DISPLAY_MODE_CHROMA;
        g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);  // remove the offset between image and depth
        break;
    case '5':   // point cloud mode
        g_nViewState = DISPLAY_MODE_POINT_CLOUD;
        g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);  // remove the offset between image and depth
        break;
    case 'm':   // mirroring -- do NOT mash this key (it slows down for some reason)
        g_context.SetGlobalMirror(!g_context.GetGlobalMirror());
        break;
    case 'd':
        g_debugMode = !g_debugMode;             // toggle debug mode
        break;
    case 'f':   // toggle full-screen mode
        g_fullScreenMode = !g_fullScreenMode;
        if (g_fullScreenMode)
        {
            glutFullScreen();                   // full-screen mode
        }
        else
        {                                       // windowed mode
            glutPositionWindow(100, 100);
            glutReshapeWindow(KINECT_IMAGE_WIDTH, KINECT_IMAGE_HEIGHT);
        }
        break;
    case 's':   // take a screenshot including depth
        g_screenShotDepthMode = true;
        // fall through
    case 'S':   // take a screenshot without depth
        g_screenShotImageMode = true;
        break;
    case 'R':   // stop recording
        g_recorder.RemoveNodeFromRecording(g_image);
        g_recorder.RemoveNodeFromRecording(g_depth);
        cout << "recording stop!" << endl;
        break;
    // Raise/lower the chroma-key threshold
    case 't':
        g_chromaThresh += 10;
        break;
    case 'T':
        g_chromaThresh -= 10;
        break;
    //case 'p':
    //    g_pointSize += 0.000001;
    //    break;
    //case 'P':
    //    g_pointSize -= 0.000001;
    //    break;
    // Move the viewpoint
    case 'x':
        g_lokEyeX++; g_lokDirX++;
        break;
    case 'X':
        g_lokEyeX--; g_lokDirX--;
        break;
    case 'y':
        g_lokEyeY++; g_lokDirY++;
        break;
    case 'Y':
        g_lokEyeY--; g_lokDirY--;
        break;
    case 'z':
        g_lokEyeZ += 1; g_lokDirZ += 1;
        break;
    case 'Z':
        g_lokEyeZ -= 1; g_lokDirZ -= 1;
        break;
    // Quit
    case 27:    // ESC key
    case 'q':
    case 'Q':
        exit(1);
    }
}
int Init()
{
    XnStatus rc;

    // Make sure our image types are the same as the OpenNI image types.
    assert(sizeof(XnRGB24Pixel) == sizeof(ColorPixel));
    assert(sizeof(XnDepthPixel) == sizeof(DepthPixel));
    assert(sizeof(XnStatus) == sizeof(int));

    // Load OpenNI xml settings
    char filePath[255];
    int length = Util::Helpers::GetExeDirectory(filePath, sizeof(filePath));
    filePath[length] = '\\';
    strcpy(&filePath[length+1], SAMPLE_XML_PATH);

    EnumerationErrors errors;
    rc = deviceContext.InitFromXmlFile(filePath, &errors);
    if (rc == XN_STATUS_NO_NODE_PRESENT)
    {
        // One reason would be if the Microsoft SDK is installed alongside
        // PrimeSense. Device Manager should say PrimeSense instead of
        // Microsoft Kinect.
        //XnChar strError[1024];
        //errors.ToString(strError, 1024);
        //LOGE("%s\n", strError);
        return -1;
    }
    else if (rc != XN_STATUS_OK)
    {
        fprintf(stderr, "%s\n", xnGetStatusString(rc));
        /*LOGE("Open failed: %s\n", xnGetStatusString(rc));*/
        return (rc);
    }

    // Retrieve colour and depth nodes
    rc = deviceContext.FindExistingNode(XN_NODE_TYPE_IMAGE, colorImageGenerator);
    rc = deviceContext.FindExistingNode(XN_NODE_TYPE_DEPTH, depthImageGenerator);

    // Set mirror mode to off
    SetMirrorMode(false);

    // Get a frame to perform checks on it
    ImageMetaData colorImageMetaData;
    DepthMetaData depthImageMetaData;
    depthImageGenerator.GetMetaData(depthImageMetaData);
    colorImageGenerator.GetMetaData(colorImageMetaData);

    // Hybrid mode isn't supported in this sample
    if (colorImageMetaData.FullXRes() != depthImageMetaData.FullXRes() ||
        colorImageMetaData.FullYRes() != depthImageMetaData.FullYRes())
    {
        /*LOGE("The device depth and image resolution must be equal!\n");*/
        return 1;
    }

    // RGB is the only image format supported.
    if (colorImageMetaData.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
    {
        /*LOGE("The device image format must be RGB24\n");*/
        return 1;
    }

    // Make sure automatic alignment of the colour and depth images is supported.
    XnBool isSupported = depthImageGenerator.IsCapabilitySupported("AlternativeViewPoint");
    if (!isSupported)
    {
        /*LOGE("Cannot set AlternativeViewPoint!\n");*/
        return 1;
    }

    // Set it to VGA maps at 30 FPS
    /*XnMapOutputMode mapMode;
    mapMode.nXRes = XN_VGA_X_RES;
    mapMode.nYRes = XN_VGA_Y_RES;
    mapMode.nFPS = 60;
    rc = g_depth.SetMapOutputMode(mapMode);
    if (rc)
    {
        LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));
        return 1;
    }
    mapMode.nFPS = 30;
    rc = g_image.SetMapOutputMode(mapMode);
    if (rc)
    {
        LOGE("Failed to set image map mode: %s\n", xnGetStatusString(rc));
        return 1;
    }*/

    // Set automatic alignment of the colour and depth images.
    rc = depthImageGenerator.GetAlternativeViewPointCap().SetViewPoint(colorImageGenerator);
    if (rc)
    {
        /*LOGE("Failed to set the alternative viewpoint: %s\n", xnGetStatusString(rc));*/
        return 1;
    }

    return XN_STATUS_OK;
}
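// Init() calls a SetMirrorMode() helper that is not shown in this listing.
// Given the deviceContext global above, a minimal sketch would simply forward
// to OpenNI's context-wide mirror switch (an assumption about the helper's
// actual body):
void SetMirrorMode(bool mirror)
{
    XnStatus rc = deviceContext.SetGlobalMirror(mirror ? TRUE : FALSE);
    if (rc != XN_STATUS_OK)
        fprintf(stderr, "SetGlobalMirror failed: %s\n", xnGetStatusString(rc));
}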
int main(int argc, char *argv[])
{
    //--------------------------------------------------------------------//
    //------------------------- SETUP REQUIRED NODES ---------------------//
    //--------------------------------------------------------------------//

    // Setup the command line parameters.
    setupParams(argc, argv);

    // Setup all the sockets.
    setupSockets();

    // Setup the capture socket server for Mac.
    #if (XN_PLATFORM == XN_PLATFORM_MACOSX)
        if (_featureDepthMapCapture || _featureRGBCapture)
        {
            if (_useSockets)
            {
                g_AS3Network = network();
                g_AS3Network.init(setupServer);
            }
        }
    #endif

    // Setup the status.
    XnStatus _status = XN_STATUS_OK;
    EnumerationErrors _errors;

    // Context Init and Add license.
    _status = _context.Init();
    CHECK_RC(_status, "AS3OpenNI :: Initialize context");
    _context.SetGlobalMirror(_mirror);

    // Fill in and register the PrimeSense license. (The original code compared
    // uninitialized buffers with strcmp and wrote one element past the license
    // fields; copying the vendor string and key is what was intended.)
    strcpy(_license.strVendor, "PrimeSense");
    strcpy(_license.strKey, "0KOIk2JeIBYClPWVnMoRKn5cdY4=");
    _status = _context.AddLicense(_license);
    CHECK_RC(_status, "AS3OpenNI :: Added license");

    // Set it to VGA maps at 30 FPS
    _depthMode.nXRes = 640;
    _depthMode.nYRes = 480;
    _depthMode.nFPS = 30;

    // Depth map create.
    _status = _depth.Create(_context);
    CHECK_RC(_status, "AS3OpenNI :: Create depth generator");
    _status = _depth.SetMapOutputMode(_depthMode);

    // Image map create (reuses the same VGA/30 FPS output mode).
    _status = _image.Create(_context);
    CHECK_RC(_status, "AS3OpenNI :: Create image generator");
    _status = _image.SetMapOutputMode(_depthMode);
    _status = _image.SetPixelFormat(XN_PIXEL_FORMAT_RGB24);

    // Create the hands generator.
    _status = _hands.Create(_context);
    CHECK_RC(_status, "AS3OpenNI :: Create hands generator");
    _hands.SetSmoothing(0.1);

    // Create the gesture generator.
    _status = _gesture.Create(_context);
    CHECK_RC(_status, "AS3OpenNI :: Create gesture generator");

    // Create user generator.
    _status = _userGenerator.Create(_context);
    CHECK_RC(_status, "AS3OpenNI :: Find user generator");

    // Create and initialize point tracker
    _sessionManager = new XnVSessionManager();
    _status = _sessionManager->Initialize(&_context, "Wave", "RaiseHand");
    if (_status != XN_STATUS_OK)
    {
        printf("AS3OpenNI :: Couldn't initialize the Session Manager: %s\n", xnGetStatusString(_status));
        CleanupExit();
    }
    _sessionManager->RegisterSession(NULL, &SessionStart, &SessionEnd, &SessionProgress);

    // Start catching signals for quit indications
    CatchSignals(&_quit);

    //---------------------------------------------------------------//
    //------------------------- SETUP FEATURES ----------------------//
    //---------------------------------------------------------------//

    // Define the Wave and SinglePoint detectors.
    _waveDetector = new XnVWaveDetector();

    // SinglePoint detector.
    if (_featureSinglePoint)
        _waveDetector->RegisterPointUpdate(NULL, &OnPointUpdate);

    // Feature Gesture.
    if (_featureGesture)
    {
        // Wave detector.
        _waveDetector->RegisterWave(NULL, &OnWave);

        // Push detector.
        _pushDetector = new XnVPushDetector();
        _pushDetector->RegisterPush(NULL, &onPush);

        // Swipe detector.
        _swipeDetector = new XnVSwipeDetector();
        _swipeDetector->RegisterSwipeUp(NULL, &Swipe_SwipeUp);
        _swipeDetector->RegisterSwipeDown(NULL, &Swipe_SwipeDown);
        _swipeDetector->RegisterSwipeLeft(NULL, &Swipe_SwipeLeft);
        _swipeDetector->RegisterSwipeRight(NULL, &Swipe_SwipeRight);

        // Steady detector.
        _steadyDetector = new XnVSteadyDetector();
        _steadyDetector->RegisterSteady(NULL, &Steady_OnSteady);
    }

    // Feature Circle.
    if (_featureCircle)
    {
        // Circle detector.
        _circleDetector = new XnVCircleDetector();
        _circleDetector->RegisterCircle(NULL, &CircleCB);
        _circleDetector->RegisterNoCircle(NULL, &NoCircleCB);
        _circleDetector->RegisterPrimaryPointCreate(NULL, &Circle_PrimaryCreate);
        _circleDetector->RegisterPrimaryPointDestroy(NULL, &Circle_PrimaryDestroy);
    }

    // Feature Slider.
    if (_featureSlider)
    {
        // Left/Right slider.
        _leftRightSlider = new XnVSelectableSlider1D(3, 0, AXIS_X);
        _leftRightSlider->RegisterActivate(NULL, &LeftRightSlider_OnActivate);
        _leftRightSlider->RegisterDeactivate(NULL, &LeftRightSlider_OnDeactivate);
        _leftRightSlider->RegisterPrimaryPointCreate(NULL, &LeftRightSlider_OnPrimaryCreate);
        _leftRightSlider->RegisterPrimaryPointDestroy(NULL, &LeftRightSlider_OnPrimaryDestroy);
        _leftRightSlider->RegisterValueChange(NULL, &LeftRightSlider_OnValueChange);
        _leftRightSlider->SetValueChangeOnOffAxis(false);

        // Up/Down slider.
        _upDownSlider = new XnVSelectableSlider1D(3, 0, AXIS_Y);
        _upDownSlider->RegisterActivate(NULL, &UpDownSlider_OnActivate);
        _upDownSlider->RegisterDeactivate(NULL, &UpDownSlider_OnDeactivate);
        _upDownSlider->RegisterPrimaryPointCreate(NULL, &UpDownSlider_OnPrimaryCreate);
        _upDownSlider->RegisterPrimaryPointDestroy(NULL, &UpDownSlider_OnPrimaryDestroy);
        _upDownSlider->RegisterValueChange(NULL, &UpDownSlider_OnValueChange);
        _upDownSlider->SetValueChangeOnOffAxis(false);

        // In/Out slider.
        _inOutSlider = new XnVSelectableSlider1D(3, 0, AXIS_Z);
        _inOutSlider->RegisterActivate(NULL, &InOutSlider_OnActivate);
        _inOutSlider->RegisterDeactivate(NULL, &InOutSlider_OnDeactivate);
        _inOutSlider->RegisterPrimaryPointCreate(NULL, &InOutSlider_OnPrimaryCreate);
        _inOutSlider->RegisterPrimaryPointDestroy(NULL, &InOutSlider_OnPrimaryDestroy);
        _inOutSlider->RegisterValueChange(NULL, &InOutSlider_OnValueChange);
        _inOutSlider->SetValueChangeOnOffAxis(false);
    }

    // Feature TrackPad.
    if (_featureTrackPad)
    {
        // Track Pad.
        if (trackpad_columns > 0 && trackpad_rows > 0)
        {
            _trackPad = new XnVSelectableSlider2D(trackpad_columns, trackpad_rows);
        }
        else
        {
            _trackPad = new XnVSelectableSlider2D(4, 9);
        }
        _trackPad->RegisterItemHover(NULL, &TrackPad_ItemHover);
        _trackPad->RegisterItemSelect(NULL, &TrackPad_ItemSelect);
        _trackPad->RegisterPrimaryPointCreate(NULL, &TrackPad_PrimaryCreate);
        _trackPad->RegisterPrimaryPointDestroy(NULL, &TrackPad_PrimaryDestroy);
    }

    // Feature User Tracking.
    if (_featureUserTracking)
    {
        // Setup user generator callbacks.
        XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
        if (!_userGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
        {
            printf("AS3OpenNI :: Supplied user generator doesn't support skeleton\n");
            return 1;
        }
        _userGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);

        // Setup Skeleton detection.
        _userGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);
        if (_userGenerator.GetSkeletonCap().NeedPoseForCalibration())
        {
            _needPose = true;
            if (!_userGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
            {
                printf("AS3OpenNI :: Pose required, but not supported\n");
                return 1;
            }
            _userGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);
            _userGenerator.GetSkeletonCap().GetCalibrationPose(_strPose);
        }
        _userGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
    }

    // Create the broadcaster manager.
    _broadcaster = new XnVBroadcaster();

    // Start generating all.
    _context.StartGeneratingAll();

    // Set the frame rate.
    _status = xnFPSInit(&xnFPS, 180);
    CHECK_RC(_status, "AS3OpenNI :: FPS Init");

    //----------------------------------------------------------------------//
    //------------------------- SETUP DISPLAY SUPPORT ----------------------//
    //----------------------------------------------------------------------//

    // Setup depth and image data.
    _depth.GetMetaData(_depthData);
    _image.GetMetaData(_imageData);

    // Hybrid mode isn't supported in this sample
    if (_imageData.FullXRes() != _depthData.FullXRes() || _imageData.FullYRes() != _depthData.FullYRes())
    {
        printf("AS3OpenNI :: The device depth and image resolution must be equal!\n");
        return 1;
    }

    // RGB is the only image format supported.
    if (_imageData.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
    {
        printf("AS3OpenNI :: The device image format must be RGB24\n");
        return 1;
    }

    // Setup the view points to match between the depth and image maps.
    if (_snapPixels)
        _depth.GetAlternativeViewPointCap().SetViewPoint(_image);

    //-------------------------------------------------------------//
    //------------------------- MAIN LOOP -------------------------//
    //-------------------------------------------------------------//

    // Setup the capture socket server for PC.
    #if (XN_PLATFORM == XN_PLATFORM_WIN32)
        if (_featureDepthMapCapture || _featureRGBCapture || _featureUserTracking)
        {
            if (_useSockets)
            {
                g_AS3Network = network();
                g_AS3Network.init(setupServer);
            }
        }
    #endif

    // Main loop
    while ((!_kbhit()) && (!_quit))
    {
        xnFPSMarkFrame(&xnFPS);
        _context.WaitAndUpdateAll();
        _sessionManager->Update(&_context);

        if (_featureDepthMapCapture) captureDepthMap(g_ucDepthBuffer);
        if (_featureRGBCapture) captureRGB(g_ucImageBuffer);

        #if (XN_PLATFORM == XN_PLATFORM_WIN32)
            if (_featureUserTracking) getPlayers();
        #else
            if (_featureUserTracking) renderSkeleton();
        #endif
    }

    CleanupExit();
}
void mixRGB_Depth()
{
    bool bShouldRun = true;
    int c;
    XnStatus nRetVal = XN_STATUS_OK;
    Context context;

    // Initialize context object
    nRetVal = context.Init();
    if (nRetVal) printf("Error: %s", xnGetStatusString(nRetVal));
    context.SetGlobalMirror(true);

    // Create a DepthGenerator node
    DepthGenerator depth;
    nRetVal = depth.Create(context);
    if (nRetVal) printf("Error: %s", xnGetStatusString(nRetVal));

    // Create an ImageGenerator node
    ImageGenerator image;
    nRetVal = image.Create(context);
    if (nRetVal) printf("Error: %s", xnGetStatusString(nRetVal));

    // Register the DepthGenerator to the ImageGenerator viewpoint
    nRetVal = depth.GetAlternativeViewPointCap().SetViewPoint(image);
    if (nRetVal) printf("Error: %s", xnGetStatusString(nRetVal));

    // Set it to VGA maps at 30 fps
    XnMapOutputMode mapMode;
    mapMode.nXRes = XN_VGA_X_RES;
    mapMode.nYRes = XN_VGA_Y_RES;
    mapMode.nFPS = 30;
    nRetVal = depth.SetMapOutputMode(mapMode);

    // Make it start generating data
    nRetVal = context.StartGeneratingAll();
    if (nRetVal) printf("Error: %s", xnGetStatusString(nRetVal));

    // Create the OpenCV buffers. The depth image is a header over the matrix
    // data (the original allocated a full image with cvCreateImage but released
    // it with cvReleaseImageHeader, leaking its pixel buffer).
    CvMat* depthMetersMat = cvCreateMat(480, 640, CV_16UC1);
    IplImage* kinectDepthImage = cvCreateImageHeader(cvSize(640, 480), IPL_DEPTH_16U, 1);
    IplImage* rgbimg = cvCreateImageHeader(cvSize(640, 480), 8, 3);

    // Main loop
    while (bShouldRun)
    {
        // Wait for new data to be available
        nRetVal = context.WaitOneUpdateAll(depth);
        if (nRetVal)
        {
            printf("Error: %s", xnGetStatusString(nRetVal));
            continue;
        }

        // Take the current depth map, scaled up for visibility
        const XnDepthPixel* pDepthMap = depth.GetDepthMap();
        for (int y = 0; y < XN_VGA_Y_RES; y++)
        {
            for (int x = 0; x < XN_VGA_X_RES; x++)
            {
                depthMetersMat->data.s[y * XN_VGA_X_RES + x] = 10 * pDepthMap[y * XN_VGA_X_RES + x];
            }
        }
        cvGetImage(depthMetersMat, kinectDepthImage);

        // Take the current image
        const XnRGB24Pixel* pImage = image.GetRGB24ImageMap();
        XnRGB24Pixel* ucpImage = const_cast<XnRGB24Pixel*>(pImage);
        cvSetData(rgbimg, ucpImage, 640 * 3);

        // Show both maps (the original displayed the depth image in a window
        // labelled "RGB" and never showed rgbimg). Note: OpenNI delivers RGB24
        // while OpenCV expects BGR, so red and blue appear swapped unless
        // converted.
        cvShowImage("Depth", kinectDepthImage);
        cvShowImage("RGB", rgbimg);

        c = cvWaitKey(1);
        if (c == 27)    // ESC
            bShouldRun = false;
    }

    cvReleaseImageHeader(&rgbimg);
    cvReleaseImageHeader(&kinectDepthImage);
    cvReleaseMat(&depthMetersMat);
    context.Shutdown();
}
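// Once the depth map is registered to the RGB viewpoint with SetViewPoint(),
// as in the listings above, the depth pixel and the color pixel at the same
// (x, y) describe the same scene point, which is what makes colored point
// clouds possible. Below is a sketch (not from the original source) of turning
// one registered pixel into a colored 3D point via OpenNI's projective-to-
// real-world conversion; coordinates come back in millimetres.
bool getColoredPoint(xn::DepthGenerator& depth, xn::ImageGenerator& image,
                     int x, int y, XnPoint3D& outPos, XnRGB24Pixel& outColor)
{
    const XnDepthPixel* pDepth = depth.GetDepthMap();
    const XnRGB24Pixel* pImage = image.GetRGB24ImageMap();
    XnDepthPixel z = pDepth[y * XN_VGA_X_RES + x];
    if (z == 0)
        return false;   // no reading here (shadow / no-sample value)
    XnPoint3D projective;
    projective.X = (XnFloat)x;
    projective.Y = (XnFloat)y;
    projective.Z = (XnFloat)z;
    depth.ConvertProjectiveToRealWorld(1, &projective, &outPos);
    outColor = pImage[y * XN_VGA_X_RES + x];
    return true;
}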