// Sample entry point: initialize OpenNI from an .oni recording (argv[1]) or
// from the sample XML, create depth + user generators, register the
// user/calibration/pose callbacks, then hand control to GLUT (never returns).
int main(int argc, char **argv)
{
	XnStatus nRetVal = XN_STATUS_OK;
	if (argc > 1)
	{
		// Playback mode: bare context + recording file.
		nRetVal = g_Context.Init();
		CHECK_RC(nRetVal, "Init");
		nRetVal = g_Context.OpenFileRecording(argv[1]);
		if (nRetVal != XN_STATUS_OK)
		{
			printf("Can't open recording %s: %s\n", argv[1], xnGetStatusString(nRetVal));
			return 1;
		}
	}
	else
	{
		// Live mode: full pipeline described by the sample XML file.
		nRetVal = g_Context.InitFromXmlFile(SAMPLE_XML_PATH);
		CHECK_RC(nRetVal, "InitFromXml");
	}
	// NOTE(review): owning raw pointer, never deleted — lives for the whole
	// process; fine for a sample, but worth an RAII wrapper in real code.
	g_tunnel = new Tunnel();
	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
	CHECK_RC(nRetVal, "Find depth generator");
	// Reuse an existing user generator if the XML provided one; otherwise
	// create one directly.
	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	if (nRetVal != XN_STATUS_OK)
	{
		nRetVal = g_UserGenerator.Create(g_Context);
		CHECK_RC(nRetVal, "Find user generator");
	}
	XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
	{
		printf("Supplied user generator doesn't support skeleton\n");
		return 1;
	}
	// Wire up new/lost-user and calibration lifecycle callbacks.
	g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
	g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);
	if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
	{
		// Calibration requires an explicit pose: pose detection must be
		// supported, and we also need the pose's name for later lookup.
		g_bNeedPose = TRUE;
		if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
		{
			printf("Pose required, but not supported\n");
			return 1;
		}
		g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);
		g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
	}
	// Track all joints.
	g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
	nRetVal = g_Context.StartGeneratingAll();
	CHECK_RC(nRetVal, "StartGenerating");
	// Enter the GL main loop; glutMainLoop() does not return.
	glInit(&argc, argv);
	glutMainLoop();
}
bool SetupPrimesense(void) { XnStatus nRetVal = XN_STATUS_OK; xn::ScriptNode scriptNode; xn::EnumerationErrors errors; nRetVal = g_context.Init(); //nRetVal = g_context.InitFromXmlFile("./config.xml", scriptNode, &errors); if (nRetVal == XN_STATUS_NO_NODE_PRESENT) { XnChar strError[1024]; errors.ToString(strError, 1024); printf("%s\n", strError); return (nRetVal); } else if (nRetVal != XN_STATUS_OK) { printf("Open failed: %s\n", xnGetStatusString(nRetVal)); return (nRetVal); } SetupDepth(g_context); //SetupImage(g_context); //SetupIR(g_context); if ((nRetVal = g_context.StartGeneratingAll()) != XN_STATUS_OK) { fprintf(stderr,"Could not start: %s\n", xnGetStatusString(nRetVal)); return FALSE; } return TRUE; }
// OpenNIの初期化 void initOpenNI( const std::string& recordFileName ) { // コンテキストの初期化 XnStatus nRetVal = context.Init(); if (nRetVal != XN_STATUS_OK) { throw std::runtime_error(xnGetStatusString(nRetVal)); } // 記録したファイルを開く nRetVal = context.OpenFileRecording( recordFileName.c_str() ); if (nRetVal != XN_STATUS_OK) { throw std::runtime_error(xnGetStatusString(nRetVal)); } // プレーヤーを作成する nRetVal = context.FindExistingNode(XN_NODE_TYPE_PLAYER, player); if (nRetVal != XN_STATUS_OK) { throw std::runtime_error(xnGetStatusString(nRetVal)); } // デバイスを作成する nRetVal = audio.Create(context); if (nRetVal != XN_STATUS_OK) { throw std::runtime_error(xnGetStatusString(nRetVal)); } // WAVEデータを取得する nRetVal = audio.GetWaveOutputMode(waveMode); if (nRetVal != XN_STATUS_OK) { throw std::runtime_error(xnGetStatusString(nRetVal)); } // データの取得を開始する context.StartGeneratingAll(); }
void initialize() { std::cout << endl << "Initializing Kinect... " << endl << endl; g_RetVal = g_Context.Init(); g_RetVal = g_DepthGenerator.Create(g_Context); if (g_RetVal != XN_STATUS_OK) printf("Failed creating DEPTH generator %s\n", xnGetStatusString(g_RetVal)); XnMapOutputMode outputMode; outputMode.nXRes = g_im_w; outputMode.nYRes = g_im_h; outputMode.nFPS = g_fps; g_RetVal = g_DepthGenerator.SetMapOutputMode(outputMode); if (g_RetVal != XN_STATUS_OK) printf("Failed setting the DEPTH output mode %s\n", xnGetStatusString(g_RetVal)); g_RetVal = g_Context.StartGeneratingAll(); if (g_RetVal != XN_STATUS_OK) printf("Failed starting generating all %s\n", xnGetStatusString(g_RetVal)); g_DepthGenerator.GetIntProperty ("ZPD", g_focal_length); g_DepthGenerator.GetRealProperty ("ZPPS", g_pixel_size); g_pixel_size *= 2.f; g_im3D.create(g_im_h,g_im_w,CV_32FC3); }
// Set up the full OpenNI pipeline for this external (Pd/Max-style, `post`
// logging) host: VGA image + depth generators, a user generator with skeleton
// support, user/calibration/pose callbacks, then start generation.
// RC() wraps each call with its status and a human-readable label.
information()
{
	RC(context.Init(), "Context Intialized");

	// 640x480 @ 30fps for both the image and depth streams.
	XnMapOutputMode mode;
	mode.nXRes = XN_VGA_X_RES;
	mode.nYRes = XN_VGA_Y_RES;
	mode.nFPS = 30;
	RC(image.Create(context), "Create image buffer");
	RC(image.SetMapOutputMode(mode), "Set image mode");
	RC(depth.Create(context), "Create depth buffer");
	RC(depth.SetMapOutputMode(mode), "Set depth mode");

	// NOTE(review): `q` is built but never passed to Create/Enumerate (the
	// code that used it is commented out below), so the skeleton requirement
	// is not actually enforced through the query.
	xn::Query q;
	RC(q.AddSupportedCapability(XN_CAPABILITY_SKELETON), "Request skeleton");

	// Reuse an existing user generator if the context has one; otherwise
	// create a fresh one. NOTE(review): FindExistingNode reports failure via
	// its status return value — this catch only triggers if RC() itself
	// throws; confirm RC()'s failure behavior.
	try {
		RC(context.FindExistingNode(XN_NODE_TYPE_USER, user), "User generator");
	} catch (...) {
		RC(user.Create(context), "Get skeleton!!!");
	}
	//	RC(user.Create(context, &q), "Get skeleton!!!");
	//
	//	xn::NodeInfoList il;
	//	RC(context.EnumerateProductionTrees(XN_NODE_TYPE_USER, &q, il, NULL),
	//	   "Enumerate nodes");
	//
	//	xn::NodeInfo i = *il.Begin();
	//	RC(context.CreateProductionTree(i), "Create skeleton node");
	//	RC(i.GetInstance(user), "Get skeleton");

	// User lifecycle + calibration callbacks (user object passed as cookie).
	user.RegisterUserCallbacks(User_NewUser, NULL, NULL, hUserCallbacks);
	user.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, &user, hCalibrationCallbacks);
	if (user.GetSkeletonCap().NeedPoseForCalibration()) {
		if (!user.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)) {
			// Pose needed but unavailable: log and continue without tracking.
			post("Pose required, but not supported\n");
		} else {
			user.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, &user, hPoseCallbacks);
			user.GetSkeletonCap().GetCalibrationPose(g_strPose);
			user.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
		}
	}
	RC(context.StartGeneratingAll(), "Start generating data");
	post("Kinect initialized!\n");
}
// OpenNIの初期化 void initOpenNI() { // コンテキストの初期化 XnStatus nRetVal = context.Init(); if (nRetVal != XN_STATUS_OK) { throw std::runtime_error(xnGetStatusString(nRetVal)); } // デバイスを作成する(XMLからの生成だと、デバイスがないといわれる) nRetVal = audio.Create(context); if (nRetVal != XN_STATUS_OK) { throw std::runtime_error(xnGetStatusString(nRetVal)); } // 取得するWAVEデータの設定 waveMode.nSampleRate = 44100; waveMode.nChannels = 2; waveMode.nBitsPerSample = 16; nRetVal = audio.SetWaveOutputMode(waveMode); if (nRetVal != XN_STATUS_OK) { throw std::runtime_error(xnGetStatusString(nRetVal)); } // レコーダーの作成 nRetVal = recorder.Create( context ); if (nRetVal != XN_STATUS_OK) { throw std::runtime_error(xnGetStatusString(nRetVal)); } // 出力先の設定 nRetVal = recorder.SetDestination( XN_RECORD_MEDIUM_FILE, RECORDE_PATH ); if (nRetVal != XN_STATUS_OK) { throw std::runtime_error(xnGetStatusString(nRetVal)); } // 記録するノードの追加 nRetVal = recorder.AddNodeToRecording(audio, XN_CODEC_UNCOMPRESSED); if (nRetVal != XN_STATUS_OK) { throw std::runtime_error(xnGetStatusString(nRetVal)); } // 記録の開始 nRetVal = recorder.Record(); if (nRetVal != XN_STATUS_OK) { throw std::runtime_error(xnGetStatusString(nRetVal)); } // データの取得を開始する context.StartGeneratingAll(); }
bool kinect_reader2::init() { nRetVal = XN_STATUS_OK; nRetVal = g_Context.Init(); if (nRetVal != XN_STATUS_OK) { printf("Creating context failed: %s\n", xnGetStatusString(nRetVal)); return false; } nRetVal = g_DepthGenerator.Create(g_Context); if(nRetVal != XN_STATUS_OK) { printf("Creating depth generator failed: %s\n", xnGetStatusString(nRetVal)); return false; } g_DepthGenerator.GetMirrorCap().SetMirror(true); nRetVal = g_UserGenerator.Create(g_Context); if(nRetVal != XN_STATUS_OK) { printf("Creating user generator failed: %s\n", xnGetStatusString(nRetVal)); return false; } XnCallbackHandle hUserCallbacks, hCalibrationStart, hCalibrationComplete, hPoseDetected; if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) { printf("Supplied user generator doesn't support skeleton\n"); return false; } g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks); g_UserGenerator.GetSkeletonCap().RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart); g_UserGenerator.GetSkeletonCap().RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete); if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)) { printf("Pose required, but not supported\n"); return false; } g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected); g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL); g_Context.StartGeneratingAll(); return true; }
void xn_init(const char *filename) { xn_call_and_check(gContext.Init(), "init context"); xn_call_and_check(gContext.OpenFileRecording(filename), "set global mirror mode."); gContext.FindExistingNode(XN_NODE_TYPE_PLAYER, gPlayer); gPlayer.SetRepeat(false); XnStatus rc = XN_STATUS_OK; xn::ImageGenerator image; rc = gContext.FindExistingNode(XN_NODE_TYPE_IMAGE, image); gImageDrawer = (rc == XN_STATUS_OK)? new ImageDrawer(image) : NULL; xn::DepthGenerator depth; rc = gContext.FindExistingNode(XN_NODE_TYPE_DEPTH, depth); gDepthDrawer = (rc == XN_STATUS_OK)? new DepthDrawer(depth) : NULL; setMainWindow(IMAGE_WINDOW); }
// Python-facing wrapper around xn::Context::Init(): logs progress in debug
// builds and routes the status through check() for error reporting.
void Context_Init_wrapped(xn::Context& self)
{
#ifdef _DEBUG
	PyCout << "Initializing OpenNI.." << std::endl;
#endif
	const XnStatus status = self.Init();
#ifdef _DEBUG
	if (status != XN_STATUS_OK)
	{
		PyCout << "OpenNI failed to initialize: " << xnGetStatusName(status) << std::endl;
	}
	else
		PyCout << "OpenNI was initialized successfully" << std::endl;
#endif
	check(status);
}
void InitialKinect() { //1. Initial Context mContext.Init(); // 2. Set Map_mode (建構函數已定義) mapMode.nXRes = 640; mapMode.nYRes = 480; mapMode.nFPS = 30; // 3.a Create Depth_Generator mDepthGenerator.Create( mContext ); mDepthGenerator.SetMapOutputMode( mapMode ); // 3.b Create Image_Generator mImageGenerator.Create( mContext ); mImageGenerator.SetMapOutputMode( mapMode ); // 4. Correct view port mDepthGenerator.GetAlternativeViewPointCap().SetViewPoint( mImageGenerator ); std::cout << "正確:KinectSensor Intial Correct!" << std::endl; }
void initFromContextFile()
{
	/* Initialize Context from XML file + lookup generators of the needed type:
	   - depth (for depth images)
	   - image (for RGB images)
	   - skeleton (for Skeleton position and tracking)
	   Errors in the XML file, lack thereof or lack of generators of the
	   mentioned types will stop the program altogether. */
	printf("xml file is: %s\n", getKinectXMLConfig());
	XnStatus nRetVal = XN_STATUS_OK;
	xn::EnumerationErrors errors;
	if (g_useSpecifiedDevice)
	{
		// Manual path: init a bare context, enumerate all device nodes, and
		// only run the XML script once we find the device whose creation info
		// matches the requested one.
		xnLogInitFromXmlFile(getKinectXMLConfig());
		nRetVal = g_Context.Init();
		if (nRetVal != XN_STATUS_OK)
		{
			printf("Open failed: %s\n", xnGetStatusString(nRetVal));
			exit(1);
		}
		// find devices
		xn::NodeInfoList list;
		nRetVal = g_Context.EnumerateProductionTrees(XN_NODE_TYPE_DEVICE, NULL, list, &errors);
		if (nRetVal != XN_STATUS_OK)
		{
			printf("Enumerate devices failed: %s\n", xnGetStatusString(nRetVal));
			exit(1);
		}
		for (xn::NodeInfoList::Iterator it = list.Begin(); it != list.End(); ++it)
		{
			xn::NodeInfo deviceNodeInfo = *it;
			xn::Device deviceNode;
			deviceNodeInfo.GetInstance(deviceNode);
			XnBool bExists = deviceNode.IsValid();
			if (!bExists)
			{
				// Instantiate the device node so we can query it.
				g_Context.CreateProductionTree(deviceNodeInfo, deviceNode);
				// this might fail.
			}
			if (deviceNode.IsValid() && deviceNode.IsCapabilitySupported(XN_CAPABILITY_DEVICE_IDENTIFICATION))
			{
				deviceNodeInfo.GetAdditionalData();
				// Match devices by their creation-info string.
				const XnChar* ci = deviceNodeInfo.GetCreationInfo();
				if (deviceMatches(std::string(ci)))
				{
					// now run the rest of the XML
					nRetVal = g_Context.RunXmlScriptFromFile(getKinectXMLConfig(), g_scriptNode, &errors);
					if (nRetVal != XN_STATUS_OK)
					{
						printf("run xml script from file failed: %s\n", xnGetStatusString(nRetVal));
						exit(1);
					}
				}
				else
				{
					// release the device if we created it
					if (!bExists && deviceNode.IsValid())
					{
						deviceNode.Release();
					}
				}
			}
		}
	}
	else
	{
		// Simple path: let OpenNI build the whole pipeline from the XML file.
		nRetVal = g_Context.InitFromXmlFile(getKinectXMLConfig(), g_scriptNode, &errors);
		if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
		{
			XnChar strError[1024];
			errors.ToString(strError, 1024);
			printf("%s\n", strError);
			exit(1);
		}
		else if (nRetVal != XN_STATUS_OK)
		{
			printf("Open failed: %s\n", xnGetStatusString(nRetVal));
			exit(1);
		}
	}
	// The XML must have produced depth, image, and user generators, and the
	// user generator must support skeleton tracking — otherwise abort.
	if (g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator) != XN_STATUS_OK)
	{
		printf("XML file should contain a depth generator\n");
		exit(1);
	}
	if (g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator) != XN_STATUS_OK)
	{
		printf("XML file should contain an image generator\n");
		exit(1);
	}
	if (g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator) != XN_STATUS_OK)
	{
		printf("XML file should contain an user generator\n");
		exit(1);
	}
	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
	{
		printf("Supplied user generator doesn't support skeleton\n");
		exit(1);
	}
}
// Receiver-side initialization: allocate the 3D point-image buffer, then open
// a TCP listening socket, accept exactly one client, and receive the image
// size, focal length, and pixel size measured by the sender's depth camera.
// Returns 0 on success and 1 on any Winsock failure.
int initialize(){
	//std::cout << "initializing kinect... " << endl;

	// Initialize the OpenNI context object. All local depth-generator setup
	// below stays disabled: this build receives the camera parameters over
	// the socket instead of reading them from local hardware.
	g_RetVal = g_Context.Init();
	//g_RetVal = g_DepthGenerator.Create(g_Context);
	//if (g_RetVal != XN_STATUS_OK)
	//	printf("Failed creating DEPTH generator %s\n", xnGetStatusString(g_RetVal));
	//XnMapOutputMode outputMode;
	//outputMode.nXRes = g_im_w;
	//outputMode.nYRes = g_im_h;
	//outputMode.nFPS = g_fps;
	//g_RetVal = g_DepthGenerator.SetMapOutputMode(outputMode);
	//if (g_RetVal != XN_STATUS_OK)
	//	printf("Failed setting the DEPTH output mode %s\n", xnGetStatusString(g_RetVal));
	//g_RetVal = g_Context.StartGeneratingAll();
	//if (g_RetVal != XN_STATUS_OK)
	//	printf("Failed starting generating all %s\n", xnGetStatusString(g_RetVal));
	// get the focal length in mm (ZPS = zero plane distance)
	//g_DepthGenerator.GetIntProperty ("ZPD", g_focal_length);
	// get the pixel size in mm ("ZPPS" = pixel size at zero plane)
	//g_DepthGenerator.GetRealProperty ("ZPPS", g_pixel_size);
	//g_pixel_size *= 2.f;

	// Buffer for the reconstructed 3D points (one float3 per pixel).
	g_im3D.create(g_im_h,g_im_w,CV_32FC3);

	// FIX: sizeof() yields size_t — cast so the %d specifier is well-defined.
	printf("size of g_pixel_size: %d\n", (int)sizeof(g_pixel_size));

	///for sockets
	// Initialize Winsock
	if(1) {
		iResult = WSAStartup(MAKEWORD(2,2), &wsaData);
		if (iResult != 0) {
			printf("WSAStartup failed with error: %d\n", iResult);
			return 1;
		}
		printf("WSAStartup() success\n");

		// Request a passive (listening) TCP/IPv4 endpoint.
		ZeroMemory(&hints, sizeof(hints));
		hints.ai_family = AF_INET;
		hints.ai_socktype = SOCK_STREAM;
		hints.ai_protocol = IPPROTO_TCP;
		hints.ai_flags = AI_PASSIVE;

		// Resolve the server address and port
		iResult = getaddrinfo(NULL, DEFAULT_PORT, &hints, &result);
		if ( iResult != 0 ) {
			printf("getaddrinfo failed with error: %d\n", iResult);
			WSACleanup();
			return 1;
		}
		printf("getaddrinfo success\n");

		// Create a SOCKET for connecting to server
		ListenSocket = socket(result->ai_family, result->ai_socktype, result->ai_protocol);
		if (ListenSocket == INVALID_SOCKET) {
			printf("socket failed with error: %ld\n", WSAGetLastError());
			freeaddrinfo(result);
			WSACleanup();
			return 1;
		}
		printf("socket create success\n");

		// Setup the TCP listening socket
		iResult = bind( ListenSocket, result->ai_addr, (int)result->ai_addrlen);
		if (iResult == SOCKET_ERROR) {
			printf("bind failed with error: %d\n", WSAGetLastError());
			freeaddrinfo(result);
			closesocket(ListenSocket);
			WSACleanup();
			return 1;
		}
		printf("bind success\n");

		freeaddrinfo(result);

		iResult = listen(ListenSocket, SOMAXCONN);
		if (iResult == SOCKET_ERROR) {
			printf("listen failed with error: %d\n", WSAGetLastError());
			closesocket(ListenSocket);
			WSACleanup();
			return 1;
		}
		printf("listen success\n");

		// Accept a client socket (blocks until the sender connects).
		ClientSocket = accept(ListenSocket, NULL, NULL);
		if (ClientSocket == INVALID_SOCKET) {
			printf("accept failed with error: %d\n", WSAGetLastError());
			closesocket(ListenSocket);
			WSACleanup();
			return 1;
		}
		printf("accept() success\n");

		// No longer need server socket
		closesocket(ListenSocket);
		printf("listening socket closed - no longer needed\n");

		//receive size of the image
		iResult = recv(ClientSocket, (char*)&recvImageSize, sizeof(recvImageSize), 0);
		if (iResult > 0) {
			printf("Bytes received: %d\n", iResult);
			//int imgSize = ntohl(recvImageSize);
			printf("Size of image: %d\n", recvImageSize);
		}
		else if (iResult == 0)
			printf("Connection closing...\n");
		else {
			printf("recv failed with error: %d\n", WSAGetLastError());
			closesocket(ClientSocket);
			WSACleanup();
			return 1;
		}

		//receive the focal length in mm
		// NOTE(review): printed with %d — confirm g_focal_length is a 32-bit
		// integer type; a 64-bit XnUInt64 would make this format mismatch.
		iResult = recv(ClientSocket, (char*)&g_focal_length, sizeof(g_focal_length), 0);
		if (iResult > 0) {
			printf("Bytes received: %d\n", iResult);
			//int imgSize = ntohl(g_focal_length);
			printf("focal length in mm: %d\n", g_focal_length);
		}
		else if (iResult == 0)
			printf("Connection closing...\n");
		else {
			printf("recv failed with error: %d\n", WSAGetLastError());
			closesocket(ClientSocket);
			WSACleanup();
			return 1;
		}

		//receive the pixel size in mm (sent as a string, parsed to double)
		//iResult = recv(ClientSocket, (char*)&g_pixel_size, sizeof(g_pixel_size), 0);
		iResult = recv(ClientSocket, (char*)&g_pixel_size_str, sizeof(g_pixel_size_str), 0);
		if (iResult > 0) {
			printf("Bytes received: %d\n", iResult);
			//float pixelSize = ntohl(g_pixel_size);
			printf("pixel size in mm(string): %s\n", g_pixel_size_str);
			g_pixel_size = atof(g_pixel_size_str);
			printf("pixel size in mm(double): %f\n", g_pixel_size);
		}
		else if (iResult == 0)
			printf("Connection closing...\n");
		else {
			printf("recv failed with error: %d\n", WSAGetLastError());
			closesocket(ClientSocket);
			WSACleanup();
			return 1;
		}
	}
	///for sockets - end

	// FIX: control previously fell off the end of this non-void function,
	// which is undefined behavior in C++ — report success explicitly.
	return 0;
}
// Entry point: initialize the OpenNI context plus 640x480@30fps depth and
// image generators, then set up a GLUT window with RGB/depth textures and
// hand control to the GLUT main loop (never returns).
// checkError() reports each step; exit(-1) aborts on the first failure.
int main(int argc, char **argv)
{
	nRetVal = XN_STATUS_OK;

	/* Initialize the context (camera data) */
	nRetVal = context.Init();
	checkError("Fehler beim Initialisieren des Context", nRetVal)?0:exit(-1);

	/* Create the depth generator */
	nRetVal = depth.Create(context);
	checkError("Fehler beim Erstellen des Tiefengenerators", nRetVal)?0:exit(-1);

	/* Configure the depth generator: VGA @ 30fps */
	XnMapOutputMode outputModeDepth;
	outputModeDepth.nXRes = 640;
	outputModeDepth.nYRes = 480;
	outputModeDepth.nFPS = 30;
	nRetVal = depth.SetMapOutputMode(outputModeDepth);
	checkError("Fehler beim Konfigurieren des Tiefengenerators", nRetVal)?0:exit(-1);

	/* Create the image generator */
	nRetVal = image.Create(context);
	checkError("Fehler beim Erstellen des Bildgenerators", nRetVal)?0:exit(-1);

	/* Configure the image generator: VGA @ 30fps */
	XnMapOutputMode outputModeImage;
	outputModeImage.nXRes = 640;
	outputModeImage.nYRes = 480;
	outputModeImage.nFPS = 30;
	nRetVal = image.SetMapOutputMode(outputModeImage);
	checkError("Fehler beim Konfigurieren des Bildgenerators", nRetVal)?0:exit(-1);

	/* Start the generators - full speed ahead! */
	nRetVal = context.StartGeneratingAll();
	checkError("Fehler beim Starten der Generatoren", nRetVal)?0:exit(-1);

	/* Initialize GLUT and create the window */
	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
	glutInitWindowSize(WINDOW_SIZE_X, WINDOW_SIZE_Y);
	glutInitWindowPosition(300,150);
	win = glutCreateWindow("kinect-head-tracking");
	glClearColor(0, 0, 0, 0.0);   // background color (NOTE(review): values are black, although the original comment claimed "a light blue")
	glEnable(GL_DEPTH_TEST);      // enable depth testing
	glDepthFunc(GL_LEQUAL);
//	glEnable(GL_CULL_FACE);       // enable backface culling
//	glEnable(GL_ALPHA_TEST);
//	glAlphaFunc(GL_GEQUAL, 1);

	/* Textures for the RGB and depth camera images */
	glGenTextures(1, &texture_rgb);
	glBindTexture(GL_TEXTURE_2D, texture_rgb);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
	glGenTextures(1, &texture_depth);
	glBindTexture(GL_TEXTURE_2D, texture_depth);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);

	// Register GLUT callbacks and enter the main loop (does not return).
	glutDisplayFunc(glut_display);
	glutIdleFunc(glut_idle);
	glutMouseFunc(glut_mouse);
	glutMotionFunc(glut_mouse_motion);
	glutKeyboardFunc(glut_keyboard);
	glutMainLoop();
	return 0;
}
// ROS skeleton-tracker entry point: advertise the skeleton topics, initialize
// OpenNI from an .oni recording (argv[1]) or the packaged XML config, set up
// user/skeleton tracking callbacks, and enter the GLUT main loop.
int main(int argc, char **argv)
{
	// NOTE(review): fixed startup delay — presumably waits for the device or
	// ROS master to come up; confirm whether it is still needed.
	sleep(10);
	ros::init(argc, argv, "skel_tracker");
	ros::NodeHandle nh_;

	// Read the device_id parameter from the server
	int device_id;
//	param_nh.param ("device_id", device_id, argc > 1 ? atoi (argv[1]) : 0);

	// Publishers for the polygonal skeleton map and skeleton messages.
	pmap_pub = nh_.advertise<mapping_msgs::PolygonalMap> ("skeletonpmaps", 100);
	skel_pub = nh_.advertise<body_msgs::Skeletons> ("skeletons", 100);

	XnStatus nRetVal = XN_STATUS_OK;
	if (argc > 1)
	{
		// Playback mode: bare context + recording file.
		nRetVal = g_Context.Init();
		CHECK_RC(nRetVal, "Init");
		nRetVal = g_Context.OpenFileRecording(argv[1]);
		if (nRetVal != XN_STATUS_OK)
		{
			printf("Can't open recording %s: %s\n", argv[1], xnGetStatusString(nRetVal));
			return 1;
		}
	}
	else
	{
		// Live mode: pipeline described by the packaged XML file.
		std::string configFilename = ros::package::getPath("openni_tracker") + "/openni_tracker.xml";
		nRetVal = g_Context.InitFromXmlFile(configFilename.c_str());
		CHECK_RC(nRetVal, "InitFromXml");
	}

	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
	CHECK_RC(nRetVal, "Find depth generator");
	// Reuse an existing user generator if the XML provided one; otherwise
	// create one directly.
	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	if (nRetVal != XN_STATUS_OK)
	{
		nRetVal = g_UserGenerator.Create(g_Context);
		CHECK_RC(nRetVal, "Find user generator");
	}

	XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
	{
		printf("Supplied user generator doesn't support skeleton\n");
		return 1;
	}
	// User lifecycle + calibration callbacks.
	g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
	g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);
	if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
	{
		// Calibration requires an explicit pose: pose detection must be
		// supported, and we need the pose's name for later lookup.
		g_bNeedPose = TRUE;
		if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
		{
			printf("Pose required, but not supported\n");
			return 1;
		}
		g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);
		g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
	}
	// Track all joints.
	g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
	nRetVal = g_Context.StartGeneratingAll();
	CHECK_RC(nRetVal, "StartGenerating");
	// Enter the GL main loop; glutMainLoop() does not return.
	glInit(&argc, argv);
	glutMainLoop();
}
//extern "C" void baz(/*type* var*/){ // var->foo(); //} extern "C" void init(){ printf("C init()\n"); CHECK(context.Init()); }
// Sample entry point: initialize OpenNI from a recording (argv[1]) or the
// sample XML, substitute a mock depth generator when no real one exists, wire
// up user/calibration/pose callbacks, then run the hand-viewer / GL(ES) loop.
int main(int argc, char **argv)
{
	XnStatus nRetVal = XN_STATUS_OK;

	if (argc > 1)
	{
		// Playback mode: bare context + recording file (player returned in g_Player).
		nRetVal = g_Context.Init();
		CHECK_RC(nRetVal, "Init");
		nRetVal = g_Context.OpenFileRecording(argv[1], g_Player);
		if (nRetVal != XN_STATUS_OK)
		{
			printf("Can't open recording %s: %s\n", argv[1], xnGetStatusString(nRetVal));
			return 1;
		}
	}
	else
	{
		// Live mode: pipeline described by the sample XML file.
		xn::EnumerationErrors errors;
		nRetVal = g_Context.InitFromXmlFile(SAMPLE_XML_PATH, g_scriptNode, &errors);
		if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
		{
			XnChar strError[1024];
			errors.ToString(strError, 1024);
			printf("%s\n", strError);
			return (nRetVal);
		}
		else if (nRetVal != XN_STATUS_OK)
		{
			printf("Open failed: %s\n", xnGetStatusString(nRetVal));
			return (nRetVal);
		}
	}

	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
	if (nRetVal != XN_STATUS_OK)
	{
		// No real depth node: build a mock depth generator with a 320x240
		// mode, a fixed FOV, and one empty depth frame so the rest of the
		// pipeline can run.
		printf("No depth generator found. Using a default one...");
		xn::MockDepthGenerator mockDepth;
		nRetVal = mockDepth.Create(g_Context);
		CHECK_RC(nRetVal, "Create mock depth");

		// set some defaults
		XnMapOutputMode defaultMode;
		defaultMode.nXRes = 320;
		defaultMode.nYRes = 240;
		defaultMode.nFPS = 30;
		nRetVal = mockDepth.SetMapOutputMode(defaultMode);
		CHECK_RC(nRetVal, "set default mode");

		// set FOV (radians)
		XnFieldOfView fov;
		fov.fHFOV = 1.0225999419141749;
		fov.fVFOV = 0.79661567681716894;
		nRetVal = mockDepth.SetGeneralProperty(XN_PROP_FIELD_OF_VIEW, sizeof(fov), &fov);
		CHECK_RC(nRetVal, "set FOV");

		XnUInt32 nDataSize = defaultMode.nXRes * defaultMode.nYRes * sizeof(XnDepthPixel);
		// NOTE(review): this frame buffer is never freed — it lives for the
		// whole process lifetime of the sample.
		XnDepthPixel* pData = (XnDepthPixel*)xnOSCallocAligned(nDataSize, 1, XN_DEFAULT_MEM_ALIGN);
		nRetVal = mockDepth.SetData(1, 0, nDataSize, pData);
		CHECK_RC(nRetVal, "set empty depth map");

		g_DepthGenerator = mockDepth;
	}

	// Reuse an existing user generator if one exists; otherwise create one.
	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	if (nRetVal != XN_STATUS_OK)
	{
		nRetVal = g_UserGenerator.Create(g_Context);
		CHECK_RC(nRetVal, "Find user generator");
	}

	XnCallbackHandle hUserCallbacks, hCalibrationStart, hCalibrationComplete, hPoseDetected, hCalibrationInProgress, hPoseInProgress;
	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
	{
		printf("Supplied user generator doesn't support skeleton\n");
		return 1;
	}
	// User lifecycle + calibration lifecycle callbacks.
	nRetVal = g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
	CHECK_RC(nRetVal, "Register to user callbacks");
	nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart);
	CHECK_RC(nRetVal, "Register to calibration start");
	nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete);
	CHECK_RC(nRetVal, "Register to calibration complete");

	if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
	{
		// Calibration requires an explicit pose: require pose detection and
		// register for pose detected / pose progress events too.
		g_bNeedPose = TRUE;
		if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
		{
			printf("Pose required, but not supported\n");
			return 1;
		}
		nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
		CHECK_RC(nRetVal, "Register to Pose Detected");
		g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
		nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseInProgress(MyPoseInProgress, NULL, hPoseInProgress);
		CHECK_RC(nRetVal, "Register to pose in progress");
	}

	// Track all joints and watch calibration progress.
	g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
	nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationInProgress(MyCalibrationInProgress, NULL, hCalibrationInProgress);
	CHECK_RC(nRetVal, "Register to calibration in progress");

	nRetVal = g_Context.StartGeneratingAll();
	CHECK_RC(nRetVal, "StartGenerating");

	// NOTE(review): a second, separate context (g_context) is initialized
	// below for the hand viewer — this looks merged from another sample;
	// confirm both pipelines are really intended to coexist in one process.
	XnStatus rc;
	xn::EnumerationErrors errors;
	// Create a context with default settings
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, g_scriptNode, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (rc);
	}
	else if (rc != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(rc));
		return (rc);
	}

	// Run the hand viewer on the second context.
	SimpleViewer& viewer = HandViewer::CreateInstance(g_context);
	rc = viewer.Init(argc, argv);
	if (rc != XN_STATUS_OK)
	{
		printf("Viewer init failed: %s\n", xnGetStatusString(rc));
		return 1;
	}
	rc = viewer.Run();
	if (rc != XN_STATUS_OK)
	{
		printf("Viewer run failed: %s\n", xnGetStatusString(rc));
		return 1;
	}

#ifndef USE_GLES
	// Desktop GL: GLUT main loop does not return.
	glInit(&argc, argv);
	glutMainLoop();
#else
	// Embedded GLES: manual render/swap loop until quit is requested.
	if (!opengles_init(GL_WIN_SIZE_X, GL_WIN_SIZE_Y, &display, &surface, &context))
	{
		printf("Error initializing opengles\n");
		CleanupExit();
	}
	glDisable(GL_DEPTH_TEST);
	glEnable(GL_TEXTURE_2D);
	glEnableClientState(GL_VERTEX_ARRAY);
	glDisableClientState(GL_COLOR_ARRAY);
	while (!g_bQuit)
	{
		glutDisplay();
		eglSwapBuffers(display, surface);
	}
	opengles_shutdown(display, surface, context);
	CleanupExit();
#endif
}
// (Re)initialize the OpenNI context with VGA (640x480 @ 30fps) depth + RGB
// streams, align the depth map to the RGB viewpoint, and allocate the frame
// buffers. Returns false (after logging the status) on the first failing call.
bool DataCapture::initialise()
{
	// Tear down any previous session before re-initializing.
	context_.Shutdown();
	XnStatus rc = context_.Init();
	if( rc != XN_STATUS_OK )
	{
		std::cout << "Init: " << xnGetStatusString(rc) << std::endl;
		return false;
	}

	rc = depthGen_.Create(context_);
	if( rc != XN_STATUS_OK )
	{
		std::cout << "depthGen.Create: " << xnGetStatusString(rc) << std::endl;
		return false;
	}

	rc = imageGen_.Create(context_);
	if( rc != XN_STATUS_OK )
	{
		std::cout << "imageGen.Create: " << xnGetStatusString(rc) << std::endl;
		return false;
	}

	rc = imageGen_.SetPixelFormat(XN_PIXEL_FORMAT_RGB24);
	if( rc != XN_STATUS_OK )
	{
		std::cout << "SetPixelFormat: " << xnGetStatusString(rc) << std::endl;
		return false;
	}

	// Same VGA mode for both streams.
	XnMapOutputMode imgMode;
	imgMode.nXRes = 640; // XN_VGA_X_RES
	imgMode.nYRes = 480; // XN_VGA_Y_RES
	imgMode.nFPS = 30;
	rc = imageGen_.SetMapOutputMode(imgMode);
	if( rc != XN_STATUS_OK )
	{
		std::cout << "image SetMapOutputMode: " << xnGetStatusString(rc) << std::endl;
		return false;
	}

	rc = depthGen_.SetMapOutputMode(imgMode);
	if( rc != XN_STATUS_OK )
	{
		std::cout << "depth SetMapOutputMode: " << xnGetStatusString(rc) << std::endl;
		return false;
	}

	depthGen_.GetMetaData(depthMd_);
	std::cout << "Depth offset " << depthMd_.XOffset() << " " << depthMd_.YOffset() << std::endl;

	// set the depth image viewpoint
	// NOTE(review): status not checked — confirm SetViewPoint failure is
	// acceptable here.
	depthGen_.GetAlternativeViewPointCap().SetViewPoint(imageGen_);

	// read off the depth camera field of view. This is the FOV corresponding to
	// the IR camera viewpoint, regardless of the alternative viewpoint settings.
	XnFieldOfView fov;
	rc = depthGen_.GetFieldOfView(fov);
	std::cout << "Fov: " << fov.fHFOV << " " << fov.fVFOV << std::endl;

	// NOTE(review): OpenNI depth pixels are 16-bit, so 640*480 chars holds
	// only half a depth frame if a full map is copied here — confirm how
	// pDepthData_ is consumed. Also, repeated initialise() calls leak these
	// buffers (no matching delete before reassignment).
	pDepthData_ = new char [640 * 480];
	pRgbData_ = new char [640 * 480 * 3];

	return true;
}
// Sample entry point: initialize OpenNI from a recording (argv[1]) or the
// sample XML, create depth + user generators, register user/calibration/pose
// callbacks (including in-progress callbacks), then run the GL(ES) loop.
int main(int argc, char **argv)
{
	XnStatus nRetVal = XN_STATUS_OK;

	if (argc > 1)
	{
		// Playback mode: bare context + recording file (player in g_Player).
		nRetVal = g_Context.Init();
		CHECK_RC(nRetVal, "Init");
		nRetVal = g_Context.OpenFileRecording(argv[1], g_Player);
		if (nRetVal != XN_STATUS_OK)
		{
			printf("Can't open recording %s: %s\n", argv[1], xnGetStatusString(nRetVal));
			return 1;
		}
	}
	else
	{
		// Live mode: pipeline described by the sample XML file.
		xn::EnumerationErrors errors;
		nRetVal = g_Context.InitFromXmlFile(SAMPLE_XML_PATH, g_scriptNode, &errors);
		if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
		{
			XnChar strError[1024];
			errors.ToString(strError, 1024);
			printf("%s\n", strError);
			return (nRetVal);
		}
		else if (nRetVal != XN_STATUS_OK)
		{
			printf("Open failed: %s\n", xnGetStatusString(nRetVal));
			return (nRetVal);
		}
	}

	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
	CHECK_RC(nRetVal, "Find depth generator");
	// Reuse an existing user generator if the XML provided one; otherwise
	// create one directly.
	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	if (nRetVal != XN_STATUS_OK)
	{
		nRetVal = g_UserGenerator.Create(g_Context);
		CHECK_RC(nRetVal, "Find user generator");
	}

	XnCallbackHandle hUserCallbacks, hCalibrationStart, hCalibrationComplete, hPoseDetected, hCalibrationInProgress, hPoseInProgress;
	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
	{
		printf("Supplied user generator doesn't support skeleton\n");
		return 1;
	}
	// User lifecycle + calibration lifecycle callbacks.
	nRetVal = g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
	CHECK_RC(nRetVal, "Register to user callbacks");
	nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart);
	CHECK_RC(nRetVal, "Register to calibration start");
	nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete);
	CHECK_RC(nRetVal, "Register to calibration complete");

	if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
	{
		// Calibration requires an explicit pose: pose detection must be
		// supported; remember the pose name for later lookup.
		g_bNeedPose = TRUE;
		if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
		{
			printf("Pose required, but not supported\n");
			return 1;
		}
		nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
		CHECK_RC(nRetVal, "Register to Pose Detected");
		g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
	}

	// Track all joints and watch calibration/pose progress.
	g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
	nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationInProgress(MyCalibrationInProgress, NULL, hCalibrationInProgress);
	CHECK_RC(nRetVal, "Register to calibration in progress");
	nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseInProgress(MyPoseInProgress, NULL, hPoseInProgress);
	CHECK_RC(nRetVal, "Register to pose in progress");

	nRetVal = g_Context.StartGeneratingAll();
	CHECK_RC(nRetVal, "StartGenerating");

#ifndef USE_GLES
	// Desktop GL: GLUT main loop does not return.
	glInit(&argc, argv);
	glutMainLoop();
#else
	// Embedded GLES: manual render/swap loop until quit is requested.
	if (!opengles_init(GL_WIN_SIZE_X, GL_WIN_SIZE_Y, &display, &surface, &context))
	{
		printf("Error initializing opengles\n");
		CleanupExit();
	}
	glDisable(GL_DEPTH_TEST);
	glEnable(GL_TEXTURE_2D);
	glEnableClientState(GL_VERTEX_ARRAY);
	glDisableClientState(GL_COLOR_ARRAY);
	while (!g_bQuit)
	{
		glutDisplay();
		eglSwapBuffers(display, surface);
	}
	opengles_shutdown(display, surface, context);
	CleanupExit();
#endif
}
int main(int argc, char **argv) { ros::init(argc, argv, "openni_tracker1"); ros::NodeHandle nh; /* string configFilename = ros::package::getPath("openni_tracker1") + "/openni_tracker1.xml"; XnStatus nRetVal = g_Context.InitFromXmlFile(configFilename.c_str()); CHECK_RC(nRetVal, "InitFromXml"); */ XnStatus nRetVal = g_Context.Init(); CHECK_RC(nRetVal, "Init"); nRetVal = g_Context.OpenFileRecording("/home/latifanjum/Downloads/OpenNI-master/Platform/Linux/Bin/x86-Release/Dataset/Wave2/wave25.oni", g_Player); g_Player.SetRepeat( false ); if (nRetVal != XN_STATUS_OK) { printf("Can't open recording %s\n", xnGetStatusString(nRetVal)); return 1; } nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator); CHECK_RC(nRetVal, "Find depth generator"); nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator); if (nRetVal != XN_STATUS_OK) { nRetVal = g_UserGenerator.Create(g_Context); CHECK_RC(nRetVal, "Find user generator"); } if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) { ROS_INFO("Supplied user generator doesn't support skeleton"); return 1; } XnCallbackHandle hUserCallbacks; g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks); XnCallbackHandle hCalibrationCallbacks; g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks); if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration()) { g_bNeedPose = TRUE; if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)) { ROS_INFO("Pose required, but not supported"); return 1; } XnCallbackHandle hPoseCallbacks; g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks); g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose); } g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL); nRetVal = g_Context.StartGeneratingAll(); CHECK_RC(nRetVal, 
"StartGenerating"); ros::Rate r(30); ros::NodeHandle pnh("~"); string frame_id("openni_depth_frame"); pnh.getParam("camera_frame_id", frame_id); while (ros::ok()) { g_Context.WaitAndUpdateAll(); publishTransforms(frame_id); r.sleep(); } g_Context.Shutdown(); return 0; }
bool OpenNIVideo::init() { //open the video //if you are using a device, you can open the device //if you are using video streaming, you can initialize the connection //if you are using video files, you can read the configuration, cache the data, etc. xn::EnumerationErrors errors; int resolutionX = 640; int resolutionY = 480; unsigned int FPS = 30; XnStatus nRetVal = XN_STATUS_OK; nRetVal = context.Init(); CHECK_RC(nRetVal, "context global init"); //xn::NodeInfoList list; //nRetVal = context.EnumerateProductionTrees(XN_NODE_TYPE_DEVICE, NULL, list, &errors); //CHECK_RC(nRetVal, "enumerate production tree"); // HandsGenerator hands; //UserGenerator user; //GestureGenerator gesture; //SceneAnalyzer scene; nRetVal = depth_generator.Create(context); CHECK_RC(nRetVal, "creating depth generator"); nRetVal = image_generator.Create(context); CHECK_RC(nRetVal, "creating image generator"); if(depth_generator.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT)) { nRetVal = depth_generator.GetAlternativeViewPointCap().SetViewPoint(image_generator); CHECK_RC(nRetVal, "creating registered image/depth generator"); } else { printf("WARNING: XN_CAPABILITY_ALTERNATIVE_VIEW_POINT not supported"); } if (depth_generator.IsCapabilitySupported(XN_CAPABILITY_FRAME_SYNC)) { if( depth_generator.GetFrameSyncCap().CanFrameSyncWith(image_generator)) { //nRetVal=depth.GetFrameSyncCap().FrameSyncWith(image); //CHECK_RC(nRetVal, "creating frame sync image/depth generator"); } } else { printf("WARNING: XN_CAPABILITY_FRAME_SYNC not supported"); } XnMapOutputMode mode = {resolutionX,resolutionY,FPS}; nRetVal = depth_generator.SetMapOutputMode(mode); CHECK_RC(nRetVal, "set output mode"); //NOT NEEDED IF SYNCHRO nRetVal = image_generator.SetMapOutputMode(mode); CHECK_RC(nRetVal, "set output mode"); _depthBufferShort=new unsigned short[resolutionX*resolutionY]; _depthBufferByte=new unsigned char[resolutionX*resolutionY]; //we need to create one video stream _videoStreamList.push_back(new 
osgART::VideoStream()); _videoStreamList[0]->allocateImage(resolutionX,resolutionY, 1, GL_RGB, GL_UNSIGNED_BYTE); //we need to create one video stream _videoStreamList.push_back(new osgART::VideoStream()); // _videoStreamList[1]->allocateImage(resolutionX,resolutionY, 1, GL_LUMINANCE, GL_FLOAT); _videoStreamList[1]->allocateImage(resolutionX,resolutionY, 1, GL_LUMINANCE, GL_UNSIGNED_BYTE); //_videoStreamList[1]->allocateImage(resolutionX,resolutionY, 1, GL_LUMINANCE, GL_UNSIGNED_SHORT); //_videoStreamList[1]->allocateImage(w, h, 1, GL_DEPTHCOMPONENT16, GL_UNSIGNED_BYTE); if (m_flip_vertical) { _videoStreamList[0]->flipVertical(); _videoStreamList[1]->flipVertical(); } if (m_flip_horizontal) { _videoStreamList[0]->flipHorizontal(); _videoStreamList[1]->flipHorizontal(); } return true; }
int main(int argc, char **argv) { ros::init(argc, argv, "tracker_2"); ros::NodeHandle nh; string configFilename = ros::package::getPath("openni_tracker_multi") + "/openni_tracker.xml"; XnStatus nRetVal = g_Context.InitFromXmlFile(configFilename.c_str()); CHECK_RC(nRetVal, "InitFromXml"); // XnStatus nRetVal = XN_STATUS_OK; //xnLogInitFromXmlFile(csXmlFile); nRetVal = g_Context.Init(); XN_IS_STATUS_OK(nRetVal); // SELECTION OF THE DEVICE xn::EnumerationErrors errors; xn::Device g_Device; // find devices xn::NodeInfoList list; xn::NodeInfoList list_depth; nRetVal = g_Context.EnumerateProductionTrees(XN_NODE_TYPE_DEVICE, NULL, list, &errors); XN_IS_STATUS_OK(nRetVal); printf("The following devices were found:\n"); int i = 1; for (xn::NodeInfoList::Iterator it = list.Begin(); it != list.End(); ++it, ++i) { xn::NodeInfo deviceNodeInfo = *it; xn::Device deviceNode; deviceNodeInfo.GetInstance(deviceNode); XnBool bExists = deviceNode.IsValid(); if (!bExists) { g_Context.CreateProductionTree(deviceNodeInfo, deviceNode); // this might fail. 
} if (deviceNode.IsValid() && deviceNode.IsCapabilitySupported(XN_CAPABILITY_DEVICE_IDENTIFICATION)) { const XnUInt32 nStringBufferSize = 200; XnChar strDeviceName[nStringBufferSize]; XnChar strSerialNumber[nStringBufferSize]; XnUInt32 nLength = nStringBufferSize; deviceNode.GetIdentificationCap().GetDeviceName(strDeviceName, nLength); nLength = nStringBufferSize; deviceNode.GetIdentificationCap().GetSerialNumber(strSerialNumber, nLength); printf("[%d] %s (%s)\n", i, strDeviceName, strSerialNumber); } else { printf("[%d] %s\n", i, deviceNodeInfo.GetCreationInfo()); } // release the device if we created it if (!bExists && deviceNode.IsValid()) { deviceNode.Release(); } } printf("\n"); printf("Choose device to open (1): "); int chosen=0; if (argc != 2) { printf("Choose device to open (1): "); int nRetval = scanf("%d", &chosen); } else { chosen = atoi(argv[1]); } // create it xn::NodeInfoList::Iterator it = list.Begin(); for (i = 1; i < chosen; ++i) { it++; } xn::NodeInfo deviceNode = *it; nRetVal = g_Context.CreateProductionTree(deviceNode, g_Device); printf("Production tree of the device created.\n"); // SELECTION OF THE DEPTH GENERATOR nRetVal = g_Context.EnumerateProductionTrees(XN_NODE_TYPE_DEPTH, NULL, list_depth, &errors); XN_IS_STATUS_OK(nRetVal); printf("The following devices were found:\n"); int i_depth = 1; for (xn::NodeInfoList::Iterator it_depth = list_depth.Begin(); it_depth != list_depth.End(); ++it_depth, ++i_depth) { xn::NodeInfo depthNodeInfo = *it_depth; xn::Device depthNode; depthNodeInfo.GetInstance(depthNode); XnBool bExists_depth = depthNode.IsValid(); if (!bExists_depth) { g_Context.CreateProductionTree(depthNodeInfo, depthNode); // this might fail. 
} if (depthNode.IsValid() && depthNode.IsCapabilitySupported(XN_CAPABILITY_DEVICE_IDENTIFICATION)) { const XnUInt32 nStringBufferSize = 200; XnChar strDeviceName[nStringBufferSize]; XnChar strSerialNumber[nStringBufferSize]; XnUInt32 nLength = nStringBufferSize; depthNode.GetIdentificationCap().GetDeviceName(strDeviceName, nLength); nLength = nStringBufferSize; depthNode.GetIdentificationCap().GetSerialNumber(strSerialNumber, nLength); printf("[%d] %s (%s)\n", i, strDeviceName, strSerialNumber); } else { printf("[%d] %s\n", i, depthNodeInfo.GetCreationInfo()); } // release the device if we created it if (!bExists_depth && depthNode.IsValid()) { depthNode.Release(); } } printf("\n"); printf("Choose device to open (1): "); int chosen_depth = 1; /*if (argc != 2) { printf("Choose device to open (1): "); int nRetval_depth = scanf("%d", &chosen_depth); } else { chosen_depth = atoi(argv[1]); }*/ // create it xn::NodeInfoList::Iterator it_depth = list_depth.Begin(); for (i = 1; i < chosen_depth; ++i) { it_depth++; } xn::NodeInfo depthNode = *it_depth; nRetVal = g_Context.CreateProductionTree(depthNode, g_DepthGenerator); printf("Production tree of the DepthGenerator created.\n"); nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator); printf("Production tree of the depth generator created.\n"); XN_IS_STATUS_OK(nRetVal); printf("XN_IS_STATUS_OK(nRetVal).\n"); CHECK_RC(nRetVal, "Find depth generator"); printf("CHECK_RC(nRetVal, Find depth generator);\n"); nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator); printf("User generator found.\n"); if (nRetVal != XN_STATUS_OK) { nRetVal = g_UserGenerator.Create(g_Context); printf("User generator created.\n"); CHECK_RC(nRetVal, "Find user generator"); } if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) { printf("Supplied user generator doesn't support skeleton.\n"); ROS_INFO("Supplied user generator doesn't support skeleton"); return 1; } XnCallbackHandle hUserCallbacks; 
g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks); XnCallbackHandle hCalibrationCallbacks; g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks); if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration()) { g_bNeedPose = TRUE; if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)) { ROS_INFO("Pose required, but not supported"); return 1; } XnCallbackHandle hPoseCallbacks; g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks); g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose); } g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL); nRetVal = g_Context.StartGeneratingAll(); CHECK_RC(nRetVal, "StartGenerating"); ros::Rate r(30); ros::NodeHandle pnh("~"); string frame_id("/kinect2_depth_frame"); pnh.getParam("camera_frame_id", frame_id); while (ros::ok()) { g_Context.WaitAndUpdateAll(); publishTransforms(frame_id); r.sleep(); } g_Context.Shutdown(); return 0; }
int main() { printf("main() START\n"); signal(SIGINT, onSignalReceived); // hit CTRL-C keys in terminal (2) signal(SIGTERM, onSignalReceived); // hit stop button in eclipse CDT (15) mapMode.nXRes = XN_VGA_X_RES; mapMode.nYRes = XN_VGA_Y_RES; mapMode.nFPS = 30; CHECK_RC(ctx.Init(), "init"); CHECK_RC(depthGenerator.Create(ctx), "create depth"); depthGenerator.SetMapOutputMode(mapMode); XnStatus userAvailable = ctx.FindExistingNode(XN_NODE_TYPE_USER, userGenerator); if(userAvailable != XN_STATUS_OK) { CHECK_RC(userGenerator.Create(ctx), "create user"); } XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks; xn::SkeletonCapability skel = userGenerator.GetSkeletonCap(); CHECK_RC(userGenerator.RegisterUserCallbacks(onUserNew, onUserLost, NULL, hUserCallbacks), "register user"); CHECK_RC(skel.RegisterCalibrationCallbacks(onCalibrationStart, onCalibrationEnd, NULL, hCalibrationCallbacks), "register calib"); CHECK_RC(userGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(onPoseDetected, NULL, NULL, hPoseCallbacks), "register pose"); XnChar poseName[20] = ""; CHECK_RC(skel.GetCalibrationPose(poseName), "get posename"); printf("poseName: %s\n", poseName); CHECK_RC(skel.SetSkeletonProfile(XN_SKEL_PROFILE_ALL), "set skel profile"); CHECK_RC(skel.SetSmoothing(0.8), "set smoothing"); // xnSetMirror(depth, !mirrorMode); CHECK_RC(ctx.StartGeneratingAll(), "start generating"); printf("waitAnyUpdateAll ...\n"); while(!shouldTerminate) { ctx.WaitAnyUpdateAll(); // depthGenerator.GetMetaData(tempDepthMetaData); const XnUInt16 userCount = userGenerator.GetNumberOfUsers(); // printf("userCount: %i\n", userCount); XnUserID aUsers[userCount]; XnUInt16 nUsers = userCount; userGenerator.GetUsers(aUsers, nUsers); for (int i = 0; i < nUsers; ++i) { XnUserID currentUserId = aUsers[i]; if (skel.IsTracking(aUsers[i])) { XnSkeletonJointPosition joint; skel.GetSkeletonJointPosition(currentUserId, XN_SKEL_HEAD, joint); XnFloat x = joint.position.X; XnFloat y = 
joint.position.Y; XnFloat z = joint.position.Z; printf("joint position: %.2f x %.2f x %.2f\n", x, y, z); printf("joint.fConfidence: %.2f\n", joint.fConfidence); } } } printf("STOP\n"); CHECK_RC(ctx.StopGeneratingAll(), "stop generating"); ctx.Shutdown(); printf("main() END\n"); }
int main(int argc, char **argv) { XnStatus nRetVal = XN_STATUS_OK; xn::EnumerationErrors errors; if( USE_RECORED_DATA ){ g_Context.Init(); g_Context.OpenFileRecording(RECORD_FILE_PATH); xn::Player player; // Player nodeの取得 nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_PLAYER, player); CHECK_RC(nRetVal, "Find player"); LOG_D("PlaybackSpeed: %d", player.GetPlaybackSpeed()); xn:NodeInfoList nodeList; player.EnumerateNodes(nodeList); for( xn::NodeInfoList::Iterator it = nodeList.Begin(); it != nodeList.End(); ++it){ if( (*it).GetDescription().Type == XN_NODE_TYPE_IMAGE ){ nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator); CHECK_RC(nRetVal, "Find image node"); LOG_D("%s", "ImageGenerator created."); } else if( (*it).GetDescription().Type == XN_NODE_TYPE_DEPTH ){ nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator); CHECK_RC(nRetVal, "Find depth node"); LOG_D("%s", "DepthGenerator created."); } else{ LOG_D("%s %s %s", ::xnProductionNodeTypeToString((*it).GetDescription().Type ), (*it).GetInstanceName(), (*it).GetDescription().strName); } } } else{ LOG_I("Reading config from: '%s'", CONFIG_XML_PATH); nRetVal = g_Context.InitFromXmlFile(CONFIG_XML_PATH, g_scriptNode, &errors); if (nRetVal == XN_STATUS_NO_NODE_PRESENT){ XnChar strError[1024]; errors.ToString(strError, 1024); LOG_E("%s\n", strError); return (nRetVal); } else if (nRetVal != XN_STATUS_OK){ LOG_E("Open failed: %s", xnGetStatusString(nRetVal)); return (nRetVal); } nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator); CHECK_RC(nRetVal,"No depth"); // ImageGeneratorの作成 nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator); CHECK_RC(nRetVal, "Find image generator"); } // UserGeneratorの取得 nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator); if(nRetVal!=XN_STATUS_OK){ nRetVal = g_UserGenerator.Create(g_Context); CHECK_RC(nRetVal, "Create user generator"); } XnCallbackHandle hUserCallbacks, 
hCalibrationStart, hCalibrationComplete, hPoseDetected; if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)){ LOG_E("%s", "Supplied user generator doesn't support skeleton"); return 1; } nRetVal = g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks); CHECK_RC(nRetVal, "Register to user callbacks"); g_SkeletonCap = g_UserGenerator.GetSkeletonCap(); nRetVal = g_SkeletonCap.RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart); CHECK_RC(nRetVal, "Register to calibration start"); nRetVal = g_SkeletonCap.RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete); CHECK_RC(nRetVal, "Register to calibration complete"); if (g_SkeletonCap.NeedPoseForCalibration()){ g_bNeedPose = TRUE; if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)){ LOG_E("%s", "Pose required, but not supported"); return 1; } nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected); CHECK_RC(nRetVal, "Register to Pose Detected"); g_SkeletonCap.GetCalibrationPose(g_strPose); } g_SkeletonCap.SetSkeletonProfile(XN_SKEL_PROFILE_ALL); nRetVal = g_Context.StartGeneratingAll(); CHECK_RC(nRetVal, "StartGenerating"); // 表示用の画像データの作成 XnMapOutputMode mapMode; g_ImageGenerator.GetMapOutputMode(mapMode); g_rgbImage = cvCreateImage(cvSize(mapMode.nXRes, mapMode.nYRes), IPL_DEPTH_8U, 3); LOG_I("%s", "Starting to run"); if(g_bNeedPose){ LOG_I("%s", "Assume calibration pose"); } xn::Recorder recorder; if( DO_RECORED && !USE_RECORED_DATA ){ // レコーダーの作成 LOG_I("%s", "Setup Recorder"); nRetVal = recorder.Create(g_Context); CHECK_RC(nRetVal, "Create recorder"); // 保存設定 nRetVal = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, RECORD_FILE_PATH); CHECK_RC(nRetVal, "Set recorder destination file"); // 深度、ビデオカメラ入力を保存対象として記録開始 nRetVal = recorder.AddNodeToRecording(g_DepthGenerator, XN_CODEC_NULL); CHECK_RC(nRetVal, "Add depth node to 
recording"); nRetVal = recorder.AddNodeToRecording(g_ImageGenerator, XN_CODEC_NULL); CHECK_RC(nRetVal, "Add image node to recording"); LOG_I("%s", "Recorder setup done."); } while (!xnOSWasKeyboardHit()) { g_Context.WaitOneUpdateAll(g_UserGenerator); if( DO_RECORED && !USE_RECORED_DATA ){ nRetVal = recorder.Record(); CHECK_RC(nRetVal, "Record"); } // ビデオカメラ画像の生データを取得 xn::ImageMetaData imageMetaData; g_ImageGenerator.GetMetaData(imageMetaData); // メモリコピー xnOSMemCopy(g_rgbImage->imageData, imageMetaData.RGB24Data(), g_rgbImage->imageSize); // BGRからRGBに変換して表示 cvCvtColor(g_rgbImage, g_rgbImage, CV_RGB2BGR); // UserGeneratorからユーザー識別ピクセルを取得 xn::SceneMetaData sceneMetaData; g_UserGenerator.GetUserPixels(0, sceneMetaData); XnUserID allUsers[MAX_NUM_USERS]; XnUInt16 nUsers = MAX_NUM_USERS; g_UserGenerator.GetUsers(allUsers, nUsers); for (int i = 0; i < nUsers; i++) { // キャリブレーションに成功しているかどうか if (g_SkeletonCap.IsTracking(allUsers[i])) { // スケルトンを描画 DrawSkelton(allUsers[i], i); } } // 表示 cvShowImage("User View", g_rgbImage); // ESCもしくはqが押されたら終了させる if (cvWaitKey(10) == 27) { break; } } if( !USE_RECORED_DATA ){ g_scriptNode.Release(); } g_DepthGenerator.Release(); g_UserGenerator.Release(); g_Context.Release(); if (g_rgbImage != NULL) { cvReleaseImage(&g_rgbImage); } g_Context.Shutdown(); }
bool SetupPrimesense(void) { XnStatus nRetVal = XN_STATUS_OK; if ((nRetVal = g_context.Init()) != XN_STATUS_OK) { fprintf(stderr,"Could not init OpenNI context: %s\n", xnGetStatusString(nRetVal)); return FALSE; } if ((nRetVal = g_depth.Create(g_context))!= XN_STATUS_OK) { fprintf(stderr,"Could not create depth generator: %s\n", xnGetStatusString(nRetVal)); g_haveDepth = FALSE; } else if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth)) != XN_STATUS_OK) { fprintf(stderr,"Could not find depth sensor: %s\n", xnGetStatusString(nRetVal)); g_haveDepth = FALSE; } if ((nRetVal = g_image.Create(g_context))!= XN_STATUS_OK) { fprintf(stderr,"Could not create image generator: %s\n", xnGetStatusString(nRetVal)); g_haveImage = FALSE; } else if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image)) != XN_STATUS_OK) { fprintf(stderr,"Could not find image sensor: %s\n", xnGetStatusString(nRetVal)); g_haveImage = FALSE; } if (!g_haveImage && !g_haveDepth) { fprintf(stderr,"Could not find either depth or image sources.\n"); return FALSE; } XnMapOutputMode mapMode; mapMode.nXRes = XN_VGA_X_RES; mapMode.nYRes = XN_VGA_Y_RES; mapMode.nFPS = 30; if (g_haveDepth && ( (nRetVal = g_depth.SetMapOutputMode(mapMode)) != XN_STATUS_OK)) { fprintf(stderr,"Could not set depth mode: %s\n", xnGetStatusString(nRetVal)); return FALSE; } if (g_haveDepth) { g_depth.GetMetaData(g_depthMD); g_depthWidth = g_depthMD.FullXRes(); g_depthHeight = g_depthMD.FullYRes(); } if (g_haveImage && (nRetVal = g_image.SetMapOutputMode(mapMode)) != XN_STATUS_OK) { fprintf(stderr,"Could not set image: %s\n", xnGetStatusString(nRetVal)); return FALSE; } if ((nRetVal = g_context.StartGeneratingAll()) != XN_STATUS_OK) { fprintf(stderr,"Could not start: %s\n", xnGetStatusString(nRetVal)); return FALSE; } return TRUE; }
int main(int argc, char **argv) { XnStatus nRetVal = XN_STATUS_OK; memset(COM_tracker,0,15*100*sizeof(float)); if (argc > 1) { nRetVal = g_Context.Init(); CHECK_RC(nRetVal, "Init"); nRetVal = g_Context.OpenFileRecording(argv[1], g_Player); if (nRetVal != XN_STATUS_OK) { printf("Can't open recording %s: %s\n", argv[1], xnGetStatusString(nRetVal)); return 1; } } else { xn::EnumerationErrors errors; nRetVal = g_Context.InitFromXmlFile(SAMPLE_XML_PATH, g_scriptNode, &errors); if (nRetVal == XN_STATUS_NO_NODE_PRESENT) { XnChar strError[1024]; errors.ToString(strError, 1024); printf("%s\n", strError); return (nRetVal); } else if (nRetVal != XN_STATUS_OK) { printf("Open failed: %s\n", xnGetStatusString(nRetVal)); return (nRetVal); } } nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator); if (nRetVal != XN_STATUS_OK) { printf("No depth generator found. Using a default one..."); xn::MockDepthGenerator mockDepth; nRetVal = mockDepth.Create(g_Context); CHECK_RC(nRetVal, "Create mock depth"); // set some defaults XnMapOutputMode defaultMode; defaultMode.nXRes = 320; defaultMode.nYRes = 240; defaultMode.nFPS = 30; nRetVal = mockDepth.SetMapOutputMode(defaultMode); CHECK_RC(nRetVal, "set default mode"); // set FOV XnFieldOfView fov; fov.fHFOV = 1.0225999419141749; fov.fVFOV = 0.79661567681716894; nRetVal = mockDepth.SetGeneralProperty(XN_PROP_FIELD_OF_VIEW, sizeof(fov), &fov); CHECK_RC(nRetVal, "set FOV"); XnUInt32 nDataSize = defaultMode.nXRes * defaultMode.nYRes * sizeof(XnDepthPixel); XnDepthPixel* pData = (XnDepthPixel*)xnOSCallocAligned(nDataSize, 1, XN_DEFAULT_MEM_ALIGN); nRetVal = mockDepth.SetData(1, 0, nDataSize, pData); CHECK_RC(nRetVal, "set empty depth map"); g_DepthGenerator = mockDepth; } nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator); if (nRetVal != XN_STATUS_OK) { printf("No image node exists! 
check your XML."); return 1; } nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator); if (nRetVal != XN_STATUS_OK) { nRetVal = g_UserGenerator.Create(g_Context); CHECK_RC(nRetVal, "Find user generator"); } XnCallbackHandle hUserCallbacks; if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) { printf("Supplied user generator doesn't support skeleton\n"); return 1; } nRetVal = g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks); CHECK_RC(nRetVal, "Register to user callbacks"); nRetVal = g_Context.StartGeneratingAll(); CHECK_RC(nRetVal, "StartGenerating"); #ifndef USE_GLES glInit(&argc, argv); glutMainLoop(); #else if (!opengles_init(GL_WIN_SIZE_X, GL_WIN_SIZE_Y, &display, &surface, &context)) { printf("Error initializing opengles\n"); CleanupExit(); } glDisable(GL_DEPTH_TEST); glEnable(GL_TEXTURE_2D); glEnableClientState(GL_VERTEX_ARRAY); glDisableClientState(GL_COLOR_ARRAY); while (!g_bQuit) { glutDisplay(); eglSwapBuffers(display, surface); } opengles_shutdown(display, surface, context); CleanupExit(); #endif }
bool initialize(){ std::cout << "initializing kinect... " << endl; #ifdef USE_MS_SKD int iSensorCount = 0; if ( NuiGetSensorCount(&iSensorCount) < 0 ) return false; // Look at each Kinect sensor for (int i = 0; i < iSensorCount; ++i) { // Create the sensor so we can check status, if we can't create it, move on to the next if ( NuiCreateSensorByIndex(i, &g_pNuiSensor) < 0 ) continue; // Get the status of the sensor, and if connected, then we can initialize it if( 0== g_pNuiSensor->NuiStatus() ){ g_pNuiSensor = g_pNuiSensor; break; } // This sensor wasn't OK, so release it since we're not using it g_pNuiSensor->Release(); } if (NULL != g_pNuiSensor) { // Initialize the Kinect and specify that we'll be using depth if ( g_pNuiSensor->NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH) >= 0 ) { // Create an event that will be signaled when depth data is available g_hNextDepthFrameEvent = CreateEvent(NULL, TRUE, FALSE, NULL); // Open a depth image stream to receive depth frames g_pNuiSensor->NuiImageStreamOpen( NUI_IMAGE_TYPE_DEPTH, NUI_IMAGE_RESOLUTION_640x480, 0, 2, g_hNextDepthFrameEvent, &g_pDepthStreamHandle); } } else return false; #endif #ifdef USE_OPENNI // Initialize context object g_RetVal = g_Context.Init(); g_RetVal = g_DepthGenerator.Create(g_Context); if (g_RetVal != XN_STATUS_OK) printf("Failed creating DEPTH generator %s\n", xnGetStatusString(g_RetVal)); XnMapOutputMode outputMode; outputMode.nXRes = g_im_w; outputMode.nYRes = g_im_h; outputMode.nFPS = g_fps; g_RetVal = g_DepthGenerator.SetMapOutputMode(outputMode); if (g_RetVal != XN_STATUS_OK){ printf("Failed setting the DEPTH output mode %s\n", xnGetStatusString(g_RetVal)); return false; } g_RetVal = g_Context.StartGeneratingAll(); if (g_RetVal != XN_STATUS_OK){ printf("Failed starting generating all %s\n", xnGetStatusString(g_RetVal)); return false; } #endif #ifdef USE_LIBFREENECT depth_mid = (uint16_t*)malloc(640*480*2); depth_front = (uint16_t*)malloc(640*480*2); if (freenect_init(&f_ctx, NULL) < 0) { 
printf("freenect_init() failed\n"); return 1; } freenect_set_log_level(f_ctx, FREENECT_LOG_ERROR); freenect_select_subdevices(f_ctx, (freenect_device_flags)(FREENECT_DEVICE_MOTOR | FREENECT_DEVICE_CAMERA)); int nr_devices = freenect_num_devices (f_ctx); printf ("Number of devices found: %d\n", nr_devices); int user_device_number = 0; if (nr_devices < 1) { freenect_shutdown(f_ctx); return false; } if (freenect_open_device(f_ctx, &f_dev, user_device_number) < 0) { printf("Could not open device\n"); freenect_shutdown(f_ctx); return false; } bool res = pthread_create(&freenect_thread, NULL, freenect_threadfunc, NULL); if (res) { printf("pthread_create failed\n"); freenect_shutdown(f_ctx); return false; } freenect_raw_tilt_state state; freenect_get_tilt_status(&state); freenect_angle = freenect_get_tilt_degs(&state); #endif g_im3D.create(g_im_h,g_im_w,CV_32FC3); g_imD.create(g_im_h,g_im_w,CV_16UC1); return true; }