void initialize()
{
	std::cout << std::endl << "Initializing Kinect... " << std::endl << std::endl;

	g_RetVal = g_Context.Init();
	if (g_RetVal != XN_STATUS_OK)
		printf("Failed initializing context %s\n", xnGetStatusString(g_RetVal));

	g_RetVal = g_DepthGenerator.Create(g_Context);
	if (g_RetVal != XN_STATUS_OK)
		printf("Failed creating DEPTH generator %s\n", xnGetStatusString(g_RetVal));

	XnMapOutputMode outputMode;
	outputMode.nXRes = g_im_w;
	outputMode.nYRes = g_im_h;
	outputMode.nFPS = g_fps;
	g_RetVal = g_DepthGenerator.SetMapOutputMode(outputMode);
	if (g_RetVal != XN_STATUS_OK)
		printf("Failed setting the DEPTH output mode %s\n", xnGetStatusString(g_RetVal));

	g_RetVal = g_Context.StartGeneratingAll();
	if (g_RetVal != XN_STATUS_OK)
		printf("Failed starting generating all %s\n", xnGetStatusString(g_RetVal));

	g_DepthGenerator.GetIntProperty ("ZPD", g_focal_length);
	g_DepthGenerator.GetRealProperty ("ZPPS", g_pixel_size);
	g_pixel_size *= 2.f;

	g_im3D.create(g_im_h,g_im_w,CV_32FC3);
}
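The ZPD/ZPPS properties read above are the depth camera's focal length (in mm) and per-pixel size at the zero plane (typically doubled because the depth map is half the sensor resolution). A minimal sketch, not from the original source, of how these values are commonly used to fill g_im3D; fillPointCloud() and the CV_16UC1 depth input (values in mm) are assumptions:

void fillPointCloud(const cv::Mat& depth_mm)        // CV_16UC1 depth image, values in mm
{
	const float cx = 0.5f * g_im_w;
	const float cy = 0.5f * g_im_h;
	for (int v = 0; v < g_im_h; ++v)
		for (int u = 0; u < g_im_w; ++u)
		{
			const float z = depth_mm.at<unsigned short>(v, u);       // 0 means no reading
			cv::Vec3f& p = g_im3D.at<cv::Vec3f>(v, u);
			p[0] = (u - cx) * z * g_pixel_size / g_focal_length;     // X
			p[1] = (v - cy) * z * g_pixel_size / g_focal_length;     // Y
			p[2] = z;                                                // Z
		}
}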
Example #2
	information()
	{
		RC(context.Init(),							"Context Initialized");
		
		XnMapOutputMode mode;
		mode.nXRes = XN_VGA_X_RES;
		mode.nYRes = XN_VGA_Y_RES;
		mode.nFPS  = 30;
		
		RC(image.Create(context),					"Create image buffer");
		RC(image.SetMapOutputMode(mode),			"Set image mode");
		
		RC(depth.Create(context),					"Create depth buffer");
		RC(depth.SetMapOutputMode(mode),			"Set depth mode");
		
		xn::Query q;
		RC(q.AddSupportedCapability(XN_CAPABILITY_SKELETON), "Request skeleton");

		try {
			RC(context.FindExistingNode(XN_NODE_TYPE_USER, user), "User generator");
		} catch (...) {
			RC(user.Create(context),					"Get skeleton!!!");
		}
//		RC(user.Create(context, &q),					"Get skeleton!!!");
//		
//		xn::NodeInfoList il;
//		RC(context.EnumerateProductionTrees(XN_NODE_TYPE_USER, &q, il, NULL),
//													"Enumerate nodes");
//		
//		xn::NodeInfo i = *il.Begin();
//		RC(context.CreateProductionTree(i),			"Create skeleton node");
//		RC(i.GetInstance(user),						"Get skeleton");
		
		user.RegisterUserCallbacks(User_NewUser, NULL, NULL, hUserCallbacks);
		user.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, &user, hCalibrationCallbacks);
		
		if (user.GetSkeletonCap().NeedPoseForCalibration())
		{
			if (!user.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
			{
				post("Pose required, but not supported\n");
			}
			else
			{
				user.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, &user, hPoseCallbacks);
				user.GetSkeletonCap().GetCalibrationPose(g_strPose);
				user.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
			}
		}
		
		RC(context.StartGeneratingAll(),			"Start generating data");
		
		post("Kinect initialized!\n");
	}
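The RC helper used throughout this example is not shown in the excerpt; since the FindExistingNode call above is wrapped in try/catch, it presumably throws on failure. A minimal sketch of such a helper under that assumption (post() is the logging call already used by the snippet):

#include <stdexcept>
#include <string>

#define RC(call, msg)                                                        \
	do {                                                                     \
		XnStatus rc_ = (call);                                               \
		if (rc_ != XN_STATUS_OK)                                             \
			throw std::runtime_error(std::string(msg) + ": " +               \
			                         xnGetStatusString(rc_));               \
		post("%s\n", msg);                                                   \
	} while (0)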
Example #3
bool kinect_reader2::init() {
	nRetVal = XN_STATUS_OK;

	nRetVal = g_Context.Init();
	if (nRetVal != XN_STATUS_OK) {
		printf("Creating context failed: %s\n", xnGetStatusString(nRetVal));
		return false;
	}

	nRetVal = g_DepthGenerator.Create(g_Context);
	if(nRetVal != XN_STATUS_OK) {
		printf("Creating depth generator failed: %s\n", xnGetStatusString(nRetVal));
		return false;
	}
	g_DepthGenerator.GetMirrorCap().SetMirror(true);

	nRetVal = g_UserGenerator.Create(g_Context);
	if(nRetVal != XN_STATUS_OK) {
		printf("Creating user generator failed: %s\n", xnGetStatusString(nRetVal));
		return false;
	}

	XnCallbackHandle hUserCallbacks, hCalibrationStart, hCalibrationComplete, hPoseDetected;
	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
	{
		printf("Supplied user generator doesn't support skeleton\n");
		return false;
	}
	g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
	g_UserGenerator.GetSkeletonCap().RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart);
	g_UserGenerator.GetSkeletonCap().RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete);

	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
	{
		printf("Pose required, but not supported\n");
		return false;
	}
	g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);

	g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

	g_Context.StartGeneratingAll();

	return true;
}
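The user, pose, and calibration callbacks registered in init() are not part of this excerpt. A hedged sketch of what they typically look like in the OpenNI samples; g_UserGenerator comes from the snippet, while g_strPose (the calibration pose name) is an assumption. User_LostUser and UserCalibration_CalibrationStart can simply log and are omitted here.

static XnChar g_strPose[20] = "Psi";   // assumed pose name (usually read via GetCalibrationPose)

void XN_CALLBACK_TYPE User_NewUser(xn::UserGenerator& gen, XnUserID nId, void* /*pCookie*/)
{
	// Ask for a calibration pose, or calibrate directly if no pose is required.
	if (gen.GetSkeletonCap().NeedPoseForCalibration())
		gen.GetPoseDetectionCap().StartPoseDetection(g_strPose, nId);
	else
		gen.GetSkeletonCap().RequestCalibration(nId, TRUE);
}

void XN_CALLBACK_TYPE UserPose_PoseDetected(xn::PoseDetectionCapability& cap, const XnChar* /*strPose*/, XnUserID nId, void* /*pCookie*/)
{
	cap.StopPoseDetection(nId);
	g_UserGenerator.GetSkeletonCap().RequestCalibration(nId, TRUE);
}

void XN_CALLBACK_TYPE UserCalibration_CalibrationComplete(xn::SkeletonCapability& cap, XnUserID nId, XnCalibrationStatus eStatus, void* /*pCookie*/)
{
	if (eStatus == XN_CALIBRATION_STATUS_OK)
		cap.StartTracking(nId);                 // calibration succeeded: start skeleton tracking
	else
		g_UserGenerator.GetPoseDetectionCap().StartPoseDetection(g_strPose, nId);
}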
void InitialKinect()
{
	// 1. Initialize the context
	mContext.Init();

	// 2. Set the map output mode (mapMode is already defined in the constructor)
	mapMode.nXRes = 640;
	mapMode.nYRes = 480;
	mapMode.nFPS = 30;

	// 3.a Create the depth generator
	mDepthGenerator.Create( mContext );
	mDepthGenerator.SetMapOutputMode( mapMode );

	// 3.b Create the image generator
	mImageGenerator.Create( mContext );
	mImageGenerator.SetMapOutputMode( mapMode );

	// 4. Align the depth viewpoint with the image generator
	mDepthGenerator.GetAlternativeViewPointCap().SetViewPoint( mImageGenerator );
	std::cout << "KinectSensor initialized correctly!" << std::endl;
}
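InitialKinect() configures and aligns the generators but never starts them. A hedged usage sketch (ReadOneFrame() is a hypothetical name) of starting generation and reading an aligned depth/colour pair afterwards, using the globals from the snippet:

void ReadOneFrame()
{
	mContext.StartGeneratingAll();                 // start the generators created above
	mContext.WaitAndUpdateAll();                   // block until new frames arrive

	const XnDepthPixel* pDepth = mDepthGenerator.GetDepthMap();       // 640x480, depth in mm
	const XnRGB24Pixel* pColor = mImageGenerator.GetRGB24ImageMap();  // 640x480 RGB

	// Thanks to SetViewPoint() above, pDepth[i] and pColor[i] refer to the same scene point.
	std::cout << "Depth at image centre: " << pDepth[240 * 640 + 320] << " mm, R = "
	          << (int)pColor[240 * 640 + 320].nRed << std::endl;
}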
Example #5
bool SetupDepth(xn::Context& g_context)
{
	XnStatus nRetVal = XN_STATUS_OK;

	fprintf(stderr,"Setting up the depth generator\n");

	if ((nRetVal = g_depth.Create(g_context))!= XN_STATUS_OK)
	{
		printf("Could not create depth generator: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}


/*
	if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not find depth sensor: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}
	*/

	XnMapOutputMode mapMode;
	mapMode.nXRes = XN_VGA_X_RES;
	mapMode.nYRes = XN_VGA_Y_RES;
	mapMode.nFPS = 30;
	if ((nRetVal = g_depth.SetMapOutputMode(mapMode)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not set depth mode: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}

	g_depth.GetMetaData(g_depthMD);
	g_depthWidth  = g_depthMD.FullXRes();
	g_depthHeight = g_depthMD.FullYRes();	
	return TRUE;
}
Example #6
bool DataCapture::initialise()
{
    context_.Shutdown();

    XnStatus rc = context_.Init(); 
    if( rc != XN_STATUS_OK )
    {
        std::cout << "Init: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    rc = depthGen_.Create(context_);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "depthGen.Create: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    rc = imageGen_.Create(context_);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "imageGen.Create: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    rc = imageGen_.SetPixelFormat(XN_PIXEL_FORMAT_RGB24);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "SetPixelFormat: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    XnMapOutputMode imgMode;
    imgMode.nXRes = 640; // XN_VGA_X_RES
    imgMode.nYRes = 480; // XN_VGA_Y_RES
    imgMode.nFPS = 30;
    rc = imageGen_.SetMapOutputMode(imgMode);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "image SetMapOutputMode: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    rc = depthGen_.SetMapOutputMode(imgMode);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "depth SetMapOutputMode: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    depthGen_.GetMetaData(depthMd_);
    std::cout << "Depth offset " << depthMd_.XOffset() << " " << depthMd_.YOffset() << std::endl;

    // set the depth image viewpoint
    depthGen_.GetAlternativeViewPointCap().SetViewPoint(imageGen_);

    // read off the depth camera field of view.  This is the FOV corresponding to
    // the IR camera viewpoint, regardless of the alternative viewpoint settings.
    XnFieldOfView fov;
    rc = depthGen_.GetFieldOfView(fov);
    std::cout << "Fov: " << fov.fHFOV << " " << fov.fVFOV << std::endl;

    pDepthData_ = new char [640 * 480];
    pRgbData_ = new char [640 * 480 * 3];

    return true;
}
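A hedged sketch of the capture step that usually pairs with initialise(): wait for new data, then copy the colour frame and a crudely scaled 8-bit depth image into the buffers allocated above. captureOne() is a hypothetical name, and <cstring> is assumed to be included.

#include <cstring>

bool DataCapture::captureOne()
{
    XnStatus rc = context_.WaitAndUpdateAll();
    if( rc != XN_STATUS_OK )
        return false;

    xn::ImageMetaData imageMd;
    imageGen_.GetMetaData(imageMd);
    std::memcpy(pRgbData_, imageMd.RGB24Data(), 640 * 480 * 3);    // RGB24 is 3 bytes/pixel

    depthGen_.GetMetaData(depthMd_);
    const XnDepthPixel* depth = depthMd_.Data();                   // 16-bit depth in mm
    for( int i = 0; i < 640 * 480; ++i )
        pDepthData_[i] = static_cast<char>(depth[i] / 16);         // crude scale to 8 bits
    return true;
}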
int main(int argc, char **argv) {
	nRetVal = XN_STATUS_OK;

	/* Initialize the context (camera data) */
	nRetVal = context.Init();
	if (!checkError("Error initializing the context", nRetVal)) exit(-1);

	/* Create the depth generator */
	nRetVal = depth.Create(context);
	if (!checkError("Error creating the depth generator", nRetVal)) exit(-1);

	/* Configure the depth generator */
	XnMapOutputMode outputModeDepth;
	outputModeDepth.nXRes = 640;
	outputModeDepth.nYRes = 480;
	outputModeDepth.nFPS = 30;
	nRetVal = depth.SetMapOutputMode(outputModeDepth);
	if (!checkError("Error configuring the depth generator", nRetVal)) exit(-1);

	/* Create the image generator */
	nRetVal = image.Create(context);
	if (!checkError("Error creating the image generator", nRetVal)) exit(-1);

	/* Configure the image generator */
	XnMapOutputMode outputModeImage;
	outputModeImage.nXRes = 640;
	outputModeImage.nYRes = 480;
	outputModeImage.nFPS = 30;
	nRetVal = image.SetMapOutputMode(outputModeImage);
	if (!checkError("Error configuring the image generator", nRetVal)) exit(-1);

	/* Start the generators - full speed ahead! */
	nRetVal = context.StartGeneratingAll();
	if (!checkError("Error starting the generators", nRetVal)) exit(-1);

	/* Initialize GLUT */
	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
	glutInitWindowSize(WINDOW_SIZE_X, WINDOW_SIZE_Y);
	glutInitWindowPosition(300,150);
	win = glutCreateWindow("kinect-head-tracking");
	glClearColor(0, 0, 0, 0.0);       // Background color: black
	glEnable(GL_DEPTH_TEST);          // Enable depth testing
	glDepthFunc(GL_LEQUAL);
//	glEnable(GL_CULL_FACE);           // Enable backface culling
//	glEnable(GL_ALPHA_TEST);
//	glAlphaFunc(GL_GEQUAL, 1);

	/* Textures */
	glGenTextures(1, &texture_rgb);
	glBindTexture(GL_TEXTURE_2D, texture_rgb);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);


	glGenTextures(1, &texture_depth);
	glBindTexture(GL_TEXTURE_2D, texture_depth);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);

	glutDisplayFunc(glut_display);
	glutIdleFunc(glut_idle);
	glutMouseFunc(glut_mouse);
	glutMotionFunc(glut_mouse_motion);
	glutKeyboardFunc(glut_keyboard);
	glutMainLoop();
	return 0;
}
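glut_display and the other GLUT callbacks are not part of this excerpt. A hedged sketch of the kind of per-frame texture upload such a display callback typically performs; the function name is hypothetical, while context, image and texture_rgb are the globals used above:

void glut_display_sketch()
{
	context.WaitAnyUpdateAll();                    // fetch the newest camera data

	xn::ImageMetaData md;
	image.GetMetaData(md);

	glBindTexture(GL_TEXTURE_2D, texture_rgb);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, md.XRes(), md.YRes(), 0,
	             GL_RGB, GL_UNSIGNED_BYTE, md.RGB24Data());

	// ... draw a textured quad covering the window, then present ...
	glutSwapBuffers();
}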
Example #8
int main(int argc, char* argv[]) {
  glue.Init(argc, argv, 640, 240, "TrackHand");

  xn::Context context;
  XnStatus status = context.Init();
  bmg::OnError(status, []{
    std::cout << "Couldn't init OpenNi!" << std::endl;
    exit(1);
  });

  xn::ImageGenerator image_generator;
  status = image_generator.Create(context);
  bmg::OnError(status, []{
    std::cout << "Couldn't create image generator!" << std::endl;
  });

  status = depth_generator.Create(context);
  bmg::OnError(status, []{
    std::cout << "Couldn't create depth generator!" << std::endl;
  });

  xn::ImageMetaData image_metadata;
  xn::DepthMetaData depth_metadata;

  // Create gesture & hands generators
  status = gesture_generator.Create(context);
  bmg::OnError(status, []{
    std::cout << "Couldn't create gesture generator!" << std::endl;
  });
  status = hands_generator.Create(context);
  bmg::OnError(status, []{
    std::cout << "Couldn't create hands generator!" << std::endl;
  });

  // Register to callbacks
  XnCallbackHandle h1, h2;
  gesture_generator
    .RegisterGestureCallbacks(Gesture_Recognized, Gesture_Process, NULL, h1);
  hands_generator
    .RegisterHandCallbacks(Hand_Create, Hand_Update, Hand_Destroy, NULL, h2);

  status = context.StartGeneratingAll();
  bmg::OnError(status, []{
    std::cout << "Couldn't generate all data!" << std::endl;
  });
  status = gesture_generator.AddGesture(GESTURE, NULL);
  bmg::OnError(status, []{
    std::cout << "Couldn't add gesture!" << std::endl;
  });

  glue.BindDisplayFunc([&]{
    glue.BeginDraw();

    // here goes code for app main loop
    XnStatus status = context.WaitAndUpdateAll();
    bmg::OnError(status, []{
      std::cout << "Couldn't update and wait for new data!" << std::endl;
    });

    image_generator.GetMetaData(image_metadata);
    unsigned imageX = image_metadata.XRes();
    unsigned imageY = image_metadata.YRes();

    glue.DrawOnTexture(
      (void*)image_metadata.RGB24Data(),
      imageX, imageY,
      imageX, imageY,
      320, 0, 640, 240);

    depth_generator.GetMetaData(depth_metadata);
    unsigned depthX = depth_metadata.XRes();
    unsigned depthY = depth_metadata.YRes();

    XnRGB24Pixel* transformed_depth_map = new XnRGB24Pixel[depthX * depthY];
    bmg::CalculateDepth(
      depth_generator.GetDepthMap(), depthX, depthY, MAX_DEPTH, transformed_depth_map);

    glue.DrawOnTexture(
      (void*)transformed_depth_map,
      depthX, depthY,
      depthX, depthY,
      0, 0,
      320, 240);
    delete [] transformed_depth_map;

    if (hand_recognized) {
      // Draw point over tracked hand
      glue.DrawPointOverRegion(static_cast<unsigned>(projective_point.X), static_cast<unsigned>(projective_point.Y), 0, 0);
      glue.DrawPointOverRegion(static_cast<unsigned>(projective_point.X), static_cast<unsigned>(projective_point.Y), 320, 0);
    }

    glue.EndDraw();
  });

  glue.BindKeyboardFunc([](unsigned char key, int x, int y){
    switch(key) {
    case 27:
      exit(1);
    }
  });

  glue.Run();
  context.Release();
}
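The gesture and hand callbacks registered above are not shown. A hedged sketch of a typical implementation; it assumes the globals depth_generator, hands_generator, projective_point and hand_recognized used elsewhere in this example:

void XN_CALLBACK_TYPE Gesture_Recognized(xn::GestureGenerator&, const XnChar* /*strGesture*/,
                                         const XnPoint3D* /*pIDPosition*/,
                                         const XnPoint3D* pEndPosition, void* /*pCookie*/) {
  hands_generator.StartTracking(*pEndPosition);   // start tracking where the gesture ended
}

void XN_CALLBACK_TYPE Hand_Update(xn::HandsGenerator&, XnUserID /*nId*/,
                                  const XnPoint3D* pPosition, XnFloat /*fTime*/, void* /*pCookie*/) {
  // Convert the real-world hand position to pixel coordinates for drawing.
  depth_generator.ConvertRealWorldToProjective(1, pPosition, &projective_point);
  hand_recognized = true;
}

void XN_CALLBACK_TYPE Hand_Destroy(xn::HandsGenerator&, XnUserID /*nId*/,
                                   XnFloat /*fTime*/, void* /*pCookie*/) {
  hand_recognized = false;
}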
Example #9
bool initialize(){

	std::cout << "initializing kinect... " << endl;

#ifdef USE_MS_SKD

	int iSensorCount = 0;
    if ( NuiGetSensorCount(&iSensorCount) < 0 )
		return false;

    // Look at each Kinect sensor
    for (int i = 0; i < iSensorCount; ++i)
    {
        // Create the sensor so we can check status, if we can't create it, move on to the next
        if ( NuiCreateSensorByIndex(i, &g_pNuiSensor) < 0 ) continue;
      
		// Get the status of the sensor, and if connected, then we can initialize it
        if ( 0 == g_pNuiSensor->NuiStatus() )
            break;

        // This sensor wasn't OK, so release it since we're not using it
        g_pNuiSensor->Release();
    }

    if (NULL != g_pNuiSensor)
    {
        // Initialize the Kinect and specify that we'll be using depth
        if ( g_pNuiSensor->NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH) >= 0 )
        {
            // Create an event that will be signaled when depth data is available
            g_hNextDepthFrameEvent = CreateEvent(NULL, TRUE, FALSE, NULL);

            // Open a depth image stream to receive depth frames
            g_pNuiSensor->NuiImageStreamOpen(
                NUI_IMAGE_TYPE_DEPTH,
                NUI_IMAGE_RESOLUTION_640x480,
                0,
                2,
                g_hNextDepthFrameEvent,
                &g_pDepthStreamHandle);
        }
    }
	else
        return false;
	
#endif

#ifdef USE_OPENNI

	// Initialize context object
	g_RetVal = g_Context.Init();

	g_RetVal = g_DepthGenerator.Create(g_Context);
	if (g_RetVal != XN_STATUS_OK)
		printf("Failed creating DEPTH generator %s\n", xnGetStatusString(g_RetVal));

	XnMapOutputMode outputMode;
	outputMode.nXRes = g_im_w;
	outputMode.nYRes = g_im_h;
	outputMode.nFPS = g_fps;
	g_RetVal = g_DepthGenerator.SetMapOutputMode(outputMode);
	if (g_RetVal != XN_STATUS_OK){
		printf("Failed setting the DEPTH output mode %s\n", xnGetStatusString(g_RetVal));
		return false;
	}

	g_RetVal = g_Context.StartGeneratingAll();
	if (g_RetVal != XN_STATUS_OK){
		printf("Failed starting generating all %s\n", xnGetStatusString(g_RetVal));
		return false;
	}
#endif

#ifdef USE_LIBFREENECT
  depth_mid = (uint16_t*)malloc(640*480*2);
  depth_front = (uint16_t*)malloc(640*480*2);

	if (freenect_init(&f_ctx, NULL) < 0) {
		printf("freenect_init() failed\n");
		return false;
	}
	
	freenect_set_log_level(f_ctx, FREENECT_LOG_ERROR);
	freenect_select_subdevices(f_ctx, (freenect_device_flags)(FREENECT_DEVICE_MOTOR | FREENECT_DEVICE_CAMERA));
	
	int nr_devices = freenect_num_devices (f_ctx);
	printf ("Number of devices found: %d\n", nr_devices);
	
	int user_device_number = 0;
	
	if (nr_devices < 1) {
		freenect_shutdown(f_ctx);
		return false;
	}
	
	if (freenect_open_device(f_ctx, &f_dev, user_device_number) < 0) {
		printf("Could not open device\n");
		freenect_shutdown(f_ctx);
		return false;
	}
	
	bool res = pthread_create(&freenect_thread, NULL, freenect_threadfunc, NULL);
	if (res) {
		printf("pthread_create failed\n");
		freenect_shutdown(f_ctx);
		return false;
	}
	freenect_raw_tilt_state state;
	freenect_get_tilt_status(&state);
	
	freenect_angle = freenect_get_tilt_degs(&state);
	
#endif
	
	g_im3D.create(g_im_h,g_im_w,CV_32FC3);
	g_imD.create(g_im_h,g_im_w,CV_16UC1);

	return true;
}
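A hedged sketch (not from the original, for the OpenNI path only) of one way the raw depth image g_imD can be converted into the point cloud g_im3D using the generator's ConvertProjectiveToRealWorld(); depthToPointCloud() is a hypothetical name and <vector> is assumed to be included:

#include <vector>

void depthToPointCloud()
{
	std::vector<XnPoint3D> proj(g_im_w * g_im_h), real(g_im_w * g_im_h);
	for (int v = 0; v < g_im_h; ++v)
		for (int u = 0; u < g_im_w; ++u)
		{
			XnPoint3D& p = proj[v * g_im_w + u];
			p.X = (XnFloat)u;
			p.Y = (XnFloat)v;
			p.Z = (XnFloat)g_imD.at<unsigned short>(v, u);   // depth in mm
		}
	g_DepthGenerator.ConvertProjectiveToRealWorld((XnUInt32)proj.size(), proj.data(), real.data());
	for (int v = 0; v < g_im_h; ++v)
		for (int u = 0; u < g_im_w; ++u)
		{
			const XnPoint3D& p = real[v * g_im_w + u];
			g_im3D.at<cv::Vec3f>(v, u) = cv::Vec3f(p.X, p.Y, p.Z);
		}
}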
Example #10
bool OpenNIVideo::init() {

	//open the video 
	//if you are using a device, you can open the device
	//if you are using video streaming, you can initialize the connection
	//if you are using video files, you can read the configuration, cache the data, etc.

	xn::EnumerationErrors errors;

	int resolutionX = 640;
	int resolutionY = 480;
	unsigned int FPS = 30;

	XnStatus nRetVal = XN_STATUS_OK;

	nRetVal = context.Init();
	CHECK_RC(nRetVal, "context global init");

	//xn::NodeInfoList list;
	//nRetVal = context.EnumerateProductionTrees(XN_NODE_TYPE_DEVICE, NULL, list, &errors);
	//CHECK_RC(nRetVal, "enumerate production tree");
	
	// HandsGenerator hands;
	//UserGenerator user;
	//GestureGenerator gesture;
	//SceneAnalyzer scene;

	nRetVal = depth_generator.Create(context);
	CHECK_RC(nRetVal, "creating depth generator");

	nRetVal = image_generator.Create(context);
	CHECK_RC(nRetVal, "creating image generator");

	
	if(depth_generator.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
	{
		nRetVal = depth_generator.GetAlternativeViewPointCap().SetViewPoint(image_generator);
		CHECK_RC(nRetVal, "creating registered image/depth generator");
	}
	else
	{
		printf("WARNING: XN_CAPABILITY_ALTERNATIVE_VIEW_POINT not supported");
	}

	if (depth_generator.IsCapabilitySupported(XN_CAPABILITY_FRAME_SYNC))
	{
		if( depth_generator.GetFrameSyncCap().CanFrameSyncWith(image_generator)) {
			//nRetVal=depth.GetFrameSyncCap().FrameSyncWith(image);
			//CHECK_RC(nRetVal, "creating frame sync image/depth generator");
		}
	}
	else
	{
		printf("WARNING: XN_CAPABILITY_FRAME_SYNC not supported");
	}

	XnMapOutputMode mode = {resolutionX,resolutionY,FPS};

	nRetVal = depth_generator.SetMapOutputMode(mode);
	CHECK_RC(nRetVal, "set output mode");

	// Not needed if frame sync is used
	nRetVal = image_generator.SetMapOutputMode(mode);
	CHECK_RC(nRetVal, "set output mode");
	
	_depthBufferShort=new unsigned short[resolutionX*resolutionY];
	_depthBufferByte=new unsigned char[resolutionX*resolutionY];
	
	//we need to create one video stream
	_videoStreamList.push_back(new osgART::VideoStream());
	
	_videoStreamList[0]->allocateImage(resolutionX,resolutionY, 1, GL_RGB, GL_UNSIGNED_BYTE);

	//we need to create one video stream
	_videoStreamList.push_back(new osgART::VideoStream());
	
//	_videoStreamList[1]->allocateImage(resolutionX,resolutionY, 1, GL_LUMINANCE, GL_FLOAT);
	_videoStreamList[1]->allocateImage(resolutionX,resolutionY, 1, GL_LUMINANCE, GL_UNSIGNED_BYTE);
	//_videoStreamList[1]->allocateImage(resolutionX,resolutionY, 1, GL_LUMINANCE, GL_UNSIGNED_SHORT);
	//_videoStreamList[1]->allocateImage(w, h, 1, GL_DEPTHCOMPONENT16, GL_UNSIGNED_BYTE);
	
	if (m_flip_vertical) 
	{
		_videoStreamList[0]->flipVertical();
		_videoStreamList[1]->flipVertical();
	}
	if (m_flip_horizontal) 
	{
		_videoStreamList[0]->flipHorizontal();
		_videoStreamList[1]->flipHorizontal();
	}
	return true;

}
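A hedged sketch (updateDepthBuffer() is a hypothetical name) of the per-frame conversion the two depth buffers allocated above are typically used for: keep a raw 16-bit copy and scale the values into the 8-bit luminance stream.

void updateDepthBuffer(const XnDepthPixel* depthMap, int resolutionX, int resolutionY,
                       unsigned short* bufferShort, unsigned char* bufferByte)
{
	const int n = resolutionX * resolutionY;
	for (int i = 0; i < n; ++i)
	{
		bufferShort[i] = depthMap[i];                                 // raw 16-bit depth (mm)
		bufferByte[i]  = (unsigned char)(depthMap[i] * 255 / 10000);  // ~0-10 m mapped to 0-255
	}
}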
bool SetupPrimesense(void)
{
	XnStatus nRetVal = XN_STATUS_OK;

	if ((nRetVal = g_context.Init()) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not init OpenNI context: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}

	if ((nRetVal = g_depth.Create(g_context))!= XN_STATUS_OK)
	{
		fprintf(stderr,"Could not create depth generator: %s\n", xnGetStatusString(nRetVal));
		g_haveDepth = FALSE;
	}
	else if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not find depth sensor: %s\n", xnGetStatusString(nRetVal));
		g_haveDepth = FALSE;
	}

	if ((nRetVal = g_image.Create(g_context))!= XN_STATUS_OK)
	{
		fprintf(stderr,"Could not create image generator: %s\n", xnGetStatusString(nRetVal));
		g_haveImage = FALSE;
	}
	else if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not find image sensor: %s\n", xnGetStatusString(nRetVal));
		g_haveImage = FALSE;
	}

	if (!g_haveImage &&  !g_haveDepth)
	{
		fprintf(stderr,"Could not find either depth or image sources.\n");
		return FALSE;
	}

	XnMapOutputMode mapMode;
	mapMode.nXRes = XN_VGA_X_RES;
	mapMode.nYRes = XN_VGA_Y_RES;
	mapMode.nFPS = 30;
	if (g_haveDepth && ( (nRetVal = g_depth.SetMapOutputMode(mapMode)) != XN_STATUS_OK))
	{
		fprintf(stderr,"Could not set depth mode: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}

	if (g_haveDepth)
	{

		g_depth.GetMetaData(g_depthMD);
		g_depthWidth  = g_depthMD.FullXRes();
		g_depthHeight = g_depthMD.FullYRes();	

	}


	if (g_haveImage &&  (nRetVal = g_image.SetMapOutputMode(mapMode)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not set image: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}

	
	if ((nRetVal = g_context.StartGeneratingAll()) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not start: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}
	return TRUE;
}
int main() {
	printf("main() START\n");
	signal(SIGINT, onSignalReceived); // hit CTRL-C keys in terminal (2)
	signal(SIGTERM, onSignalReceived); // hit stop button in eclipse CDT (15)

	mapMode.nXRes = XN_VGA_X_RES;
	mapMode.nYRes = XN_VGA_Y_RES;
	mapMode.nFPS = 30;

	CHECK_RC(ctx.Init(), "init");
	CHECK_RC(depthGenerator.Create(ctx), "create depth");
	depthGenerator.SetMapOutputMode(mapMode);

	XnStatus userAvailable = ctx.FindExistingNode(XN_NODE_TYPE_USER, userGenerator);
	if(userAvailable != XN_STATUS_OK) {
		CHECK_RC(userGenerator.Create(ctx), "create user");
	}

	XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
	xn::SkeletonCapability skel = userGenerator.GetSkeletonCap();
	CHECK_RC(userGenerator.RegisterUserCallbacks(onUserNew, onUserLost, NULL, hUserCallbacks), "register user");
	CHECK_RC(skel.RegisterCalibrationCallbacks(onCalibrationStart, onCalibrationEnd, NULL, hCalibrationCallbacks), "register calib");
	CHECK_RC(userGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(onPoseDetected, NULL, NULL, hPoseCallbacks), "register pose");

	XnChar poseName[20] = "";
	CHECK_RC(skel.GetCalibrationPose(poseName), "get posename");
	printf("poseName: %s\n", poseName);
	CHECK_RC(skel.SetSkeletonProfile(XN_SKEL_PROFILE_ALL), "set skel profile");
	CHECK_RC(skel.SetSmoothing(0.8), "set smoothing");
//	xnSetMirror(depth, !mirrorMode);

	CHECK_RC(ctx.StartGeneratingAll(), "start generating");

	printf("waitAnyUpdateAll ...\n");
	while(!shouldTerminate) {
		ctx.WaitAnyUpdateAll();
//		depthGenerator.GetMetaData(tempDepthMetaData);

		const XnUInt16 userCount = userGenerator.GetNumberOfUsers();
//		printf("userCount: %i\n", userCount);
		XnUserID aUsers[userCount];
		XnUInt16 nUsers = userCount;
		userGenerator.GetUsers(aUsers, nUsers);
		for (int i = 0; i < nUsers; ++i) {
			XnUserID currentUserId = aUsers[i];
			if (skel.IsTracking(aUsers[i])) {
				XnSkeletonJointPosition joint;
				skel.GetSkeletonJointPosition(currentUserId, XN_SKEL_HEAD, joint);
				XnFloat x = joint.position.X;
				XnFloat y = joint.position.Y;
				XnFloat z = joint.position.Z;
				printf("joint position: %.2f x %.2f x %.2f\n", x, y, z);
				printf("joint.fConfidence: %.2f\n", joint.fConfidence);
			}
		}
	}
	printf("STOP\n");
	CHECK_RC(ctx.StopGeneratingAll(), "stop generating");
	ctx.Shutdown();

	printf("main() END\n");
}
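CHECK_RC is used by several of the examples above but never defined in this excerpt. One possible definition, a sketch that simply aborts on failure (the real macro may instead return from the calling function):

#include <cstdio>
#include <cstdlib>

#define CHECK_RC(call, what)                                              \
	do {                                                                  \
		XnStatus rc_ = (call);                                            \
		if (rc_ != XN_STATUS_OK) {                                        \
			printf("%s failed: %s\n", what, xnGetStatusString(rc_));      \
			exit(1);                                                      \
		}                                                                 \
	} while (0)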