Code Example #1
File: NiSimpleViewer.cpp  Project: 3david/OpenNI
int main(int argc, char* argv[])
{
	XnStatus rc;

	EnumerationErrors errors;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (rc);
	}
	else if (rc != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(rc));
		return (rc);
	}

	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	CHECK_RC(rc, "Find depth generator");
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	CHECK_RC(rc, "Find image generator");

	g_depth.GetMetaData(g_depthMD);
	g_image.GetMetaData(g_imageMD);

	// Hybrid mode isn't supported in this sample
	if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes())
	{
		printf ("The device depth and image resolution must be equal!\n");
		return 1;
	}

	// RGB is the only image format supported.
	if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		printf("The device image format must be RGB24\n");
		return 1;
	}

	// Texture map init
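	// Round each dimension up to the next multiple of 512 so the texture is
	// power-of-two friendly for GL (e.g. 640 -> 1024, 480 -> 512).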
	g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes()-1) / 512) + 1) * 512;
	g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes()-1) / 512) + 1) * 512;
	g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));

	// OpenGL init
	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
	glutInitWindowSize(GL_WIN_SIZE_X, GL_WIN_SIZE_Y);
	glutCreateWindow ("OpenNI Simple Viewer");
	glutFullScreen();
	glutSetCursor(GLUT_CURSOR_NONE);

	glutKeyboardFunc(glutKeyboard);
	glutDisplayFunc(glutDisplay);
	glutIdleFunc(glutIdle);

	glDisable(GL_DEPTH_TEST);
	glEnable(GL_TEXTURE_2D);

	// Per frame code is in glutDisplay
	glutMainLoop();

	return 0;
}
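
Note: the OpenNI samples in this listing rely on a CHECK_RC macro for status checking (used in examples #3 and #5 below, and added to the FindExistingNode calls above). It is defined in the sample headers rather than in these files; a sketch of its usual form, which may differ slightly between OpenNI versions:

#define CHECK_RC(rc, what)											\
	if (rc != XN_STATUS_OK)											\
	{																\
		printf("%s failed: %s\n", what, xnGetStatusString(rc));	\
		return rc;													\
	}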
Code Example #2
File: main.cpp  Project: Yusuke-Shimizu/depthkey
//----------------------------------------------------
// Initialization of everything related to OpenNI
//----------------------------------------------------
void xnInit(void){
	XnStatus rc;

	EnumerationErrors errors;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT){
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		exit(1);
	}else if (rc != XN_STATUS_OK){
		printf("Open failed: %s\n", xnGetStatusString(rc));
		exit(1);
	}
	
	//playerInit();

	rc = xnFPSInit(&g_xnFPS, 180);	// initialize the FPS calculator
	//CHECK_RC(rc, "FPS Init");

	// Locate the depth, image, and user generators
	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	errorCheck(rc, "g_depth");		// エラーチェック
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	errorCheck(rc, "g_image");
	rc = g_context.FindExistingNode(XN_NODE_TYPE_USER, g_user);
	//rc = g_user.Create(g_context);
	errorCheck(rc, "g_user");

	// Make sure the user generator supports skeleton detection
	if (!g_user.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) {
		//throw std::runtime_error("user detection is not supported");
		cout << "User detection is not supported" << endl;
		exit(1);
	}

	// Recorder setup
	//rc = setRecorder(g_recorder, rc);

	// Register the user callbacks
	XnCallbackHandle userCallbacks;
	g_user.RegisterUserCallbacks(UserDetected, UserLost, NULL, userCallbacks);

	// Get the depth, image, and user data
	g_depth.GetMetaData(g_depthMD);
	g_image.GetMetaData(g_imageMD);
	g_user.GetUserPixels(0, g_sceneMD);

	// Hybrid mode isn't supported in this sample
	// Fail if the image and depth resolutions differ
	if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes()){
		printf ("The device depth and image resolution must be equal!\n");
		exit(1);
	}

	// RGB is the only image format supported.
	// Check the pixel format
	if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24){
		printf("The device image format must be RGB24\n");
		exit(1);
	}

	// Texture map init
	// Size the full-screen texture
	g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes() - 1) / 512) + 1) * 512;	// rounded up to a multiple of 512 (here 1024)
	g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes() - 1) / 512) + 1) * 512;	// 512
	g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));	// allocate enough color data to cover the screen

	// Initialize the point buffers
	g_pPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D));			// buffer for the 3D points
	g_pBackTex = (XnRGB24Pixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnRGB24Pixel));	// buffer for the background image
	g_pBackPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D));		// buffer for the background 3D points
	g_pBackDepth = (XnDepthPixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnDepthPixel));	// buffer for the background depth values
}
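
Code example #2 registers UserDetected and UserLost without showing their bodies. A minimal sketch of what they could look like, assuming the standard xn::UserGenerator callback signature (the bodies here are placeholders, not the project's actual handlers):

void XN_CALLBACK_TYPE UserDetected(xn::UserGenerator& generator, XnUserID nId, void* pCookie)
{
	printf("User %d detected\n", nId);
	// A typical handler would start pose detection or skeleton calibration here.
}

void XN_CALLBACK_TYPE UserLost(xn::UserGenerator& generator, XnUserID nId, void* pCookie)
{
	printf("User %d lost\n", nId);
}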
Code Example #3
File: main.cpp  Project: alfiandosengkey/as3openni
int main(int argc, char *argv[])
{	
	//--------------------------------------------------------------------//
	//------------------------- SETUP REQUIRED NODES ---------------------//
	//--------------------------------------------------------------------//
	
	// Setup the command line parameters.
	setupParams(argc, argv);
	
	// Setup all the sockets.
	setupSockets();
    
	// Setup the capture socket server for Mac.
	#if (XN_PLATFORM == XN_PLATFORM_MACOSX)
		if(_featureDepthMapCapture || _featureRGBCapture)
		{
			if(_useSockets)
			{
				g_AS3Network = network();
				g_AS3Network.init(setupServer);
			}
		}
	#endif
	
	// Setup the status.
    XnStatus _status = XN_STATUS_OK;
    EnumerationErrors _errors;
    
    // Context Init and Add license.
	_status = _context.Init();
	CHECK_RC(_status, "AS3OpenNI :: Initialize context");
	_context.SetGlobalMirror(_mirror);
	
	// Fill in the PrimeSense license. (The original code called strcmp on
	// uninitialized buffers and wrote one element past the end of each field.)
	strcpy(_license.strVendor, "PrimeSense");
	strcpy(_license.strKey, "0KOIk2JeIBYClPWVnMoRKn5cdY4=");
		
	_status = _context.AddLicense(_license);
   	CHECK_RC(_status, "AS3OpenNI :: Added license");
   	
   	// Set it to VGA maps at 30 FPS
	_depthMode.nXRes = 640;
	_depthMode.nYRes = 480;
	_depthMode.nFPS = 30;
	
	// Depth map create.
	_status = _depth.Create(_context);
	CHECK_RC(_status, "AS3OpenNI :: Create depth generator");
	_status = _depth.SetMapOutputMode(_depthMode);
	
	// Image map create.
	_status = _image.Create(_context);
	CHECK_RC(_status, "AS3OpenNI :: Create image generator");
	_status = _image.SetMapOutputMode(_depthMode);
	_status = _image.SetPixelFormat(XN_PIXEL_FORMAT_RGB24);
	
	// Create the hands generator.
	_status = _hands.Create(_context);
	CHECK_RC(_status, "AS3OpenNI :: Create hands generator");
	_hands.SetSmoothing(0.1);

	// Create the gesture generator.
	_status = _gesture.Create(_context);
	CHECK_RC(_status, "AS3OpenNI :: Create gesture generator");
	
	// Create user generator.
	_status = _userGenerator.Create(_context);
	CHECK_RC(_status, "AS3OpenNI :: Find user generator");
	
	// Create and initialize point tracker
	_sessionManager = new XnVSessionManager();
	_status = _sessionManager->Initialize(&_context, "Wave", "RaiseHand");
	
	if (_status != XN_STATUS_OK)
	{
		printf("AS3OpenNI :: Couldn't initialize the Session Manager: %s\n", xnGetStatusString(_status));
		CleanupExit();
	}
	_sessionManager->RegisterSession(NULL, &SessionStart, &SessionEnd, &SessionProgress);
	
	// Start catching signals for quit indications
	CatchSignals(&_quit);
	
	//---------------------------------------------------------------//
	//------------------------- SETUP FEATURES ---------------------//
	//--------------------------------------------------------------//
	
	// Define the Wave and SinglePoint detectors.
	_waveDetector = new XnVWaveDetector();
	
	// SinglePoint detector.
	if(_featureSinglePoint) _waveDetector->RegisterPointUpdate(NULL, &OnPointUpdate);
	
	// Feature Gesture.
	if(_featureGesture)
	{
		// Wave detector.
		_waveDetector->RegisterWave(NULL, &OnWave);
		
		// Push detector.
		_pushDetector = new XnVPushDetector();
		_pushDetector->RegisterPush(NULL, &onPush);
	
		// Swipe detector.
		_swipeDetector = new XnVSwipeDetector();
		_swipeDetector->RegisterSwipeUp(NULL, &Swipe_SwipeUp);
		_swipeDetector->RegisterSwipeDown(NULL, &Swipe_SwipeDown);
		_swipeDetector->RegisterSwipeLeft(NULL, &Swipe_SwipeLeft);
		_swipeDetector->RegisterSwipeRight(NULL, &Swipe_SwipeRight);
	
		// Steady detector.
		_steadyDetector = new XnVSteadyDetector();
		_steadyDetector->RegisterSteady(NULL, &Steady_OnSteady);
	}
	
	// Feature Circle.
	if(_featureCircle)
	{
		// Circle detector.
		_circleDetector = new XnVCircleDetector();
		_circleDetector->RegisterCircle(NULL, &CircleCB);
		_circleDetector->RegisterNoCircle(NULL, &NoCircleCB);
		_circleDetector->RegisterPrimaryPointCreate(NULL, &Circle_PrimaryCreate);
		_circleDetector->RegisterPrimaryPointDestroy(NULL, &Circle_PrimaryDestroy);
	}
	
	// Feature Slider.
	if(_featureSlider)
	{
		// Left/Right slider.
		_leftRightSlider = new XnVSelectableSlider1D(3, 0, AXIS_X);
		_leftRightSlider->RegisterActivate(NULL, &LeftRightSlider_OnActivate);
		_leftRightSlider->RegisterDeactivate(NULL, &LeftRightSlider_OnDeactivate);
		_leftRightSlider->RegisterPrimaryPointCreate(NULL, &LeftRightSlider_OnPrimaryCreate);
		_leftRightSlider->RegisterPrimaryPointDestroy(NULL, &LeftRightSlider_OnPrimaryDestroy);
		_leftRightSlider->RegisterValueChange(NULL, &LeftRightSlider_OnValueChange);
		_leftRightSlider->SetValueChangeOnOffAxis(false);
		
		// Up/Down slider.
		_upDownSlider = new XnVSelectableSlider1D(3, 0, AXIS_Y);
		_upDownSlider->RegisterActivate(NULL, &UpDownSlider_OnActivate);
		_upDownSlider->RegisterDeactivate(NULL, &UpDownSlider_OnDeactivate);
		_upDownSlider->RegisterPrimaryPointCreate(NULL, &UpDownSlider_OnPrimaryCreate);
		_upDownSlider->RegisterPrimaryPointDestroy(NULL, &UpDownSlider_OnPrimaryDestroy);
		_upDownSlider->RegisterValueChange(NULL, &UpDownSlider_OnValueChange);
		_upDownSlider->SetValueChangeOnOffAxis(false);
		
		// In/Out slider.
		_inOutSlider = new XnVSelectableSlider1D(3, 0, AXIS_Z);
		_inOutSlider->RegisterActivate(NULL, &InOutSlider_OnActivate);
		_inOutSlider->RegisterDeactivate(NULL, &InOutSlider_OnDeactivate);
		_inOutSlider->RegisterPrimaryPointCreate(NULL, &InOutSlider_OnPrimaryCreate);
		_inOutSlider->RegisterPrimaryPointDestroy(NULL, &InOutSlider_OnPrimaryDestroy);
		_inOutSlider->RegisterValueChange(NULL, &InOutSlider_OnValueChange);
		_inOutSlider->SetValueChangeOnOffAxis(false);
	}
	
	// Feature TrackPad.
	if(_featureTrackPad)
	{
		// Track Pad.
		if(trackpad_columns > 0 && trackpad_rows > 0)
		{
			_trackPad = new XnVSelectableSlider2D(trackpad_columns, trackpad_rows);
		}
		else
		{
			_trackPad = new XnVSelectableSlider2D(4, 9);
		}
		
		_trackPad->RegisterItemHover(NULL, &TrackPad_ItemHover);
		_trackPad->RegisterItemSelect(NULL, &TrackPad_ItemSelect);
	    _trackPad->RegisterPrimaryPointCreate(NULL, &TrackPad_PrimaryCreate);
	  	_trackPad->RegisterPrimaryPointDestroy(NULL, &TrackPad_PrimaryDestroy);
	}
	
	// Feature User Tracking.
	if(_featureUserTracking)
	{
		// Setup user generator callbacks.
		XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
		if (!_userGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
		{
			printf("AS3OpenNI :: Supplied user generator doesn't support skeleton\n");
			return 1;
		}
		_userGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
		
		// Setup Skeleton detection.
		_userGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);
		if (_userGenerator.GetSkeletonCap().NeedPoseForCalibration())
		{
			_needPose = true;
			if (!_userGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
			{
				printf("AS3OpenNI :: Pose required, but not supported\n");
				return 1;
			}
			_userGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);
			_userGenerator.GetSkeletonCap().GetCalibrationPose(_strPose);
		}
		_userGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
	}
	
	// Create the broadcaster manager.
	_broadcaster = new XnVBroadcaster();
	
	// Start generating all.
	_status = _context.StartGeneratingAll();
	CHECK_RC(_status, "AS3OpenNI :: Start generating all");
	
	// Set the frame rate.
	_status = xnFPSInit(&xnFPS, 180);
	CHECK_RC(_status, "AS3OpenNI :: FPS Init");
	
	//----------------------------------------------------------------------//
	//------------------------- SETUP DISPLAY SUPPORT ---------------------//
	//--------------------------------------------------------------------//
	
	// Setup depth and image data.
	_depth.GetMetaData(_depthData);
	_image.GetMetaData(_imageData);
	
	// Hybrid mode isn't supported in this sample
	if (_imageData.FullXRes() != _depthData.FullXRes() || _imageData.FullYRes() != _depthData.FullYRes())
	{
		printf ("AS3OpenNI :: The device depth and image resolution must be equal!\n");
		return 1;
	}

	// RGB is the only image format supported.
	if (_imageData.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		printf("AS3OpenNI :: The device image format must be RGB24\n");
		return 1;
	}
	
	// Setup the view points to match between the depth and image maps.
	if(_snapPixels) _depth.GetAlternativeViewPointCap().SetViewPoint(_image);
	
	//-------------------------------------------------------------//
	//------------------------- MAIN LOOP ------------------------//
	//-----------------------------------------------------------//
	
	// Setup the capture socket server for PC.
	#if (XN_PLATFORM == XN_PLATFORM_WIN32)
		if(_featureDepthMapCapture || _featureRGBCapture || _featureUserTracking)
		{
			if(_useSockets)
			{
				g_AS3Network = network();
				g_AS3Network.init(setupServer);
			}
		}
	#endif
	
	// Main loop
	while ((!_kbhit()) && (!_quit))
	{
		xnFPSMarkFrame(&xnFPS);
		_context.WaitAndUpdateAll();
		_sessionManager->Update(&_context);
		if(_featureDepthMapCapture) captureDepthMap(g_ucDepthBuffer);
		if(_featureRGBCapture) captureRGB(g_ucImageBuffer);
		#if (XN_PLATFORM == XN_PLATFORM_WIN32)
			if(_featureUserTracking) getPlayers();
		#else
			if(_featureUserTracking) renderSkeleton();
		#endif
	}
	
	CleanupExit();
}
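
The NITE detectors wired up above call back into free functions that the example does not show. A sketch of plausible skeletons for a few of them, assuming NITE 1.x callback signatures (names match the registrations above; the bodies are placeholders):

void XN_CALLBACK_TYPE SessionStart(const XnPoint3D& ptPosition, void* UserCxt)
{
	printf("AS3OpenNI :: Session started at (%.1f, %.1f, %.1f)\n", ptPosition.X, ptPosition.Y, ptPosition.Z);
}

void XN_CALLBACK_TYPE SessionEnd(void* UserCxt)
{
	printf("AS3OpenNI :: Session ended\n");
}

void XN_CALLBACK_TYPE OnWave(void* UserCxt)
{
	printf("AS3OpenNI :: Wave detected\n");
}

void XN_CALLBACK_TYPE onPush(XnFloat fVelocity, XnFloat fAngle, void* UserCxt)
{
	printf("AS3OpenNI :: Push detected (velocity %.2f, angle %.2f)\n", fVelocity, fAngle);
}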
Code Example #4
int Init()
{
	
	XnStatus rc;

	//Make sure our image types are the same as the OpenNI image types.
	assert(sizeof(XnRGB24Pixel) == sizeof(ColorPixel));
	assert(sizeof(XnDepthPixel) == sizeof(DepthPixel));
	assert(sizeof(XnStatus) == sizeof(int));

	// Load OpenNI xml settings
	char filePath[255];
	int length = Util::Helpers::GetExeDirectory(filePath, sizeof(filePath));
	filePath[length] = '\\';
	strcpy(&filePath[length+1], SAMPLE_XML_PATH);

	EnumerationErrors errors;
	rc = deviceContext.InitFromXmlFile(filePath, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		//One possible cause: the Microsoft Kinect SDK is installed alongside PrimeSense. Device Manager should list the device as PrimeSense, not Microsoft Kinect.
		
		//XnChar strError[1024];
		//errors.ToString(strError, 1024);
		//LOGE("%s\n", strError);
		return -1;
	}
	else if (rc != XN_STATUS_OK)
	{
		fprintf(stderr, "%s\n", xnGetStatusString(rc));
		/*LOGE("Open failed: %s\n", xnGetStatusString(rc));*/
		return (rc);
	}

	// Retrieve colour and depth nodes
	rc = deviceContext.FindExistingNode(XN_NODE_TYPE_IMAGE, colorImageGenerator);
	rc = deviceContext.FindExistingNode(XN_NODE_TYPE_DEPTH, depthImageGenerator);

	// Set mirror mode to off
	SetMirrorMode(false);

	// Get a frame to perform checks on it
	ImageMetaData colorImageMetaData;
	DepthMetaData depthImageMetaData;
	depthImageGenerator.GetMetaData(depthImageMetaData);
	colorImageGenerator.GetMetaData(colorImageMetaData);

	// Hybrid mode isn't supported in this sample
	if (colorImageMetaData.FullXRes() != depthImageMetaData.FullXRes() || colorImageMetaData.FullYRes() != depthImageMetaData.FullYRes())
	{
		/*LOGE("The device depth and image resolution must be equal!\n");*/
		return 1;
	}

	// RGB is the only image format supported.
	if (colorImageMetaData.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		/*LOGE("The device image format must be RGB24\n");*/
		return 1;
	}
	
	// Need to make sure the automatic alignment of colour and depth images is supported.
	XnBool isSupported = depthImageGenerator.IsCapabilitySupported("AlternativeViewPoint");
	if(!isSupported)
	{
		/*LOGE("Cannot set AlternativeViewPoint!\n");*/
		return 1;
	}

	
	// Set it to VGA maps at 30 FPS
	/*XnMapOutputMode mapMode;
	mapMode.nXRes = XN_VGA_X_RES;
	mapMode.nYRes = XN_VGA_Y_RES;
	mapMode.nFPS = 60;
	rc = g_depth.SetMapOutputMode(mapMode);
	if(rc)
	{
		LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));
		return 1;
	}
	mapMode.nFPS = 30;
	rc = g_image.SetMapOutputMode(mapMode);
	if(rc)
	{
		LOGE("Failed to set image map mode: %s\n", xnGetStatusString(rc));
		return 1;
	}*/


	// Set automatic alignment of the colour and depth images.
	rc = depthImageGenerator.GetAlternativeViewPointCap().SetViewPoint(colorImageGenerator);
	if(rc)
	{
		/*LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));*/
		return 1;
	}


	return XN_STATUS_OK;
}
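
Once Init() returns XN_STATUS_OK, a caller can pump frames and read the aligned maps. A minimal sketch using the same globals as the example (error handling elided):

	XnStatus rc = deviceContext.WaitAndUpdateAll();
	if (rc == XN_STATUS_OK)
	{
		const XnDepthPixel* pDepth = depthImageGenerator.GetDepthMap();		// depth in millimeters
		const XnRGB24Pixel* pColor = colorImageGenerator.GetRGB24ImageMap();
		// After the SetViewPoint call above, pDepth[i] and pColor[i] refer to the same scene point.
	}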
Code Example #5
int main(int argc, char* argv[])
{

	XnStatus rc;	// used throughout main() but not declared in the original snippet
	EnumerationErrors errors;

	//rc = context.Init();
	rc = context.InitFromXmlFile(strPathToXML, &errors);
    if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (rc);
	}
	else if (rc != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(rc));
		return (rc);
	}
	
	/* UNCOMMENT TO GET FILE READING 
    //rc = context.OpenFileRecording(strInputFile);
	//CHECK_RC(rc, "Open input file");

	//rc = context.FindExistingNode(XN_NODE_TYPE_PLAYER, player);
	//CHECK_RC(rc, "Get player node"); */ 

	rc = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
	CHECK_RC(rc, "Find depth generator");

	rc = context.FindExistingNode(XN_NODE_TYPE_IMAGE, image);
	CHECK_RC(rc, "Find image generator");

    depth.GetMetaData(depthMD);
	image.GetMetaData(imageMD);

	//rc = player.SetRepeat(FALSE);
	//XN_IS_STATUS_OK(rc);

    //rc = player.GetNumFrames(image.GetName(), nNumFrames);
	//CHECK_RC(rc, "Get player number of frames");
	//printf("%d\n",nNumFrames);

    //rc = player.GetNumFrames(depth.GetName(), nNumFrames);
	//CHECK_RC(rc, "Get player number of frames");
	//printf("%d\n",nNumFrames);

	// Hybrid mode isn't supported
	if (imageMD.FullXRes() != depthMD.FullXRes() || imageMD.FullYRes() != depthMD.FullYRes())
	{
		printf ("The device depth and image resolution must be equal!\n");
		return 1;
	}

	// RGB is the only image format supported.
	if (imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		printf("The device image format must be RGB24\n");
		return 1;
	}

    avi = cvCreateVideoWriter(strOutputFile, 0, 30, cvSize(640,480), TRUE);

    depthMetersMat = cvCreateMat(480, 640, CV_16UC1);
    kinectDepthImage = cvCreateImage( cvSize(640,480),16,1 );

    depthMetersMat2 = cvCreateMat(480, 640, CV_16UC1);
    kinectDepthImage2 = cvCreateImage( cvSize(640,480),16,1 );

    colorArr[0] = cv::Mat(imageMD.YRes(),imageMD.XRes(),CV_8U);
    colorArr[1] = cv::Mat(imageMD.YRes(),imageMD.XRes(),CV_8U);
    colorArr[2] = cv::Mat(imageMD.YRes(),imageMD.XRes(),CV_8U);

    //prepare_for_face_detection();

    int b;
    int g;
    int r;

	while ((rc = image.WaitAndUpdateData()) != XN_STATUS_EOF && (rc = depth.WaitAndUpdateData()) != XN_STATUS_EOF) {
        if (rc != XN_STATUS_OK) {
            printf("Read failed: %s\n", xnGetStatusString(rc));
            break;
        }
        depth.GetMetaData(depthMD);
        image.GetMetaData(imageMD);

        //XnUInt32 a;
        //a = g_imageMD.FPS;
        printf("%d\n",imageMD.FrameID());
        //a = g_depthMD.DataSize();
        //printf("%d\n",a);

        pDepth = depthMD.Data();
        pImageRow = imageMD.RGB24Data();

        for (unsigned int y=0; y<imageMD.YRes(); y++) {
            pPixel = pImageRow;
            uchar* Bptr = colorArr[0].ptr<uchar>(y);
            uchar* Gptr = colorArr[1].ptr<uchar>(y);
            uchar* Rptr = colorArr[2].ptr<uchar>(y);

            for(unsigned int x=0;x<imageMD.XRes();++x , ++pPixel){
                Bptr[x] = pPixel->nBlue;
                Gptr[x] = pPixel->nGreen;
                Rptr[x] = pPixel->nRed;

                depthMetersMat->data.s[y * XN_VGA_X_RES + x ] = 7*pDepth[y * XN_VGA_X_RES + x];
                depthMetersMat2->data.s[y * XN_VGA_X_RES + x ] = pDepth[y * XN_VGA_X_RES + x];
            }
            pImageRow += imageMD.XRes();
        }
        cv::merge(colorArr,3,colorImage);
        iplImage = colorImage;

        //cvThreshold(depthMetersMat2, depthMetersMat2, 150, 1500, THRESH_BINARY);

        cvGetImage(depthMetersMat,kinectDepthImage);
        cvGetImage(depthMetersMat2,kinectDepthImage2);

        depthImage = Bw2Image(kinectDepthImage2);
        printf("1. Middle pixel is %u millimeters away\n",depthImage[240][320]);

        rgbImage = RgbImage(&iplImage);

		// show depth only up to 2000 mm
        int THRESH = 2000;

        for (unsigned int y=0; y<imageMD.YRes(); y++) {
            for(unsigned int x=0;x<imageMD.XRes();++x){
                if ( depthImage[y][x] >= THRESH ) {
                    depthImage[y][x] = 0;
                } else {
                    float tmp = depthImage[y][x];
                    tmp = tmp / THRESH * (65536)*(-1) + 65536;
                    depthImage[y][x] = (unsigned int)tmp;
                }
            }
        }
		
		// FILTER COLOURS IN HSV TO KEEP ONLY ONE SPECIFIC COLOUR,
		// THEN A FEW MORPHOLOGICAL OPERATIONS TO CLEAN UP THE MASK

        IplImage* imgHSV = cvCreateImage(cvGetSize(&iplImage), 8, 3);
        cvCvtColor(&iplImage, imgHSV, CV_BGR2HSV);
        imgThreshed = cvCreateImage(cvGetSize(&iplImage), 8, 1);
        //cvInRangeS(imgHSV, cvScalar(100, 60, 80), cvScalar(110, 255, 255), imgThreshed); // BLUE
        cvInRangeS(imgHSV, cvScalar(29, 95, 95), cvScalar(35, 255, 255), imgThreshed); // YELLOW
        //cvInRangeS(imgHSV, cvScalar(29, 60, 60), cvScalar(35, 255, 255), imgThreshed); // YELLOW DARK
        //cvInRangeS(imgHSV, cvScalar(150, 70, 70), cvScalar(160, 255, 255), imgThreshed); // PINK
        //cvInRangeS(imgHSV, cvScalar(40, 76, 76), cvScalar(70, 255, 255), imgThreshed); // GREEN
        IplConvKernel* kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);
        //cvDilate(imgThreshed,imgThreshed,kernel);
        //cvErode(imgThreshed,imgThreshed,kernel);
        Mat mat = Mat(imgThreshed);
        blur(Mat(imgThreshed),mat,cvSize(3,3));
        imgThreshed = &IplImage(mat);
        //cvInRangeS(imgThreshed,cvScalar(100),cvScalar(255),imgThreshed);
        //cvErode(imgThreshed,imgThreshed,kernel);
        cvDilate(imgThreshed,imgThreshed,kernel);
        cvDilate(imgThreshed,imgThreshed,kernel);
        cvErode(imgThreshed,imgThreshed,kernel);
        cvErode(imgThreshed,imgThreshed,kernel);
        mat = Mat(imgThreshed);
        blur(Mat(imgThreshed),mat,cvSize(6,6));
        imgThreshed = &IplImage(mat);
        cvInRangeS(imgThreshed,cvScalar(100),cvScalar(255),imgThreshed);
        cvReleaseImage(&imgHSV);
        BwImage threshed = BwImage(imgThreshed);



        if ( initialize == true ) {

            normalizeReferenceFace();
            int currentID = 0;

                for ( int y = 30; y<480; y++ ) {
                    for ( int x = 30; x<640; x++ ) {
                        bool g2g = true;
                        //printf("%d %d %d\n",ID, y,x);
                        if ( threshed[y][x]!=0 ) {
                            for ( int ID2 = 0; ID2<nbOfPoints; ID2++) {
                                if ( (abs(markers[ID2].y-y)<proximityLimit) && (abs(markers[ID2].x-x)<proximityLimit)) {
                                    g2g = false;
                                }
                            }
                            if (currentID >= nbOfPoints || g2g == false ) {
                                break;
                            }
                            markers[currentID].y=y;
                            markers[currentID].x=x;
                            currentID++;
                            printf("WHITE PIXEL INITIALIZED %d: %d %d\n",currentID, x,y);
                        }
                    }
                }


            if (isDebugConf==true || currentID == nbOfMarkers) {
                printf("%d PIXELS INITIALIZED\n", currentID);
                initialize = false;
                //printf("%d,%d\n", currentID, nbOfPoints);
                //return 0;
            } else {
                printf("WAITING FOR %d PIXELS TO APPEAR, %d SO FAR \n",nbOfMarkers, currentID);
                continue;
            }


            // FIND TOP RIGHT AND CHIN PIXEL

            int refPixID = 0;
            int chinPixID = 0;

            for ( int i = 0; i < nbOfMarkers; i++) {
                if ( (markers[i].x + markers[i].y)*(markers[i].x + markers[i].y) < (markers[refPixID].x + markers[refPixID].y)* (markers[refPixID].x + markers[refPixID].y)) {
                    refPixID = i;
                }
                if (markers[i].y > markers[chinPixID].y) {
                    chinPixID = i;
                }
            }

            float width = (markers[1].x-markers[0].x)*2;
            float height = abs(markers[1].y-markers[0].y);

            // WE GOT WIDTH & HEIGTH OF THE FACE, LETS ADJUST POINTS

            // SET 0 to REF, SET 1 to CHIN

            MyPoint tmp = MyPoint(markers[refPixID].x,markers[refPixID].y);
            markers[refPixID].x = markers[0].x;
            markers[refPixID].y = markers[0].y;
            markers[0].x = tmp.x;
            markers[0].y = tmp.y;

            tmp = MyPoint(markers[chinPixID].x,markers[chinPixID].y);
            markers[chinPixID].x = markers[1].x;
            markers[chinPixID].y = markers[1].y;
            markers[1].x = tmp.x;
            markers[1].y = tmp.y;


            // REST OF THE POINTS

            for ( int i = 2; i < nbOfPoints; i++) {

                int cost = 0;
                int lowestCost = INT_MAX;	// was 0, so no candidate could ever win the comparison below (needs <climits>)
                int closestPixID = -1;

                for ( int j = 2; j < nbOfMarkers; j++ ) {
                    cost = (markers[j].x-points[i].x*width)*(markers[j].x-points[i].x*width) + (markers[j].y-points[i].y*height)*(markers[j].y-points[i].y*height);
                    if ( cost < lowestCost ) {
                        lowestCost = cost;
                        closestPixID = j;
                    }
                }

                if (closestPixID == -1) {
                    // no candidate marker found for this point
                    break;
                }

                // Swap the closest marker into slot i (the original assigned markers[i].x twice).
                tmp.x = markers[i].x;
                tmp.y = markers[i].y;
                markers[i].x = markers[closestPixID].x;
                markers[i].y = markers[closestPixID].y;
                markers[closestPixID].x = tmp.x;
                markers[closestPixID].y = tmp.y;
            }
        }

        for ( int currentPixelID = 0; currentPixelID < nbOfMarkers; currentPixelID++) {
            if (markers[currentPixelID].x == 0) {
                continue;
            }

            if ( threshed[markers[currentPixelID].y][markers[currentPixelID].x] < 128 ) {
                printf("PIXEL %d LOST\n",currentPixelID);

                for ( int neighbSize = 2; neighbSize < maxNeighbSize; neighbSize = neighbSize + 2 ) {

                    int x1 = markers[currentPixelID].x - neighbSize/2;
                    if ( x1 < intoDepthX(0) ) {
                        x1 = (int)intoDepthX(0);
                    }

                    int y1 = (int)(markers[currentPixelID].y-neighbSize/2);
                    if (  y1 < intoDepthY(0) ) {
                        y1 = intoDepthY(0);
                    }

                    int y2 = markers[currentPixelID].y+neighbSize/2;
                    if (  y2 > intoDepthY(480)  ) {
                        y2 = intoDepthY(480);
                    }

                    int x2 = markers[currentPixelID].x+neighbSize/2;
                    if ( x2 > intoDepthX(640) ) {
                        x2 = (int)intoDepthX(640);
                    }

                    bool found = false;
                    for ( int y = y1; y < y2; y++) {
                        for ( int x = x1; x < x2; x++) {
                            bool g2g = true;
                            if (threshed[y][x] > 128) {
                                for ( int ID2 = 0; ID2<nbOfMarkers; ID2++) {
                                    if ( currentPixelID == ID2 )
                                        continue;
                                    if ( (abs(markers[ID2].y-y)<proximityLimit) && (abs(markers[ID2].x-x)<proximityLimit)) {
                                        g2g = false;
                                        break;
                                    }
                                }

                                if ( g2g ) {
                                    markers[currentPixelID].x = x;
                                    markers[currentPixelID].y = y;
                                    found = true;
                                    printf("Pixel %d, FOUND\n",currentPixelID);
                                    break;
                                }
                            }
                        }
                        if (found == true ) {
                            break;
                        }
                    }
                    if (found == true ) {
                        break;
                    }
                }
            }

            paintMarkerOnBoth(markers[currentPixelID]);

        }
        faceImage = cvCreateImage(cvGetSize(&iplImage), 8, 1);
        paintFace();

		// normal kinect depth
        cvShowImage("Depth_Kinect", kinectDepthImage);
		// depth up to 2000 mm, normalized and inverted
        cvShowImage("Depth_Kinect_2", kinectDepthImage2);
		// rgb with tracking points
        cvShowImage("RGB_Kinect", &iplImage);
		// colour detector 
        cvShowImage("RGB_Threshed", imgThreshed);
		// attempt to draw a face 
        cvShowImage("Face Image", faceImage);

        cvWaitKey(50);           // wait 50 ms

        if ( avi == NULL) {
            printf ("dupa%d \n",1);
        }
        //cvWriteFrame (avi, &iplImage);
	}

//    cvReleaseImageHeader(kinectDepthImage);
    cvReleaseVideoWriter(&avi);
//    cvReleaseHaarClassifierCascade( &cascade );
    context.Shutdown();

	return 0;
}
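
The depth-inversion loop in the middle of this example maps raw millimeter values onto an inverted 16-bit ramp (nearer = brighter, everything at or beyond the threshold black). Factored out as a standalone helper it might look like this (a hypothetical refactoring; InvertDepth is not part of the original code):

// Map depth in [0, thresh) mm to a bright-near / dark-far 16-bit value;
// anything at or beyond thresh is clipped to 0 (black).
inline unsigned short InvertDepth(unsigned short mm, unsigned short thresh = 2000)
{
	if (mm >= thresh)
		return 0;
	return (unsigned short)(65535.0f * (1.0f - (float)mm / (float)thresh));
}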