Example #1
// -----------------------------------------------------------------------------------------------------
//  connect
// -----------------------------------------------------------------------------------------------------
bool CameraDevice::connect()
{
	//Connect to kinect
	printf("Connecting to Kinect... ");
	fflush(stdout);
	XnStatus nRetVal = XN_STATUS_OK;
	EnumerationErrors errors;
	ScriptNode script;
	nRetVal = g_context.InitFromXmlFile(Config::_PathKinectXmlFile.c_str(), script, &errors);
	if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return false;
	}
	else if (nRetVal != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(nRetVal));
		return false;
	}
	printf("OK\n");

	// allocate the point cloud buffer
	g_cloudPointSave.width = NBPIXELS_WIDTH;
	g_cloudPointSave.height = NBPIXELS_HEIGHT;
	g_cloudPointSave.points.resize(NBPIXELS_WIDTH*NBPIXELS_HEIGHT);

	nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	CHECK_RC(nRetVal, "Find depth generator");

	nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	CHECK_RC(nRetVal, "Find image generator");

	nRetVal = xnFPSInit(&g_xnFPS, 180);
	CHECK_RC(nRetVal, "FPS Init");

	g_context.SetGlobalMirror(false); // disable horizontal mirroring

	g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);
	if (g_depth.GetIntProperty ("ShadowValue", g_shadowValue) != XN_STATUS_OK)
		printf ("[OpenNIDriver] Could not read shadow value!\n");

	if (g_depth.GetIntProperty ("NoSampleValue", g_noSampleValue) != XN_STATUS_OK)
		printf ("[OpenNIDriver] Could not read no sample value!\n");

    return (nRetVal == XN_STATUS_OK);
}
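Note: CHECK_RC, used in this and the following examples, is never defined in these snippets. The OpenNI samples ship a macro along these lines (a sketch of the usual definition, not necessarily the exact one in your headers):

#define CHECK_RC(rc, what)                                          \
	if (rc != XN_STATUS_OK)                                         \
	{                                                               \
		printf("%s failed: %s\n", what, xnGetStatusString(rc));     \
		return rc;                                                  \
	}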
Example #2
int main()
{
    XnStatus nRetVal = XN_STATUS_OK;

    Context context;
    EnumerationErrors errors;

    nRetVal = context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);

    if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
    {
        XnChar strError[1024];
        errors.ToString(strError, 1024);
        printf("%s\n", strError);
        return (nRetVal);
    }
    else if (nRetVal != XN_STATUS_OK)
    {
        printf("Open failed: %s\n", xnGetStatusString(nRetVal));
        return (nRetVal);
    }

    DepthGenerator depth;
    nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
    CHECK_RC(nRetVal, "Find depth generator");

    XnFPSData xnFPS;
    nRetVal = xnFPSInit(&xnFPS, 180);
    CHECK_RC(nRetVal, "FPS Init");

    DepthMetaData depthMD;

    while (!xnOSWasKeyboardHit())
    {
        nRetVal = context.WaitOneUpdateAll(depth);
        if (nRetVal != XN_STATUS_OK)
        {
            printf("UpdateData failed: %s\n", xnGetStatusString(nRetVal));
            continue;
        }

        xnFPSMarkFrame(&xnFPS);

        depth.GetMetaData(depthMD);
        const XnDepthPixel* pDepthMap = depthMD.Data();

        printf("Frame %d Middle point is: %u. FPS: %f\n", depthMD.FrameID(), depthMD(depthMD.XRes() / 2, depthMD.YRes() / 2), xnFPSCalc(&xnFPS));
    }

    context.Shutdown();

    return 0;
}
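pDepthMap is fetched above but never used. If you want raw access to the whole map rather than the depthMD(x, y) accessor, a minimal sketch:

// Count the valid (non-zero) pixels in the current depth frame.
XnUInt32 nValid = 0;
for (XnUInt32 i = 0; i < depthMD.XRes() * depthMD.YRes(); ++i)
{
	if (pDepthMap[i] != 0)	// 0 means "no sample"
		++nValid;
}
printf("Valid depth pixels: %u\n", nValid);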
Example #3
// Set up OpenNI to obtain images from the Kinect's RGB camera
// (RGB24 here; the 8-bit grayscale option is left commented out)
int kinectInit(void)
{
  XnStatus nRetVal = XN_STATUS_OK;
  ScriptNode scriptNode;
  EnumerationErrors errors;

  printf("Reading config from: '%s'\n", SAMPLE_XML_PATH_LOCAL);
  nRetVal = context.InitFromXmlFile(SAMPLE_XML_PATH_LOCAL, scriptNode, &errors);

  nRetVal = context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image); 
  //g_image.SetPixelFormat(XN_PIXEL_FORMAT_GRAYSCALE_8_BIT); 
  g_image.SetPixelFormat(XN_PIXEL_FORMAT_RGB24); 
  g_image.GetMetaData(g_imageMD);

  nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
  depth.GetMetaData(depthMD);

  //  nRetVal = depth.GetAlternativeViewPointCap().SetViewPoint(g_image);
  //nRetVal = depth.GetFrameSyncCap().FrameSyncWith(g_image);

  return nRetVal;
}
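The commented-out SetViewPoint call above is how depth-to-image registration would be enabled. As Example #12 below does, it is safer to check the capability first; a sketch:

if (depth.IsCapabilitySupported("AlternativeViewPoint"))
{
	nRetVal = depth.GetAlternativeViewPointCap().SetViewPoint(g_image);
	if (nRetVal != XN_STATUS_OK)
		printf("SetViewPoint failed: %s\n", xnGetStatusString(nRetVal));
}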
Example #4
//----------------------------------------------------
// Player initialization
//----------------------------------------------------
void playerInit(void){
	XnStatus rc;

	rc = g_context.Init();
	rc = g_context.OpenFileRecording(PLAYER_RECORDE_PATH, g_player);
	errorCheck(rc, "OpenFileRecording");		// error check

	rc = g_context.FindExistingNode(XN_NODE_TYPE_PLAYER, g_player);
	errorCheck(rc, "FindExistingNode_player");

	// Get the supported formats
	cout << "SupportedFormat:" <<
		g_player.GetSupportedFormat() << endl;

	// Get the playback speed
	cout << "PlaybackSpeed:" << g_player.GetPlaybackSpeed() << endl;
}
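errorCheck is not shown in this snippet. A minimal sketch of what such a helper presumably looks like (its exact behaviour, including whether it exits, is an assumption):

// Hypothetical helper matching the errorCheck(rc, label) calls above.
void errorCheck(XnStatus rc, const char* label)
{
	if (rc != XN_STATUS_OK)
	{
		cout << label << " failed: " << xnGetStatusString(rc) << endl;
		exit(1);
	}
}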
Example #5
//--------------------------------------------------------------
void testApp::setup(){
	
	XnStatus rc;
	
	EnumerationErrors errors;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, g_scriptNode, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return ;
	}
	else if (rc != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(rc));
		return;
	}
	
	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	if (rc != XN_STATUS_OK)
	{
		printf("No depth node exists! Check your XML.");
		return;
	}
	
	g_depth.GetMetaData(g_depthMD);
	
	// Texture map init: round the depth resolution up to the next multiple of 512 (GPU-friendly texture size)
	g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes()-1) / 512) + 1) * 512;
	g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes()-1) / 512) + 1) * 512;
	g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));
	std::cout << " w:" << g_depthMD.FullXRes() << " h:" << g_depthMD.FullYRes() << std::endl;
	pixels = (unsigned char*)malloc(640*480*3*sizeof(unsigned char));
	tex.allocate(640, 480, GL_RGB);
}
Example #6
XnStatus Init_Kinect(EventOutSFNode* skltn,EventOutSFNode* hnz,EventOutSFNode* flr){
	XnStatus rc=XN_STATUS_OK;
	EnumerationErrors errors;
	DepthMetaData g_depthMD;
	ImageMetaData g_imageMD;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	CHECK_RC(rc, "InitFromXml");
	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	CHECK_RC(rc,"XN_NODE_TYPE_DEPTH");
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	CHECK_RC(rc,"XN_NODE_TYPE_IMAGE");
	rc=  g_context.FindExistingNode(XN_NODE_TYPE_USER,g_user); 
	CHECK_RC(rc,"XN_NODE_TYPE_USER");
	rc=g_context.FindExistingNode(XN_NODE_TYPE_SCENE,g_scene);
	CHECK_RC(rc,"XN_NODE_TYPE_SCENE");
	rc=g_context.FindExistingNode(XN_NODE_TYPE_HANDS,g_hands);
	CHECK_RC(rc,"XN_NODE_TYPE_HANDS");
	rc=g_context.FindExistingNode(XN_NODE_TYPE_GESTURE,g_gesture);
	CHECK_RC(rc,"XN_NODE_TYPE_GESTURE");
	g_depth.GetMetaData(g_depthMD);
	g_fps=g_depthMD.FPS();
	g_image.GetMetaData(g_imageMD);
	rc=init_skeleton();
	CHECK_RC(rc,"INIT SKELETON");
	rc=init_hands();
	CHECK_RC(rc,"INIT HANDS");
	pix_w=g_depthMD.FullXRes();
	pix_h=g_depthMD.FullYRes();
	if(pix_h==0||pix_w==0){return XN_STATUS_ERROR;}
	g_skltn=skltn;
	g_hnz=hnz;
	g_flr=flr;
	if(NULL==g_skltn||NULL==g_hnz||NULL==g_flr)return XN_STATUS_ERROR;
	isInit=true;
	return rc;
}
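init_skeleton and init_hands are not shown. For init_hands, a hedged sketch of the usual pattern, registering hand callbacks on g_hands the way Example #17 below does (the callback names here are placeholders):

// Hypothetical init_hands: register hand-tracking callbacks on g_hands.
XnStatus init_hands()
{
	XnCallbackHandle hHandCallbacks;
	return g_hands.RegisterHandCallbacks(Hand_Create, Hand_Update,
	                                     Hand_Destroy, NULL, hHandCallbacks);
}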
Example #7
int main()
{
	XnStatus nRetVal = XN_STATUS_OK;

	Context context;
	ScriptNode scriptNode;
	EnumerationErrors errors;

	const char *fn = NULL;
	if	(fileExists(SAMPLE_XML_PATH)) fn = SAMPLE_XML_PATH;
	else if (fileExists(SAMPLE_XML_PATH_LOCAL)) fn = SAMPLE_XML_PATH_LOCAL;
	else {
		printf("Could not find '%s' nor '%s'. Aborting.\n" , SAMPLE_XML_PATH, SAMPLE_XML_PATH_LOCAL);
		return XN_STATUS_ERROR;
	}
	printf("Reading config from: '%s'\n", fn);
	nRetVal = context.InitFromXmlFile(fn, scriptNode, &errors);

	if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (nRetVal);
	}
	else if (nRetVal != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(nRetVal));
		return (nRetVal);
	}

	DepthGenerator depth;
	nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
	CHECK_RC(nRetVal, "Find depth generator");

	XnFPSData xnFPS;
	nRetVal = xnFPSInit(&xnFPS, 180);
	CHECK_RC(nRetVal, "FPS Init");

	DepthMetaData depthMD;

	while (!xnOSWasKeyboardHit())
	{
		nRetVal = context.WaitOneUpdateAll(depth);
		if (nRetVal != XN_STATUS_OK)
		{
			printf("UpdateData failed: %s\n", xnGetStatusString(nRetVal));
			continue;
		}

		xnFPSMarkFrame(&xnFPS);

		depth.GetMetaData(depthMD);

		printf("Frame %d Middle point is: %u. FPS: %f\n", depthMD.FrameID(), depthMD(depthMD.XRes() / 2, depthMD.YRes() / 2), xnFPSCalc(&xnFPS));
	}

	depth.Release();
	scriptNode.Release();
	context.Release();

	return 0;
}
Example #8
int main(int argc, char* argv[])
{
	XnStatus rc;

	EnumerationErrors errors;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (rc);
	}
	else if (rc != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(rc));
		return (rc);
	}

	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);

	g_depth.GetMetaData(g_depthMD);
	g_image.GetMetaData(g_imageMD);

	// Hybrid mode isn't supported in this sample
	if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes())
	{
		printf ("The device depth and image resolution must be equal!\n");
		return 1;
	}

	// RGB is the only image format supported.
	if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		printf("The device image format must be RGB24\n");
		return 1;
	}

	// Texture map init
	g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes()-1) / 512) + 1) * 512;
	g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes()-1) / 512) + 1) * 512;
	g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));

	// OpenGL init
	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
	glutInitWindowSize(GL_WIN_SIZE_X, GL_WIN_SIZE_Y);
	glutCreateWindow ("OpenNI Simple Viewer");
	glutFullScreen();
	glutSetCursor(GLUT_CURSOR_NONE);

	glutKeyboardFunc(glutKeyboard);
	glutDisplayFunc(glutDisplay);
	glutIdleFunc(glutIdle);

	glDisable(GL_DEPTH_TEST);
	glEnable(GL_TEXTURE_2D);

	// Per frame code is in glutDisplay
	glutMainLoop();

	return 0;
}
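glutDisplay, glutIdle and glutKeyboard are defined elsewhere in this sample. In the OpenNI viewers the idle handler is typically just a repaint request (a sketch):

// Minimal idle handler: ask GLUT to redraw, so glutDisplay runs every frame.
void glutIdle(void)
{
	glutPostRedisplay();
}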
Example #9
int main(int argc, char* argv[])
{
	XnStatus nRetVal = XN_STATUS_OK;
	nRetVal = xnLogInitFromXmlFile(SAMPLE_XML_PATH);
	if (nRetVal != XN_STATUS_OK)
	{
		printf("Log couldn't be opened: %s. Running without log", xnGetStatusString(nRetVal));
	}

	if (argc < 3)
	{
		printf("usage: %s <inputFile> <outputFile>\n", argv[0]);
		return -1;
	}

	const char* strInputFile = argv[1];
	const char* strOutputFile = argv[2];

	Context context;
	nRetVal = context.Init();
	CHECK_RC(nRetVal, "Init");

	// open input file
	Player player;
	nRetVal = context.OpenFileRecording(strInputFile, player);
	CHECK_RC(nRetVal, "Open input file");

	// Get depth node from recording
	DepthGenerator depth;
	nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
	CHECK_RC(nRetVal, "Find depth generator");

	// Create mock node based on depth node from recording
	MockDepthGenerator mockDepth;
	nRetVal = mockDepth.CreateBasedOn(depth);
	CHECK_RC(nRetVal, "Create mock depth node");

	// create recorder
	Recorder recorder;

	nRetVal = recorder.Create(context);
	CHECK_RC(nRetVal, "Create recorder");

	nRetVal = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, strOutputFile);
	CHECK_RC(nRetVal, "Set recorder destination file");

	// add depth node to recorder
	nRetVal = recorder.AddNodeToRecording(mockDepth);
	CHECK_RC(nRetVal, "Add node to recording");

	nRetVal = player.SetRepeat(FALSE);
	XN_IS_STATUS_OK(nRetVal);

	XnUInt32 nNumFrames = 0;

	nRetVal = player.GetNumFrames(depth.GetName(), nNumFrames);
	CHECK_RC(nRetVal, "Get player number of frames");

	DepthMetaData depthMD;

	while ((nRetVal = depth.WaitAndUpdateData()) != XN_STATUS_EOF)
	{
		CHECK_RC(nRetVal, "Read next frame");
		
		// Get depth meta data
		depth.GetMetaData(depthMD);

		//-----------------------------------------------//
		// Transform depth! This is the interesting part //
		//-----------------------------------------------//
		
		/* Enable the depth data to be modified. This is done implicitly by depthMD.WritableDepthMap(),
		   but we're calling it just to be clear. */
		nRetVal = depthMD.MakeDataWritable();
		CHECK_RC(nRetVal, "Make depth data writable");

		transformDepthMD(depthMD);

		// Pass the transformed data to the mock depth generator
		nRetVal = mockDepth.SetData(depthMD);
		CHECK_RC(nRetVal, "Set mock node new data");

		/* We need to call recorder.Record explicitly because we're not using WaitAndUpdateAll(). */
		nRetVal = recorder.Record();
		CHECK_RC(nRetVal, "Record");

		printf("Recorded: frame %u out of %u\r", depthMD.FrameID(), nNumFrames);
	}

	printf("\n");

	return 0;
}
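transformDepthMD is the part this sample leaves to the reader. A minimal sketch that inverts each valid pixel within the sensor's depth range (the actual transform is whatever your recording needs):

// Example transform: invert every valid depth value in place.
void transformDepthMD(DepthMetaData& depthMD)
{
	XnDepthPixel* pData = depthMD.WritableData();
	for (XnUInt32 i = 0; i < depthMD.XRes() * depthMD.YRes(); ++i)
	{
		if (pData[i] != 0)	// skip no-sample pixels
			pData[i] = (XnDepthPixel)(depthMD.ZRes() - pData[i]);	// mirror within the Z range
	}
}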
Example #10
//----------------------------------------------------
// Initialization of OpenNI-related objects
//----------------------------------------------------
void xnInit(void){
	XnStatus rc;

	EnumerationErrors errors;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT){
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		exit(1);
	}else if (rc != XN_STATUS_OK){
		printf("Open failed: %s\n", xnGetStatusString(rc));
		exit(1);
	}
	
	//playerInit();

	rc = xnFPSInit(&g_xnFPS, 180);	// initialize the FPS counter
	//CHECK_RC(rc, "FPS Init");

	// Create the depth, image, and user generators
	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	errorCheck(rc, "g_depth");		// エラーチェック
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	errorCheck(rc, "g_image");
	rc = g_context.FindExistingNode(XN_NODE_TYPE_USER, g_user);
	//rc = g_user.Create(g_context);
	errorCheck(rc, "g_user");

	// Check whether user detection (skeleton) is supported
	if (!g_user.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) {
		//throw std::runtime_error("User detection is not supported");
		cout << "User detection is not supported" << endl;
		exit(1);
	}

	// Recorder setup
	//rc = setRecorder(g_recorder, rc);

	// Register the user callbacks
	XnCallbackHandle userCallbacks;
	g_user.RegisterUserCallbacks(UserDetected, UserLost, NULL, userCallbacks);

	// Get the depth, image, and user data
	g_depth.GetMetaData(g_depthMD);
	g_image.GetMetaData(g_imageMD);
	g_user.GetUserPixels(0, g_sceneMD);

	// Hybrid mode isn't supported in this sample
	// Error if the image and depth resolutions differ
	if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes()){
		printf ("The device depth and image resolution must be equal!\n");
		exit(1);
	}

	// RGB is the only image format supported.
	// Check the image format
	if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24){
		printf("The device image format must be RGB24\n");
		exit(1);
	}

	// Texture map init
	// Adjust for the full-screen display size
	g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes() - 1) / 512) + 1) * 512;	// round up to a multiple of 512 (here 1024)
	g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes() - 1) / 512) + 1) * 512;	// 512
	g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));	// allocate colour storage for the whole screen

	// Initialize the coordinate buffers
	g_pPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D));			// buffer for coordinates
	g_pBackTex = (XnRGB24Pixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnRGB24Pixel));	// buffer for the background image
	g_pBackPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D));		// buffer for background coordinates
	g_pBackDepth = (XnDepthPixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnDepthPixel));		// buffer for background depth values
}
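UserDetected and UserLost are registered above but defined elsewhere. OpenNI user callbacks follow this signature (a sketch):

// User-event callbacks with the signature RegisterUserCallbacks expects.
void XN_CALLBACK_TYPE UserDetected(xn::UserGenerator& generator,
                                   XnUserID nId, void* pCookie)
{
	printf("User %u detected\n", nId);
}

void XN_CALLBACK_TYPE UserLost(xn::UserGenerator& generator,
                               XnUserID nId, void* pCookie)
{
	printf("User %u lost\n", nId);
}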
int main(int argc, char* argv[])
{

	EnumerationErrors errors;


    //rc = context.Init();
    rc = context.InitFromXmlFile(strPathToXML,&errors);
    if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (rc);
	}
	else if (rc != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(rc));
		return (rc);
	}
	
	/* UNCOMMENT TO GET FILE READING 
    //rc = context.OpenFileRecording(strInputFile);
	//CHECK_RC(rc, "Open input file");

	//rc = context.FindExistingNode(XN_NODE_TYPE_PLAYER, player);
	//CHECK_RC(rc, "Get player node"); */ 

	rc = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
	CHECK_RC(rc, "Find depth generator");

	rc = context.FindExistingNode(XN_NODE_TYPE_IMAGE, image);
	CHECK_RC(rc, "Find image generator");

    depth.GetMetaData(depthMD);
	image.GetMetaData(imageMD);

    //rc = player.SetRepeat(FALSE);
	XN_IS_STATUS_OK(rc);

    //rc = player.GetNumFrames(image.GetName(), nNumFrames);
	//CHECK_RC(rc, "Get player number of frames");
	//printf("%d\n",nNumFrames);

    //rc = player.GetNumFrames(depth.GetName(), nNumFrames);
	//CHECK_RC(rc, "Get player number of frames");
	//printf("%d\n",nNumFrames);

	// Hybrid mode isn't supported
	if (imageMD.FullXRes() != depthMD.FullXRes() || imageMD.FullYRes() != depthMD.FullYRes())
	{
		printf ("The device depth and image resolution must be equal!\n");
		return 1;
	}

	// RGB is the only image format supported.
	if (imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		printf("The device image format must be RGB24\n");
		return 1;
	}

    avi = cvCreateVideoWriter(strOutputFile, 0, 30, cvSize(640,480), TRUE);

    depthMetersMat = cvCreateMat(480, 640, CV_16UC1);
    kinectDepthImage = cvCreateImage( cvSize(640,480),16,1 );

    depthMetersMat2 = cvCreateMat(480, 640, CV_16UC1);
    kinectDepthImage2 = cvCreateImage( cvSize(640,480),16,1 );

    colorArr[0] = cv::Mat(imageMD.YRes(),imageMD.XRes(),CV_8U);
    colorArr[1] = cv::Mat(imageMD.YRes(),imageMD.XRes(),CV_8U);
    colorArr[2] = cv::Mat(imageMD.YRes(),imageMD.XRes(),CV_8U);

    //prepare_for_face_detection();

    int b;
    int g;
    int r;

	while ((rc = image.WaitAndUpdateData()) != XN_STATUS_EOF && (rc = depth.WaitAndUpdateData()) != XN_STATUS_EOF) {
        if (rc != XN_STATUS_OK) {
            printf("Read failed: %s\n", xnGetStatusString(rc));
            break;
        }
        depth.GetMetaData(depthMD);
        image.GetMetaData(imageMD);

        //XnUInt32 a;
        //a = g_imageMD.FPS;
        printf("%d\n",imageMD.FrameID());
        //a = g_depthMD.DataSize();
        //printf("%d\n",a);

        pDepth = depthMD.Data();
        pImageRow = imageMD.RGB24Data();

        for (unsigned int y=0; y<imageMD.YRes(); y++) {
            pPixel = pImageRow;
            uchar* Bptr = colorArr[0].ptr<uchar>(y);
            uchar* Gptr = colorArr[1].ptr<uchar>(y);
            uchar* Rptr = colorArr[2].ptr<uchar>(y);

            for(unsigned int x=0;x<imageMD.XRes();++x , ++pPixel){
                Bptr[x] = pPixel->nBlue;
                Gptr[x] = pPixel->nGreen;
                Rptr[x] = pPixel->nRed;

                depthMetersMat->data.s[y * XN_VGA_X_RES + x ] = 7*pDepth[y * XN_VGA_X_RES + x];
                depthMetersMat2->data.s[y * XN_VGA_X_RES + x ] = pDepth[y * XN_VGA_X_RES + x];
            }
            pImageRow += imageMD.XRes();
        }
        cv::merge(colorArr,3,colorImage);
        iplImage = colorImage;

        //cvThreshold(depthMetersMat2, depthMetersMat2, 150, 1500, THRESH_BINARY);

        cvGetImage(depthMetersMat,kinectDepthImage);
        cvGetImage(depthMetersMat2,kinectDepthImage2);

        depthImage = Bw2Image(kinectDepthImage2);
        printf("1. Middle pixel is %u millimeters away\n",depthImage[240][320]);

        rgbImage = RgbImage(&iplImage);

		// we only want to see depth up to 2000 mm
        int THRESH = 2000;

        for (unsigned int y=0; y<imageMD.YRes(); y++) {
            for(unsigned int x=0;x<imageMD.XRes();++x){
                if ( depthImage[y][x] >= THRESH ) {
                    depthImage[y][x] = 0;
                } else {
                    float tmp = depthImage[y][x];
                    tmp = tmp / THRESH * (65536)*(-1) + 65536;
                    depthImage[y][x] = (unsigned int)tmp;
                }
            }
        }
		
		// FILTER COLOURS IN HSV TO KEEP ONLY A SPECIFIC ONE,
		// THEN A FEW MORPHOLOGICAL OPERATIONS TO MAKE IT LOOK BETTER

        IplImage* imgHSV = cvCreateImage(cvGetSize(&iplImage), 8, 3);
        cvCvtColor(&iplImage, imgHSV, CV_BGR2HSV);
        imgThreshed = cvCreateImage(cvGetSize(&iplImage), 8, 1);
        //cvInRangeS(imgHSV, cvScalar(100, 60, 80), cvScalar(110, 255, 255), imgThreshed); // BLUE
        cvInRangeS(imgHSV, cvScalar(29, 95, 95), cvScalar(35, 255, 255), imgThreshed); // YELLOW
        //cvInRangeS(imgHSV, cvScalar(29, 60, 60), cvScalar(35, 255, 255), imgThreshed); // YELLOW DARK
        //cvInRangeS(imgHSV, cvScalar(150, 70, 70), cvScalar(160, 255, 255), imgThreshed); // PINK
        //cvInRangeS(imgHSV, cvScalar(40, 76, 76), cvScalar(70, 255, 255), imgThreshed); // GREEN
        IplConvKernel* kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);
        //cvDilate(imgThreshed,imgThreshed,kernel);
        //cvErode(imgThreshed,imgThreshed,kernel);
        Mat mat = Mat(imgThreshed);
        blur(Mat(imgThreshed),mat,cvSize(3,3));
        imgThreshed = &IplImage(mat);
        //cvInRangeS(imgThreshed,cvScalar(100),cvScalar(255),imgThreshed);
        //cvErode(imgThreshed,imgThreshed,kernel);
        cvDilate(imgThreshed,imgThreshed,kernel);
        cvDilate(imgThreshed,imgThreshed,kernel);
        cvErode(imgThreshed,imgThreshed,kernel);
        cvErode(imgThreshed,imgThreshed,kernel);
        mat = Mat(imgThreshed);
        blur(Mat(imgThreshed),mat,cvSize(6,6));
        imgThreshed = &IplImage(mat);
        cvInRangeS(imgThreshed,cvScalar(100),cvScalar(255),imgThreshed);
        cvReleaseImage(&imgHSV);
        BwImage threshed = BwImage(imgThreshed);



        if ( initialize == true ) {

            normalizeReferenceFace();
            int currentID = 0;

                for ( int y = 30; y<480; y++ ) {
                    for ( int x = 30; x<640; x++ ) {
                        bool g2g = true;
                        //printf("%d %d %d\n",ID, y,x);
                        if ( threshed[y][x]!=0 ) {
                            for ( int ID2 = 0; ID2<nbOfPoints; ID2++) {
                                if ( (abs(markers[ID2].y-y)<proximityLimit) && (abs(markers[ID2].x-x)<proximityLimit)) {
                                    g2g = false;
                                }
                            }
                            if (currentID >= nbOfPoints || g2g == false ) {
                                break;
                            }
                            markers[currentID].y=y;
                            markers[currentID].x=x;
                            currentID++;
                            printf("WHITE PIXEL INITIALIZED %d: %d %d\n",currentID, x,y);
                        }
                    }
                }


            if (isDebugConf==true || currentID == nbOfMarkers) {
                printf("%d PIXELS INITIALIZED\n", currentID);
                initialize = false;
                //printf("%d,%d\n", currentID, nbOfPoints);
                //return 0;
            } else {
                printf("WAITING FOR %d PIXELS TO APPEAR, %d SO FAR \n",nbOfMarkers, currentID);
                continue;
            }


            // FIND TOP RIGHT AND CHIN PIXEL

            int refPixID = 0;
            int chinPixID = 0;

            for ( int i = 0; i < nbOfMarkers; i++) {
                if ( (markers[i].x + markers[i].y)*(markers[i].x + markers[i].y) < (markers[refPixID].x + markers[refPixID].y)* (markers[refPixID].x + markers[refPixID].y)) {
                    refPixID = i;
                }
                if (markers[i].y > markers[chinPixID].y) {
                    chinPixID = i;
                }
            }

            float width = (markers[1].x-markers[0].x)*2;
            float heigth = abs(markers[1].y-markers[0].y);

            // WE GOT WIDTH & HEIGHT OF THE FACE, LET'S ADJUST THE POINTS

            // SET 0 to REF, SET 1 to CHIN

            MyPoint tmp = MyPoint(markers[refPixID].x,markers[refPixID].y);
            markers[refPixID].x = markers[0].x;
            markers[refPixID].y = markers[0].y;
            markers[0].x = tmp.x;
            markers[0].y = tmp.y;

            tmp = MyPoint(markers[chinPixID].x,markers[chinPixID].y);
            markers[chinPixID].x = markers[1].x;
            markers[chinPixID].y = markers[1].y;
            markers[1].x = tmp.x;
            markers[1].y = tmp.y;


            // REST OF THE POINTS

            for ( int i = 2; i < nbOfPoints; i++) {

                int cost = 0;
                int lowestCost = INT_MAX;
                int closestPixID = -1;


                for ( int j = 2; j < nbOfMarkers; j++ ) {
                    cost = (markers[j].x-points[i].x*width)*(markers[j].x-points[i].x*width) + (markers[j].y-points[i].y*heigth)*(markers[j].y-points[i].y*heigth);
                    if ( cost < lowestCost ) {
                        lowestCost = cost;
                        closestPixID = j;
                    }
                    if (closestPixID == -1) {
                        //printf("COS JEST SPORO NIE W PORZADKU, CHECK HERE\n");
                        break;
                    }
                    tmp.x = markers[i].x;
                    tmp.y = markers[i].y;
                    markers[i].x=markers[closestPixID].x;
                    markers[i].y=markers[closestPixID].y;
                    markers[closestPixID].x = tmp.x;
                    markers[closestPixID].y = tmp.y;
                }
            }
        }

        for ( int currentPixelID = 0; currentPixelID < nbOfMarkers; currentPixelID++) {
            if (markers[currentPixelID].x == 0) {
                continue;
            }

            if ( threshed[markers[currentPixelID].y][markers[currentPixelID].x] < 128 ) {
                printf("PIXEL %d LOST\n",currentPixelID);

                for ( int neighbSize = 2; neighbSize < maxNeighbSize; neighbSize = neighbSize + 2 ) {

                    int x1 = markers[currentPixelID].x - neighbSize/2;
                    if ( x1 < intoDepthX(0) ) {
                        x1 = (int)intoDepthX(0);
                    }

                    int y1 = (int)(markers[currentPixelID].y-neighbSize/2);
                    if (  y1 < intoDepthY(0) ) {
                        y1 = intoDepthY(0);
                    }

                    int y2 = markers[currentPixelID].y+neighbSize/2;
                    if (  y2 > intoDepthY(480)  ) {
                        y2 = intoDepthY(480);
                    }

                    int x2 = markers[currentPixelID].x+neighbSize/2;
                    if ( x2 > intoDepthX(640) ) {
                        x2 = intoDepthX(640);
                    }

                    bool found = false;
                    for ( int y = y1; y < y2; y++) {
                        for ( int x = x1; x < x2; x++) {
                            bool g2g = true;
                            if (threshed[y][x] > 128) {
                                for ( int ID2 = 0; ID2<nbOfMarkers; ID2++) {
                                    if ( currentPixelID == ID2 )
                                        continue;
                                    if ( (abs(markers[ID2].y-y)<proximityLimit) && (abs(markers[ID2].x-x)<proximityLimit)) {
                                        g2g = false;
                                        break;
                                    }
                                }

                                if ( g2g ) {
                                    markers[currentPixelID].x = x;
                                    markers[currentPixelID].y = y;
                                    found = true;
                                    printf("Pixel %d, FOUND\n",currentPixelID);
                                    break;
                                }
                            }
                        }
                        if (found == true ) {
                            break;
                        }
                    }
                    if (found == true ) {
                        break;
                    }
                }
            }

            paintMarkerOnBoth(markers[currentPixelID]);

        }
        faceImage = cvCreateImage(cvGetSize(&iplImage), 8, 1);
        paintFace();

		// normal kinect depth
        cvShowImage("Depth_Kinect", kinectDepthImage);
		// depth within 80 - 200 mm, normalized 
        cvShowImage("Depth_Kinect_2", kinectDepthImage2);
		// rgb with tracking points
        cvShowImage("RGB_Kinect", &iplImage);
		// colour detector 
        cvShowImage("RGB_Threshed", imgThreshed);
		// attempt to draw a face 
        cvShowImage("Face Image", faceImage);

        cvWaitKey(50);           // wait 50 ms

        if ( avi == NULL) {
            printf ("dupa%d \n",1);
        }
        //cvWriteFrame (avi, &iplImage);
	}

//    cvReleaseImageHeader(kinectDepthImage);
    cvReleaseVideoWriter(&avi);
//    cvReleaseHaarClassifierCascade( &cascade );
    context.Shutdown();

	return 0;
}
Example #12
int Init()
{
	
	XnStatus rc;

	//Make sure our image types are the same as the OpenNI image types.
	assert(sizeof(XnRGB24Pixel) == sizeof(ColorPixel));
	assert(sizeof(XnDepthPixel) == sizeof(DepthPixel));
	assert(sizeof(XnStatus) == sizeof(int));

	// Load OpenNI xml settings
	char filePath[255];
	int length = Util::Helpers::GetExeDirectory(filePath, sizeof(filePath));
	filePath[length] = '\\';
	strcpy(&filePath[length+1], SAMPLE_XML_PATH);

	EnumerationErrors errors;
	rc = deviceContext.InitFromXmlFile(filePath, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		//One reason would be if Microsoft SDK is installed beside PrimeSense.  Device manager should say PrimeSense instead of Microsoft Kinect.
		
		//XnChar strError[1024];
		//errors.ToString(strError, 1024);
		//LOGE("%s\n", strError);
		return -1;
	}
	else if (rc != XN_STATUS_OK)
	{
		fprintf(stderr, "%s\n", xnGetStatusString(rc));
		/*LOGE("Open failed: %s\n", xnGetStatusString(rc));*/
		return (rc);
	}

	// Retrieve colour and depth nodes
	rc = deviceContext.FindExistingNode(XN_NODE_TYPE_IMAGE, colorImageGenerator);
	rc = deviceContext.FindExistingNode(XN_NODE_TYPE_DEPTH, depthImageGenerator);

	// Set mirror mode to off
	SetMirrorMode(false);

	// Get a frame to perform checks on it
	ImageMetaData colorImageMetaData;
	DepthMetaData depthImageMetaData;
	depthImageGenerator.GetMetaData(depthImageMetaData);
	colorImageGenerator.GetMetaData(colorImageMetaData);

	// Hybrid mode isn't supported in this sample
	if (colorImageMetaData.FullXRes() != depthImageMetaData.FullXRes() || colorImageMetaData.FullYRes() != depthImageMetaData.FullYRes())
	{
		/*LOGE("The device depth and image resolution must be equal!\n");*/
		return 1;
	}

	// RGB is the only image format supported.
	if (colorImageMetaData.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		/*LOGE("The device image format must be RGB24\n");*/
		return 1;
	}
	
	// Need to make sure the automatic alignment of colour and depth images is supported.
	XnBool isSupported = depthImageGenerator.IsCapabilitySupported("AlternativeViewPoint");
	if(!isSupported)
	{
		/*LOGE("Cannot set AlternativeViewPoint!\n");*/
		return 1;
	}

	
	// Set it to VGA maps at 30 FPS
	/*XnMapOutputMode mapMode;
	mapMode.nXRes = XN_VGA_X_RES;
	mapMode.nYRes = XN_VGA_Y_RES;
	mapMode.nFPS = 60;
	rc = g_depth.SetMapOutputMode(mapMode);
	if(rc)
	{
		LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));
		return 1;
	}
	mapMode.nFPS = 30;
	rc = g_image.SetMapOutputMode(mapMode);
	if(rc)
	{
		LOGE("Failed to set image map mode: %s\n", xnGetStatusString(rc));
		return 1;
	}*/


	// Set automatic alignment of the colour and depth images.
	rc = depthImageGenerator.GetAlternativeViewPointCap().SetViewPoint(colorImageGenerator);
	if(rc)
	{
		/*LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));*/
		return 1;
	}


	return XN_STATUS_OK;
}
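SetMirrorMode is a local helper that is not shown. A sketch of what it presumably does, using the same mirror capability that Example #13 below calls directly:

// Hypothetical helper: set mirroring on both generators, if supported.
void SetMirrorMode(bool mirror)
{
	if (depthImageGenerator.IsCapabilitySupported(XN_CAPABILITY_MIRROR))
		depthImageGenerator.GetMirrorCap().SetMirror(mirror);
	if (colorImageGenerator.IsCapabilitySupported(XN_CAPABILITY_MIRROR))
		colorImageGenerator.GetMirrorCap().SetMirror(mirror);
}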
Example #13
//////////////////// Entry point //////////////////// 
int main(int argc, char* argv[]) 
{
	depthmask_for_mesh = cvCreateImage(MESH_SIZE, IPL_DEPTH_8U, 1);
	markerSize.width = -1; 
	markerSize.height = -1;

  //init OpenNI
  EnumerationErrors errors;
	switch (XnStatus rc = niContext.InitFromXmlFile(KINECT_CONFIG_FILENAME, &errors)) {
		case XN_STATUS_OK:
			break;
		case XN_STATUS_NO_NODE_PRESENT:
			XnChar strError[1024];	errors.ToString(strError, 1024);
			printf("%s\n", strError);
			return rc;
		default:
			printf("Open failed: %s\n", xnGetStatusString(rc));
			return rc;
	}

  //set camera parameter
  capture = new Camera(0, CAPTURE_SIZE, CAMERA_PARAMS_FILENAME);
	RegistrationParams = scaleParams(capture->getParameters(), double(REGISTRATION_SIZE.width)/double(CAPTURE_SIZE.width));

  //init parameter for rendering
  osg_init(calcProjection(RegistrationParams, capture->getDistortion(), REGISTRATION_SIZE));

  //for Kinect view
  loadKinectParams(KINECT_PARAMS_FILENAME, &kinectParams, &kinectDistort);
	kinectDistort =0;
	kinectParams->data.db[2]=320.0; 
	kinectParams->data.db[5]=240.0;

	//setting kinect context
	niContext.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	niContext.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	g_depth.GetMirrorCap().SetMirror(false);
	g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);

	//registration
	kinectReg = new RegistrationOPIRA(new OCVSurf());
	kinectReg->addResizedMarker(MARKER_FILENAME, 400);

	//physics
	m_world = new bt_ARMM_world();
	ground_grid = new float[GRID_SIZE];
	for (int i =0;i < GRID_SIZE; i++) {
		ground_grid[i] = 0; 
	}
#ifdef SIM_PARTICLES
	voxel_grid = new float[1200];
	for (int i =0;i < 1200; i++) {
		voxel_grid[i] = 0;
	}
#endif

	//controls
	KeyboardController *kc = new KeyboardController(m_world);
	XboxController *xc = new XboxController(m_world);

	loadKinectTransform(KINECT_TRANSFORM_FILENAME);

#ifdef USE_ARMM_VRPN
	//----->Server part
	m_Connection = new vrpn_Connection_IP();
	ARMM_server = new ARMM_Communicator(m_Connection	);

	//Open the imager server and set up channel zero to send our data.
	//if ( (ARMM_img_server = new vrpn_Imager_Server("ARMM_Image", m_Connection, MESH_SIZE.width, MESH_SIZE.height)) == NULL) {
	//	fprintf(stderr, "Could not open imager server\n");
	//	return -1;
	//}
	//if ( (channel_id = ARMM_img_server->add_channel("Grid")) == -1) {
	//	fprintf(stderr, "Could not add channel\n");
	//	return -1;
	//}
	ARMM_server->SetObjectsData(&(m_world->Objects_Body));
	ARMM_server->SetHandsData(&(m_world->HandObjectsArray));

  cout << "Created VRPN server." << endl;
	//<-----
#ifdef USE_ARMM_VRPN_RECEIVER 	//----->Receiver part
	ARMM_sever_receiver = new vrpn_Tracker_Remote (ARMM_CLIENT_IP);
	ARMM_sever_receiver->register_change_handler(NULL, handle_object);
#endif 	//<----- 

#endif

#ifdef USE_SKIN_SEGMENTATION	//Skin color look up
	_HandRegion.LoadSkinColorProbTable();
#endif

#ifdef USE_OPTICAL_FLOW
	prev_gray = cvCreateImage(cvSize(OPFLOW_SIZE.width, OPFLOW_SIZE.height), IPL_DEPTH_8U, 1);
	curr_gray = cvCreateImage(cvSize(OPFLOW_SIZE.width, OPFLOW_SIZE.height), IPL_DEPTH_8U, 1);
	flow_capture = new FlowCapture();
	flow_capture->Init();
#endif

/////////////////////////////////////////////Main Loop////////////////////////////////////////////////
	while (running) {
    //start kinect
		XnStatus rc = niContext.WaitAnyUpdateAll();
		if (rc != XN_STATUS_OK) {
			printf("Read failed: %s\n", xnGetStatusString(rc));
			return rc;
		}

    //get image and depth data from Kinect
		g_depth.GetMetaData(niDepthMD);
		g_image.GetMetaData(niImageMD);

		colourIm = cvCreateImage(cvSize(niImageMD.XRes(), niImageMD.YRes()), IPL_DEPTH_8U, 3);
		memcpy(colourIm->imageData, niImageMD.Data(), colourIm->imageSize); cvCvtColor(colourIm, colourIm, CV_RGB2BGR);
		cvFlip(colourIm, colourIm, 1);

		depthIm = cvCreateImage(cvSize(niDepthMD.XRes(), niDepthMD.YRes()), IPL_DEPTH_16U, 1);
		transDepth160 = cvCreateImage(cvSize(MESH_SIZE.width, MESH_SIZE.height), IPL_DEPTH_32F, 1);
		transDepth320 = cvCreateImage(cvSize(SKIN_SEGM_SIZE.width, SKIN_SEGM_SIZE.height), IPL_DEPTH_32F, 1);
		transColor320 = cvCreateImage(cvSize(SKIN_SEGM_SIZE.width, SKIN_SEGM_SIZE.height), IPL_DEPTH_8U, 3);
		memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);	
		//cvCircle(colourIm, cvPoint(marker_origin.x,marker_origin.y), 5, CV_BLUE, 3);
		cvShowImage("Kinect View", colourIm);
		IplImage *arImage = capture->getFrame();
		cvWaitKey(1); 

		//check input device 
		input_key = kc->check_input(); 
#ifdef USE_ARMM_VRPN_RECEIVER
		if( pass_key != 0){
			kc->check_input(pass_key);
			pass_key = 0;
		}
#endif
		xc->check_input();

		if(kinectTransform) { // kinect transform as cvmat* for use
			if( counter >= SIM_FREQUENCY) {
#ifdef UPDATE_TRIMESH
				inpaintDepth(&niDepthMD, true); 
				memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);				
				TransformImage(depthIm, transDepth160, MARKER_DEPTH, MESH_SIZE, true);
				GenerateTrimeshGroundFromDepth(transDepth160, MARKER_DEPTH); /*Trimesh generation*/
				m_world->updateTrimeshRefitTree(ground_grid);//opencl?
				osg_UpdateHeightfieldTrimesh(ground_grid);//opencl?
#endif

#ifdef SIM_PARTICLES
/*World spheres simulation*/
//				GenerateVoxelFromDepth(depthIm, MARKER_DEPTH);
//				m_world->updateWorldSphereTransform(voxel_grid);
//				osgUpdateWorldSphereTransform(voxel_grid);
#endif
				counter = 0;
			} else {
#ifdef USE_SKIN_SEGMENTATION /*Skin color segmentation*/ // may be reduce resolution first as well as cut off depth make processing faster
				// (2)Sphere representation
				FindHands(depthIm, colourIm);
				UpdateAllHands();
#endif

#ifdef USE_PARTICLES
//XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif
				counter++;
			}
			//do hand pose recognition
			m_world->Update();
			//(B)normal client only rendering
			RenderScene(arImage, capture);
		}
//		TickCountAverageEnd();
#ifdef USE_ARMM_VRPN
	//Send Car position+orientation			
	ARMM_server->mainloop();
#ifdef USE_ARMM_VRPN_RECEIVER
	ARMM_sever_receiver->mainloop();
#endif
	////Copy depth info
	//for (int i = 0; i < GRID_SIZE;i++) {
	//	ARMM_img_buffer[i] = ground_grid[i];
	//}

	//Send depth grid info	
	//ARMM_img_server->send_begin_frame(0, MESH_SIZE.width-1, 0, MESH_SIZE.height-1);
 //   ARMM_img_server->mainloop();
 //   int nRowsPerRegion= ((int) vrpn_IMAGER_MAX_REGIONf32)/ MESH_SIZE.width;
 //   for(int y=0; y<MESH_SIZE.height; y+=nRowsPerRegion) {
 //     ARMM_img_server->send_region_using_base_pointer(channel_id,0,MESH_SIZE.width-1,y,min(MESH_SIZE.width,y+nRowsPerRegion)-1, ARMM_img_buffer, 1, MESH_SIZE.width, MESH_SIZE.height);
 //     ARMM_img_server->mainloop();
 //   }
 //   ARMM_img_server->send_end_frame(0, MESH_SIZE.width-1, 0, MESH_SIZE.height-1);
 //   ARMM_img_server->mainloop();
	//Exec data transmission
	m_Connection->mainloop();
#endif

#ifdef USE_OPTICAL_FLOW
		if(!RunOnce) RunOnce = true;
		cvCopyImage(curr_gray, prev_gray);
#endif

		cvReleaseImage(&arImage);
		cvReleaseImage(&depthIm); 
		cvReleaseImage(&colourIm);
		cvReleaseImage(&transDepth160);
#ifdef USE_SKIN_SEGMENTATION
		cvReleaseImage(&transDepth320);
		cvReleaseImage(&transColor320);
#endif
	}
#ifdef USE_OPTICAL_FLOW
	cvReleaseImage(&prev_gray); cvReleaseImage(&curr_gray);
#endif

	//memory release
	osg_uninit();
	delete m_world;
	delete kinectReg;
	cvReleaseMat(&RegistrationParams);
	delete kc;
	delete xc;

	return 0;
}
int main(int argc, char* argv[])
{
    XnStatus nRetVal = XN_STATUS_OK;
    nRetVal = xnLogInitFromXmlFile(SAMPLE_XML_PATH);
    if (nRetVal != XN_STATUS_OK)
    {
        printf("Log couldn't be opened: %s. Running without log", xnGetStatusString(nRetVal));
    }
    if (argc < 3)
    {
        printf("usage: %s <inputFile> <outputFile>\n", argv[0]);
        return -1;
    }
    const char* strInputFile = argv[1];
    const char* strOutputFile = argv[2];
    Context context;
    nRetVal = context.Init();
    CHECK_RC(nRetVal, "Init");
    // open input file
    Player player;
    nRetVal = context.OpenFileRecording("/media/6B58CB581C0AACF6/7.oni", player);
    CHECK_RC(nRetVal, "Open input file");
    // Get depth node from recording
    DepthGenerator depth;
    nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
    CHECK_RC(nRetVal, "Find depth generator");
    // Create mock node based on depth node from recording
    MockDepthGenerator mockDepth;
    nRetVal = mockDepth.CreateBasedOn(depth);
    CHECK_RC(nRetVal, "Create mock depth node");


    ImageGenerator image;
    nRetVal = context.FindExistingNode(XN_NODE_TYPE_IMAGE, image);
    CHECK_RC(nRetVal, "Find depth generator");
    // Create mock node based on depth node from recording
    MockImageGenerator mockImage;
    nRetVal = mockImage.CreateBasedOn(image);
    CHECK_RC(nRetVal, "Create mock depth node");
    // create recorder
    Recorder recorder;
    nRetVal = recorder.Create(context);
    CHECK_RC(nRetVal, "Create recorder");
    nRetVal = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, "/home/shaghayegh/up.oni");
    CHECK_RC(nRetVal, "Set recorder destination file");
    // add depth node to recorder
    nRetVal = recorder.AddNodeToRecording(mockDepth);
    CHECK_RC(nRetVal, "Add node to recording");
    // nRetVal = recorder.AddNodeToRecording(mockImage);
    // CHECK_RC(nRetVal, "Add node to recording");

    nRetVal = player.SetRepeat(FALSE);
    XN_IS_STATUS_OK(nRetVal);
    XnUInt32 nNumFrames = 0;
    nRetVal = player.GetNumFrames(depth.GetName(), nNumFrames);
    CHECK_RC(nRetVal, "Get player number of frames");
    DepthMetaData depthMD;
    ImageMetaData imageMD;
    int frameNum = 0;
    String path = "/media/6B58CB581C0AACF6/ebook/Articles/activity_recognition/data1/0512164529/";
    while ((nRetVal = depth.WaitAndUpdateData()) != XN_STATUS_EOF)
    {
        ++frameNum;
        CHECK_RC(nRetVal, "Read next frame");
        // Get depth meta data
        depth.GetMetaData(depthMD);
        image.GetMetaData(imageMD);

        //-----------------------------------------------//
        // Transform depth! This is the interesting part //
        //-----------------------------------------------//
        /* Enable the depth data to be modified. This is done implicitly by depthMD.WritableDepthMap(),
but we're calling it just to be clear. */
        nRetVal = depthMD.MakeDataWritable();
        CHECK_RC(nRetVal, "Make depth data writable");

        // nRetVal = imageMD.MakeDataWritable();
        // CHECK_RC(nRetVal, "Make depth data writable");

        String ficheroActualRGB;
        // ficheroActualRGB = path  +"RGB_" + boost::to_string(frameNum) + ".png";
        String ficheroActualDepth = path +"Depth_"+ boost::to_string(frameNum) + ".png";

        // Mat matFrameImage = imread(ficheroActualRGB, 1);
        // resize(matFrameImage, matFrameImage, Size(640, 480), 0, 0, INTER_CUBIC);
        Mat matFrameDepth = imread(ficheroActualDepth,1);
        resize(matFrameDepth, matFrameDepth, Size(480, 640), 0, 0, INTER_CUBIC);

        transformDepthMD(matFrameDepth,depthMD);
        // transformImageMD(matFrameImage,imageMD);
        // Pass the transformed data to the mock depth generator
        nRetVal = mockDepth.SetData(depthMD);
        CHECK_RC(nRetVal, "Set mock node new data");

        // nRetVal = mockImage.SetData(imageMD);
        // CHECK_RC(nRetVal, "Set mock node new data");

        /* We need to call recorder.Record explicitly because we're not using WaitAndUpdateAll(). */
        nRetVal = recorder.Record();
        CHECK_RC(nRetVal, "Record");
        printf("Recorded: frame %u out of %u\r", depthMD.FrameID(), nNumFrames);
    }
    printf("\n");
    return 0;
}
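This variant of transformDepthMD takes an OpenCV Mat and is also not shown. A heavily hedged sketch of one plausible bridge, assuming the PNG holds depth in a single 8-bit channel and the Mat has been resized to the frame resolution (the scale factor is a placeholder, not a calibrated value):

// Hypothetical: copy one channel of an 8-bit BGR Mat into the writable depth map.
void transformDepthMD(const cv::Mat& frame, xn::DepthMetaData& depthMD)
{
	XnDepthPixel* pData = depthMD.WritableData();
	for (XnUInt32 y = 0; y < depthMD.YRes(); ++y)
		for (XnUInt32 x = 0; x < depthMD.XRes(); ++x)
			pData[y * depthMD.XRes() + x] =
				(XnDepthPixel)(frame.at<cv::Vec3b>(y, x)[0]) * 16;	// placeholder scale
}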
/**
 * Initialize XN functions
 */
void initialize() 
{
    ImageMetaData imageMD;
    XnStatus status;
    int dummy;
    
    srand ( time(NULL) );

    // Initializing context and checking for enumeration errors
    status = g_Context.InitFromXmlFile(XML_CONFIG_FILE, &g_Error);
    checkEnumError(status, g_Error);

    // Finding nodes and checking for errors
    STATUS_CHECK(g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator), "Finding depth node");
    STATUS_CHECK(g_Context.FindExistingNode(XN_NODE_TYPE_SCENE, g_SceneAnalyzer), "Finding scene analizer");


    //  Note: when the image generation node is handled the program gets
    //  too slow.

    // STATUS_CHECK(g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator), "Finding image node");

    // Set view point of Depth generator to the image generator point of
    // view.
    // STATUS_CHECK(g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator), "Set View Point");

    STATUS_CHECK(g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator), "Finding user node");
    
    //g_ImageGenerator.GetMetaData(imageMD);
    
    // Checking camera pixel format
    //if (imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24) {
    //    reportError("Camera format not supported...!\n");
    //}

    // Checking user generator capabilities
    if(!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) {
        reportError("Skeleton capability not supported\n");
    }
    
    if(!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)) {
        reportError("Pose detection capability not supported\n");
    }

    printf("Number of players: ");
    dummy = scanf("%d", &g_MaxPlayers);
    printf("\n");

    //Initialize user detector object
    g_UserDetector = UserDetector(g_UserGenerator, g_DepthGenerator);
    g_UserDetector.registerCallbacks();
    
    g_ZamusDetector = new Zamus(&g_UserDetector);
    g_LinqDetector = new Linq(&g_UserDetector);
    g_BusterDetector = new BusterDetector(g_ZamusDetector, &g_UserDetector);
    g_IceRodDetector = new IceRodDetector(g_LinqDetector, &g_UserDetector);

    // Initialize image render object
    g_SceneRenderer = SceneRenderer(&g_ImageGenerator,
                                    &g_DepthGenerator,
                                    &g_SceneAnalyzer,
                                    &g_UserDetector,
                                    g_ZamusDetector,
                                    g_LinqDetector);

    STATUS_CHECK(g_Context.StartGeneratingAll(), "Context generation");

    g_SFBgame = SuperFiremanBrothers(&g_UserDetector, 
                                     &g_SceneAnalyzer, 
                                     g_ZamusDetector, 
                                     g_LinqDetector, 
                                     g_MaxPlayers
                                    );

}
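STATUS_CHECK is this project's own wrapper rather than an OpenNI macro. Judging by how it is called (with the call itself as the first argument), something along these lines (a sketch; the real version may use reportError instead of exiting):

// Hypothetical STATUS_CHECK: run the call once, report and bail out on failure.
#define STATUS_CHECK(call, what)                                       \
	{                                                                  \
		XnStatus __rc = (call);                                        \
		if (__rc != XN_STATUS_OK)                                      \
		{                                                              \
			printf("%s failed: %s\n", what, xnGetStatusString(__rc));  \
			exit(1);                                                   \
		}                                                              \
	}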
int main(int argc, char* argv[])
{
	XnStatus nRetVal = XN_STATUS_OK;

	if (argc < 3)
	{
		printUsage(argv[0]);
		return -1;
	}

	const char* strInputFile = argv[1];
	const char* strOutputFile = argv[2];

	// options
	const XnChar* astrNodeNames[MAX_NODES_COUNT];
	XnUInt32 nNodeNames = 0;
	XnProductionNodeType aNodeTypes[MAX_NODES_COUNT];
	XnUInt32 nNodeTypes = 0;
	const XnChar* strPrimaryNode = NULL;
	XnProductionNodeType primaryNodeType = XN_NODE_TYPE_INVALID;
	XnUInt32 nStartFrame = 0;
	XnUInt32 nEndFrame = XN_MAX_UINT32;

	//-----------------------------------------------------------------------
	// Parsing Options
	//-----------------------------------------------------------------------
	for (int i = 3; i < argc; ++i)
	{
		// look for '='
		char* equalSign = strchr(argv[i], '=');
		if (equalSign == NULL)
		{
			printUsage(argv[0]);
			return -1;
		}

		*equalSign = '\0';

		char* option = argv[i];
		char* optionArg = equalSign + 1;

		// now check which option is that
		if (strcmp(option, "--nodes") == 0)
		{
			for (;;)
			{
				char* commaPos = strchr(optionArg, ',');
				if (commaPos != NULL)
				{
					*commaPos = '\0';
				}

				if (strlen(optionArg) == 0)
				{
					printUsage(argv[0]);
					return -1;
				}

				astrNodeNames[nNodeNames++] = optionArg;

				if (commaPos == NULL)
				{
					break;
				}
				else
				{
					optionArg = commaPos + 1;
				}
			}
		}
		else if (strcmp(option, "--types") == 0)
		{
			for (;;)
			{
				char* commaPos = strchr(optionArg, ',');
				if (commaPos != NULL)
				{
					*commaPos = '\0';
				}

				nRetVal = xnProductionNodeTypeFromString(optionArg, &aNodeTypes[nNodeTypes]);
				if (nRetVal != XN_STATUS_OK)
				{
					printf("%s is not a valid node type!\n", optionArg);
					return -1;
				}

				++nNodeTypes;

				if (commaPos == NULL)
				{
					break;
				}
				else
				{
					optionArg = commaPos + 1;
				}
			}
		}
		else if (strcmp(option, "--primary-node") == 0)
		{
			strPrimaryNode = optionArg;
		}
		else if (strcmp(option, "--primary-node-type") == 0)
		{
			nRetVal = xnProductionNodeTypeFromString(optionArg, &primaryNodeType);
			if (nRetVal != XN_STATUS_OK)
			{
				printf("%s is not a valid node type!\n", optionArg);
				return -1;
			}
		}
		else if (strcmp(option, "--start-frame") == 0)
		{
			nStartFrame = atoi(optionArg);
		}
		else if (strcmp(option, "--end-frame") == 0)
		{
			nEndFrame = atoi(optionArg);
		}
		else
		{
			printUsage(argv[0]);
			return -1;
		}
	}

	// validate options
	if (nNodeNames > 0 && nNodeTypes > 0)
	{
		printf("Cannot use --nodes and --types together.\n");
		return -1;
	}

	if (primaryNodeType != XN_NODE_TYPE_INVALID && strPrimaryNode != NULL)
	{
		printf("Cannot use --primary-node and --primary-node-type together.\n");
		return -1;
	}

	// start and end requires primary node
	if ((nStartFrame != 0 || nEndFrame != XN_MAX_UINT32) && 
		(strPrimaryNode == NULL && primaryNodeType == XN_NODE_TYPE_INVALID))
	{
		printf("A primary node must be defined for using --start-frame or --end-frame.\n");
		return -1;
	}

	//-----------------------------------------------------------------------
	// Execute
	//-----------------------------------------------------------------------
	Context context;
	nRetVal = context.Init();
	CHECK_RC(nRetVal, "Init");

	// open input file
	Player player;
	nRetVal = context.OpenFileRecording(strInputFile, player);
	CHECK_RC(nRetVal, "Open input file");

	// play as fast as you can
	nRetVal = player.SetPlaybackSpeed(XN_PLAYBACK_SPEED_FASTEST);
	CHECK_RC(nRetVal, "Setting playback speed");

	// don't rewind recording
	nRetVal = player.SetRepeat(FALSE);
	XN_IS_STATUS_OK(nRetVal);

	// get the list of all created nodes
	NodeInfoList nodes;
	nRetVal = player.EnumerateNodes(nodes);
	CHECK_RC(nRetVal, "Enumerate nodes");

	// first of all, find primary node
	ProductionNode primaryNode;
	if (primaryNodeType != XN_NODE_TYPE_INVALID)
	{
		nRetVal = context.FindExistingNode(primaryNodeType, primaryNode);
		if (nRetVal != XN_STATUS_OK)
		{
			printf("Input file does not contain any node of type %s\n", xnProductionNodeTypeToString(primaryNodeType));
			return -1;
		}
	}
	else if (strPrimaryNode != NULL)
	{
		nRetVal = context.GetProductionNodeByName(strPrimaryNode, primaryNode);
		if (nRetVal != XN_STATUS_OK)
		{
			printf("Input file does not contain any node named %s\n", strPrimaryNode);
			return -1;
		}
	}

	XnUInt32 nTotalFrames = 0;

	// first seek to end frame (to calculate total amount of work)
	if (nEndFrame != XN_MAX_UINT32)
	{
		nRetVal = player.SeekToFrame(primaryNode.GetName(), nEndFrame, XN_PLAYER_SEEK_SET);
		CHECK_RC(nRetVal, "Seeking to end frame");

		for (NodeInfoList::Iterator it = nodes.Begin(); it != nodes.End(); ++it)
		{
			NodeInfo nodeInfo = *it;
			XnUInt32 nNodeFrames = 0;
			nRetVal = player.TellFrame(nodeInfo.GetInstanceName(), nNodeFrames);
			CHECK_RC(nRetVal, "Tell frame");

			nTotalFrames += nNodeFrames;
		}
	}
	else
	{
		for (NodeInfoList::Iterator it = nodes.Begin(); it != nodes.End(); ++it)
		{
			NodeInfo nodeInfo = *it;
			XnUInt32 nNodeFrames = 0;
			nRetVal = player.GetNumFrames(nodeInfo.GetInstanceName(), nNodeFrames);
			CHECK_RC(nRetVal, "Get number of frames");

			nTotalFrames += nNodeFrames;
		}
	}

	// seek to start frame
	if (nStartFrame > 0)
	{
		nRetVal = player.SeekToFrame(primaryNode.GetName(), nStartFrame, XN_PLAYER_SEEK_SET);
		CHECK_RC(nRetVal, "Seeking to start frame");

		// remove skipped frames from total
		for (NodeInfoList::Iterator it = nodes.Begin(); it != nodes.End(); ++it)
		{
			NodeInfo nodeInfo = *it;
			XnUInt32 nNodeFrames = 0;
			nRetVal = player.TellFrame(nodeInfo.GetInstanceName(), nNodeFrames);
			CHECK_RC(nRetVal, "Tell frame");

			nTotalFrames -= nNodeFrames;
		}
	}
	
	// create recorder
	Recorder recorder;
	nRetVal = recorder.Create(context);
	CHECK_RC(nRetVal, "Create recorder");

	nRetVal = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, strOutputFile);
	CHECK_RC(nRetVal, "Set recorder destination file");

	// add nodes to recorder
	if (nNodeNames > 0)
	{
		for (XnUInt32 i = 0; i < nNodeNames; ++i)
		{
			ProductionNode node;
			nRetVal = context.GetProductionNodeByName(astrNodeNames[i], node);
			if (nRetVal != XN_STATUS_OK)
			{
				printf("Input file does not contain any node named %s\n", astrNodeNames[i]);
				return -1;
			}

			nRetVal = recorder.AddNodeToRecording(node);
			CHECK_RC(nRetVal, "Add to recording");
		}
	}
	else if (nNodeTypes > 0)
	{
		XnBool bAnyNodeAdded = FALSE;

		for (NodeInfoList::Iterator it = nodes.Begin(); it != nodes.End(); ++it)
		{
			NodeInfo nodeInfo = *it;
			const XnProductionNodeDescription& description = nodeInfo.GetDescription();

			for (XnUInt32 i = 0; i < nNodeTypes; ++i)
			{
				if (description.Type == aNodeTypes[i])
				{
					ProductionNode node;
					nRetVal = nodeInfo.GetInstance(node);
					CHECK_RC(nRetVal, "Get Instance");

					nRetVal = recorder.AddNodeToRecording(node);
					CHECK_RC(nRetVal, "Add to recording");

					bAnyNodeAdded = TRUE;
					break;
				}
			}
		}

		if (!bAnyNodeAdded)
		{
			printf("No node was found in input which matches requested types.\n");
			return -1;
		}
	}
	else
	{
		// add all nodes
		for (NodeInfoList::Iterator it = nodes.Begin(); it != nodes.End(); ++it)
		{
			NodeInfo nodeInfo = *it;
			ProductionNode node;
			nRetVal = nodeInfo.GetInstance(node);
			CHECK_RC(nRetVal, "Get Instance");

			nRetVal = recorder.AddNodeToRecording(node);
			CHECK_RC(nRetVal, "Add to recording");
		}
	}

	XnUInt32 nFrame = 0;
	XnDouble fPercentageFraction = 100.0 / nTotalFrames;

	while ((nRetVal = context.WaitAnyUpdateAll()) != XN_STATUS_EOF)
	{
		CHECK_RC(nRetVal, "Read next frame");
		printf("Recording: %.1f%%\r", nFrame * fPercentageFraction);

		if (primaryNode.IsValid())
		{
			XnUInt32 nCurrentFrame;
			nRetVal = player.TellFrame(primaryNode.GetName(), nCurrentFrame);
			CHECK_RC(nRetVal, "Tell frame");

			if (nCurrentFrame == nEndFrame)
			{
				break;
			}
		}

		++nFrame;
	}

	recorder.Release();
	player.Release();
	context.Release();

	return 0;
}
Example #17
/*
 *	Function:	KinectMonitor	(Constructor)
 *
 *	Initializes all the production nodes for the kinect to get and process data.
 *	Sets the camera tilt to the specified (or default) angle.
 *	Registers the OpenNI callbacks for user events.
 *	Registers the system signal handlers (for terminating the program).
 *	Initializes the global and member variables.
 *
 *	Parameters:
 *		int*	tilt	-	A pointer to the desired tilt angle of the camera.
 *									If it is NULL, the default value is used.
 */
KinectMonitor::KinectMonitor(char *tilt) {
    XnStatus status;
    EnumerationErrors errors;
    XnCallbackHandle userCallbacks;
    
    // Setup Context from an XML configuration file (the default supplied by OpenNI)
		// CONTEXT_XML is defined in monitor.h
    status = context.InitFromXmlFile(CONTEXT_XML, scriptNode, &errors);
    // Check to ensure that the context was initialized properly.
		if( status == XN_STATUS_NO_NODE_PRESENT ) {
        XnChar strError[1024];
        errors.ToString(strError, 1024);
        printf("%s\n", strError);
        
        return;
    } else if( status != XN_STATUS_OK ) {
        printf("Could not initialize Context: %s\n", xnGetStatusString(status));
        
        return;
    }
    
    // Setup Depth Generator production node from the context
    status = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depthGenerator);
	
	// Setup User Generator production node from the context
	status = context.FindExistingNode(XN_NODE_TYPE_USER, userGenerator);
	// Check that the user generator is available
	if( status != XN_STATUS_OK ) {
		// If the context did not define a UserGenerator node, then try to create one
		status = userGenerator.Create(context);
		CHECK_RC(status, "Find user generator");
	}
	
	// Set FPS
	status = xnFPSInit(&xnFPS, 180);
	
	// Check for Skeletal Mapping
	if( !userGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON) ) {
		printf("Skeletal mapping not supported.\n");
		
		return;
	}
	// Set the skeletal profile to only include the joints in the upper body.
	// Profile options are XN_SKEL_PROFILE_<option>
	// Where <option> could be: NONE, ALL, UPPER, LOWER, or HEAD_HANDS
	userGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_UPPER);
	
	// Tilt camera - This feature requires the program to be run with root privilege,
	// because it requires writing to the usb device.
	XN_USB_DEV_HANDLE dev;
	// Sets the angle to either the DEFAULT_TILT (defined in monitor.h) or the given tilt.
	int angle = (tilt == nullptr)? DEFAULT_TILT : atoi(tilt);
	// Open the kinect usb device
	status = xnUSBOpenDevice(VID_MICROSOFT, PID_NUI_MOTOR, NULL, NULL, &dev);
	// Send the proper code to the usb device to set the angle.
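	// (Vendor-specific request 0x31 is the tilt command; the angle travels in
	// the wValue field, so no data payload is needed beyond the dummy buffer.)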
	uint8_t empty[0x1];
	status = xnUSBSendControl(
		dev, XN_USB_CONTROL_TYPE_VENDOR, 0x31, (XnUInt16)angle,
		0x0, empty, 0x0, 0
	);
	
	// Register Callbacks
	status = userGenerator.RegisterUserCallbacks(
	    foundUser, lostUser, NULL, userCallbacks
	);
	
	// Register Handlers
	signal(SIGABRT, &stop);
	signal(SIGTERM, &stop);
	signal(SIGINT, &stop);
	// Initialize globals
	quit = false;
	out = true;
}
Example #18
int main(int argc, char* argv[])
{
	XnStatus rc;
	EnumerationErrors errors;

	// play back a recorded session if the second argument is "true"
	if (argc > 2 && strcmp(argv[2], "true") == 0) {
		rc = g_context.Init();
		CHECK_RC(rc, "Context init");

		rc = g_context.OpenFileRecording(RECORDING_PATH, g_player);
		CHECK_RC(rc, "Opening file");

		rc = g_player.SetRepeat(TRUE);
		CHECK_RC(rc, "Turn repeat on");
	} else {
		// otherwise get the context from the XML configuration file
		rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, g_scriptNode, &errors);
	}

	// error checking
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (rc);
	}
	CHECK_RC(rc, "Context initialization");

	// get image, hand and gesture generators from the context, checking errors
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	CHECK_RC(rc, "Get image generator");

	rc = g_context.FindExistingNode(XN_NODE_TYPE_HANDS, g_hands);
	CHECK_RC(rc, "Get hand generator");

	rc = g_context.FindExistingNode(XN_NODE_TYPE_GESTURE, g_gesture);
	CHECK_RC(rc, "Get gesture generator");

	// create and register callbacks (the return values must be assigned to rc,
	// otherwise CHECK_RC would test a stale status)
	XnCallbackHandle h1, h2;
	rc = g_gesture.RegisterGestureCallbacks(Gesture_Recognized,
	                                        Gesture_Process,
	                                        NULL, h1);
	CHECK_RC(rc, "Register gesture callbacks");

	rc = g_hands.RegisterHandCallbacks(Hand_Create, Hand_Update,
	                                   Hand_Destroy, NULL, h2);
	CHECK_RC(rc, "Register hand callbacks");

	// add gestures to the generator
	rc = g_gesture.AddGesture("Click", NULL);
	CHECK_RC(rc, "Add click gesture");
	rc = g_gesture.AddGesture("RaiseHand", NULL);
	CHECK_RC(rc, "Add raise gesture");
	rc = g_gesture.AddGesture("Wave", NULL);
	CHECK_RC(rc, "Add wave gesture");

        
	g_image.GetMetaData(g_imageMD);

	// RGB is the only image format supported.
	if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		printf("The device image format must be RGB24\n");
		return 1;
	}

	// if the first argument is "true", record the session
	if (argc > 1 && strcmp(argv[1], "true") == 0) {
		std::cout << "recording to " << RECORDING_PATH << std::endl;
		// Create Recorder
		rc = recorder.Create(g_context);
		CHECK_RC(rc, "create recorder");

		// Point it at the output file
		rc = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, RECORDING_PATH);
		CHECK_RC(rc, "init recorder");

		// Add nodes to recording
		rc = recorder.AddNodeToRecording(g_image);
		CHECK_RC(rc, "add image node");

		rc = recorder.AddNodeToRecording(g_hands);
		CHECK_RC(rc, "add hands node");
	}

	// initialize and run the program
	glutInit(&argc, argv);                                      // GLUT initialization
	glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);   // Display Mode
	glutInitWindowSize(WIDTH, HEIGHT);                          // set window size
	glutInitWindowPosition(GL_WIN_POSITION_X, GL_WIN_POSITION_Y);
	glutCreateWindow(TITLE);                                    // create Window
	glutDisplayFunc(glutDisplay);                               // register Display Function
	glutIdleFunc(glutDisplay);                                  // register Idle Function
	glutKeyboardFunc(glutKeyboard);                             // register Keyboard Handler
	initialize();
	glutMainLoop();                                             // does not return

	CleanUpExit();
	return 0;
}
Example #19
int main()
{
	XnStatus nRetVal = XN_STATUS_OK;

	Context context;
	ScriptNode scriptNode;
	EnumerationErrors errors;

	XnUInt32 min_z1, min_z2, min_z3, maxGrad, distVal;

	const char *fn = NULL;
	if (fileExists(SAMPLE_XML_PATH)) fn = SAMPLE_XML_PATH;
	else if (fileExists(SAMPLE_XML_PATH_LOCAL)) fn = SAMPLE_XML_PATH_LOCAL;
	else {
		printf("Could not find '%s' nor '%s'. Aborting.\n", SAMPLE_XML_PATH, SAMPLE_XML_PATH_LOCAL);
		return XN_STATUS_ERROR;
	}
	printf("Reading config from: '%s'\n", fn);
	nRetVal = context.InitFromXmlFile(fn, scriptNode, &errors);

	if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (nRetVal);
	}
	else if (nRetVal != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(nRetVal));
		return (nRetVal);
	}

	DepthGenerator depth;
	nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
	CHECK_RC(nRetVal, "Find depth generator");

	XnFPSData xnFPS;
	nRetVal = xnFPSInit(&xnFPS, 180);
	CHECK_RC(nRetVal, "FPS Init");

	DepthMetaData depthMD;

	//Initialize WiringPi
	if(wiringPiSetup() == -1)
		exit(1);

	//Enable SoftPWM on pin 1,2 and 3
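	// softPwmCreate(pin, initialValue, pwmRange) starts a software-PWM thread on
	// the given wiringPi pin; the duty cycle is updated later via softPwmWrite().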
	softPwmCreate(1, 0, RANGE);
	softPwmCreate(2, 0, RANGE);
	softPwmCreate(3, 0, RANGE);

	while (!xnOSWasKeyboardHit())
	{
		nRetVal = context.WaitOneUpdateAll(depth);
		if (nRetVal != XN_STATUS_OK)
		{
			printf("UpdateData failed: %s\n", xnGetStatusString(nRetVal));
			continue;
		}

		xnFPSMarkFrame(&xnFPS);

		depth.GetMetaData(depthMD);
		const XnDepthPixel* pDepthMap = depthMD.Data();
		int XRes = depthMD.XRes();
		int YRes = depthMD.YRes();

		//To find closest pixel value in Zone 1, Zone 2 and Zone 3
		min_z1    = getClosestPixel(  0        , 0, (XRes / 2)    , YRes, depthMD);
		min_z2    = getClosestPixel( (XRes / 4), 0, (3 * XRes / 4), YRes, depthMD);
		min_z3    = getClosestPixel( (XRes / 2), 0,  XRes         , YRes, depthMD);
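		// Note that the zones overlap: zone 2 straddles the image centre, sharing
		// half of its width with zone 1 and half with zone 3.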

		double in_low = 600;
		double in_high = 2000;
		double in_diff = in_high - in_low;
		double out_low = 51;
		double out_high = 973;
		double out_diff = out_high - out_low;

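		// Cubic distance-to-PWM mapping: a reading at in_high (2000mm) yields
		// out_low, a reading at in_low (600mm) yields out_high, and the cubic term
		// makes the output climb steeply as an obstacle approaches.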
		// Evaluate the cubic in floating point and clamp before converting: for
		// distances beyond in_high the raw value goes negative, and storing a
		// negative double in an XnUInt32 is undefined behaviour.
		double k = out_diff / (in_diff * in_diff * in_diff);
		distVal = min_z1;
		double raw1 = k * (in_high - distVal) * (in_high - distVal) * (in_high - distVal) + out_low;
		distVal = min_z2;
		double raw2 = k * (in_high - distVal) * (in_high - distVal) * (in_high - distVal) + out_low;
		distVal = min_z3;
		double raw3 = k * (in_high - distVal) * (in_high - distVal) * (in_high - distVal) + out_low;
		XnUInt32 pwm_val1 = (raw1 < 0) ? 0 : (XnUInt32)raw1;
		XnUInt32 pwm_val2 = (raw2 < 0) ? 0 : (XnUInt32)raw2;
		XnUInt32 pwm_val3 = (raw3 < 0) ? 0 : (XnUInt32)raw3;

		// Zone 1 - Left side (pin 1)

		if (pwm_val1 < out_low)
			pwm_val1 = 0;        // object too far: duty cycle 0
		if (min_z1 == 9000)
			pwm_val1 = out_high; // sentinel: no valid reading in the zone, treat as too close
		if (min_z1 < 600)
			pwm_val1 = out_high; // object too close: maximum duty cycle

		// Zone 2 - Center (pin 2)

		if (pwm_val2 < out_low)
			pwm_val2 = 0;
		if (min_z2 == 9000)
			pwm_val2 = out_high;
		if (min_z2 < 600)
			pwm_val2 = out_high;

		// Zone 3 - Right side (pin 3)

		if (pwm_val3 < out_low)
			pwm_val3 = 0;
		if (min_z3 == 9000)
			pwm_val3 = out_high;
		if (min_z3 < 600)
			pwm_val3 = out_high;

		// Rescale [out_low, out_high] to a 0-100% duty cycle. A pwm_val of 0 means
		// "nothing in range": keep it at 0, since running it through the formula
		// would produce a negative number that wraps when stored back as unsigned.
		pwm_val1 = (pwm_val1 == 0) ? 0 : (XnUInt32)(((pwm_val1 - out_low) / out_diff) * 100.0);
		pwm_val2 = (pwm_val2 == 0) ? 0 : (XnUInt32)(((pwm_val2 - out_low) / out_diff) * 100.0);
		pwm_val3 = (pwm_val3 == 0) ? 0 : (XnUInt32)(((pwm_val3 - out_low) / out_diff) * 100.0);

		softPwmWrite(1,(int)pwm_val1);
		softPwmWrite(2,(int)pwm_val2);
		softPwmWrite(3,(int)pwm_val3);

		if ((depthMD.FrameID() % 30) == 0)
		{
			printf("Frame %d\n", depthMD.FrameID());

			printf("Zone 1 value is %u \t", pwm_val1);
			printf("Zone 2 value is %u \t", pwm_val2);
			printf("Zone 3 value is %u \n", pwm_val3);

			printf("Zone1 min_dis   %u \t", min_z1);
			printf("Zone2 min_dis   %u \t", min_z2);
			printf("Zone3 min_dis   %u \n", min_z3);

			//To find a gradient value for the floor
			//maxGrad = getGradient( 5, 0, (YRes/2) + 1, depthMD.XRes(), depthMD.YRes(), depthMD);
			//printf("Frame %d max gradient for Floor is: %u. FPS: %f\n\n", depthMD.FrameID(), maxGrad, xnFPSCalc(&xnFPS));
		}

	}

	softPwmWrite(1,0);
	softPwmWrite(2,0);
	softPwmWrite(3,0);

	// release the nodes
	depth.Release();
	scriptNode.Release();
	context.Release();

	return 0;
}
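
The helper getClosestPixel() used above is not part of this snippet. A minimal sketch of what it might look like, assuming it scans the given pixel rectangle and returns the smallest nonzero depth, leaving 9000 in place as the sentinel when a zone has no valid reading:

// Hypothetical reconstruction of getClosestPixel(); the real implementation is
// not shown in this example.
XnUInt32 getClosestPixel(int xStart, int yStart, int xEnd, int yEnd,
                         const DepthMetaData& depthMD)
{
	XnUInt32 minDepth = 9000; // sentinel: unchanged if no valid pixel is found
	for (int y = yStart; y < yEnd; y++)
	{
		for (int x = xStart; x < xEnd; x++)
		{
			XnDepthPixel d = depthMD(x, y);
			if (d != 0 && d < minDepth) // depth 0 means "no reading" in OpenNI
				minDepth = d;
		}
	}
	return minDepth;
}
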
Example #20
int main(int argc, char* argv[]) {

	markerSize.width = -1; markerSize.height = -1;
	EnumerationErrors errors;
	switch (XnStatus rc = niContext.InitFromXmlFile(KINECT_CONFIG_FILENAME, &errors)) {
		case XN_STATUS_OK:
			break;
		case XN_STATUS_NO_NODE_PRESENT: {
			XnChar strError[1024];
			errors.ToString(strError, 1024);
			printf("%s\n", strError);
			return rc;
		}
		default:
			printf("Open failed: %s\n", xnGetStatusString(rc));
			return rc;
	}

	capture = new Camera(CAPTURE_SIZE, CAMERA_PARAMS_FILENAME);

	RegistrationParams = scaleParams(capture->getParameters(), double(REGISTRATION_SIZE.width)/double(CAPTURE_SIZE.width));
	osg_init(calcProjection(RegistrationParams, capture->getDistortion(), REGISTRATION_SIZE));

	loadKinectParams(KINECT_PARAMS_FILENAME, &kinectParams, &kinectDistort);
	kinectDistort = 0;
	// Force the principal point (entries 2 and 5 of the row-major 3x3 intrinsic
	// matrix) to the centre of a 640x480 image.
	kinectParams->data.db[2] = 320.0; kinectParams->data.db[5] = 240.0;

	niContext.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	niContext.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);

	g_depth.GetMirrorCap().SetMirror(false);
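	// Align the depth stream to the RGB camera's viewpoint so that depth and
	// colour pixels correspond one-to-one.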
	g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);

	kinectReg = new RegistrationOPIRA(new OCVSurf());
	kinectReg->addResizedMarker(MARKER_FILENAME, 400);

	//physics
	m_world = new KCRPhysicsWorld();
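	// 19200 floats: presumably one height sample per cell of a 160x120 ground
	// grid, matching the MESH_SIZE resolution used for transDepth160 below.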
	ground_grid = new float[19200];
	for (int i =0;i < 19200; i++) {
		ground_grid[i] = 0; 
	}
#ifdef SIM_PARTICLES
	voxel_grid = new float[1200];
	for (int i =0;i < 1200; i++) {
		voxel_grid[i] = 0;
	}
#endif

	//controls
	KeyboardController *kc = new KeyboardController(m_world);
	XboxController *xc = new XboxController(m_world);

	loadKinectTransform(KINECT_TRANSFORM_FILENAME);

#ifdef USE_ARMM_VRPN
	m_Connection = new vrpn_Connection_IP();
	ARMM_server = new ARMM_Communicator(m_Connection );
    cout << "Created VRPN server." << endl;
#endif

#ifdef USE_SKIN_SEGMENTATION	//Skin color look up
	_HandRegion.LoadSkinColorProbTable();
#endif

#ifdef USE_OPTICAL_FLOW
	prev_colourIm = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
#endif
/////////////////////////////////////////////Main Loop////////////////////////////////////////////////
	while (running) {
		XnStatus rc = niContext.WaitAnyUpdateAll();
		if (rc != XN_STATUS_OK) {
			printf("Read failed: %s\n", xnGetStatusString(rc));
			return rc;
		}
		g_depth.GetMetaData(niDepthMD);
		g_image.GetMetaData(niImageMD);

		colourIm = cvCreateImage(cvSize(niImageMD.XRes(), niImageMD.YRes()), IPL_DEPTH_8U, 3);
		memcpy(colourIm->imageData, niImageMD.Data(), colourIm->imageSize); cvCvtColor(colourIm, colourIm, CV_RGB2BGR);
		cvFlip(colourIm, colourIm, 1);

		depthIm = cvCreateImage(cvSize(niDepthMD.XRes(), niDepthMD.YRes()), IPL_DEPTH_16U, 1);
		transDepth160 = cvCreateImage(cvSize(MESH_SIZE.width, MESH_SIZE.height), IPL_DEPTH_32F, 1);
		transDepth320 = cvCreateImage(cvSize(CV_OP_SIZE.width, CV_OP_SIZE.height), IPL_DEPTH_32F, 1);
		memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);	
		cvShowImage("Kinect View", colourIm);

		IplImage *arImage = capture->getFrame();
		cvWaitKey(1); 
		kc->check_input(); xc->check_input();

#ifdef USE_OPTICAL_FLOW
		if(RunOnce) SceneOpticalFlowLK(prev_colourIm, colourIm);
#endif

		if (kinectTransform) { // run only once the Kinect-to-marker transform (a CvMat*) is available
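			// Every fifth frame: inpaint the depth map and rebuild the physics
			// ground mesh; the remaining frames run the (optional)
			// skin-segmentation path below.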
			if( counter >= 4) {
				inpaintDepth(&niDepthMD, true); 
				memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);				
				TransformDepth(depthIm, transDepth160, MARKER_DEPTH, MESH_SIZE);
				GenerateTrimeshGroundFromDepth(transDepth160, MARKER_DEPTH); /*Trimesh generation*/
				m_world->updateTrimeshRefitTree(ground_grid);//opencl?
				osg_UpdateHeightfieldTrimesh(ground_grid);//opencl?
#ifdef SIM_PARTICLES
/*World spheres simulation*/
//				GenerateVoxelFromDepth(depthIm, MARKER_DEPTH);
//				m_world->updateWorldSphereTransform(voxel_grid);
//				osgUpdateWorldSphereTransform(voxel_grid);
#endif
				counter = 0;
			} else {
#ifdef USE_SKIN_SEGMENTATION /*Skin color segmentation*/ // may be reduce resolution first as well as cut off depth make processing faster
				TransformDepth(depthIm, transDepth320, MARKER_DEPTH, CV_OP_SIZE);
				IplImage* depthTmp = cvCreateImage(cvSize(CV_OP_SIZE.width, CV_OP_SIZE.height), IPL_DEPTH_8U, 1);
				IplImage* colourImResized = cvCreateImage(cvSize(CV_OP_SIZE.width, CV_OP_SIZE.height), IPL_DEPTH_8U, 3);
				gray = cvCreateImage(cvSize(colourImResized->width, colourImResized->height),IPL_DEPTH_8U,1);
				hand_region = cvCreateImage(cvSize(colourImResized->width, colourImResized->height),IPL_DEPTH_8U,1);
				IplImage* colourIm640 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);

//				cvCmpS(transDepth, 0, depthTmp, CV_CMP_LT);//dst must b 8U
				cvThreshold(transDepth320, depthTmp, 1, 255, CV_THRESH_BINARY_INV); // threshold at 1cm above the marker
				cvResize(colourIm, colourImResized, CV_INTER_NN);//use nearest neighbor interpolation
//				removeNoise( depthTmp, 100 );
//				cvSet(colourImResized, cvScalar(0), depthTmp);
				cvShowImage ("Marker Thresh", colourImResized);
				cvResize(colourImResized, colourIm640,CV_INTER_NN);
				cvShowImage ("Marker Thresh 640", colourIm640);

				cvCopyImage( _HandRegion.GetHandRegion( colourImResized, gray), hand_region );
//				removeNoise( hand_region, 20 );
				cvThreshold(hand_region, depthTmp, 0, 255, CV_THRESH_BINARY_INV);
//				removeNoise( depthTmp, 100 );
//				cvShowImage ("depthTmp", depthTmp);
//				cvShowImage ("hand_region", hand_region);

				cvSet(colourImResized, cvScalar(0), depthTmp);

//				cvShowImage ("Skin Color", colourImResized);

//				cvDilate(colourImResized,colourImResized,CV_SHAPE_RECT,1);
//				cvErode(colourImResized,colourImResized,CV_SHAPE_RECT,1);
//				cvMorphologyEx(colourImResized,colourImResized,NULL,CV_SHAPE_RECT,CV_MOP_OPEN,1);
				cvResize(colourImResized, colourIm640,CV_INTER_NN);
				cvShowImage ("Color Skin Color 640", colourIm640);
				cvReleaseImage(&depthTmp);
				cvReleaseImage(&colourImResized);
				cvReleaseImage(&colourIm640);
#endif

#ifdef USE_PARTICLES
/*
				IplImage* depthTmp1 = cvCreateImage(cvSize(depthIm->width, depthIm->height), IPL_DEPTH_32F, 1);
				IplImage* depthTmp2 = cvCreateImage(cvSize(depthIm->width, depthIm->height), IPL_DEPTH_8U, 1);
				cvConvertScale(depthIm, depthTmp1, 1);
//				cvThreshold(depthTmp1, depthTmp2, MARKER_DEPTH-5, 255, CV_THRESH_TOZERO_INV);//thresh 5mm above marker
				cvThreshold(depthTmp1, depthTmp2, MARKER_DEPTH-10, 255, CV_THRESH_TOZERO);
				//cvCmpS(depthTmp1, MARKER_DEPTH, depthTmp2, CV_CMP_GT);
//				cvShowImage("DEPTH640", depthTmp2);
//				IplImage* colourTmp = cvCreateImage(cvSize(colourIm->width, colourIm->height), IPL_DEPTH_8U, 3);
//				cvCopyImage(colourIm, colourTmp);
//				cvSet(colourTmp, cvScalar(0), depthTmp2);
//				cvShowImage ("Color640", colourTmp);
				cvSet(depthTmp1, cvScalar(0), depthTmp2);
				cvShowImage("DEPTH640_32F", depthTmp1);
*/
				inpaintDepth(&niDepthMD, true); 
//				TransformDepth(depthTmp1, transDepth, MARKER_DEPTH);
				memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);
				TransformDepth(depthIm, transDepth, MARKER_DEPTH);
				IplImage* depthTmp3 = cvCreateImage(cvSize(TRACKING_SIZE.width, TRACKING_SIZE.height), IPL_DEPTH_8U, 1);
				cvThreshold(transDepth, depthTmp3, 0.5, 255, CV_THRESH_BINARY_INV);
//				cvThreshold(transDepth, depthTmp3, 0, 255, CV_THRESH_TOZERO);
//				cvConvertScale(transDepth, depthTmp3, 1);
				cvShowImage("DEPTH160", transDepth);
				IplImage* colourImResized = cvCreateImage(cvSize(TRACKING_SIZE.width, TRACKING_SIZE.height), IPL_DEPTH_8U, 3);
				cvResize(colourIm, colourImResized, CV_INTER_NN);//use nearest neighbor interpolation
				cvSet(colourImResized, cvScalar(0), depthTmp3);
				cvShowImage ("Color160", colourImResized);

//				cvReleaseImage(&depthTmp1);
//				cvReleaseImage(&depthTmp2);
//				cvReleaseImage(&colourTmp);
				cvReleaseImage(&depthTmp3);
				cvReleaseImage(&colourImResized);

/*
				IplImage* depthTmp1 = cvCreateImage(cvSize(depthIm->width, depthIm->height), IPL_DEPTH_32F, 1);
				IplImage* depthTmp2 = cvCreateImage(cvSize(depthIm->width, depthIm->height), IPL_DEPTH_8U, 1);
				cvConvertScale(depthIm, depthTmp1, 1);
//				cvThreshold(depthTmp1, depthTmp2, MARKER_DEPTH-5, 255, CV_THRESH_TOZERO_INV);//thresh 5mm above marker
				cvThreshold(depthTmp1, depthTmp2, MARKER_DEPTH-5, 255, CV_THRESH_TOZERO);
				//cvCmpS(depthTmp1, MARKER_DEPTH, depthTmp2, CV_CMP_GT);
				cvShowImage("TMP_DEPTH", depthTmp2);
				IplImage* colourTmp = cvCreateImage(cvSize(colourIm->width, colourIm->height), IPL_DEPTH_8U, 3);
				cvCopyImage(colourIm, colourTmp);
				cvSet(colourTmp, cvScalar(0), depthTmp2);
				cvShowImage ("Basic Thresh", colourTmp);
				cvReleaseImage(&depthTmp1);
				cvReleaseImage(&depthTmp2);
*/
#endif
				counter++;
//			} else {
//				counter++;
			}
			//do hand pose recognition
			m_world->Update();
			RenderScene(arImage, capture);
		}
#ifdef USE_ARMM_VRPN
		ARMM_server->mainloop();
		m_Connection->mainloop();
#endif

#ifdef USE_OPTICAL_FLOW
		if(!RunOnce) RunOnce = true;
		cvCopyImage(colourIm, prev_colourIm);
		memcpy(prev_colourIm->imageData, niImageMD.Data(), prev_colourIm->imageSize);
		cvCvtColor(prev_colourIm, prev_colourIm, CV_RGB2BGR);
#endif

		cvReleaseImage(&arImage);
		cvReleaseImage(&depthIm); cvReleaseImage(&colourIm);
		cvReleaseImage(&transDepth320);cvReleaseImage(&transDepth160);
#ifdef USE_SKIN_SEGMENTATION
		cvReleaseImage(&gray); cvReleaseImage(&hand_region);
#endif
	}

#ifdef USE_OPTICAL_FLOW
	cvReleaseImage(&prev_colourIm); // only allocated when optical flow is enabled
#endif
	osg_uninit();
	delete m_world;
	delete kinectReg;

	cvReleaseMat(&RegistrationParams);

	delete kc;

	return 0;
}