Example #1
bool CvCapture_OpenNI::setImageGeneratorProperty( int propIdx, double propValue )
{
    bool isSet = false;
    if( !imageGenerator.IsValid() )
        return isSet;

    switch( propIdx )
    {
    case CV_CAP_PROP_OPENNI_OUTPUT_MODE :
    {
        XnMapOutputMode mode;

        switch( cvRound(propValue) )
        {
        case CV_CAP_OPENNI_VGA_30HZ :
            mode.nXRes = XN_VGA_X_RES;
            mode.nYRes = XN_VGA_Y_RES;
            mode.nFPS = 30;
            break;
        case CV_CAP_OPENNI_SXGA_15HZ :
            mode.nXRes = XN_SXGA_X_RES;
            mode.nYRes = XN_SXGA_Y_RES;
            mode.nFPS = 15;
            break;
        case CV_CAP_OPENNI_SXGA_30HZ :
            mode.nXRes = XN_SXGA_X_RES;
            mode.nYRes = XN_SXGA_Y_RES;
            mode.nFPS = 30;
            break;
        case CV_CAP_OPENNI_QVGA_30HZ :
             mode.nXRes = XN_QVGA_X_RES;
             mode.nYRes = XN_QVGA_Y_RES;
             mode.nFPS = 30;
             break;
        case CV_CAP_OPENNI_QVGA_60HZ :
             mode.nXRes = XN_QVGA_X_RES;
             mode.nYRes = XN_QVGA_Y_RES;
             mode.nFPS = 60;
             break;
        default :
            CV_Error( CV_StsBadArg, "Unsupported image generator output mode.\n");
        }

        XnStatus status = imageGenerator.SetMapOutputMode( mode );
        if( status != XN_STATUS_OK )
            std::cerr << "CvCapture_OpenNI::setImageGeneratorProperty : " << xnGetStatusString(status) << std::endl;
        else
            isSet = true;
        break;
    }
    default:
    {
        std::stringstream ss;
        ss << "Image generator does not support such parameter (propIdx=" << propIdx << ") for setting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return isSet;
}
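In OpenCV this setter is normally reached through cv::VideoCapture rather than called directly. A minimal sketch, assuming OpenCV was built with OpenNI support and a sensor is attached:

cv::VideoCapture capture( CV_CAP_OPENNI );
// Address the image generator explicitly and request VGA at 30 FPS
capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE,
             CV_CAP_OPENNI_VGA_30HZ );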
Example #2
bool CvCapture_OpenNI::grabFrame()
{
    if( !isOpened() )
        return false;

    bool isGrabbed = false;
    if( !approxSyncGrabber.empty() && approxSyncGrabber->isRun() )
    {
        isGrabbed = approxSyncGrabber->grab( depthMetaData, imageMetaData );
    }
    else
    {
        XnStatus status = context.WaitAndUpdateAll();
        if( status != XN_STATUS_OK )
            return false;

        if( depthGenerator.IsValid() )
            depthGenerator.GetMetaData( depthMetaData );
        if( imageGenerator.IsValid() )
            imageGenerator.GetMetaData( imageMetaData );
        isGrabbed = true;
    }

    return isGrabbed;
}
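grabFrame() is the backend half of cv::VideoCapture's grab/retrieve pair. A usage sketch from the application side (constant names from OpenCV's OpenNI backend):

cv::VideoCapture capture( CV_CAP_OPENNI );
cv::Mat depthMap, bgrImage;
if( capture.grab() )
{
    capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP ); // CV_16UC1, depth in mm
    capture.retrieve( bgrImage, CV_CAP_OPENNI_BGR_IMAGE ); // CV_8UC3
}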
Example #3
bool SetupImage(xn::Context& g_context)
{
	XnStatus nRetVal = XN_STATUS_OK;
	fprintf(stderr,"Setting up the image generator\n");

	if ((nRetVal = g_image.Create(g_context))!= XN_STATUS_OK)
	{
		printf("Could not create depth generator: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}


	if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not find image sensor: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}

	XnMapOutputMode mapMode;
	mapMode.nXRes = XN_VGA_X_RES;
	mapMode.nYRes = XN_VGA_Y_RES;
	mapMode.nFPS = 30;
	if ((nRetVal = g_image.SetMapOutputMode(mapMode)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not set image mode: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}
	return TRUE;
}
Example #4
XnStatus ConfigureGenerators(const RecConfiguration& config, xn::Context& context, xn::DepthGenerator& depthGenerator, xn::ImageGenerator& imageGenerator)
{
	XnStatus nRetVal = XN_STATUS_OK;
	xn::EnumerationErrors errors;

	// Configure the depth, if needed
	if (config.bRecordDepth)
	{
		nRetVal = context.CreateAnyProductionTree(XN_NODE_TYPE_DEPTH, NULL, depthGenerator, &errors);
		CHECK_RC_ERR(nRetVal, "Create Depth", errors);
		nRetVal = depthGenerator.SetMapOutputMode(*config.pDepthMode);
		CHECK_RC(nRetVal, "Set Mode");
		if (config.bMirrorIndicated && depthGenerator.IsCapabilitySupported(XN_CAPABILITY_MIRROR))
		{
			depthGenerator.GetMirrorCap().SetMirror(config.bMirror);
		}

		// Set Hole Filter
		depthGenerator.SetIntProperty("HoleFilter", TRUE);
	}
	// Configure the image, if needed
	if (config.bRecordImage)
	{
		nRetVal = context.CreateAnyProductionTree(XN_NODE_TYPE_IMAGE, NULL, imageGenerator, &errors);
		CHECK_RC_ERR(nRetVal, "Create Image", errors);
		nRetVal = imageGenerator.SetMapOutputMode(*config.pImageMode);
		CHECK_RC(nRetVal, "Set Mode");

		if (config.bMirrorIndicated && imageGenerator.IsCapabilitySupported(XN_CAPABILITY_MIRROR))
		{
			imageGenerator.GetMirrorCap().SetMirror(config.bMirror);
		}
	}

	// Configuration for when there are both streams
	if (config.bRecordDepth && config.bRecordImage)
	{
		// Registration
		if (config.bRegister && depthGenerator.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
		{
			nRetVal = depthGenerator.GetAlternativeViewPointCap().SetViewPoint(imageGenerator);
			CHECK_RC(nRetVal, "Registration");
		}
		// Frame Sync
		if (config.bFrameSync && depthGenerator.IsCapabilitySupported(XN_CAPABILITY_FRAME_SYNC))
		{
			if (depthGenerator.GetFrameSyncCap().CanFrameSyncWith(imageGenerator))
			{
				nRetVal = depthGenerator.GetFrameSyncCap().FrameSyncWith(imageGenerator);
				CHECK_RC(nRetVal, "Frame sync");
			}
		}
	}

	return XN_STATUS_OK;
}
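CHECK_RC and CHECK_RC_ERR are not shown in this example; in the OpenNI samples they are conventionally defined along these lines (a sketch, not necessarily the exact macros this project uses):

#define CHECK_RC(rc, what)                                      \
    if ((rc) != XN_STATUS_OK)                                   \
    {                                                           \
        printf("%s failed: %s\n", what, xnGetStatusString(rc)); \
        return (rc);                                            \
    }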
Example #5
	information()
	{
		RC(context.Init(),							"Context Initialized");
		
		XnMapOutputMode mode;
		mode.nXRes = XN_VGA_X_RES;
		mode.nYRes = XN_VGA_Y_RES;
		mode.nFPS  = 30;
		
		RC(image.Create(context),					"Create image buffer");
		RC(image.SetMapOutputMode(mode),			"Set image mode");
		
		RC(depth.Create(context),					"Create depth buffer");
		RC(depth.SetMapOutputMode(mode),			"Set depth mode");
		
		xn::Query q;
		RC(q.AddSupportedCapability(XN_CAPABILITY_SKELETON), "Request skeleton");

		try {
			RC(context.FindExistingNode(XN_NODE_TYPE_USER, user), "User generator");
		} catch (...) {
			RC(user.Create(context),					"Get skeleton!!!");
		}
//		RC(user.Create(context, &q),					"Get skeleton!!!");
//		
//		xn::NodeInfoList il;
//		RC(context.EnumerateProductionTrees(XN_NODE_TYPE_USER, &q, il, NULL),
//													"Enumerate nodes");
//		
//		xn::NodeInfo i = *il.Begin();
//		RC(context.CreateProductionTree(i),			"Create skeleton node");
//		RC(i.GetInstance(user),						"Get skeleton");
		
		user.RegisterUserCallbacks(User_NewUser, NULL, NULL, hUserCallbacks);
		user.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, &user, hCalibrationCallbacks);
		
		if (user.GetSkeletonCap().NeedPoseForCalibration())
		{
			if (!user.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
			{
				post("Pose required, but not supported\n");
			}
			else
			{
				user.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, &user, hPoseCallbacks);
				user.GetSkeletonCap().GetCalibrationPose(g_strPose);
				user.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
			}
		}
		
		RC(context.StartGeneratingAll(),			"Start generating data");
		
		post("Kinect initialized!\n");
	}
Example #6
void Loop(void)
{
	XnStatus nRetVal = XN_STATUS_OK;


	while (g_notDone)
	{

		if ((nRetVal = g_context.WaitOneUpdateAll(g_depth)) != XN_STATUS_OK)
			//if ((nRetVal = g_context.WaitAndUpdateAll()) != XN_STATUS_OK)
		{
			fprintf(stderr,"Could not update data: %s\n", xnGetStatusString(nRetVal));
			continue;
		}
		if (g_haveDepth)
		{
			const XnDepthPixel* pDepthMap = g_depth.GetDepthMap();
			ProcessDepthFrame(pDepthMap, g_depthWidth, g_depthHeight);
			FindFingertip();
		}

		if (g_haveImage)
		{
			const XnRGB24Pixel* pImageMap = g_image.GetRGB24ImageMap();
			ProcessImageFrame(pImageMap, g_depthWidth, g_depthHeight);
		}



		ShowFrame();

		CheckKeys();
	}
}
Example #7
	// Save new data from OpenNI
	void Update(const xn::DepthGenerator& depthGenerator, const xn::ImageGenerator& imageGenerator)
	{
		if (m_bDepth)
		{
			// Save latest depth frame
			xn::DepthMetaData dmd;
			depthGenerator.GetMetaData(dmd);
			m_pFrames[m_nNextWrite].depthFrame.CopyFrom(dmd);
		}
		if (m_bImage)
		{
			// Save latest image frame
			xn::ImageMetaData imd;
			imageGenerator.GetMetaData(imd);
			m_pFrames[m_nNextWrite].imageFrame.CopyFrom(imd);
		}

		// See if buffer is already full
		if (m_nBufferCount < m_nBufferSize)
		{
			m_nBufferCount++;
		}
		// Make sure the cyclic buffer pointers are good
		m_nNextWrite++;
		if (m_nNextWrite == m_nBufferSize)
		{
			m_nNextWrite = 0;
		}
	}
Example #8
bool CvCapture_OpenNI::setCommonProperty( int propIdx, double propValue )
{
    bool isSet = false;

    switch( propIdx )
    {
    // There is a set of properties that correspond to the depth generator by default
    // (i.e. they are passed without a particular generator flag).
    case CV_CAP_PROP_OPENNI_REGISTRATION:
        isSet = setDepthGeneratorProperty( propIdx, propValue );
        break;
    case CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC :
        if( propValue && depthGenerator.IsValid() && imageGenerator.IsValid() )
        {
            // start synchronization
            if( approxSyncGrabber.empty() )
            {
                approxSyncGrabber = new ApproximateSyncGrabber( context, depthGenerator, imageGenerator, maxBufferSize, isCircleBuffer, maxTimeDuration );
            }
            else
            {
                approxSyncGrabber->finish();

                // update params
                approxSyncGrabber->setMaxBufferSize(maxBufferSize);
                approxSyncGrabber->setIsCircleBuffer(isCircleBuffer);
                approxSyncGrabber->setMaxTimeDuration(maxTimeDuration);
            }
            approxSyncGrabber->start();
        }
        else if( !propValue && !approxSyncGrabber.empty() )
        {
            // finish synchronization
            approxSyncGrabber->finish();
        }
        break;
    case CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE :
        maxBufferSize = cvRound(propValue);
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setMaxBufferSize(maxBufferSize);
        break;
    case CV_CAP_PROP_OPENNI_CIRCLE_BUFFER :
        isCircleBuffer = cvRound(propValue) != 0;
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setIsCircleBuffer(isCircleBuffer);
        break;
    case CV_CAP_PROP_OPENNI_MAX_TIME_DURATION :
        maxTimeDuration = cvRound(propValue);
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setMaxTimeDuration(maxTimeDuration);
        break;
    default:
    {
        std::stringstream ss;
        ss << "Such parameter (propIdx=" << propIdx << ") isn't supported for setting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return isSet;
}
Example #9
// Save new data from OpenNI
void NiRecorder::update(const xn::DepthGenerator &dg, const xn::ImageGenerator &ig)
{
    // Save latest depth frame
    xn::DepthMetaData dmd;
    dg.GetMetaData(dmd);
    frames[next_to_write].depth_frame.CopyFrom(dmd);

    // Save latest image frame
    xn::ImageMetaData imd;
    ig.GetMetaData(imd);
    frames[next_to_write].image_frame.CopyFrom(imd);

    // See if buffer is already full
    if (buffer_count < buffer_size)
    {
        buffer_count++;
    }

    // Make sure the cyclic buffer pointers are good
    next_to_write++;
    if (next_to_write == buffer_size)
    {
        next_to_write = 0;
    }
}
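The matching read side is not shown; a plausible sketch of locating the oldest buffered frame, assuming the same next_to_write/buffer_count/buffer_size members (the helper name is hypothetical):

// Hypothetical helper: index of the oldest frame still held in the cyclic buffer
int NiRecorder::oldestFrameIndex() const
{
    return (next_to_write - buffer_count + buffer_size) % buffer_size;
}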
Example #10
// this function is called each frame
void glutDisplay ()
{
	clock_t t1 = clock();
	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	xn::ImageMetaData imageMD;
	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
#ifndef USE_GLES
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#else
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitOneUpdateAll(g_UserGenerator);
	}

	// Process the data
	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	if(Show_Image == FALSE)
		DrawDepthMap(depthMD, sceneMD,COM_tracker,Bounding_Box);
	else
	{
		DrawImageMap(imageMD, depthMD, sceneMD,COM_tracker,Bounding_Box);
	}
#ifndef USE_GLES
	glutSwapBuffers();
#endif
	clock_t t2 = clock();
	std::cout << t2 - t1 << std::endl;
}
Example #11
void CleanupExit()
{
    g_scriptNode.Release();
    g_DepthGenerator.Release();
    g_UserGenerator.Release();
    g_Player.Release();
    g_Context.Release();
    g_ImageGenerator.Release();
    exit (1);
}
Example #12
bool CvCapture_OpenNI::setDepthGeneratorProperty( int propIdx, double propValue )
{
    bool isSet = false;

    CV_Assert( depthGenerator.IsValid() );

    switch( propIdx )
    {
    case CV_CAP_PROP_OPENNI_REGISTRATION:
        {
            if( propValue != 0.0 ) // "on"
            {
                // if there is no image generator (e.g. the ASUS XtionPro doesn't have one)
                // then the property isn't available
                if( imageGenerator.IsValid() )
                {
                    if( !depthGenerator.GetAlternativeViewPointCap().IsViewPointAs(imageGenerator) )
                    {
                        if( depthGenerator.GetAlternativeViewPointCap().IsViewPointSupported(imageGenerator) )
                        {
                            XnStatus status = depthGenerator.GetAlternativeViewPointCap().SetViewPoint(imageGenerator);
                            if( status != XN_STATUS_OK )
                                std::cerr << "CvCapture_OpenNI::setDepthGeneratorProperty : " << xnGetStatusString(status) << std::endl;
                            else
                                isSet = true;
                        }
                        else
                            std::cerr << "CvCapture_OpenNI::setDepthGeneratorProperty : Unsupported viewpoint." << std::endl;
                    }
                    else
                        isSet = true;
                }
            }
            else // "off"
            {
                XnStatus status = depthGenerator.GetAlternativeViewPointCap().ResetViewPoint();
                if( status != XN_STATUS_OK )
                    std::cerr << "CvCapture_OpenNI::setDepthGeneratorProperty : " << xnGetStatusString(status) << std::endl;
                else
                    isSet = true;
            }
        }
        break;
    default:
    {
        std::stringstream ss;
        ss << "Depth generator does not support such parameter (propIdx=" << propIdx << ") for setting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return isSet;
}
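From the application side this whole branch is driven by one property write; a minimal sketch:

cv::VideoCapture capture( CV_CAP_OPENNI );
// Map depth pixels onto the RGB camera's viewpoint (registration on)
capture.set( CV_CAP_PROP_OPENNI_REGISTRATION, 1 );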
Example #13
void InitialKinect()
{
		// 1. Initialize the context
		mContext.Init();

		// 2. Set the map output mode (already defined in the constructor)
		mapMode.nXRes = 640;
		mapMode.nYRes = 480;
		mapMode.nFPS = 30;

		// 3.a Create the depth generator
		mDepthGenerator.Create( mContext );
		mDepthGenerator.SetMapOutputMode( mapMode );

		// 3.b Create the image generator
		mImageGenerator.Create( mContext );
		mImageGenerator.SetMapOutputMode( mapMode );

		// 4. Correct the viewpoint
		mDepthGenerator.GetAlternativeViewPointCap().SetViewPoint( mImageGenerator );
		std::cout << "KinectSensor initialized correctly!" << std::endl;
}
Example #14
// this function is called each frame
void glutDisplay (void)
{
    xn::SceneMetaData sceneMD;
    xn::DepthMetaData depthMD;
    xn::ImageMetaData imageMD;

    glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Setup the OpenGL viewpoint
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();

    g_DepthGenerator.GetMetaData(depthMD);
    g_ImageGenerator.GetMetaData(imageMD);

#ifndef USE_GLES
    glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#else
    glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif

    glDisable(GL_TEXTURE_2D);

    // Read next available data
    g_Context.WaitOneUpdateAll(g_UserGenerator);

    // Process the data
    g_DepthGenerator.GetMetaData(depthMD);
    g_UserGenerator.GetUserPixels(0, sceneMD);
    g_ImageGenerator.GetMetaData(imageMD);

    // Draw the input fetched from the Kinect
    DrawKinectInput(depthMD, sceneMD, imageMD);

#ifndef USE_GLES
    glutSwapBuffers();
#endif
}
Example #15
void changeRegistration(int nValue)
{
	if (!g_DepthGenerator.IsValid() || !g_DepthGenerator.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
	{
		return;
	}

	if(!nValue)
	{
		g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();
	}
	else if (g_ImageGenerator.IsValid())
	{
		g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
	}
}
Example #16
double CvCapture_OpenNI::getImageGeneratorProperty( int propIdx )
{
    double propValue = 0.;
    if( !imageGenerator.IsValid() )
        return propValue;

    XnMapOutputMode mode;
    switch( propIdx )
    {
    case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT :
        CV_DbgAssert( imageGenerator.IsValid() );
        propValue = 1.;
        break;
    case CV_CAP_PROP_FRAME_WIDTH :
        if( imageGenerator.GetMapOutputMode(mode) == XN_STATUS_OK )
            propValue = mode.nXRes;
        break;
    case CV_CAP_PROP_FRAME_HEIGHT :
        if( imageGenerator.GetMapOutputMode(mode) == XN_STATUS_OK )
            propValue = mode.nYRes;
        break;
    case CV_CAP_PROP_FPS :
        if( imageGenerator.GetMapOutputMode(mode) == XN_STATUS_OK )
            propValue = mode.nFPS;
        break;
    case CV_CAP_PROP_POS_MSEC :
        propValue = imageGenerator.GetTimestamp();
        break;
    case CV_CAP_PROP_POS_FRAMES :
        propValue = imageGenerator.GetFrameID();
        break;
    default :
    {
        std::stringstream ss;
        ss << "Image generator does not support such parameter (propIdx=" << propIdx << ") for getting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return propValue;
}
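The getter mirrors the setter; e.g. reading the image stream geometry through the same capture object as in the earlier sketches:

double w   = capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_FRAME_WIDTH );
double h   = capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_FRAME_HEIGHT );
double fps = capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_FPS );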
Example #17
bool DataCapture::captureOne()
{
    XnStatus rc = context_.WaitAndUpdateAll(); // want this to be WaitOneUpdateAll(RGB image)
    if( rc != XN_STATUS_OK )
    {
        std::cout << "WaitAndUpdateAll: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    // grab image
    imageGen_.GetMetaData(imageMd_);
    const XnRGB24Pixel* rgbData = imageMd_.RGB24Data();
    for( unsigned int i = 0; i < 640 * 480; ++i )
    {
        pRgbData_[3*i] = rgbData->nRed;
        pRgbData_[3*i + 1] = rgbData->nGreen;
        pRgbData_[3*i + 2] = rgbData->nBlue;
        ++rgbData;
    }

    // grab depth image
    depthGen_.GetMetaData(depthMd_);
    const uint16_t* pDepthDataU16 = depthMd_.Data();
    for( int i = 0; i < 640 * 480; ++i)
    {
        uint16_t d = pDepthDataU16[i];
        if( d != 0 )
        {
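            // Scale into 8 bits; the 2048 divisor assumes an 11-bit depth range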
            pDepthData_[i] = (d * 255)/2048;
        }
        else
        {
            pDepthData_[i] = 0; // should be NAN
        }
    }
    return true;
}
Example #18
	void matrixCalc(void *outputs)
	{
		TML::Matrix out1(outputs, 0);
		TML::Matrix out2(outputs, 1);
		TML::Matrix out3(outputs, 2);
		TML::Matrix out4(outputs, 3);
		
		xn::DepthMetaData depthMD;
		xn::SceneMetaData sceneMD;
		xn::ImageMetaData imageMD;
		
		depth.GetMetaData(depthMD);
		user.GetUserPixels(0, sceneMD);
		image.GetMetaData(imageMD);
		
		context.WaitNoneUpdateAll();
		
		t_jit_matrix_info tmi;
		memset(&tmi, 0, sizeof(tmi));
		tmi.dimcount = 2;
		tmi.planecount = 1;
		tmi.dimstride[0] = 4;
		tmi.dimstride[1] = depthMD.XRes()*4;
		int width = tmi.dim[0] = depthMD.XRes();
		int height = tmi.dim[1] = depthMD.YRes();
		tmi.type = _jit_sym_float32;
		
		out1.resizeTo(&tmi);
		
		tmi.planecount = 1;
		tmi.dimstride[0] = 1;
		tmi.dimstride[1] = depthMD.XRes();
		tmi.type = _jit_sym_char;
		out2.resizeTo(&tmi);
		
		tmi.planecount = 4;
		tmi.dimstride[0] = 4;
		tmi.dimstride[1] = depthMD.XRes()*4;
		tmi.type = _jit_sym_char;
		out3.resizeTo(&tmi);
		
		const XnDepthPixel* pDepth = depthMD.Data();
		float *depthData = (float*)out1.data();
		
		//Copy depth data
		int x,y;
		for (y=0; y<height; y++)
		{
			for (x=0; x<width; x++)
			{
				depthData[0] = (float)pDepth[0]/powf(2, 15);
				
				depthData++;
				pDepth++;
			}
		}
		
		//Get the users
		unsigned char *userData = (unsigned char*)out2.data();
		const XnLabel* pLabels = sceneMD.Data();
		for (y=0; y<height; y++)
		{
			for (x=0; x<width; x++)
			{
				userData[0] = pLabels[0];
				
				userData++;
				pLabels++;
			}
		}
		
		//Get the colors
		const XnRGB24Pixel* pPixels = imageMD.RGB24Data();
		unsigned char *pixData = (unsigned char*)out3.data();
		for (y=0; y<height; y++)
		{
			for (x=0; x<width; x++)
			{
				pixData[0] = 0;
				pixData[1] = pPixels[0].nRed;
				pixData[2] = pPixels[0].nGreen;
				pixData[3] = pPixels[0].nBlue;
				
				pixData+=4;
				pPixels++;
			}
		}
		
		//For all the users -- output the joint info...
		XnUserID aUsers[15];
		XnUInt16 nUsers = 15;
		user.GetUsers(aUsers, nUsers);
		
		int rUsers = 0;
		
		xn::SkeletonCapability sc = user.GetSkeletonCap();
		
		int i;
		for (i=0; i<nUsers; i++)
		{
			if (user.GetSkeletonCap().IsTracking(aUsers[i]))
				rUsers++;
		}
		
		tmi.dimcount = 2;
		tmi.planecount = 3;
		tmi.dimstride[0] = 3*4;
		tmi.dimstride[1] = 24*3*4;
		tmi.dim[0] = 24;
		tmi.dim[1] = rUsers;
		tmi.type = _jit_sym_float32;
		out4.resizeTo(&tmi);
		
		
		float *sData = (float*)out4.data();
		
			
		if (rUsers == 0)
		{
			int n;
			for (n=0; n<24; n++)
			{					
				sData[0] = 0;
				sData[1] = 0;
				sData[2] = 0;
				
				sData+=3;
			}
		}
		else
		{
			for (i=0; i<nUsers; i++)
			{
				if (user.GetSkeletonCap().IsTracking(aUsers[i]))
				{
					int n;
					for (n=0; n<24; n++)
					{
						XnSkeletonJointPosition jp;
						user.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], (XnSkeletonJoint)(n+1), jp);
						
						sData[0] = (1280 - jp.position.X) / 2560;
						sData[1] = (1280 - jp.position.Y) / 2560;
						sData[2] = jp.position.Z * 7.8125 / 10000;
						
//						if (n == 0)
//						{
//							post("%f %f %f\n", sData[0], sData[1], sData[2]);
//						}
						
						sData+=3;
					}
				}
			}
		}
		//post("%i\n", rUsers);
	}
Example #19
void glut_display() {
	xn::DepthMetaData pDepthMapMD;
	xn::ImageMetaData pImageMapMD;
#ifdef DEBUGOUT
	ofstream datei;
#endif

	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();
	gluPerspective(45, WINDOW_SIZE_X/WINDOW_SIZE_Y, 1000, 5000);
//	glOrtho(0, WINDOW_SIZE_X, WINDOW_SIZE_Y, 0, -128, 128);

	glMatrixMode(GL_TEXTURE);
	glLoadIdentity();
//	glTranslatef(-12.8/640.0, 9.0/480.0, 0);
//	glTranslatef(-12.8/630.0, 9.0/480.0,0);
	glScalef(scalex, scaley, 1.0);
	glTranslatef(transx/630, transy/480, 0.0);

	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
	
	rot_angle+=0.7;

	// Wait for new data from the depth generator
	nRetVal = context.WaitAndUpdateAll();
	checkError("Error while updating the data", nRetVal);

	// Read the current depth metadata
	depth.GetMetaData(pDepthMapMD);
	// Read the current depth map
	const XnDepthPixel* pDepthMap = depth.GetDepthMap();

	if(maxdepth==-1)
		maxdepth = getMaxDepth(pDepthMap);

	// Read the current image metadata
	image.GetMetaData(pImageMapMD);
	// Read the current image
	const XnRGB24Pixel* pImageMap = image.GetRGB24ImageMap();

	glColor3f(1, 1, 1);
//	XnDepthPixel maxdepth = depth.GetDeviceMaxDepth();
	const unsigned int xres = pDepthMapMD.XRes();
	const unsigned int yres = pDepthMapMD.YRes();

#ifdef DEBUGOUT
	datei.open("daniel.txt", ios::out);
#endif

	for(unsigned int y=0; y<yres-1; y++) {
		for(unsigned int x=0; x<xres; x++) {
			aDepthMap[x+y*xres] = static_cast<GLubyte>(static_cast<float>(pDepthMap[x+y*xres])/static_cast<float>(maxdepth)*255);
		}
	}

	/*
	glEnable(GL_TEXTURE_2D);
	glPushMatrix();
	glLoadIdentity();
	glTranslatef(-800, 0, -2000);
	glBindTexture(GL_TEXTURE_2D, texture_rgb);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 640, 480, 0, GL_RGB, GL_UNSIGNED_BYTE, pImageMap);
	glBegin(GL_QUADS);
		glTexCoord2f(0,1); glVertex3f(0,0,0);
		glTexCoord2f(1,1); glVertex3f(640,0,0);
		glTexCoord2f(1,0); glVertex3f(640,480,0);
		glTexCoord2f(0,0); glVertex3f(0,480,0);
	glEnd();
	glPopMatrix();

	glPushMatrix();
	glLoadIdentity();
	glTranslatef(-800, 600, -2000);
	glBindTexture(GL_TEXTURE_2D, texture_depth);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE8, 640, 480, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, aDepthMap);
	glBegin(GL_QUADS);
		glTexCoord2f(0,1); glVertex3f(0,0,0);
		glTexCoord2f(1,1); glVertex3f(640,0,0);
		glTexCoord2f(1,0); glVertex3f(640,480,0);
		glTexCoord2f(0,0); glVertex3f(0,480,0);
	glEnd();
	glPopMatrix();*/

	glPushMatrix();
	glLoadIdentity();
	glTranslatef(-100, -100, -2000);
	glRotatef(cx,0,1,0);
	glRotatef(cy,1,0,0);
	glTranslatef(-320, -240, 1000);
	glEnable(GL_TEXTURE_2D);
	glBindTexture(GL_TEXTURE_2D, texture_rgb);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 640, 480, 0, GL_RGB, GL_UNSIGNED_BYTE, pImageMap);
	glBegin(GL_POINTS);
	for(unsigned int y=0; y<yres-1; y++) {
		for(unsigned int x=0; x<630; x++) {
			if(pDepthMap[x+y*xres]!=0) {
				glTexCoord2f(static_cast<float>(x)/static_cast<float>(630), static_cast<float>(y)/static_cast<float>(480)); 
				glVertex3f(x, (yres-y), -pDepthMap[x+y*xres]/2.00);
#ifdef DEBUGOUT
				datei << t_gamma[pDepthMap[x+y*xres]] << endl;
#endif
			}
		}
	}
	glEnd();
	glPopMatrix();
	glDisable(GL_TEXTURE_2D);
	glutSwapBuffers();
#ifdef DEBUGOUT
	datei.close();
	exit(-1);
#endif
}
Example #20
int main(int argc, char **argv)
{
    XnStatus nRetVal = XN_STATUS_OK;
    xn::EnumerationErrors errors;
    
    if( USE_RECORED_DATA ){
        g_Context.Init();
        g_Context.OpenFileRecording(RECORD_FILE_PATH);
        xn::Player player;
        
        // Get the Player node
        nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_PLAYER, player);
        CHECK_RC(nRetVal, "Find player");
        
        LOG_D("PlaybackSpeed: %d", player.GetPlaybackSpeed());
        
        xn::NodeInfoList nodeList;
        player.EnumerateNodes(nodeList);
        for( xn::NodeInfoList::Iterator it = nodeList.Begin();
            it != nodeList.End(); ++it){
            
            if( (*it).GetDescription().Type == XN_NODE_TYPE_IMAGE ){
                nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator);
                CHECK_RC(nRetVal, "Find image node");
                LOG_D("%s", "ImageGenerator created.");
            }
            else if( (*it).GetDescription().Type == XN_NODE_TYPE_DEPTH ){
                nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
                CHECK_RC(nRetVal, "Find depth node");
                LOG_D("%s", "DepthGenerator created.");            
            }
            else{
                LOG_D("%s %s %s", ::xnProductionNodeTypeToString((*it).GetDescription().Type ),
                      (*it).GetInstanceName(),
                      (*it).GetDescription().strName);
            }
        }
    }
    else{
        LOG_I("Reading config from: '%s'", CONFIG_XML_PATH);
        
        nRetVal = g_Context.InitFromXmlFile(CONFIG_XML_PATH, g_scriptNode, &errors);
        if (nRetVal == XN_STATUS_NO_NODE_PRESENT){
            XnChar strError[1024];
            errors.ToString(strError, 1024);
            LOG_E("%s\n", strError);
            return (nRetVal);
        }
        else if (nRetVal != XN_STATUS_OK){
            LOG_E("Open failed: %s", xnGetStatusString(nRetVal));
            return (nRetVal);
        }
        
        nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
        CHECK_RC(nRetVal,"No depth");
        
        // Find the ImageGenerator node
        nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator);
        CHECK_RC(nRetVal, "Find image generator");
        
    }
    // Get the UserGenerator
    nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
    if(nRetVal!=XN_STATUS_OK){
        nRetVal = g_UserGenerator.Create(g_Context); 
        CHECK_RC(nRetVal, "Create user generator");
    }

    
    XnCallbackHandle hUserCallbacks, hCalibrationStart, hCalibrationComplete, hPoseDetected;
    if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)){
        LOG_E("%s", "Supplied user generator doesn't support skeleton");
        return 1;
    }
    nRetVal = g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
    CHECK_RC(nRetVal, "Register to user callbacks");

    g_SkeletonCap = g_UserGenerator.GetSkeletonCap();
    nRetVal = g_SkeletonCap.RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart);
    CHECK_RC(nRetVal, "Register to calibration start");

    nRetVal = g_SkeletonCap.RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete);
    CHECK_RC(nRetVal, "Register to calibration complete");
    
    if (g_SkeletonCap.NeedPoseForCalibration()){
        g_bNeedPose = TRUE;
        if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)){
            LOG_E("%s", "Pose required, but not supported");
            return 1;
        }
        nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
        CHECK_RC(nRetVal, "Register to Pose Detected");
        g_SkeletonCap.GetCalibrationPose(g_strPose);
    }
    
    g_SkeletonCap.SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
    
    nRetVal = g_Context.StartGeneratingAll();
    CHECK_RC(nRetVal, "StartGenerating");
    
    // Create the image buffer used for display
    XnMapOutputMode mapMode;
    g_ImageGenerator.GetMapOutputMode(mapMode);
    g_rgbImage = cvCreateImage(cvSize(mapMode.nXRes, mapMode.nYRes), IPL_DEPTH_8U, 3);

    LOG_I("%s", "Starting to run");
    if(g_bNeedPose){
        LOG_I("%s", "Assume calibration pose");
    }

    xn::Recorder recorder;
    if( DO_RECORED && !USE_RECORED_DATA ){
        // Create the recorder
        LOG_I("%s", "Setup Recorder");
        nRetVal = recorder.Create(g_Context);
        CHECK_RC(nRetVal, "Create recorder");
        
        // Configure the recording destination
        nRetVal = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, RECORD_FILE_PATH);
        CHECK_RC(nRetVal, "Set recorder destination file");
        
        // Add the depth and RGB camera streams as recording targets
        nRetVal = recorder.AddNodeToRecording(g_DepthGenerator, XN_CODEC_NULL);
        CHECK_RC(nRetVal, "Add depth node to recording");
        nRetVal = recorder.AddNodeToRecording(g_ImageGenerator, XN_CODEC_NULL);
        CHECK_RC(nRetVal, "Add image node to recording");
        
        LOG_I("%s", "Recorder setup done.");
    }

    while (!xnOSWasKeyboardHit())
    {
        g_Context.WaitOneUpdateAll(g_UserGenerator);
        if( DO_RECORED  && !USE_RECORED_DATA ){
            nRetVal = recorder.Record();
            CHECK_RC(nRetVal, "Record");
        }

        // Get the raw RGB camera image
        xn::ImageMetaData imageMetaData;
        g_ImageGenerator.GetMetaData(imageMetaData);
        // Copy the raw image data
        xnOSMemCopy(g_rgbImage->imageData, imageMetaData.RGB24Data(), g_rgbImage->imageSize);
        // Convert from RGB to BGR for display
        cvCvtColor(g_rgbImage, g_rgbImage, CV_RGB2BGR);

        // Get the user-label pixels from the UserGenerator
        xn::SceneMetaData sceneMetaData;
        g_UserGenerator.GetUserPixels(0, sceneMetaData);
        
        XnUserID allUsers[MAX_NUM_USERS];
        XnUInt16 nUsers = MAX_NUM_USERS;
        g_UserGenerator.GetUsers(allUsers, nUsers);
        for (int i = 0; i < nUsers; i++) {
            
            // Check whether this user is calibrated and being tracked
            if (g_SkeletonCap.IsTracking(allUsers[i])) {
                // Draw the skeleton
                DrawSkelton(allUsers[i], i);
            }
        }
        
        // Display
        cvShowImage("User View", g_rgbImage);

        // Exit when ESC is pressed
        if (cvWaitKey(10) == 27) {
            break;
        }
    }

    if( !USE_RECORED_DATA ){
        g_scriptNode.Release();
    }
    g_DepthGenerator.Release();
    g_UserGenerator.Release();
    g_Context.Release();

    if (g_rgbImage != NULL) {
        cvReleaseImage(&g_rgbImage);
    }
    g_Context.Shutdown();

    
}
Example #21
cv::Mat xncv::captureRGB(const xn::ImageGenerator& generator)
{
	xn::ImageMetaData meta;
	generator.GetMetaData(meta);
	return cv::Mat(meta.YRes(), meta.XRes(),cv::DataType<cv::Vec3b>::type, (void*)meta.RGB24Data());
}
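Note that the returned cv::Mat wraps OpenNI's internal buffer without copying, so it is only valid until the next update. A typical call site clones it and converts to BGR for OpenCV display (a sketch):

cv::Mat rgb = xncv::captureRGB( generator ).clone(); // detach from OpenNI's buffer
cv::cvtColor( rgb, rgb, CV_RGB2BGR );                // OpenCV windows expect BGR
cv::imshow( "rgb", rgb );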
Example #22
int main ( int argc, char ** argv )
{
	// 
	// Initializing Calibration Related
	// 

	// ARTagHelper artagHelper	( colorImgWidth, colorImgHeight, ARTAG_CONFIG_FILE,		ARTAG_POS_FILE );
	ARTagHelper artagHelper		( colorImgWidth, colorImgHeight, ARTAG_CONFIG_A3_FILE,	ARTAG_POS_A3_FILE );

	ExtrCalibrator extrCalibrator ( 6, KINECT_INTR_FILE, KINECT_DIST_FILE );
	// unsigned char * kinectImgBuf = new unsigned char[colorImgWidth * colorImgHeight * 3];

	// 
	// Initializing OpenNI Settings
	// 

	int ctlWndKey = -1;

	XnStatus nRetVal = XN_STATUS_OK;
	xn::EnumerationErrors errors;

	// 
	// Initialize Context Object
	// 
	
	nRetVal = g_Context.InitFromXmlFile ( CONFIG_XML_PATH, g_ScriptNode, &errors );
	if ( nRetVal == XN_STATUS_NO_NODE_PRESENT ) 
	{
		XnChar strError[1024];
		errors.ToString ( strError, 1024 );
		printf ( "XN_STATUS_NO_NODE_PRESENT:\n%s\n", strError );
		
		system ( "pause" );
		return ( nRetVal );
	}
	else if ( nRetVal != XN_STATUS_OK )
	{
		printf ( "Open FAILED:\n%s\n", xnGetStatusString ( nRetVal ) );	
	
		system ( "pause" );
		return ( nRetVal );
	}

	// 
	// Handle the Depth Generator Node.
	// 
	
	nRetVal = g_Context.FindExistingNode ( XN_NODE_TYPE_DEPTH, g_DepthGen );
	if ( nRetVal != XN_STATUS_OK )
	{
		printf ( "No Depth Node Exists! Please Check your XML.\n" );
		return ( nRetVal );
	}
	
	// 
	// Handle the Image Generator node
	// 
	
	nRetVal = g_Context.FindExistingNode ( XN_NODE_TYPE_IMAGE, g_ImageGen );
	if ( nRetVal != XN_STATUS_OK )
	{
		printf ( "No Image Node Exists! Please Check your XML.\n" );
		return ( nRetVal );
	}

	// g_DepthGen.GetAlternativeViewPointCap().SetViewPoint( g_ImageGen );

	g_DepthGen.GetMetaData ( g_DepthMD );
	g_ImageGen.GetMetaData ( g_ImageMD );

	assert ( g_ImageMD.PixelFormat() == XN_PIXEL_FORMAT_RGB24 );
	assert ( g_DepthMD.PixelFormat() == XN_PIXEL_FORMAT_GRAYSCALE_16_BIT );

	// 
	// Create OpenCV Showing Window and Related Data Structures
	// 

	cv::namedWindow ( IMAGE_WIN_NAME, CV_WINDOW_AUTOSIZE );
	cv::namedWindow ( DEPTH_WIN_NAME, CV_WINDOW_AUTOSIZE );

	cv::Mat depthImgMat  ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_16UC1 );
	cv::Mat depthImgShow ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_8UC3  );
	cv::Mat colorImgMat  ( g_ImageMD.YRes(), g_ImageMD.XRes(), CV_8UC3 );

#define ARTAG_DEBUG

#ifdef ARTAG_DEBUG
	
	cv::setMouseCallback ( IMAGE_WIN_NAME, ClickOnMouse, 0 );

#endif

	bool flipColor = true;

	// 
	// Start to Loop
	// 

	while ( ctlWndKey != ESC_KEY_VALUE ) 
	{
		// 
		// Try to Get New Frame From Kinect
		// 
	
		nRetVal = g_Context.WaitAnyUpdateAll ();

		g_DepthGen.GetMetaData ( g_DepthMD );
		g_ImageGen.GetMetaData ( g_ImageMD );

		assert ( g_DepthMD.FullXRes() == g_DepthMD.XRes() && g_DepthMD.FullYRes() == g_DepthMD.YRes() );
		assert ( g_ImageMD.FullXRes() == g_ImageMD.XRes() && g_ImageMD.FullYRes() == g_ImageMD.YRes() );

		GlobalUtility::CopyColorRawBufToCvMat8uc3 ( (const XnRGB24Pixel *)(g_ImageMD.Data()), colorImgMat );

#ifdef SHOW_DEPTH_WINDOW

		GlobalUtility::CopyDepthRawBufToCvMat16u  ( (const XnDepthPixel *)(g_DepthMD.Data()), depthImgMat );
		// GlobalUtility::ConvertDepthCvMat16uToYellowCvMat ( depthImgMat, depthImgShow );
		GlobalUtility::ConvertDepthCvMat16uToGrayCvMat ( depthImgMat, depthImgShow );

		cv::imshow ( DEPTH_WIN_NAME, depthImgShow );

#endif

		ctlWndKey = cvWaitKey ( 15 );

		if ( ctlWndKey == 'f' || ctlWndKey == 'F' ) 
		{
			artagHelper.Clear();

			artagHelper.FindMarkerCorners ( (unsigned char *)(g_ImageMD.Data()) );
			artagHelper.PrintMarkerCornersPos2dInCam ();
			extrCalibrator.ExtrCalib ( artagHelper );

			std::cout << "\nKinect Extr Matrix:" << std::endl;
			extrCalibrator.PrintMatrix ( extrCalibrator.GetMatrix ( ExtrCalibrator::EXTR ) );

			std::cout	<< "Reprojection ERROR = " 
						<< extrCalibrator.ComputeReprojectionErr ( artagHelper ) << std::endl
						// << extrCalibrator.ComputeReprojectionErr ( artagHelper.m_MarkerCornerPosCam2d, artagHelper.m_MarkerCornerPos3d, 24 ) << std::endl
						<< "Valid Marker Number = " << artagHelper.GetValidMarkerNumber() << std::endl
						<< std::endl;

			extrCalibrator.SaveMatrix ( ExtrCalibrator::EXTR, KINECT_EXTR_FILE );
		}
		if ( ctlWndKey == 's' || ctlWndKey == 'S' )
		{
			flipColor = !flipColor;
		}

		if ( flipColor ) { 
			cv::cvtColor ( colorImgMat, colorImgMat, CV_RGB2BGR );
		}

		artagHelper.DrawMarkersInCameraImage ( colorImgMat );
		cv::imshow ( IMAGE_WIN_NAME, colorImgMat );
	}

	g_Context.Release ();
	
	system	( "pause" );
	exit	( EXIT_SUCCESS );

}
Example #23
bool DataCapture::initialise()
{
    context_.Shutdown();

    XnStatus rc = context_.Init(); 
    if( rc != XN_STATUS_OK )
    {
        std::cout << "Init: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    rc = depthGen_.Create(context_);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "depthGen.Create: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    rc = imageGen_.Create(context_);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "imageGen.Create: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    rc = imageGen_.SetPixelFormat(XN_PIXEL_FORMAT_RGB24);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "SetPixelFormat: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    XnMapOutputMode imgMode;
    imgMode.nXRes = 640; // XN_VGA_X_RES
    imgMode.nYRes = 480; // XN_VGA_Y_RES
    imgMode.nFPS = 30;
    rc = imageGen_.SetMapOutputMode(imgMode);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "image SetMapOutputMode: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    rc = depthGen_.SetMapOutputMode(imgMode);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "depth SetMapOutputMode: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    depthGen_.GetMetaData(depthMd_);
    std::cout << "Depth offset " << depthMd_.XOffset() << " " << depthMd_.YOffset() << std::endl;

    // set the depth image viewpoint
    depthGen_.GetAlternativeViewPointCap().SetViewPoint(imageGen_);

    // read off the depth camera field of view.  This is the FOV corresponding to
    // the IR camera viewpoint, regardless of the alternative viewpoint settings.
    XnFieldOfView fov;
    rc = depthGen_.GetFieldOfView(fov);
    std::cout << "Fov: " << fov.fHFOV << " " << fov.fVFOV << std::endl;

    pDepthData_ = new char [640 * 480];
    pRgbData_ = new char [640 * 480 * 3];

    return true;
}
Example #24
// this function is called each frame
void glutDisplay (void)
{

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
	g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
//	g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	xn::ImageMetaData imageMD;
	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
	#ifdef USE_GLUT
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#else
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitAndUpdateAll();
	}

	// Process the data
	// DRAW
	// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
	g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
//	g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();

	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);

	DrawDepthMap(depthMD, imageMD, sceneMD, g_nPlayer);

	if (g_nPlayer != 0)
	{
		XnPoint3D com;
		g_UserGenerator.GetCoM(g_nPlayer, com);
		if (com.Z == 0)
		{
			g_nPlayer = 0;
			FindPlayer();
		}
	}

	#ifdef USE_GLUT
	glutSwapBuffers();
	#endif
}
Example #25
int main(int argc, char **argv) {
	nRetVal = XN_STATUS_OK;

	/* Initialize the context (camera data) */
	nRetVal = context.Init();
	checkError("Error initializing the context", nRetVal)?0:exit(-1);



	/* Create the depth generator */
	nRetVal = depth.Create(context);
	checkError("Error creating the depth generator", nRetVal)?0:exit(-1);

	/* Configure the depth generator */
	XnMapOutputMode outputModeDepth;
	outputModeDepth.nXRes = 640;
	outputModeDepth.nYRes = 480;
	outputModeDepth.nFPS = 30;
	nRetVal = depth.SetMapOutputMode(outputModeDepth);
	checkError("Error configuring the depth generator", nRetVal)?0:exit(-1);


	/* Create the image generator */
	nRetVal = image.Create(context);
	checkError("Error creating the image generator", nRetVal)?0:exit(-1);

	/* Configure the image generator */
	XnMapOutputMode outputModeImage;
	outputModeImage.nXRes = 640;
	outputModeImage.nYRes = 480;
	outputModeImage.nFPS = 30;
	nRetVal = image.SetMapOutputMode(outputModeImage);
	checkError("Error configuring the image generator", nRetVal)?0:exit(-1);

	/* Start the generators - full speed ahead! */
	nRetVal = context.StartGeneratingAll();
	checkError("Error starting the generators", nRetVal)?0:exit(-1);

	/* Initialize GLUT */
	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
	glutInitWindowSize(WINDOW_SIZE_X, WINDOW_SIZE_Y);
	glutInitWindowPosition(300,150);
	win = glutCreateWindow("kinect-head-tracking");
	glClearColor(0, 0, 0, 0.0); // Background color: black
	glEnable(GL_DEPTH_TEST);          // Enable depth testing
	glDepthFunc(GL_LEQUAL);
//	glEnable(GL_CULL_FACE);           // Enable backface culling
//	glEnable(GL_ALPHA_TEST);
//	glAlphaFunc(GL_GEQUAL, 1);

	/* Textures */
	glGenTextures(1, &texture_rgb);
	glBindTexture(GL_TEXTURE_2D, texture_rgb);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);


	glGenTextures(1, &texture_depth);
	glBindTexture(GL_TEXTURE_2D, texture_depth);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);

	glutDisplayFunc(glut_display);
	glutIdleFunc(glut_idle);
	glutMouseFunc(glut_mouse);
	glutMotionFunc(glut_mouse_motion);
	glutKeyboardFunc(glut_keyboard);
	glutMainLoop();
	return 0;
}
Example #26
void start_kinect() {
    XnStatus nRetVal = XN_STATUS_OK;
    xn::EnumerationErrors errors;
    UsersCount = 0;

    const char *fn = NULL;
    if    (fileExists(SAMPLE_XML_PATH)) fn = SAMPLE_XML_PATH;
    else if (fileExists(SAMPLE_XML_PATH_LOCAL)) fn = SAMPLE_XML_PATH_LOCAL;
    else {
        printf("Could not find '%s' nor '%s'. Aborting.\n" , SAMPLE_XML_PATH, SAMPLE_XML_PATH_LOCAL);
        //return XN_STATUS_ERROR;
    }
    printf("Reading config from: '%s'\n", fn);

    nRetVal = g_Context.InitFromXmlFile(fn, g_scriptNode, &errors);
    if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
    {
        XnChar strError[1024];
        errors.ToString(strError, 1024);
        printf("%s\n", strError);
        //return (nRetVal);
    }
    else if (nRetVal != XN_STATUS_OK)
    {
        printf("Open failed: %s\n", xnGetStatusString(nRetVal));
        //return (nRetVal);
    }

    nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);      
    CHECK_RC(nRetVal,"No depth");
    
	  nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);      
    CHECK_RC(nRetVal,"No image");

    nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
    if (nRetVal != XN_STATUS_OK)
    {
        nRetVal = g_UserGenerator.Create(g_Context);
        CHECK_RC(nRetVal, "Find user generator");
    }

    XnCallbackHandle hUserCallbacks, hCalibrationStart, hCalibrationComplete, hPoseDetected;
    if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
    {
        printf("Supplied user generator doesn't support skeleton\n");
        //return 1;
    }
    nRetVal = g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
    CHECK_RC(nRetVal, "Register to user callbacks");
    nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart);
    CHECK_RC(nRetVal, "Register to calibration start");
    nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete);
    CHECK_RC(nRetVal, "Register to calibration complete");

    if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
    {
        g_bNeedPose = TRUE;
        if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
        {
            printf("Pose required, but not supported\n");
            //return 1;
        }
        nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
        CHECK_RC(nRetVal, "Register to Pose Detected");
        g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
    }

    g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

    nRetVal = g_Context.StartGeneratingAll();
    CHECK_RC(nRetVal, "StartGenerating");

    XnUserID aUsers[MAX_NUM_USERS];
    XnUInt16 nUsers;

    XnSkeletonJointTransformation anyjoint;

    printf("Starting to run\n");
    if(g_bNeedPose)
    {
        printf("Assume calibration pose\n");
    }
    XnUInt32 epochTime = 0;
    while (!xnOSWasKeyboardHit())
    {
        g_Context.WaitOneUpdateAll(g_UserGenerator);
        // print the torso information for the first user already tracking
        nUsers=MAX_NUM_USERS;
        g_UserGenerator.GetUsers(aUsers, nUsers);
        int numTracked=0;
        int userToPrint=-1;

        WriteLock w_lock(myLock);
        pDepthMap = g_depth.GetDepthMap();
        pPixelMap = g_image.GetRGB24ImageMap();

        g_depth.GetMetaData(g_depthMD);
        g_image.GetMetaData(g_imageMD);
        pPixelPoint = g_imageMD.RGB24Data();


        for(XnUInt16 i=0; i<nUsers; i++) {
    			if(g_UserGenerator.GetSkeletonCap().IsTracking(aUsers[i])==FALSE)
    				continue;
    			{
    				
    				
    				/* Write all new joint positions into the structure */
    				/* Head */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_HEAD,anyjoint);
    				Skeletons[i]["Head"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["Head"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["Head"]["Z"] = anyjoint.position.position.Z;
    				/* Neck */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_NECK,anyjoint);
    				Skeletons[i]["Neck"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["Neck"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["Neck"]["Z"] = anyjoint.position.position.Z;
    				/* Left Shoulder */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_LEFT_SHOULDER,anyjoint);
    				Skeletons[i]["LeftShoulder"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["LeftShoulder"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["LeftShoulder"]["Z"] = anyjoint.position.position.Z;
    				/* Right Shoulder */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_RIGHT_SHOULDER,anyjoint);
    				Skeletons[i]["RightShoulder"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["RightShoulder"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["RightShoulder"]["Z"] = anyjoint.position.position.Z;
    				/* Torso */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_TORSO,anyjoint);
    				Skeletons[i]["Torso"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["Torso"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["Torso"]["Z"] = anyjoint.position.position.Z;
    				/* Left Elbow */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_LEFT_ELBOW,anyjoint);
    				Skeletons[i]["LeftElbow"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["LeftElbow"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["LeftElbow"]["Z"] = anyjoint.position.position.Z;
    				/* Right Elbow */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_RIGHT_ELBOW,anyjoint);
    				Skeletons[i]["RightElbow"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["RightElbow"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["RightElbow"]["Z"] = anyjoint.position.position.Z;
    				/* Left Hip */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_LEFT_HIP,anyjoint);
    				Skeletons[i]["LeftHip"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["LeftHip"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["LeftHip"]["Z"] = anyjoint.position.position.Z;
    				/* Right Hip */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_RIGHT_HIP,anyjoint);
    				Skeletons[i]["RightHip"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["RightHip"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["RightHip"]["Z"] = anyjoint.position.position.Z;
    				/* Left Hand */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_LEFT_HAND,anyjoint);
    				Skeletons[i]["LeftHand"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["LeftHand"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["LeftHand"]["Z"] = anyjoint.position.position.Z;
    				/* Right Hand */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_RIGHT_HAND,anyjoint);
    				Skeletons[i]["RightHand"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["RightHand"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["RightHand"]["Z"] = anyjoint.position.position.Z;
    				/* Left Knee */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_LEFT_KNEE,anyjoint);
    				Skeletons[i]["LeftKnee"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["LeftKnee"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["LeftKnee"]["Z"] = anyjoint.position.position.Z;
    				/* Right Knee */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_RIGHT_KNEE,anyjoint);
    				Skeletons[i]["RightKnee"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["RightKnee"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["RightKnee"]["Z"] = anyjoint.position.position.Z;
    				/* Left Foot */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_LEFT_FOOT,anyjoint);
    				Skeletons[i]["LeftFoot"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["LeftFoot"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["LeftFoot"]["Z"] = anyjoint.position.position.Z;
    				/* Right Foot */
    				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_RIGHT_FOOT,anyjoint);
    				Skeletons[i]["RightFoot"]["X"] = anyjoint.position.position.X;
    				Skeletons[i]["RightFoot"]["Y"] = anyjoint.position.position.Y;
    				Skeletons[i]["RightFoot"]["Z"] = anyjoint.position.position.Z;
    
    				/*printf("user %d: head at (%6.2f,%6.2f,%6.2f)\n",aUsers[i],
                                                                      Skeletons[i]["Head"]["X"],
                                                                      Skeletons[i]["Head"]["Y"],
                                                                      Skeletons[i]["Head"]["Z"]);*/
    			}				
        }
        
    }
    g_scriptNode.Release();
    g_depth.Release();
    g_image.Release();
    g_UserGenerator.Release();
    g_Context.Release();
}
Example #27
int main ( int argc, char * argv[] )
{
	// 
	// Initialize OpenNI Settings
	// 

	XnStatus nRetVal = XN_STATUS_OK;
	xn::ScriptNode scriptNode;
	xn::EnumerationErrors errors;

	// 
	// Initialize Context Object
	// 

	nRetVal = g_Context.InitFromXmlFile ( CONFIG_XML_PATH, scriptNode, &errors );
	
	if ( nRetVal == XN_STATUS_NO_NODE_PRESENT ) {
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf ( "XN_STATUS_NO_NODE_PRESENT:\n%s\n", strError );
		system ( "pause" );

		return ( nRetVal );
	}
	else if ( nRetVal != XN_STATUS_OK ) {
		printf ( "Open failed: %s\n", xnGetStatusString(nRetVal) );	
		system ( "pause" );

		return ( nRetVal );
	}

	// 
	// Handle Image & Depth Generator Node
	// 

	bool colorFlag = true;
	bool depthFlag = true;

	nRetVal = g_Context.FindExistingNode ( XN_NODE_TYPE_DEPTH, g_DepthGen );
	if ( nRetVal != XN_STATUS_OK ) {
		printf("No depth node exists!\n");
		depthFlag = false;
	}
	nRetVal = g_Context.FindExistingNode ( XN_NODE_TYPE_IMAGE, g_ImageGen );
	if ( nRetVal != XN_STATUS_OK ) {
		printf("No image node exists!\n");
		colorFlag = false;
	}

	// g_DepthGen.GetAlternativeViewPointCap().SetViewPoint( g_ImageGen );

	if ( depthFlag ) {
		g_DepthGen.GetMetaData ( g_DepthMD );
		assert ( g_DepthMD.PixelFormat() == XN_PIXEL_FORMAT_GRAYSCALE_16_BIT );
	}
	if ( colorFlag ) {
		g_ImageGen.GetMetaData ( g_ImageMD );
		assert ( g_ImageMD.PixelFormat() == XN_PIXEL_FORMAT_RGB24 );
	}

	g_DepthImgShow = cv::Mat ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_8UC1  );
	g_DepthImgMat  = cv::Mat ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_16UC1 );
	g_ColorImgMat  = cv::Mat ( g_ImageMD.YRes(), g_ImageMD.XRes(), CV_8UC3  );
	
	// 
	// Start to Loop
	// 

	bool flipColor = true;
	int ctlWndKey = -1;

	g_StartTickCount = GetTickCount();
	g_HeadTrackingFrameCount = 0;
	
	while ( ctlWndKey != ESC_KEY_VALUE ) 
	{
		nRetVal = g_Context.WaitOneUpdateAll ( g_DepthGen );
		// nRetVal = g_Context.WaitAnyUpdateAll();

#ifdef HANDLING_IMAGE_DATA

		if ( colorFlag ) 
		{
			g_ImageGen.GetMetaData ( g_ImageMD );

			assert ( g_ImageMD.FullXRes() == g_ImageMD.XRes() );
			assert ( g_ImageMD.FullYRes() == g_ImageMD.YRes() );

			GlobalUtility::CopyColorRawBufToCvMat8uc3 ( (const XnRGB24Pixel *)(g_ImageMD.Data()), g_ColorImgMat );
	
			if ( ctlWndKey == 's' || ctlWndKey == 'S' ) {												// Switch
				flipColor = !flipColor;
			}
			if ( flipColor ) {
				cv::cvtColor ( g_ColorImgMat, g_ColorImgMat, CV_RGB2BGR );
			}

			cv::namedWindow ( IMAGE_WIN_NAME, CV_WINDOW_AUTOSIZE );
			cv::imshow ( IMAGE_WIN_NAME, g_ColorImgMat );
		}

#endif

#ifdef HANDLING_DEPTH_DATA

		if ( depthFlag ) 
		{
			g_DepthGen.GetMetaData(g_DepthMD);
		
			// assert ( g_DepthMD.FullXRes() == g_DepthMD.XRes() );
			// assert ( g_DepthMD.FullYRes() == g_DepthMD.YRes() );

			GlobalUtility::CopyDepthRawBufToCvMat16u ( (const XnDepthPixel *)(g_DepthMD.Data()), g_DepthImgMat );
			GlobalUtility::ConvertDepthCvMat16uToGrayCvMat ( g_DepthImgMat, g_DepthImgShow );

			/*
			cv::putText(colorImgMat, 
						GlobalUtility::DoubleToString(g_ImageMD.FPS()) + " FPS", 
						cv::Point(10, 450), 
						cv::FONT_ITALIC, 
						0.7, 
						cv::Scalar(255, 255, 255, 0),
						2,
						8,
						false);
						*/

			cv::namedWindow ( DEPTH_WIN_NAME, CV_WINDOW_AUTOSIZE );
			cv::imshow ( DEPTH_WIN_NAME, g_DepthImgShow );
		}

#endif

		XnFieldOfView fov;
		g_DepthGen.GetFieldOfView( fov );

		std::cout	<< "HFov = " << fov.fHFOV << std::endl
					<< "VFov = " << fov.fVFOV << std::endl;

		ctlWndKey = cvWaitKey ( 5 );

		g_HeadTrackingFrameCount++;
		g_CurrTickCount = GetTickCount();
		std::cout	<< "FPS = " 
					<< 1000 / ( ( double )( g_CurrTickCount - g_StartTickCount ) / ( double )( g_HeadTrackingFrameCount ) ) 
					<< std::endl;
	}

	g_Context.Release ();

	exit ( EXIT_SUCCESS );
}
Example #28
bool OpenNIVideo::init() {

	//open the video 
	//if you are using a device, you can open the device
	//if you are using video streaming, you can initialize the connection
	//if you are using video files, you can read the configuration, cache the data, etc.

	xn::EnumerationErrors errors;

	int resolutionX = 640;
	int resolutionY = 480;
	unsigned int FPS = 30;

	XnStatus nRetVal = XN_STATUS_OK;

	nRetVal = context.Init();
	CHECK_RC(nRetVal, "context global init");

	//xn::NodeInfoList list;
	//nRetVal = context.EnumerateProductionTrees(XN_NODE_TYPE_DEVICE, NULL, list, &errors);
	//CHECK_RC(nRetVal, "enumerate production tree");
	
	// HandsGenerator hands;
	//UserGenerator user;
	//GestureGenerator gesture;
	//SceneAnalyzer scene;

	nRetVal = depth_generator.Create(context);
	CHECK_RC(nRetVal, "creating depth generator");

	nRetVal = image_generator.Create(context);
	CHECK_RC(nRetVal, "creating image generator");

	
	if(depth_generator.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
	{
		nRetVal = depth_generator.GetAlternativeViewPointCap().SetViewPoint(image_generator);
		CHECK_RC(nRetVal, "creating registered image/depth generator");
	}
	else
	{
		printf("WARNING: XN_CAPABILITY_ALTERNATIVE_VIEW_POINT not supported");
	}

	if (depth_generator.IsCapabilitySupported(XN_CAPABILITY_FRAME_SYNC))
	{
		if( depth_generator.GetFrameSyncCap().CanFrameSyncWith(image_generator)) {
			// frame sync is available but left disabled here; to enable it:
			//nRetVal=depth_generator.GetFrameSyncCap().FrameSyncWith(image_generator);
			//CHECK_RC(nRetVal, "creating frame sync image/depth generator");
		}
	}
	else
	{
		printf("WARNING: XN_CAPABILITY_FRAME_SYNC not supported");
	}

	XnMapOutputMode mode = {resolutionX,resolutionY,FPS};

	nRetVal = depth_generator.SetMapOutputMode(mode);
	CHECK_RC(nRetVal, "set output mode");

	// not needed if frame sync is enabled
	nRetVal = image_generator.SetMapOutputMode(mode);
	CHECK_RC(nRetVal, "set output mode");
	
	_depthBufferShort=new unsigned short[resolutionX*resolutionY];
	_depthBufferByte=new unsigned char[resolutionX*resolutionY];
	
	//we need to create one video stream for the color image
	_videoStreamList.push_back(new osgART::VideoStream());
	
	_videoStreamList[0]->allocateImage(resolutionX,resolutionY, 1, GL_RGB, GL_UNSIGNED_BYTE);

	//and a second video stream for the depth map
	_videoStreamList.push_back(new osgART::VideoStream());
	
//	_videoStreamList[1]->allocateImage(resolutionX,resolutionY, 1, GL_LUMINANCE, GL_FLOAT);
	_videoStreamList[1]->allocateImage(resolutionX,resolutionY, 1, GL_LUMINANCE, GL_UNSIGNED_BYTE);
	//_videoStreamList[1]->allocateImage(resolutionX,resolutionY, 1, GL_LUMINANCE, GL_UNSIGNED_SHORT);
	//_videoStreamList[1]->allocateImage(w, h, 1, GL_DEPTHCOMPONENT16, GL_UNSIGNED_BYTE);
	
	if (m_flip_vertical) 
	{
		_videoStreamList[0]->flipVertical();
		_videoStreamList[1]->flipVertical();
	}
	if (m_flip_horizontal) 
	{
		_videoStreamList[0]->flipHorizontal();
		_videoStreamList[1]->flipHorizontal();
	}
	return true;

}
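
init() allocates _depthBufferShort and _depthBufferByte but no matching cleanup appears in this example. A hedged sketch of a teardown counterpart (the method name and its existence are assumptions, not part of the original class):

// Sketch only: a plausible close() releasing what init() acquired.
bool OpenNIVideo::close() {

	context.StopGeneratingAll();	// stop the depth/image generators
	context.Release();				// release the OpenNI context

	delete [] _depthBufferShort;	// buffers new'd in init()
	delete [] _depthBufferByte;
	_depthBufferShort = NULL;
	_depthBufferByte = NULL;

	return true;
}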
Exemple #29
0
bool OpenNIVideo::update(osg::NodeVisitor* nv) {

	//this is the main function of your video plugin
	//you can either retrieve images from your video stream/camera/file
	//or communicate with a thread to synchronize and get the data out
	
	//the most important thing is to synchronize your data
	//and copy the result to the VideoImageStream used in this plugin
	//

	//0. you can collect some stats; a timer works for that
	osg::Timer t;

	{

	//1. mutex lock access to the image video stream
	OpenThreads::ScopedLock<OpenThreads::Mutex> _lock(this->getMutex());

	osg::notify(osg::DEBUG_INFO)<<"osgART::OpenNIVideo::update() get new image.."<<std::endl;

	XnStatus nRetVal = XN_STATUS_OK;

	nRetVal=context.WaitAndUpdateAll();
	CHECK_RC(nRetVal, "Update Data");

	xnFPSMarkFrame(&xnFPS);

	depth_generator.GetMetaData(depthMD);
	const XnDepthPixel* pDepthMap = depthMD.Data();
	//pDepthMap points to 16-bit unsigned depth values (in millimeters)
	
	image_generator.GetMetaData(imageMD);
	const XnUInt8* pImageMap = imageMD.Data();

	// Hybrid mode isn't supported in this sample
	if (imageMD.FullXRes() != depthMD.FullXRes() || imageMD.FullYRes() != depthMD.FullYRes())
	{
		std::cerr<<"The device depth and image resolution must be equal!"<<std::endl;
		exit(1);
	}

	// RGB is the only image format supported.
	if (imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		std::cerr<<"The device image format must be RGB24"<<std::endl;
		exit(1);
	}
	
	const XnDepthPixel* pDepth=pDepthMap;
	const XnUInt8* pImage=pImageMap;
	
	XnDepthPixel zMax = depthMD.ZRes();
    //rescale the 16-bit depth buffer to 8-bit grayscale (0..255 over the sensor's z range)
	for ( unsigned int i=0; i<(depthMD.XRes() * depthMD.YRes()); ++i )
    {
            *(_depthBufferByte + i) = 255 * (float(*(pDepth + i)) / float(zMax));
    }
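	// Alternative (sketch, not in the original): the same rescale can be
	// written with OpenCV by wrapping the raw buffer, e.g.
	//   cv::Mat depth16( depthMD.YRes(), depthMD.XRes(), CV_16UC1,
	//                    (void*)pDepthMap );
	//   cv::Mat depth8;
	//   depth16.convertTo( depth8, CV_8U, 255.0 / zMax );
	// (the wrapped buffer must remain valid while depth16 is in use)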

	memcpy(_videoStreamList[0]->data(),pImage, _videoStreamList[0]->getImageSizeInBytes());
	
	memcpy(_videoStreamList[1]->data(),_depthBufferByte, _videoStreamList[1]->getImageSizeInBytes());

	//3. don't forget to call this to notify the rest of the application
	//that you have a new video image
	_videoStreamList[0]->dirty();
	_videoStreamList[1]->dirty();
	}

	//4. hopefully report some interesting data
	if (nv) {

		const osg::FrameStamp *framestamp = nv->getFrameStamp();

		if (framestamp && _stats.valid())
		{
			_stats->setAttribute(framestamp->getFrameNumber(),
				"Capture time taken", t.time_m());
		}
	}


	// Increase modified count every 50 ms to ensure tracker updates
	if (updateTimer.time_m() > 50) {
		_videoStreamList[0]->dirty();
		_videoStreamList[1]->dirty();
		updateTimer.setStartTick();
	}

	return true;
}
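
The xnFPS counter marked in update() comes from OpenNI's FPS utility (XnFPSCalculator.h); its declaration and setup are not shown in this example. A minimal sketch of the usual pattern:

// Sketch only: typical XnFPSCalculator usage.
#include <XnFPSCalculator.h>

XnFPSData xnFPS;

// once, during initialization:
xnFPSInit(&xnFPS, 180);			// average over up to 180 frames

// every frame (as update() does):
xnFPSMarkFrame(&xnFPS);
printf("FPS: %.1f\n", xnFPSCalc(&xnFPS));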
Exemple #30
0
int main() {

    const unsigned int nBackgroundTrain = 30;
    const unsigned short touchDepthMin = 10;
    const unsigned short touchDepthMax = 20;
    const unsigned int touchMinArea = 50;

    const bool localClientMode = true; // connect to a local client

    const double debugFrameMaxDepth = 4000; // maximal distance (in millimeters) for 8 bit debug depth frame quantization
    const char* windowName = "Debug";
    const char* colorWindowName = "image";
    const Scalar debugColor0(0, 0, 128);
    const Scalar debugColor1(255, 0, 0);
    const Scalar debugColor2(255, 255, 255);
    const Scalar debugColor3(0, 255, 255);
    const Scalar debugColor4(255, 0, 255);

    int xMin = 50;
    int xMax = 550;
    int yMin = 50;
    int yMax = 300;

    Mat1s depth(480, 640); // 16 bit depth (in millimeters)
    Mat1b depth8(480, 640); // 8 bit depth
    Mat3b rgb(480, 640); // 8 bit RGB color

    Mat3b debug(480, 640); // debug visualization

    Mat1s foreground(480, 640); // rows x cols, matching depth
    Mat1b foreground8(480, 640);

    Mat1b touch(480, 640); // touch mask

    Mat1s background(480, 640);
    vector<Mat1s> buffer(nBackgroundTrain);

    IplImage * image = cvCreateImage(cvSize(640, 480), 8, 3);
    IplImage * convertedImage = cvCreateImage(cvSize(640, 480), 8, 3);

    initOpenNI("niConfig.xml");

    // TUIO server object
    TuioServer* tuio;
    if (localClientMode) {
        tuio = new TuioServer();
    } else {
        tuio = new TuioServer("192.168.0.2", 3333, false);
    }
    TuioTime time;

    namedWindow(colorWindowName);
    createTrackbar("xMin", colorWindowName, &xMin, 640);
    createTrackbar("xMax", colorWindowName, &xMax, 640);
    createTrackbar("yMin", colorWindowName, &yMin, 480);
    createTrackbar("yMax", colorWindowName, &yMax, 480);
    // create some sliders
    namedWindow(windowName);
    createTrackbar("xMin", windowName, &xMin, 640);
    createTrackbar("xMax", windowName, &xMax, 640);
    createTrackbar("yMin", windowName, &yMin, 480);
    createTrackbar("yMax", windowName, &yMax, 480);

    Keyboard * piano = new Keyboard();
    piano->initKeyMap();

    system("qjackctl &");
    sleep(4);
    JackByTheNotes * notesJack = new JackByTheNotes();
    notesJack->connect();
    sleep(2);
    system("sudo jack_connect Piano:Rubinstein system:playback_1 &");

    map<double, timeval> keys;

    // create background model (average depth)
    for (unsigned int i = 0; i < nBackgroundTrain; i++) {
        xnContext.WaitAndUpdateAll();
        depth.data = (uchar*) xnDepthGenerator.GetDepthMap();
        buffer[i] = depth;
    }
    average(buffer, background);

    while (waitKey(1) != 27) {
        // read available data
        xnContext.WaitAndUpdateAll();

        // update 16 bit depth matrix
        depth.data = (uchar*) xnDepthGenerator.GetDepthMap();
        //xnImgeGenertor.GetGrayscale8ImageMap()

        XnRGB24Pixel* xnRgb =
            const_cast<XnRGB24Pixel*>(xnImgeGenertor.GetRGB24ImageMap());
        //		IplImage * image = cvCreateImage(cvSize(640, 480), 8, 3);
        //		IplImage * convertedImage = cvCreateImage(cvSize(640, 480), 8, 3);
        cvSetData(image, xnRgb, 640 * 3);
        cvConvertImage(image, convertedImage, CV_CVTIMG_SWAP_RB);
        bool color = true;
        rgb = convertedImage;
        //		cvtColor(rgb,rgb,CV_RGB2BGR);
        // update rgb image
        //		rgb.data = (uchar*) xnImgeGenertor.GetRGB24ImageMap(); // segmentation fault here
        //		cvCvtColor(rgb, rgb, CV_BGR2RGB);

        // extract foreground by simple subtraction of very basic background model
        foreground = background - depth;

        // find touch mask by thresholding (points that are close to background = touch points)
        touch = (foreground > touchDepthMin) & (foreground < touchDepthMax);
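        // (both thresholds are in millimeters above the trained background:
        //  a pixel 10-20 mm closer than the background plane counts as a touch)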

        // extract ROI
        Rect roi(xMin, yMin, xMax - xMin, yMax - yMin);
        Mat touchRoi = touch(roi);

        // find touch points
        vector<vector<Point2i> > contours;
        vector<Point2f> touchPoints;
        findContours(touchRoi, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE,
                     Point2i(xMin, yMin));
        for (unsigned int i = 0; i < contours.size(); i++) {
            Mat contourMat(contours[i]);
            // find touch points by area thresholding
            if (contourArea(contourMat) > touchMinArea) {
                Scalar center = mean(contourMat);
                Point2i touchPoint(center[0], center[1]);
                touchPoints.push_back(touchPoint);
            }
        }

        // send TUIO cursors
        time = TuioTime::getSessionTime();
        tuio->initFrame(time);

        for (unsigned int i = 0; i < touchPoints.size(); i++) { // touch points
            float cursorX = (touchPoints[i].x - xMin) / (xMax - xMin);
            float cursorY = 1 - (touchPoints[i].y - yMin) / (yMax - yMin);
            TuioCursor* cursor = tuio->getClosestTuioCursor(cursorX, cursorY);
            // TODO improve tracking (don't reassign a cursor that may be closer to another touch point)
            if (cursor == NULL || cursor->getTuioTime() == time) {
                tuio->addTuioCursor(cursorX, cursorY);
            } else {
                tuio->updateTuioCursor(cursor, cursorX, cursorY);
            }
        }

        tuio->stopUntouchedMovingCursors();
        tuio->removeUntouchedStoppedCursors();
        tuio->commitFrame();

        // draw debug frame
        depth.convertTo(depth8, CV_8U, 255 / debugFrameMaxDepth); // render depth to debug frame
        cvtColor(depth8, debug, CV_GRAY2BGR);
        debug.setTo(debugColor0, touch); // touch mask
        rectangle(debug, roi, debugColor1, 2); // surface boundaries
        if (color)
            rectangle(rgb, roi, debugColor1, 2); // surface boundaries

        // draw 10 white keys within the roi
        int stride = (xMax - xMin) / 10;
        for (int key = 1; key < 10; key++) { // 'key' avoids shadowing the keys map above
            Point lower(xMin + key * stride, yMax);
            if (key == 3 || key == 7) {
                // full-height separator; no black key at these positions
                Point upper(xMin + key * stride, yMin);
                line(debug, upper, lower, debugColor3, 2, 0);
                if (color)
                    line(rgb, upper, lower, debugColor3, 2, 0);
                continue;
            } else {
                Point upper(xMin + key * stride, (yMin + yMax) / 2);
                line(debug, upper, lower, debugColor3, 2, 0);
                if (color)
                    line(rgb, upper, lower, debugColor3, 2, 0);
            }
            Point blkUpper(xMin + key * stride - stride / 3, yMin);
            Point blkLower(xMin + key * stride + stride / 3,
                           (yMin + yMax) / 2);
            rectangle(debug, blkUpper, blkLower, debugColor4, 2);
            if (color)
                rectangle(rgb, blkUpper, blkLower, debugColor4, 2);
        }

        for (unsigned int i = 0; i < touchPoints.size(); i++) { // touch points
            circle(debug, touchPoints[i], 5, debugColor2, CV_FILLED);
            if (color)
                circle(rgb, touchPoints[i], 5, debugColor2, CV_FILLED);
            // note: the hardcoded 50s match the initial xMin/yMin trackbar values
            double frequency = piano->keyFrequency(touchPoints[i].y - 50,
                                                   touchPoints[i].x - 50);

            cout << frequency << " " << touchPoints[i].y - 50 << " "
                 << touchPoints[i].x - 50 << endl;

            if (keys.find(frequency) == keys.end()) {
                Note * note = new Note(frequency, 2, 4000);
                notesJack->playNote(*note);
                timeval now;
                gettimeofday(&now, NULL);
                keys[frequency] = now;
            } else {
                timeval now;
                gettimeofday(&now, NULL);
                if ((now.tv_sec - keys[frequency].tv_sec) * 1000
                        + (now.tv_usec - keys[frequency].tv_usec) / 1000
                        > 1000) {
                    Note * note = new Note(frequency, 2, 4000);
                    notesJack->playNote(*note);
                    keys[frequency] = now;
                }
            }
        }
        // render debug frame (with sliders)
//		IplImage grayScale = debug;
//		cvFlip(&grayScale, NULL, 1);
//		Mat gray(&grayScale);
//		imshow(windowName, gray);
//
//		IplImage colorful = rgb;
//		cvFlip(&colorful, NULL, 1);
//		Mat real(&colorful);
//		imshow("image", real);
        imshow(windowName, debug);
        imshow("image", rgb);
        //		cvShowImage("image", image);
    }

    return 0;
}
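
The average() helper that builds the background model is not shown in this example. A minimal sketch under the assumption that it computes a per-pixel mean of the buffered depth frames:

// Sketch only: per-pixel mean of the training frames, accumulated in
// 32-bit to avoid overflowing the 16-bit depth values.
static void average(const std::vector<cv::Mat1s>& frames, cv::Mat1s& mean)
{
    cv::Mat acc = cv::Mat::zeros(frames[0].size(), CV_32SC1);
    for (size_t i = 0; i < frames.size(); ++i) {
        cv::Mat f;
        frames[i].convertTo(f, CV_32SC1);
        acc += f;
    }
    acc /= (int)frames.size();
    acc.convertTo(mean, CV_16SC1);
}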