Example #1
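// Enables or disables depth-to-RGB registration through OpenNI's alternative viewpoint
// capability: a non-zero propValue maps the depth viewpoint onto the image generator
// (if one exists and the viewpoint is supported), zero resets it. Returns true on success.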
bool CvCapture_OpenNI::setDepthGeneratorProperty( int propIdx, double propValue )
{
    bool isSet = false;

    CV_Assert( depthGenerator.IsValid() );

    switch( propIdx )
    {
    case CV_CAP_PROP_OPENNI_REGISTRATION:
        {
            if( propValue != 0.0 ) // "on"
            {
                // if there is no image generator (e.g. the ASUS XtionPro doesn't have one),
                // then the property isn't available
                if( imageGenerator.IsValid() )
                {
                    if( !depthGenerator.GetAlternativeViewPointCap().IsViewPointAs(imageGenerator) )
                    {
                        if( depthGenerator.GetAlternativeViewPointCap().IsViewPointSupported(imageGenerator) )
                        {
                            XnStatus status = depthGenerator.GetAlternativeViewPointCap().SetViewPoint(imageGenerator);
                            if( status != XN_STATUS_OK )
                                std::cerr << "CvCapture_OpenNI::setDepthGeneratorProperty : " << xnGetStatusString(status) << std::endl;
                            else
                                isSet = true;
                        }
                        else
                            std::cerr << "CvCapture_OpenNI::setDepthGeneratorProperty : Unsupported viewpoint." << std::endl;
                    }
                    else
                        isSet = true;
                }
            }
            else // "off"
            {
                XnStatus status = depthGenerator.GetAlternativeViewPointCap().ResetViewPoint();
                if( status != XN_STATUS_OK )
                    std::cerr << "CvCapture_OpenNI::setDepthGeneratorProperty : " << xnGetStatusString(status) << std::endl;
                else
                    isSet = true;
            }
        }
        break;
    default:
    {
        std::stringstream ss;
        ss << "Depth generator does not support such parameter (propIdx=" << propIdx << ") for setting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return isSet;
}
Example #2
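// Runtime toggle for registration: nValue == 0 resets the depth viewpoint, any other
// value registers depth to the image generator. Return codes of the OpenNI calls are
// not checked here.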
void changeRegistration(int nValue)
{
	if (!g_DepthGenerator.IsValid() || !g_DepthGenerator.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
	{
		return;
	}

	if(!nValue)
	{
		g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();
	}
	else if (g_ImageGenerator.IsValid())
	{
		g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
	}
}
Example #3
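// Creates and configures depth and/or image generators from a recording configuration:
// map output modes, optional mirroring, a "HoleFilter" property on the depth node, and,
// when both streams are recorded, depth/RGB registration and frame sync.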
XnStatus ConfigureGenerators(const RecConfiguration& config, xn::Context& context, xn::DepthGenerator& depthGenerator, xn::ImageGenerator& imageGenerator)
{
	XnStatus nRetVal = XN_STATUS_OK;
	xn::EnumerationErrors errors;

	// Configure the depth, if needed
	if (config.bRecordDepth)
	{
		nRetVal = context.CreateAnyProductionTree(XN_NODE_TYPE_DEPTH, NULL, depthGenerator, &errors);
		CHECK_RC_ERR(nRetVal, "Create Depth", errors);
		nRetVal = depthGenerator.SetMapOutputMode(*config.pDepthMode);
		CHECK_RC(nRetVal, "Set Mode");
		if (config.bMirrorIndicated && depthGenerator.IsCapabilitySupported(XN_CAPABILITY_MIRROR))
		{
			depthGenerator.GetMirrorCap().SetMirror(config.bMirror);
		}

		// Set Hole Filter
		depthGenerator.SetIntProperty("HoleFilter", TRUE);
	}
	// Configure the image, if needed
	if (config.bRecordImage)
	{
		nRetVal = context.CreateAnyProductionTree(XN_NODE_TYPE_IMAGE, NULL, imageGenerator, &errors);
		CHECK_RC_ERR(nRetVal, "Create Image", errors);
		nRetVal = imageGenerator.SetMapOutputMode(*config.pImageMode);
		CHECK_RC(nRetVal, "Set Mode");

		if (config.bMirrorIndicated && imageGenerator.IsCapabilitySupported(XN_CAPABILITY_MIRROR))
		{
			imageGenerator.GetMirrorCap().SetMirror(config.bMirror);
		}
	}

	// Configuration for when there are both streams
	if (config.bRecordDepth && config.bRecordImage)
	{
		// Registration
		if (config.bRegister && depthGenerator.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
		{
			nRetVal = depthGenerator.GetAlternativeViewPointCap().SetViewPoint(imageGenerator);
			CHECK_RC(nRetVal, "Registration");
		}
		// Frame Sync
		if (config.bFrameSync && depthGenerator.IsCapabilitySupported(XN_CAPABILITY_FRAME_SYNC))
		{
			if (depthGenerator.GetFrameSyncCap().CanFrameSyncWith(imageGenerator))
			{
				nRetVal = depthGenerator.GetFrameSyncCap().FrameSyncWith(imageGenerator);
				CHECK_RC(nRetVal, "Frame sync");
			}
		}
	}

	return XN_STATUS_OK;
}
Example #4
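// Read-side counterpart of Example #1: reports resolution, FPS, max depth, baseline,
// focal length, registration state, timestamp and frame ID of the depth generator.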
double CvCapture_OpenNI::getDepthGeneratorProperty( int propIdx )
{
    double propValue = 0;
    if( !depthGenerator.IsValid() )
        return propValue;

    XnMapOutputMode mode;

    switch( propIdx )
    {
    case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT :
        CV_DbgAssert( depthGenerator.IsValid() );
        propValue = 1.;
        break;
    case CV_CAP_PROP_FRAME_WIDTH :
        if( depthGenerator.GetMapOutputMode(mode) == XN_STATUS_OK )
            propValue = mode.nXRes;
        break;
    case CV_CAP_PROP_FRAME_HEIGHT :
        if( depthGenerator.GetMapOutputMode(mode) == XN_STATUS_OK )
            propValue = mode.nYRes;
        break;
    case CV_CAP_PROP_FPS :
        if( depthGenerator.GetMapOutputMode(mode) == XN_STATUS_OK )
            propValue = mode.nFPS;
        break;
    case CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH :
        propValue = depthGenerator.GetDeviceMaxDepth();
        break;
    case CV_CAP_PROP_OPENNI_BASELINE :
        propValue = baseline;
        break;
    case CV_CAP_PROP_OPENNI_FOCAL_LENGTH :
        propValue = (double)depthFocalLength_VGA;
        break;
    case CV_CAP_PROP_OPENNI_REGISTRATION :
        propValue = depthGenerator.GetAlternativeViewPointCap().IsViewPointAs(imageGenerator) ? 1.0 : 0.0;
        break;
    case CV_CAP_PROP_POS_MSEC :
        propValue = depthGenerator.GetTimestamp();
        break;
    case CV_CAP_PROP_POS_FRAMES :
        propValue = depthGenerator.GetFrameID();
        break;
    default :
    {
        std::stringstream ss;
        ss << "Depth generator does not support such parameter (propIdx=" << propIdx << ") for getting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return propValue;
}
Example #5
void InitialKinect()
{
		// 1. Initialize the context
		mContext.Init();

		// 2. Set the map output mode (defined in the constructor)
		mapMode.nXRes = 640;
		mapMode.nYRes = 480;
		mapMode.nFPS = 30;

		// 3.a Create Depth_Generator
		mDepthGenerator.Create( mContext );
		mDepthGenerator.SetMapOutputMode( mapMode );

		// 3.b Create Image_Generator
		mImageGenerator.Create( mContext );
		mImageGenerator.SetMapOutputMode( mapMode );

		// 4. Correct the viewpoint (register depth to the RGB image)
		mDepthGenerator.GetAlternativeViewPointCap().SetViewPoint( mImageGenerator );
		std::cout << "KinectSensor initialized correctly!" << std::endl;
}
Example #6
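// Re-initialises the OpenNI context, creates depth and image generators at VGA/30 fps,
// registers the depth viewpoint to the RGB camera, queries the depth field of view and
// allocates raw frame buffers.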
bool DataCapture::initialise()
{
    context_.Shutdown();

    XnStatus rc = context_.Init(); 
    if( rc != XN_STATUS_OK )
    {
        std::cout << "Init: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    rc = depthGen_.Create(context_);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "depthGen.Create: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    rc = imageGen_.Create(context_);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "imageGen.Create: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    rc = imageGen_.SetPixelFormat(XN_PIXEL_FORMAT_RGB24);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "SetPixelFormat: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    XnMapOutputMode imgMode;
    imgMode.nXRes = 640; // XN_VGA_X_RES
    imgMode.nYRes = 480; // XN_VGA_Y_RES
    imgMode.nFPS = 30;
    rc = imageGen_.SetMapOutputMode(imgMode);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "image SetMapOutputMode: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    rc = depthGen_.SetMapOutputMode(imgMode);
    if( rc != XN_STATUS_OK )
    {
        std::cout << "depth SetMapOutputMode: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    depthGen_.GetMetaData(depthMd_);
    std::cout << "Depth offset " << depthMd_.XOffset() << " " << depthMd_.YOffset() << std::endl;

    // set the depth image viewpoint
    depthGen_.GetAlternativeViewPointCap().SetViewPoint(imageGen_);

    // read off the depth camera field of view.  This is the FOV corresponding to
    // the IR camera viewpoint, regardless of the alternative viewpoint settings.
    XnFieldOfView fov;
    rc = depthGen_.GetFieldOfView(fov);
    std::cout << "Fov: " << fov.fHFOV << " " << fov.fVFOV << std::endl;

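    // Note: OpenNI depth pixels are 16-bit (XnDepthPixel), so a full-resolution copy of
    // the depth map would need 640 * 480 * 2 bytes rather than 640 * 480.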
    pDepthData_ = new char [640 * 480];
    pRgbData_ = new char [640 * 480 * 3];

    return true;
}
Example #7
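// Initialises the context from an XML file, locates depth, image and user generator nodes,
// applies depth/RGB registration, registers user/calibration/pose callbacks and then runs
// the GLUT (or OpenGL ES) display loop.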
int main(int argc, char **argv)
{
	XnStatus rc = XN_STATUS_OK;

	rc = g_Context.InitFromXmlFile(SAMPLE_XML_PATH);
	CHECK_RC(rc, "InitFromXml");

	rc = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
	CHECK_RC(rc, "Find depth generator");
	rc = g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator);
	CHECK_RC(rc, "Find Image generator");
	rc = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	CHECK_RC(rc, "Find user generator");

	// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
	g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
	//g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();

	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON) ||
		!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
	{
		printf("User generator doesn't support either skeleton or pose detection.\n");
		return XN_STATUS_ERROR;
	}

	g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

	rc = g_Context.StartGeneratingAll();
	CHECK_RC(rc, "StartGenerating");

	XnCallbackHandle hUserCBs, hCalibrationCBs, hPoseCBs;
	g_UserGenerator.RegisterUserCallbacks(NewUser, LostUser, NULL, hUserCBs);
	g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(CalibrationStarted, CalibrationEnded, NULL, hCalibrationCBs);
	g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(PoseDetected, NULL, NULL, hPoseCBs);


	#ifdef USE_GLUT
	
	// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
	g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
	//g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();

	glInit(&argc, argv);
	glutMainLoop();

	#else

	if (!opengles_init(GL_WIN_SIZE_X, GL_WIN_SIZE_Y, &display, &surface, &context))
	{
		printf("Error initing opengles\n");
		CleanupExit();
	}

	glDisable(GL_DEPTH_TEST);
//	glEnable(GL_TEXTURE_2D);
	glEnableClientState(GL_VERTEX_ARRAY);
	glDisableClientState(GL_COLOR_ARRAY);

	while ((!_kbhit()) && (!g_bQuit))
	{
		glutDisplay();
		eglSwapBuffers(display, surface);
	}

	opengles_shutdown(display, surface, context);

	CleanupExit();

	#endif
}
Example #8
// this function is called each frame
void glutDisplay (void)
{

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
	g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
//	g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	xn::ImageMetaData imageMD;
	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
	#ifdef USE_GLUT
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#else
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitAndUpdateAll();
	}

	// Process the data
	//DRAW
	// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
	g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
	//g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();

	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);

	DrawDepthMap(depthMD, imageMD, sceneMD, g_nPlayer);

	if (g_nPlayer != 0)
	{
		XnPoint3D com;
		g_UserGenerator.GetCoM(g_nPlayer, com);
		if (com.Z == 0)
		{
			g_nPlayer = 0;
			FindPlayer();
		}
	}

	#ifdef USE_GLUT
	glutSwapBuffers();
	#endif
}
Example #9
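// osgART-style video source: creates registered depth and image generators at 640x480 @ 30 fps
// and allocates two video streams (RGB image and 8-bit luminance depth) plus raw depth buffers.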
bool OpenNIVideo::init() {

	//open the video 
	//if you are using a device, you can open the device
	//if you are using video streaming, you can initialize the connection
	//if you are using video files, you can read the configuration, cache the data, etc.

	xn::EnumerationErrors errors;

	int resolutionX = 640;
	int resolutionY = 480;
	unsigned int FPS = 30;

	XnStatus nRetVal = XN_STATUS_OK;

	nRetVal = context.Init();
	CHECK_RC(nRetVal, "context global init");

	//xn::NodeInfoList list;
	//nRetVal = context.EnumerateProductionTrees(XN_NODE_TYPE_DEVICE, NULL, list, &errors);
	//CHECK_RC(nRetVal, "enumerate production tree");
	
	// HandsGenerator hands;
	//UserGenerator user;
	//GestureGenerator gesture;
	//SceneAnalyzer scene;

	nRetVal = depth_generator.Create(context);
	CHECK_RC(nRetVal, "creating depth generator");

	nRetVal = image_generator.Create(context);
	CHECK_RC(nRetVal, "creating image generator");

	
	if(depth_generator.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
	{
		nRetVal = depth_generator.GetAlternativeViewPointCap().SetViewPoint(image_generator);
		CHECK_RC(nRetVal, "creating registered image/depth generator");
	}
	else
	{
		printf("WARNING: XN_CAPABILITY_ALTERNATIVE_VIEW_POINT not supported");
	}

	if (depth_generator.IsCapabilitySupported(XN_CAPABILITY_FRAME_SYNC))
	{
		if( depth_generator.GetFrameSyncCap().CanFrameSyncWith(image_generator)) {
			//nRetVal=depth.GetFrameSyncCap().FrameSyncWith(image);
			//CHECK_RC(nRetVal, "creating frame sync image/depth generator");
		}
	}
	else
	{
		printf("WARNING: XN_CAPABILITY_FRAME_SYNC not supported");
	}

	XnMapOutputMode mode = {resolutionX,resolutionY,FPS};

	nRetVal = depth_generator.SetMapOutputMode(mode);
	CHECK_RC(nRetVal, "set output mode");

	//NOT NEEDED IF SYNCHRO
	nRetVal = image_generator.SetMapOutputMode(mode);
	CHECK_RC(nRetVal, "set output mode");
	
	_depthBufferShort=new unsigned short[resolutionX*resolutionY];
	_depthBufferByte=new unsigned char[resolutionX*resolutionY];
	
	//we need to create one video stream
	_videoStreamList.push_back(new osgART::VideoStream());
	
	_videoStreamList[0]->allocateImage(resolutionX,resolutionY, 1, GL_RGB, GL_UNSIGNED_BYTE);

	//we need to create one video stream
	_videoStreamList.push_back(new osgART::VideoStream());
	
//	_videoStreamList[1]->allocateImage(resolutionX,resolutionY, 1, GL_LUMINANCE, GL_FLOAT);
	_videoStreamList[1]->allocateImage(resolutionX,resolutionY, 1, GL_LUMINANCE, GL_UNSIGNED_BYTE);
	//_videoStreamList[1]->allocateImage(resolutionX,resolutionY, 1, GL_LUMINANCE, GL_UNSIGNED_SHORT);
	//_videoStreamList[1]->allocateImage(w, h, 1, GL_DEPTHCOMPONENT16, GL_UNSIGNED_BYTE);
	
	if (m_flip_vertical) 
	{
		_videoStreamList[0]->flipVertical();
		_videoStreamList[1]->flipVertical();
	}
	if (m_flip_horizontal) 
	{
		_videoStreamList[0]->flipHorizontal();
		_videoStreamList[1]->flipHorizontal();
	}
	return true;

}