Example #1
bool SetupDepth(xn::Context& g_context)
{
	XnStatus nRetVal = XN_STATUS_OK;

	fprintf(stderr,"Setting up the depth generator\n");

	if ((nRetVal = g_depth.Create(g_context))!= XN_STATUS_OK)
	{
		printf("Could not create depth generator: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}


/*
	if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not find depth sensor: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}
	*/

	XnMapOutputMode mapMode;
	mapMode.nXRes = XN_VGA_X_RES;
	mapMode.nYRes = XN_VGA_Y_RES;
	mapMode.nFPS = 30;
	if ((nRetVal = g_depth.SetMapOutputMode(mapMode)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not set depth mode: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}

	g_depth.GetMetaData(g_depthMD);
	g_depthWidth  = g_depthMD.FullXRes();
	g_depthHeight = g_depthMD.FullYRes();	
	return TRUE;
}
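
For reference, a minimal sketch of the globals and call sequence that SetupDepth assumes. The names g_context, g_depth, g_depthMD, g_depthWidth and g_depthHeight come from the snippet above; the driver main below and the global types are assumptions for illustration, not part of the original project.

#include <XnCppWrapper.h>
#include <cstdio>

// Globals referenced by SetupDepth (names taken from Example #1; types assumed).
xn::Context        g_context;
xn::DepthGenerator g_depth;
xn::DepthMetaData  g_depthMD;
int g_depthWidth  = 0;
int g_depthHeight = 0;

bool SetupDepth(xn::Context& context);   // defined in Example #1

int main()
{
	// Hypothetical driver: initialize the context, set up depth, pull one frame.
	if (g_context.Init() != XN_STATUS_OK)                 return 1;
	if (!SetupDepth(g_context))                           return 1;
	if (g_context.StartGeneratingAll() != XN_STATUS_OK)   return 1;

	if (g_context.WaitOneUpdateAll(g_depth) == XN_STATUS_OK)
	{
		g_depth.GetMetaData(g_depthMD);
		const XnDepthPixel* pDepth = g_depthMD.Data();    // 16-bit depth values (millimeters on Kinect/PrimeSense)
		printf("center depth: %u\n",
		       (unsigned)pDepth[(g_depthHeight / 2) * g_depthWidth + g_depthWidth / 2]);
	}

	g_context.Release();
	return 0;
}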
Example #2
int main ( int argc, char ** argv )
{
	// 
	// Initializing Calibration Related
	// 

	// ARTagHelper artagHelper	( colorImgWidth, colorImgHeight, ARTAG_CONFIG_FILE,		ARTAG_POS_FILE );
	ARTagHelper artagHelper		( colorImgWidth, colorImgHeight, ARTAG_CONFIG_A3_FILE,	ARTAG_POS_A3_FILE );

	ExtrCalibrator extrCalibrator ( 6, KINECT_INTR_FILE, KINECT_DIST_FILE );
	// unsigned char * kinectImgBuf = new unsigned char[colorImgWidth * colorImgHeight * 3];

	// 
	// Initializing OpenNI Settings
	// 

	int ctlWndKey = -1;

	XnStatus nRetVal = XN_STATUS_OK;
	xn::EnumerationErrors errors;

	// 
	// Initialize Context Object
	// 
	
	nRetVal = g_Context.InitFromXmlFile ( CONFIG_XML_PATH, g_ScriptNode, &errors );
	if ( nRetVal == XN_STATUS_NO_NODE_PRESENT ) 
	{
		XnChar strError[1024];
		errors.ToString ( strError, 1024 );
		printf ( "XN_STATUS_NO_NODE_PRESENT:\n%s\n", strError );
		
		system ( "pause" );
		return ( nRetVal );
	}
	else if ( nRetVal != XN_STATUS_OK )
	{
		printf ( "Open FAILED:\n%s\n", xnGetStatusString ( nRetVal ) );	
	
		system ( "pause" );
		return ( nRetVal );
	}

	// 
	// Handle the Depth Generator Node.
	// 
	
	nRetVal = g_Context.FindExistingNode ( XN_NODE_TYPE_DEPTH, g_DepthGen );
	if ( nRetVal != XN_STATUS_OK )
	{
		printf ( "No Depth Node Exists! Please Check your XML.\n" );
		return ( nRetVal );
	}
	
	// 
	// Handle the Image Generator node
	// 
	
	nRetVal = g_Context.FindExistingNode ( XN_NODE_TYPE_IMAGE, g_ImageGen );
	if ( nRetVal != XN_STATUS_OK )
	{
		printf ( "No Image Node Exists! Please Check your XML.\n" );
		return ( nRetVal );
	}

	// g_DepthGen.GetAlternativeViewPointCap().SetViewPoint( g_ImageGen );

	g_DepthGen.GetMetaData ( g_DepthMD );
	g_ImageGen.GetMetaData ( g_ImageMD );

	assert ( g_ImageMD.PixelFormat() == XN_PIXEL_FORMAT_RGB24 );
	assert ( g_DepthMD.PixelFormat() == XN_PIXEL_FORMAT_GRAYSCALE_16_BIT );

	// 
	// Create OpenCV Showing Window and Related Data Structures
	// 

	cv::namedWindow ( IMAGE_WIN_NAME, CV_WINDOW_AUTOSIZE );
	cv::namedWindow ( DEPTH_WIN_NAME, CV_WINDOW_AUTOSIZE );

	cv::Mat depthImgMat  ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_16UC1 );
	cv::Mat depthImgShow ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_8UC3  );
	cv::Mat colorImgMat  ( g_ImageMD.YRes(), g_ImageMD.XRes(), CV_8UC3 );

#define ARTAG_DEBUG

#ifdef ARTAG_DEBUG
	
	cv::setMouseCallback ( IMAGE_WIN_NAME, ClickOnMouse, 0 );

#endif

	bool flipColor = true;

	// 
	// Start to Loop
	// 

	while ( ctlWndKey != ESC_KEY_VALUE ) 
	{
		// 
		// Try to Get New Frame From Kinect
		// 
	
		nRetVal = g_Context.WaitAnyUpdateAll ();

		g_DepthGen.GetMetaData ( g_DepthMD );
		g_ImageGen.GetMetaData ( g_ImageMD );

		assert ( g_DepthMD.FullXRes() == g_DepthMD.XRes() && g_DepthMD.FullYRes() == g_DepthMD.YRes() );
		assert ( g_ImageMD.FullXRes() == g_ImageMD.XRes() && g_ImageMD.FullYRes() == g_ImageMD.YRes() );

		GlobalUtility::CopyColorRawBufToCvMat8uc3 ( (const XnRGB24Pixel *)(g_ImageMD.Data()), colorImgMat );

#ifdef SHOW_DEPTH_WINDOW

		GlobalUtility::CopyDepthRawBufToCvMat16u  ( (const XnDepthPixel *)(g_DepthMD.Data()), depthImgMat );
		// GlobalUtility::ConvertDepthCvMat16uToYellowCvMat ( depthImgMat, depthImgShow );
		GlobalUtility::ConvertDepthCvMat16uToGrayCvMat ( depthImgMat, depthImgShow );

		cv::imshow ( DEPTH_WIN_NAME, depthImgShow );

#endif

		ctlWndKey = cvWaitKey ( 15 );

		if ( ctlWndKey == 'f' || ctlWndKey == 'F' ) 
		{
			artagHelper.Clear();

			artagHelper.FindMarkerCorners ( (unsigned char *)(g_ImageMD.Data()) );
			artagHelper.PrintMarkerCornersPos2dInCam ();
			extrCalibrator.ExtrCalib ( artagHelper );

			std::cout << "\nKinect Extr Matrix:" << std::endl;
			extrCalibrator.PrintMatrix ( extrCalibrator.GetMatrix ( ExtrCalibrator::EXTR ) );

			std::cout	<< "Reprojection ERROR = " 
						<< extrCalibrator.ComputeReprojectionErr ( artagHelper ) << std::endl
						// << extrCalibrator.ComputeReprojectionErr ( artagHelper.m_MarkerCornerPosCam2d, artagHelper.m_MarkerCornerPos3d, 24 ) << std::endl
						<< "Valid Marker Number = " << artagHelper.GetValidMarkerNumber() << std::endl
						<< std::endl;

			extrCalibrator.SaveMatrix ( ExtrCalibrator::EXTR, KINECT_EXTR_FILE );
		}
		if ( ctlWndKey == 's' || ctlWndKey == 'S' )
		{
			flipColor = !flipColor;
		}

		if ( flipColor ) { 
			cv::cvtColor ( colorImgMat, colorImgMat, CV_RGB2BGR );
		}

		artagHelper.DrawMarkersInCameraImage ( colorImgMat );
		cv::imshow ( IMAGE_WIN_NAME, colorImgMat );
	}

	g_Context.Release ();
	
	system	( "pause" );
	exit	( EXIT_SUCCESS );

}
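
GlobalUtility::CopyColorRawBufToCvMat8uc3 and GlobalUtility::ConvertDepthCvMat16uToGrayCvMat are project-specific helpers that are not shown on this page. Below is a rough, hypothetical sketch of equivalent conversions using plain OpenCV calls; the function names and the 10000 mm full-scale value are assumptions, not the project's actual implementation.

#include <opencv2/opencv.hpp>
#include <XnCppWrapper.h>

// Hypothetical stand-in for GlobalUtility::CopyColorRawBufToCvMat8uc3:
// wrap the XN_PIXEL_FORMAT_RGB24 buffer and copy it into an existing CV_8UC3 matrix.
static void CopyColorRawBufToMat(const XnRGB24Pixel* pColor, cv::Mat& colorMat)
{
	cv::Mat wrapper(colorMat.rows, colorMat.cols, CV_8UC3, (void*)pColor);
	wrapper.copyTo(colorMat);   // channels stay in RGB order, as in the loop above
}

// Hypothetical stand-in for GlobalUtility::ConvertDepthCvMat16uToGrayCvMat:
// scale 16-bit depth into an 8-bit, 3-channel gray image for cv::imshow.
static void ConvertDepth16uToGray(const cv::Mat& depth16u, cv::Mat& gray8uc3)
{
	cv::Mat gray8u;
	depth16u.convertTo(gray8u, CV_8U, 255.0 / 10000.0);   // assumes ~10 m maximum range
	cv::cvtColor(gray8u, gray8uc3, CV_GRAY2BGR);
}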
Example #3
// Save new data from OpenNI
void MovieMng::Update(const xn::DepthMetaData& dmd, const xn::ImageMetaData& imd)
{
	// Check that the buffers have free space
	if((m_nBufferSizeFile == 0) || (m_nBufferSizeSend == 0))
	{
		printf("バッファ空きなし!! ForFile:%d ForSend:%d\n", m_nBufferSizeFile, m_nBufferSizeSend);
		return;
	}

	SingleFrame* pFrames = m_pFrames + m_nNextWrite;

	if(m_nNextWrite == 0)
	{
		m_nImageHeight = imd.FullYRes();
		m_nImageWidth = imd.FullXRes();
		m_nDepthHeight = dmd.FullYRes();
		m_nDepthWidth = dmd.FullXRes();

		printf("Image Height:%d Width:%d  Depth Height:%d Width:%d\n", 
			m_nImageHeight, m_nImageWidth, m_nDepthHeight, m_nDepthWidth);
	}
#ifdef LOG_WRITE_ENABLE
	fprintf(mmng_fp, "[%s] update. m_nNextWrite:%d\n", __FUNCTION__, m_nNextWrite);
#endif
	if (m_bDepth)
	{
		pFrames->depthFrame.CopyFrom(dmd);
	}
	
	if (m_bImage)
	{
		pFrames->imageFrame.CopyFrom(imd);
	}

	// Update the buffer management counters
	if (m_bFileWrite)
	{
		EnterCriticalSection(&csFileWriter);
		m_nDataCountFile++;
		m_nBufferSizeFile--;
	}

	if (m_bSendDepth)
	{
		EnterCriticalSection(&csDepthSender);
		m_nDataCountSend++;
		m_nBufferSizeSend--;
	}
	
	m_nNextWrite++;
	if (m_nBufferSize == m_nNextWrite)
	{
		m_nNextWrite = 0;
	}

	if (m_bFileWrite)
	{
		LeaveCriticalSection(&csFileWriter);
	}
	
	if (m_bSendDepth)
	{
		LeaveCriticalSection(&csDepthSender);
	}
}
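
The csFileWriter and csDepthSender locks used in Update() are Win32 CRITICAL_SECTION objects (EnterCriticalSection and LeaveCriticalSection take a pointer to one). They must be initialized once before Update() can run and deleted at shutdown; a minimal reminder of that lifecycle, with illustrative wrapper names:

#include <windows.h>

CRITICAL_SECTION csFileWriter;
CRITICAL_SECTION csDepthSender;

void InitMovieMngLocks()
{
	// Must run once before any Enter/LeaveCriticalSection call in Update().
	InitializeCriticalSection(&csFileWriter);
	InitializeCriticalSection(&csDepthSender);
}

void DestroyMovieMngLocks()
{
	DeleteCriticalSection(&csFileWriter);
	DeleteCriticalSection(&csDepthSender);
}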
Example #4
bool OpenNIVideo::update(osg::NodeVisitor* nv) {

	//this is the main function of your video plugin
	//you can either retrieve images from your video stream/camera/file
	//or communicate with a thread to synchronize and get the data out
	
	//the most important thing is to synchronize your data
	//and copy the result to the VideoImageStream used in this plugin
	//

	//0. you can collect some stats; for that you can use a timer
	osg::Timer t;

	{

	//1. mutex lock access to the image video stream
	OpenThreads::ScopedLock<OpenThreads::Mutex> _lock(this->getMutex());

	osg::notify(osg::DEBUG_INFO)<<"osgART::OpenNIVideo::update() get new image.."<<std::endl;

	XnStatus nRetVal = XN_STATUS_OK;

	nRetVal=context.WaitAndUpdateAll();
	CHECK_RC(nRetVal, "Update Data");

	xnFPSMarkFrame(&xnFPS);

	depth_generator.GetMetaData(depthMD);
	const XnDepthPixel* pDepthMap = depthMD.Data();
	//pDepthMap points to 16-bit depth samples (XnDepthPixel), not floats.
	
	image_generator.GetMetaData(imageMD);
	const XnUInt8* pImageMap = imageMD.Data();

	// Hybrid mode isn't supported in this sample
	if (imageMD.FullXRes() != depthMD.FullXRes() || imageMD.FullYRes() != depthMD.FullYRes())
	{
		std::cerr<<"The device depth and image resolution must be equal!"<<std::endl;
		exit(1);
	}

	// RGB is the only image format supported.
	if (imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		std::cerr<<"The device image format must be RGB24"<<std::endl;
		exit(1);
	}
	
	const XnDepthPixel* pDepth=pDepthMap;
	const XnUInt8* pImage=pImageMap;
	
	XnDepthPixel zMax = depthMD.ZRes();
	//scale the 16-bit depth buffer down to 8-bit values for display
	for ( unsigned int i=0; i<(depthMD.XRes() * depthMD.YRes()); ++i )
	{
		*(_depthBufferByte + i) = 255 * (float(*(pDepth + i)) / float(zMax));
	}

	memcpy(_videoStreamList[0]->data(),pImage, _videoStreamList[0]->getImageSizeInBytes());
	
	memcpy(_videoStreamList[1]->data(),_depthBufferByte, _videoStreamList[1]->getImageSizeInBytes());

	//3. don't forget to call this to notify the rest of the application
	//that you have a new video image
	_videoStreamList[0]->dirty();
	_videoStreamList[1]->dirty();
	}

	//4. hopefully report some interesting data
	if (nv) {

		const osg::FrameStamp *framestamp = nv->getFrameStamp();

		if (framestamp && _stats.valid())
		{
			_stats->setAttribute(framestamp->getFrameNumber(),
				"Capture time taken", t.time_m());
		}
	}


	// Increase modified count every X ms to ensure tracker updates
	if (updateTimer.time_m() > 50) {
		_videoStreamList[0]->dirty();
		_videoStreamList[1]->dirty();
		updateTimer.setStartTick();
	}

	return true;
}
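
The per-pixel loop above maps each 16-bit depth sample linearly into 0..255, using depthMD.ZRes() as the full-scale value. Factored out as a stand-alone helper it would look roughly like this (the function name is only for illustration):

#include <XnCppWrapper.h>

// Scale a 16-bit OpenNI depth buffer into an 8-bit buffer for display,
// as the loop in update() does: out = 255 * depth / zMax.
static void DepthToByte(const XnDepthPixel* pDepth, unsigned char* pOut,
                        unsigned int count, XnDepthPixel zMax)
{
	for (unsigned int i = 0; i < count; ++i)
	{
		pOut[i] = (unsigned char)(255.0f * (float)pDepth[i] / (float)zMax);
	}
}

// Usage matching the example:
// DepthToByte(depthMD.Data(), _depthBufferByte, depthMD.XRes() * depthMD.YRes(), depthMD.ZRes());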
Example #5
bool SetupPrimesense(void)
{
	XnStatus nRetVal = XN_STATUS_OK;

	if ((nRetVal = g_context.Init()) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not init OpenNI context: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}

	if ((nRetVal = g_depth.Create(g_context))!= XN_STATUS_OK)
	{
		fprintf(stderr,"Could not create depth generator: %s\n", xnGetStatusString(nRetVal));
		g_haveDepth = FALSE;
	}
	else if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not find depth sensor: %s\n", xnGetStatusString(nRetVal));
		g_haveDepth = FALSE;
	}

	if ((nRetVal = g_image.Create(g_context))!= XN_STATUS_OK)
	{
		fprintf(stderr,"Could not create image generator: %s\n", xnGetStatusString(nRetVal));
		g_haveImage = FALSE;
	}
	else if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not find image sensor: %s\n", xnGetStatusString(nRetVal));
		g_haveImage = FALSE;
	}

	if (!g_haveImage &&  !g_haveDepth)
	{
		fprintf(stderr,"Could not find either depth or image sources.\n");
		return FALSE;
	}

	XnMapOutputMode mapMode;
	mapMode.nXRes = XN_VGA_X_RES;
	mapMode.nYRes = XN_VGA_Y_RES;
	mapMode.nFPS = 30;
	if (g_haveDepth && ( (nRetVal = g_depth.SetMapOutputMode(mapMode)) != XN_STATUS_OK))
	{
		fprintf(stderr,"Could not set depth mode: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}

	if (g_haveDepth)
	{

		g_depth.GetMetaData(g_depthMD);
		g_depthWidth  = g_depthMD.FullXRes();
		g_depthHeight = g_depthMD.FullYRes();	

	}


	if (g_haveImage &&  (nRetVal = g_image.SetMapOutputMode(mapMode)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not set image: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}

	
	if ((nRetVal = g_context.StartGeneratingAll()) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not start: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}
	return TRUE;
}
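
SetupPrimesense leaves the context generating data. A matching teardown, mirroring the Release() call in Example #2, could look like this (the function name is only for illustration; g_context is the same global used above):

// Hypothetical counterpart to SetupPrimesense: stop the generators and
// release the OpenNI context when the application shuts down.
void ShutdownPrimesense(void)
{
	g_context.StopGeneratingAll();
	g_context.Release();
}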