Code Example #1
void OpencvModule::DrawEdges(ImageMetaData& g_imageMD){

    int key=0;

    // Wrap the OpenNI RGB buffer in an OpenCV Mat (no copy is made)
    Mat rgb(480,640,CV_8UC3,(uchar*)g_imageMD.WritableData());
    cvtColor(rgb,gray,CV_RGB2GRAY);

    // Edge detection, then back to 3 channels so the overlay text can be drawn
    Canny(gray,grayedge,fThresCanny1,fThresCanny2);
    cvtColor(grayedge,rgbedge,CV_GRAY2BGR);

    // Timestamp is in microseconds; convert to seconds
    float aux=((float)g_imageMD.Timestamp())/1E6;
    QVariant time_double(aux);

    putText(rgbedge,"Time:", cvPoint(460,30),5,1,cvScalar(255, 255, 255, 0),1,1);
    putText(rgbedge,time_double.toString().toStdString(), cvPoint(535,30),6,0.6,cvScalar(255, 255, 255, 0),1,1);

    imshow("Caremedia Kinect Viewer",rgbedge);

    key = waitKey(5);
}
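Note: DrawEdges (and DrawRGB in Code Example #10) uses several members that the snippet never declares: gray, grayedge, rgbedge, image_BGR and the Canny thresholds. A minimal sketch of the assumed class state follows; the declarations and threshold values are hypothetical, only the names come from the snippets.

class OpencvModule {
    cv::Mat gray, grayedge, rgbedge;	// scratch buffers for DrawEdges
    cv::Mat image_BGR;			// scratch buffer for DrawRGB
    float fThresCanny1 = 50.0f;		// Canny thresholds; actual values unknown
    float fThresCanny2 = 150.0f;
public:
    void DrawEdges(xn::ImageMetaData& g_imageMD);
    void DrawRGB(xn::ImageMetaData& g_imageMD);
};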
Code Example #2
ImageResource::ImageResource(const ds::Resource& res, const int flags)
		: mResource(res)
		, mFlags(flags) {
	ImageMetaData md;
	// The local is intentionally discarded: add() presumably registers the
	// file's dimensions in the framework's shared metadata cache.
	md.add(res.getAbsoluteFilePath(), ci::vec2(res.getWidth(), res.getHeight()));

}
Code Example #3
ImageProvider::ImageProvider(Context* pContext) : AbstractImageStreamProvider(pContext)
{
	CALL_XN( pContext->FindExistingNode(XN_NODE_TYPE_IMAGE, m_imageGen) );

	ImageMetaData md;
	m_imageGen.GetMetaData(md);
	CHECK_ERROR(md.PixelFormat() == XN_PIXEL_FORMAT_RGB24, "This camera's data format is not supported.");
	CHECK_ERROR(md.XRes() == 640 && md.YRes() == 480, "This camera's resolution is not supported.");
}
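CALL_XN and CHECK_ERROR are project macros that this snippet does not define. A plausible minimal sketch, assuming CALL_XN aborts on a failed OpenNI call and CHECK_ERROR on a failed condition:

#include <stdexcept>

#define CALL_XN(expr)                                               \
    do {                                                            \
        XnStatus s_ = (expr);                                       \
        if (s_ != XN_STATUS_OK)                                     \
            throw std::runtime_error(xnGetStatusString(s_));        \
    } while (0)

#define CHECK_ERROR(cond, msg)                                      \
    do {                                                            \
        if (!(cond)) throw std::runtime_error(msg);                 \
    } while (0)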
Code Example #4
File: CameraDevice.cpp Project: animecomico/kth-rgbd
// -----------------------------------------------------------------------------------------------------
//  convertImageRGB
// -----------------------------------------------------------------------------------------------------
void convertImageRGB(const XnRGB24Pixel* pImageMap, IplImage* pImgRGB)
{
	// Convert from the OpenNI buffer to a 24-bit, 3-channel IplImage (BGR order).
	// The loop bounds come from the global g_imageMD, so pImgRGB is assumed
	// to match that resolution.
	for(unsigned int i=0; i<g_imageMD.XRes()*g_imageMD.YRes(); i++)
	{
		pImgRGB->imageData[3*i+0]=pImageMap[i].nBlue;
		pImgRGB->imageData[3*i+1]=pImageMap[i].nGreen;
		pImgRGB->imageData[3*i+2]=pImageMap[i].nRed;
	}
}
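A hypothetical call site, assuming the global g_imageMD describes the current frame:

// Allocate a matching 3-channel image and convert one frame.
IplImage* pImgRGB = cvCreateImage(cvSize(g_imageMD.XRes(), g_imageMD.YRes()), IPL_DEPTH_8U, 3);
convertImageRGB(g_imageMD.RGB24Data(), pImgRGB);	// fills pImgRGB in BGR channel order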
Code Example #5
File: KinectSensor_OpenNi.cpp Project: solson/DSAE
// Gets the colour and depth data from the Kinect sensor.
bool GetColorAndDepthImages(ColorImage& colorImage, DepthImage& depthImage)
{

	XnStatus rc = XN_STATUS_OK;
	
	// Read a new frame, blocking operation
	rc = deviceContext.WaitAnyUpdateAll();
	if (rc != XN_STATUS_OK)
	{
		/*LOGE("Read failed: %s\n", xnGetStatusString(rc));*/
		throw rc;
	}

	
	// Get handles to new data
	static ImageMetaData colorImageMetaData;
	static DepthMetaData depthImageMetaData;
	colorImageGenerator.GetMetaData(colorImageMetaData);
	depthImageGenerator.GetMetaData(depthImageMetaData);

	
	// Validate images
	if (!depthImageGenerator.IsValid() || !colorImageGenerator.IsValid())
	{
		/*LOGE("Error: Color or depth image is invalid.");*/
		throw 1;
	}

	if (colorImageMetaData.Timestamp() <= mostRecentRGB)
		return false;

	// Fetch pointers to data
	const XnRGB24Pixel* pColorImage = colorImageMetaData.RGB24Data(); //g_depth.GetRGB24ImageMap()
	const XnDepthPixel* pDepthImage = depthImageMetaData.Data();// g_depth.GetDepthMap();
	
	
	// Copy data over to arrays
	memcpy(colorImage.data, pColorImage, sizeof(colorImage.data));
	memcpy(depthImage.data, pDepthImage, sizeof(depthImage.data));
	
	colorImage.rows = colorImage.maxRows;
	colorImage.cols = colorImage.maxCols;

	depthImage.rows = depthImage.maxRows;
	depthImage.cols = depthImage.maxCols;

	mostRecentRGB = colorImageMetaData.Timestamp();
	
	return true;
}
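The memcpy calls above size the copy with sizeof(colorImage.data), which only gives the buffer size if data is a fixed-size array member rather than a pointer. A hypothetical shape for the containers, consistent with that and with the 640x480 resolution used throughout these examples:

struct ColorImage {
    static const int maxRows = 480, maxCols = 640;
    XnRGB24Pixel data[maxRows * maxCols];	// fixed-size array, so sizeof(data) is the full buffer
    int rows, cols;
};

struct DepthImage {
    static const int maxRows = 480, maxCols = 640;
    XnDepthPixel data[maxRows * maxCols];
    int rows, cols;
};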
Code Example #6
File: imagescanner.cpp Project: DaveDaCoda/mythtv
void ImageScanThread<DBFS>::PopulateMetadata
(const QString &path, int type, QString &comment, uint &time, int &orientation)
{
    // Set orientation, date, comment from file meta data
    ImageMetaData *metadata = (type == kImageFile)
            ? ImageMetaData::FromPicture(path)
            : ImageMetaData::FromVideo(path);

    orientation  = metadata->GetOrientation();
    comment      = metadata->GetComment().simplified();
    QDateTime dt = metadata->GetOriginalDateTime();
    time         = (dt.isValid()) ? dt.toTime_t() : 0;

    delete metadata;
}
Code Example #7
XnStatus prepare(char useScene, char useDepth, char useImage, char useIr, char useHistogram)
{
	// TODO: handle possible failures
	if (useDepth)
	{
		mDepthGen.GetMetaData(depthMD);
		nXRes = depthMD.XRes();
		nYRes = depthMD.YRes();

		pDepth = depthMD.Data();

		if (useHistogram)
		{
			calcHist();

			// rewind the pointer
			pDepth = depthMD.Data();
		}
	}
	if (useScene) 
	{
		mUserGen.GetUserPixels(0, sceneMD);
		nXRes = sceneMD.XRes();
		nYRes = sceneMD.YRes();

		pLabels = sceneMD.Data();
	}
	if (useImage)
	{
		mImageGen.GetMetaData(imageMD);
		nXRes = imageMD.XRes();
		nYRes = imageMD.YRes();

		pRGB = imageMD.RGB24Data();
		// HISTOGRAM?????
	}
	if (useIr)
	{
		mIrGen.GetMetaData(irMD);
		nXRes = irMD.XRes();
		nYRes = irMD.YRes();

		pIR = irMD.Data();
		// HISTOGRAM????
	}
	return XN_STATUS_OK;	// prepare() is declared XnStatus, so report success
}
Code Example #8
File: NiSimpleViewer.cpp Project: sledzias/libcvd-cl
void takePhoto() {
    static int index = 1;
    char fname[256] = {0,};
    sprintf(fname, "kinect%03d.txt", index++);

    g_depth.GetMetaData(g_depthMD);
    g_image.GetMetaData(g_imageMD);

    int const nx = g_depthMD.XRes();
    int const ny = g_depthMD.YRes();
    assert(nx == g_imageMD.XRes());
    assert(ny == g_imageMD.YRes());

    const XnDepthPixel* pDepth = g_depthMD.Data();
    const XnUInt8* pImage = g_imageMD.Data();

    FILE * file = fopen(fname, "wb");
    fprintf(file, "%d\n%d\n\n", nx, ny);

    for (int y = 0, di = 0, ri = 0, gi = 1, bi = 2; y < ny; y++) {
        for (int x = 0; x < nx; x++, di++, ri += 3, gi += 3, bi += 3) {
            int const r = pImage[ri];
            int const g = pImage[gi];
            int const b = pImage[bi];
            int const d = pDepth[di];

            assert(r >= 0);
            assert(g >= 0);
            assert(b >= 0);
            assert(d >= 0);

            assert(r <= 0xFF);
            assert(g <= 0xFF);
            assert(b <= 0xFF);
            assert(d <= 0xFFFF);

            fprintf(file, "%3d %3d %3d %5d\n", r, g, b, d);
        }

        fprintf(file, "\n");
    }

    fflush(file);
    fclose(file);
}
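takePhoto() writes plain text: the width, the height, then one "r g b d" record per pixel in row-major order. A minimal reader sketch under that assumption (the filename is illustrative):

FILE* file = fopen("kinect001.txt", "rb");
int nx = 0, ny = 0;
fscanf(file, "%d %d", &nx, &ny);
for (int i = 0; i < nx * ny; i++) {
    int r, g, b, d;
    fscanf(file, "%d %d %d %d", &r, &g, &b, &d);	// whitespace (including blank lines) is skipped
    // ...consume one RGBD sample...
}
fclose(file);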
Code Example #9
void transformImageMD(Mat FrameImage,ImageMetaData& imageMD)
{
    // Write an OpenCV BGR Mat back into the OpenNI image map (RGB pixel order)
    RGB24Map& imageMap = imageMD.WritableRGB24Map();
    for (XnUInt32 y = 0; y < imageMD.YRes(); y++)
    {
        for (XnUInt32 x = 0; x < imageMD.XRes(); x++)
        {
            XnRGB24Pixel imagePixel;
            imagePixel.nBlue  = FrameImage.at<Vec3b>(y,x)[0];
            imagePixel.nGreen = FrameImage.at<Vec3b>(y,x)[1];
            imagePixel.nRed   = FrameImage.at<Vec3b>(y,x)[2];
            imageMap(x,y) = imagePixel;
        }
    }
}
Code Example #10
void OpencvModule::DrawRGB(ImageMetaData& g_imageMD){

    int key=0;

    // Wrap the OpenNI RGB buffer in an OpenCV Mat and convert to BGR for display
    Mat RGB(480,640,CV_8UC3,(uchar*)g_imageMD.WritableData());
    cvtColor(RGB,image_BGR,CV_RGB2BGR);

    // Timestamp is in microseconds; convert to seconds
    float aux=((float)g_imageMD.Timestamp())/1E6;
    QVariant time_double(aux);

    putText(image_BGR,"Time:", cvPoint(460,30),5,1,cvScalar(255, 255, 255, 0),1,1);
    putText(image_BGR,time_double.toString().toStdString(), cvPoint(535,30),6,0.6,cvScalar(255, 255, 255, 0),1,1);

    imshow("Caremedia Kinect Viewer",image_BGR);

    key = waitKey(5);
}
Code Example #11
File: main.cpp Project: alfiandosengkey/as3openni
void captureRGB(unsigned char* g_ucImageBuffer)
{
	ImageMetaData imd;
	_image.GetMetaData(imd);

	unsigned int nValue = 0;
	unsigned int nX = 0;
	unsigned int nY = 0;
	XnUInt16 g_nXRes = imd.XRes();
	XnUInt16 g_nYRes = imd.YRes();

	const XnRGB24Pixel * pImageMap = _image.GetRGB24ImageMap();
	for (nY=0; nY<g_nYRes; nY++) 
	{
		for (nX=0; nX < g_nXRes; nX++) 
		{
			((unsigned char*)g_ucImageBuffer)[(nY*g_nXRes+nX)*4+0] = pImageMap[nY*g_nXRes+nX].nBlue;
			((unsigned char*)g_ucImageBuffer)[(nY*g_nXRes+nX)*4+1] = pImageMap[nY*g_nXRes+nX].nGreen;
			((unsigned char*)g_ucImageBuffer)[(nY*g_nXRes+nX)*4+2] = pImageMap[nY*g_nXRes+nX].nRed;
			((unsigned char*)g_ucImageBuffer)[(nY*g_nXRes+nX)*4+3] = 0x00;
		}
	}
}
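captureRGB fills a caller-owned buffer with 4 bytes per pixel (blue, green, red, plus a zeroed fourth byte). A hypothetical allocation for the 640x480 stream it assumes:

unsigned char* g_ucImageBuffer = new unsigned char[640 * 480 * 4];	// BGRA layout
captureRGB(g_ucImageBuffer);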
Code Example #12
File: NiSimpleViewer.cpp Project: 3david/OpenNI
int main(int argc, char* argv[])
{
	XnStatus rc;

	EnumerationErrors errors;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (rc);
	}
	else if (rc != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(rc));
		return (rc);
	}

	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);

	g_depth.GetMetaData(g_depthMD);
	g_image.GetMetaData(g_imageMD);

	// Hybrid mode isn't supported in this sample
	if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes())
	{
		printf ("The device depth and image resolution must be equal!\n");
		return 1;
	}

	// RGB is the only image format supported.
	if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		printf("The device image format must be RGB24\n");
		return 1;
	}

	// Texture map init
	g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes()-1) / 512) + 1) * 512;
	g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes()-1) / 512) + 1) * 512;
	g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));

	// OpenGL init
	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
	glutInitWindowSize(GL_WIN_SIZE_X, GL_WIN_SIZE_Y);
	glutCreateWindow ("OpenNI Simple Viewer");
	glutFullScreen();
	glutSetCursor(GLUT_CURSOR_NONE);

	glutKeyboardFunc(glutKeyboard);
	glutDisplayFunc(glutDisplay);
	glutIdleFunc(glutIdle);

	glDisable(GL_DEPTH_TEST);
	glEnable(GL_TEXTURE_2D);

	// Per frame code is in glutDisplay
	glutMainLoop();

	return 0;
}
Code Example #13
File: main.cpp Project: Yusuke-Shimizu/depthkey
//----------------------------------------------------
// Image drawing
//----------------------------------------------------
void drawImage(void){
	switch(g_nViewState){
		case DISPLAY_MODE_OVERLAY:		// normal drawing modes
		case DISPLAY_MODE_DEPTH:
		case DISPLAY_MODE_IMAGE:

			glMatrixMode(GL_PROJECTION);								// select the projection matrix
			glLoadIdentity();											// clear the stack
			gluOrtho2D(0, GL_WIN_SIZE_X, GL_WIN_SIZE_Y, 0);	// orthographic projection of the world coordinate system onto normalized device coordinates (left, right, bottom, top)
															// ★ the parallel projection also flattens the point cloud onto a plane, which is ideal for chroma keying
															// the Kinect range is usable from roughly 500 to 9000 (the configured limit is 10000)
			glMatrixMode(GL_MODELVIEW);						// select the modelview matrix
			glLoadIdentity();

			glEnable(GL_TEXTURE_2D);	// enable texture mapping

			// texture parameter setup and definition
			glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
			glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, g_nTexMapX, g_nTexMapY, 0, GL_RGB, GL_UNSIGNED_BYTE, g_pTexMap);	// upload the image data

			// Display the OpenGL texture map
			glColor4f(1,1,1,1);

			// paste the image data onto a quad
			glBegin(GL_QUADS);		// draw a quadrilateral
			{
				int nXRes = g_depthMD.FullXRes();
				int nYRes = g_depthMD.FullYRes();

				// upper left
				glTexCoord2f(0, 0);
				glVertex2f(0, 0);	// vertex position
				// upper right
				glTexCoord2f((float)nXRes/(float)g_nTexMapX, 0);
				glVertex2f(GL_WIN_SIZE_X, 0);	// vertex position
				// lower right
				glTexCoord2f((float)nXRes/(float)g_nTexMapX, (float)nYRes/(float)g_nTexMapY);
				glVertex2f(GL_WIN_SIZE_X, GL_WIN_SIZE_Y);	// vertex position
				// lower left
				glTexCoord2f(0, (float)nYRes/(float)g_nTexMapY);
				glVertex2f(0, GL_WIN_SIZE_Y);	// vertex position
			}
			glEnd();

			glDisable(GL_TEXTURE_2D);	// disable texture mapping

			break;

		case DISPLAY_MODE_CHROMA:		// point cloud drawing modes
		case DISPLAY_MODE_POINT_CLOUD:

			// projection transform
			glMatrixMode(GL_PROJECTION);								// select the projection matrix
			glLoadIdentity();											// clear the stack
			glOrtho(0, KINECT_IMAGE_WIDTH, 
				KINECT_IMAGE_HEIGHT, 0, 
				-1.0, -KINECT_MAX_DEPTH - KINECT_VISIBLE_DELTA);	// orthographic projection of the world coordinate system onto normalized device coordinates (left, right, bottom, top, near, far)
																	// ★ the parallel projection also flattens the point cloud onto a plane, which is ideal for chroma keying
																	// the Kinect range is usable from roughly 500 to 9000 (the configured limit is 10000)
			// viewing transform
			gluLookAt(
				g_lokEyeX, g_lokEyeY, g_lokEyeZ,	// eye position (initial: (0,0,-1))
				g_lokDirX, g_lokDirY, g_lokDirZ,	// look-at position (initial: (0,0,-2))
				0.0, 1.0, 0.0);						// up vector

			// modeling transform
			glMatrixMode(GL_MODELVIEW);								// select the modelview matrix
			glLoadIdentity();										// clear the stack

			glEnable(GL_DEPTH_TEST);	// enable hidden-surface removal

			// draw the point cloud
			glPointSize(g_pointSize);			// point size
			drawPointCloud(g_pBackTex, g_pBackDepth, g_pPoint);					// draw the background image
			//drawPointCloud(g_imageMD.RGB24Data(), g_depthMD.Data(), 10, g_chromaThresh);	// extract the person (depth threshold)
			drawPointCloudHuman(g_imageMD.RGB24Data(), g_depthMD.Data(), g_sceneMD.Data(), g_pPoint);	// extract the person (detect moving objects)

			glDisable(GL_DEPTH_TEST);	// disable hidden-surface removal
			break;

	}
}
Code Example #14
File: main.cpp Project: Yusuke-Shimizu/depthkey
//----------------------------------------------------
// Texture setup
//----------------------------------------------------
void setTexture(void){
	xnOSMemSet(g_pTexMap, 0, g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));	// zero out all of g_pTexMap

	// drawing mode 1 or 3
	if (g_nViewState == DISPLAY_MODE_OVERLAY || g_nViewState == DISPLAY_MODE_IMAGE){
		const XnRGB24Pixel* pImageRow = g_imageMD.RGB24Data();	// pointer into g_imageMD (the image data)
		XnRGB24Pixel* pTexRow = g_pTexMap + g_imageMD.YOffset() * g_nTexMapX;

		for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; ++ y){
			const XnRGB24Pixel* pImage = pImageRow;
			XnRGB24Pixel* pTex = pTexRow + g_imageMD.XOffset();

			for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; ++ x, ++ pImage, ++ pTex){
				*pTex = *pImage;
			}

			pImageRow += g_imageMD.XRes();
			pTexRow += g_nTexMapX;
		}
	}

	// drawing mode 1 or 2
	if (g_nViewState == DISPLAY_MODE_OVERLAY || g_nViewState == DISPLAY_MODE_DEPTH){
		const XnDepthPixel* pDepthRow = g_depthMD.Data();
		XnRGB24Pixel* pTexRow = g_pTexMap + g_depthMD.YOffset() * g_nTexMapX;
		const XnLabel* pLabel = g_sceneMD.Data();

		for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; ++ y){
			const XnDepthPixel* pDepth = pDepthRow;
			XnRGB24Pixel* pTex = pTexRow + g_depthMD.XOffset();

			for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; ++ x, ++ pDepth, ++ pTex, ++ pLabel){
				int nHistValue = g_pDepthHist[*pDepth];

				if(*pLabel){		// this pixel belongs to a person
					*pTex = userColor[*pLabel];
				}else if (*pDepth != 0){
					if(*pDepth < 1000){
						*pTex = xnRGB24Pixel(nHistValue, 0, 0);		// red
					}else if(*pDepth < 2000){
						*pTex = xnRGB24Pixel(0, nHistValue, 0);		// green
					}else if(*pDepth < 3000){
						*pTex = xnRGB24Pixel(0, 0, nHistValue);		// blue
					}else if(*pDepth < 4000){
						*pTex = xnRGB24Pixel(nHistValue, nHistValue, 0);	// yellow
					}else if(*pDepth < 5000){
						*pTex = xnRGB24Pixel(0, nHistValue, nHistValue);	// cyan
					}else{
						*pTex = xnRGB24Pixel(nHistValue, 0, nHistValue);	// magenta
					}
				}
			}

			pDepthRow += g_depthMD.XRes();
			pTexRow += g_nTexMapX;
		}
	}

	// drawing mode 4
	//if (g_nViewState == DISPLAY_MODE_CHROMA){
	//	// paste the image data (camera feed)
	//	const XnRGB24Pixel* pImageRow = g_imageMD.RGB24Data();	// pointer into g_imageMD (the image data)
	//	XnRGB24Pixel* pTexRow = g_pTexMap + g_imageMD.YOffset() * g_nTexMapX;

	//	for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; ++ y){	// 480
	//		const XnRGB24Pixel* pImage = pImageRow;
	//		XnRGB24Pixel* pTex = pTexRow + g_imageMD.XOffset();

	//		for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; ++ x, ++ pImage, ++ pTex){	// 640
	//			*pTex = *pImage;
	//		}

	//		pImageRow += g_imageMD.XRes();
	//		pTexRow += g_nTexMapX;
	//	}

	//	// person extraction using the depth data, composited over a background
	//	const XnDepthPixel* pDepthRow = g_depthMD.Data();		// pointer to the depth data
	//	pTexRow = g_pTexMap + g_depthMD.YOffset() * g_nTexMapX;
	//	GLuint g_backWidth = g_back.GetWidth();						// width of the background
	//	GLubyte* pBackData = g_back.GetData() + g_back.GetImageSize() - 3 * g_backWidth;	// pointer into the background (walked from the last row)

	//	for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; ++ y){	// 480
	//		const XnDepthPixel* pDepth = pDepthRow;			// pointer to the depth data
	//		XnRGB24Pixel* pTex = pTexRow + g_depthMD.XOffset();

	//		for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; ++ x, ++ pDepth, ++ pTex){	// 640
	//			// if the depth is 0 or above the threshold, draw the background image (below the threshold, keep the live pixel)
	//			if (*pDepth == 0 || *pDepth >= g_chromaThresh){
	//				pTex->nRed		= *pBackData;
	//				pTex->nGreen	= *(pBackData + 1);
	//				pTex->nBlue		= *(pBackData + 2);
	//			}

	//			pBackData += 3;
	//		}

	//		pDepthRow += g_depthMD.XRes();
	//		pTexRow += g_nTexMapX;
	//		pBackData -= 2 * 3 * g_backWidth;
	//	}
	//}
}
Code Example #15
File: main.cpp Project: Yusuke-Shimizu/depthkey
//----------------------------------------------------
// OpenNI initialization
//----------------------------------------------------
void xnInit(void){
	XnStatus rc;

	EnumerationErrors errors;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT){
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		exit(1);
	}else if (rc != XN_STATUS_OK){
		printf("Open failed: %s\n", xnGetStatusString(rc));
		exit(1);
	}
	
	//playerInit();

	rc = xnFPSInit(&g_xnFPS, 180);	// FPS initialization
	//CHECK_RC(rc, "FPS Init");

	// create the depth, image, and user generators
	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	errorCheck(rc, "g_depth");		// error check
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	errorCheck(rc, "g_image");
	rc = g_context.FindExistingNode(XN_NODE_TYPE_USER, g_user);
	//rc = g_user.Create(g_context);
	errorCheck(rc, "g_user");

	// make sure user detection is supported
	if (!g_user.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) {
		//throw std::runtime_error("user detection is not supported");
		cout << "user detection is not supported" << endl;
		exit(1);
	}

	// recorder setup
	//rc = setRecorder(g_recorder, rc);

	// register the user callbacks
	XnCallbackHandle userCallbacks;
	g_user.RegisterUserCallbacks(UserDetected, UserLost, NULL, userCallbacks);

	// fetch the depth, image, and user data
	g_depth.GetMetaData(g_depthMD);
	g_image.GetMetaData(g_imageMD);
	g_user.GetUserPixels(0, g_sceneMD);

	// Hybrid mode isn't supported in this sample
	// error out if the image and depth sizes differ
	if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes()){
		printf ("The device depth and image resolution must be equal!\n");
		exit(1);
	}

	// RGB is the only image format supported.
	// check the pixel format
	if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24){
		printf("The device image format must be RGB24\n");
		exit(1);
	}

	// Texture map init
	// round the full-screen texture size up to a multiple of 512
	g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes() - 1) / 512) + 1) * 512;	// rounded up to a multiple of 512 (1024)
	g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes() - 1) / 512) + 1) * 512;	// 512
	g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));	// allocate enough colour data for the whole screen

	// initialize the point buffers
	g_pPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D));			// buffer for the point coordinates
	g_pBackTex = (XnRGB24Pixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnRGB24Pixel));	// buffer for the background image
	g_pBackPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D));		// buffer for the background coordinates
	g_pBackDepth = (XnDepthPixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnDepthPixel));		// buffer for the background depth
}
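errorCheck is not defined in this snippet; a plausible minimal sketch, assuming it prints the OpenNI status string and exits on failure:

void errorCheck(XnStatus rc, const char* what){
	if (rc != XN_STATUS_OK){
		printf("%s: %s\n", what, xnGetStatusString(rc));
		exit(1);
	}
}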
Code Example #16
int main(int argc, char* argv[])
{

	EnumerationErrors errors;


    //rc = context.Init();
    rc = context.InitFromXmlFile(strPathToXML,&errors);
    if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (rc);
	}
	else if (rc != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(rc));
		return (rc);
	}
	
	/* UNCOMMENT TO GET FILE READING 
    //rc = context.OpenFileRecording(strInputFile);
	//CHECK_RC(rc, "Open input file");

	//rc = context.FindExistingNode(XN_NODE_TYPE_PLAYER, player);
	//CHECK_RC(rc, "Get player node"); */ 

	rc = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
	CHECK_RC(rc, "Find depth generator");

	rc = context.FindExistingNode(XN_NODE_TYPE_IMAGE, image);
	CHECK_RC(rc, "Find image generator");

    depth.GetMetaData(depthMD);
	image.GetMetaData(imageMD);

    //rc = player.SetRepeat(FALSE);
	XN_IS_STATUS_OK(rc);

    //rc = player.GetNumFrames(image.GetName(), nNumFrames);
	//CHECK_RC(rc, "Get player number of frames");
	//printf("%d\n",nNumFrames);

    //rc = player.GetNumFrames(depth.GetName(), nNumFrames);
	//CHECK_RC(rc, "Get player number of frames");
	//printf("%d\n",nNumFrames);

	// Hybrid mode isn't supported
	if (imageMD.FullXRes() != depthMD.FullXRes() || imageMD.FullYRes() != depthMD.FullYRes())
	{
		printf ("The device depth and image resolution must be equal!\n");
		return 1;
	}

	// RGB is the only image format supported.
	if (imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		printf("The device image format must be RGB24\n");
		return 1;
	}

    avi = cvCreateVideoWriter(strOutputFile, 0, 30, cvSize(640,480), TRUE);

    depthMetersMat = cvCreateMat(480, 640, CV_16UC1);
    kinectDepthImage = cvCreateImage( cvSize(640,480),16,1 );

    depthMetersMat2 = cvCreateMat(480, 640, CV_16UC1);
    kinectDepthImage2 = cvCreateImage( cvSize(640,480),16,1 );

    colorArr[0] = cv::Mat(imageMD.YRes(),imageMD.XRes(),CV_8U);
    colorArr[1] = cv::Mat(imageMD.YRes(),imageMD.XRes(),CV_8U);
    colorArr[2] = cv::Mat(imageMD.YRes(),imageMD.XRes(),CV_8U);

    //prepare_for_face_detection();

    int b;
    int g;
    int r;

	while ((rc = image.WaitAndUpdateData()) != XN_STATUS_EOF && (rc = depth.WaitAndUpdateData()) != XN_STATUS_EOF) {
        if (rc != XN_STATUS_OK) {
            printf("Read failed: %s\n", xnGetStatusString(rc));
            break;
        }
        depth.GetMetaData(depthMD);
        image.GetMetaData(imageMD);

        //XnUInt32 a;
        //a = g_imageMD.FPS;
        printf("%d\n",imageMD.FrameID());
        //a = g_depthMD.DataSize();
        //printf("%d\n",a);

        pDepth = depthMD.Data();
        pImageRow = imageMD.RGB24Data();

        for (unsigned int y=0; y<imageMD.YRes(); y++) {
            pPixel = pImageRow;
            uchar* Bptr = colorArr[0].ptr<uchar>(y);
            uchar* Gptr = colorArr[1].ptr<uchar>(y);
            uchar* Rptr = colorArr[2].ptr<uchar>(y);

            for(unsigned int x=0;x<imageMD.XRes();++x , ++pPixel){
                Bptr[x] = pPixel->nBlue;
                Gptr[x] = pPixel->nGreen;
                Rptr[x] = pPixel->nRed;

                depthMetersMat->data.s[y * XN_VGA_X_RES + x ] = 7*pDepth[y * XN_VGA_X_RES + x];
                depthMetersMat2->data.s[y * XN_VGA_X_RES + x ] = pDepth[y * XN_VGA_X_RES + x];
            }
            pImageRow += imageMD.XRes();
        }
        cv::merge(colorArr,3,colorImage);
        iplImage = colorImage;

        //cvThreshold(depthMetersMat2, depthMetersMat2, 150, 1500, THRESH_BINARY);

        cvGetImage(depthMetersMat,kinectDepthImage);
        cvGetImage(depthMetersMat2,kinectDepthImage2);

        depthImage = Bw2Image(kinectDepthImage2);
        printf("1. Middle pixel is %u millimeters away\n",depthImage[240][320]);

        rgbImage = RgbImage(&iplImage);

		// we only want to see up to 2000 mm
        int THRESH = 2000;

        for (unsigned int y=0; y<imageMD.YRes(); y++) {
            for(unsigned int x=0;x<imageMD.XRes();++x){
                if ( depthImage[y][x] >= THRESH ) {
                    depthImage[y][x] = 0;
                } else {
                    float tmp = depthImage[y][x];
                    tmp = tmp / THRESH * (65536)*(-1) + 65536;
                    depthImage[y][x] = (unsigned int)tmp;
                }
            }
        }
		
		// FILTER COLOURS IN HSV TO KEEP ONLY A SPECIFIC ONE,
		// THEN A FEW MORPHOLOGICAL OPERATIONS TO MAKE IT LOOK BETTER

        IplImage* imgHSV = cvCreateImage(cvGetSize(&iplImage), 8, 3);
        cvCvtColor(&iplImage, imgHSV, CV_BGR2HSV);
        imgThreshed = cvCreateImage(cvGetSize(&iplImage), 8, 1);
        //cvInRangeS(imgHSV, cvScalar(100, 60, 80), cvScalar(110, 255, 255), imgThreshed); // BLUE
        cvInRangeS(imgHSV, cvScalar(29, 95, 95), cvScalar(35, 255, 255), imgThreshed); // YELLOW
        //cvInRangeS(imgHSV, cvScalar(29, 60, 60), cvScalar(35, 255, 255), imgThreshed); // YELLOW DARK
        //cvInRangeS(imgHSV, cvScalar(150, 70, 70), cvScalar(160, 255, 255), imgThreshed); // PINK
        //cvInRangeS(imgHSV, cvScalar(40, 76, 76), cvScalar(70, 255, 255), imgThreshed); // GREEN
        IplConvKernel* kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);
        //cvDilate(imgThreshed,imgThreshed,kernel);
        //cvErode(imgThreshed,imgThreshed,kernel);
        Mat mat = Mat(imgThreshed);
        blur(Mat(imgThreshed),mat,cvSize(3,3));
        IplImage iplThreshed = mat;	// named header; taking the address of a temporary IplImage is non-standard
        imgThreshed = &iplThreshed;
        //cvInRangeS(imgThreshed,cvScalar(100),cvScalar(255),imgThreshed);
        //cvErode(imgThreshed,imgThreshed,kernel);
        cvDilate(imgThreshed,imgThreshed,kernel);
        cvDilate(imgThreshed,imgThreshed,kernel);
        cvErode(imgThreshed,imgThreshed,kernel);
        cvErode(imgThreshed,imgThreshed,kernel);
        mat = Mat(imgThreshed);
        blur(Mat(imgThreshed),mat,cvSize(6,6));
        iplThreshed = mat;
        imgThreshed = &iplThreshed;
        cvInRangeS(imgThreshed,cvScalar(100),cvScalar(255),imgThreshed);
        cvReleaseImage(&imgHSV);
        BwImage threshed = BwImage(imgThreshed);



        if ( initialize == true ) {

            normalizeReferenceFace();
            int currentID = 0;

                for ( int y = 30; y<480; y++ ) {
                    for ( int x = 30; x<640; x++ ) {
                        bool g2g = true;
                        //printf("%d %d %d\n",ID, y,x);
                        if ( threshed[y][x]!=0 ) {
                            for ( int ID2 = 0; ID2<nbOfPoints; ID2++) {
                                if ( (abs(markers[ID2].y-y)<proximityLimit) && (abs(markers[ID2].x-x)<proximityLimit)) {
                                    g2g = false;
                                }
                            }
                            if (currentID >= nbOfPoints || g2g == false ) {
                                break;
                            }
                            markers[currentID].y=y;
                            markers[currentID].x=x;
                            currentID++;
                            printf("WHITE PIXEL INITIALIZED %d: %d %d\n",currentID, x,y);
                        }
                    }
                }


            if (isDebugConf==true || currentID == nbOfMarkers) {
                printf("%d PIXELS INITIALIZED\n", currentID);
                initialize = false;
                //printf("%d,%d\n", currentID, nbOfPoints);
                //return 0;
            } else {
                printf("WAITING FOR %d PIXELS TO APPEAR, %d SO FAR \n",nbOfMarkers, currentID);
                continue;
            }


            // FIND TOP RIGHT AND CHIN PIXEL

            int refPixID = 0;
            int chinPixID = 0;

            for ( int i = 0; i < nbOfMarkers; i++) {
                if ( (markers[i].x + markers[i].y)*(markers[i].x + markers[i].y) < (markers[refPixID].x + markers[refPixID].y)* (markers[refPixID].x + markers[refPixID].y)) {
                    refPixID = i;
                }
                if (markers[i].y > markers[chinPixID].y) {
                    chinPixID = i;
                }
            }

            float width = (markers[1].x-markers[0].x)*2;
            float height = abs(markers[1].y-markers[0].y);

            // WE GOT WIDTH & HEIGHT OF THE FACE, LET'S ADJUST POINTS

            // SET 0 to REF, SET 1 to CHIN

            MyPoint tmp = MyPoint(markers[refPixID].x,markers[refPixID].y);
            markers[refPixID].x = markers[0].x;
            markers[refPixID].y = markers[0].y;
            markers[0].x = tmp.x;
            markers[0].y = tmp.y;

            tmp = MyPoint(markers[chinPixID].x,markers[chinPixID].y);
            markers[chinPixID].x = markers[1].x;
            markers[chinPixID].y = markers[1].y;
            markers[1].x = tmp.x;
            markers[1].y = tmp.y;


            // REST OF THE POINTS

            for ( int i = 2; i < nbOfPoints; i++) {

                int cost = 0;
                int lowestCost = INT_MAX;	// start high so the first candidate wins (needs <climits>)
                int closestPixID = -1;


                for ( int j = 2; j < nbOfMarkers; j++ ) {
                    cost = (markers[j].x-points[i].x*width)*(markers[j].x-points[i].x*width) + (markers[j].y-points[i].y*height)*(markers[j].y-points[i].y*height);
                    if ( cost < lowestCost ) {
                        lowestCost = cost;
                        closestPixID = j;
                    }
                    if (closestPixID == -1) {
                        //printf("SOMETHING IS QUITE WRONG, CHECK HERE\n");
                        break;
                    }
                    // swap the closest marker into slot i
                    tmp.x = markers[i].x;
                    tmp.y = markers[i].y;
                    markers[i].x=markers[closestPixID].x;
                    markers[i].y=markers[closestPixID].y;
                    markers[closestPixID].x = tmp.x;
                    markers[closestPixID].y = tmp.y;
                }
            }
        }

        for ( int currentPixelID = 0; currentPixelID < nbOfMarkers; currentPixelID++) {
            if (markers[currentPixelID].x == 0) {
                continue;
            }

            if ( threshed[markers[currentPixelID].y][markers[currentPixelID].x] < 128 ) {
                printf("PIXEL %d LOST\n",currentPixelID);

                for ( int neighbSize = 2; neighbSize < maxNeighbSize; neighbSize = neighbSize + 2 ) {

                    int x1 = markers[currentPixelID].x - neighbSize/2;
                    if ( x1 < intoDepthX(0) ) {
                        x1 = (int)intoDepthX(0);
                    }

                    int y1 = (int)(markers[currentPixelID].y-neighbSize/2);
                    if (  y1 < intoDepthY(0) ) {
                        y1 = intoDepthY(0);
                    }

                    int y2 = markers[currentPixelID].y+neighbSize/2;
                    if (  y2 > intoDepthY(480)  ) {
                        y2 = intoDepthY(480);
                    }

                    int x2 = markers[currentPixelID].x+neighbSize/2;
                    if ( x2 > intoDepthX(640) ) {
                        x2 = intoDepthX(640);
                    }

                    bool found = false;
                    for ( int y = y1; y < y2; y++) {
                        for ( int x = x1; x < x2; x++) {
                            bool g2g = true;
                            if (threshed[y][x] > 128) {
                                for ( int ID2 = 0; ID2<nbOfMarkers; ID2++) {
                                    if ( currentPixelID == ID2 )
                                        continue;
                                    if ( (abs(markers[ID2].y-y)<proximityLimit) && (abs(markers[ID2].x-x)<proximityLimit)) {
                                        g2g = false;
                                        break;
                                    }
                                }

                                if ( g2g ) {
                                    markers[currentPixelID].x = x;
                                    markers[currentPixelID].y = y;
                                    found = true;
                                    printf("Pixel %d, FOUND\n",currentPixelID);
                                    break;
                                }
                            }
                        }
                        if (found == true ) {
                            break;
                        }
                    }
                    if (found == true ) {
                        break;
                    }
                }
            }

            paintMarkerOnBoth(markers[currentPixelID]);

        }
        faceImage = cvCreateImage(cvGetSize(&iplImage), 8, 1);
        paintFace();

		// normal kinect depth
        cvShowImage("Depth_Kinect", kinectDepthImage);
		// depth within 80 - 200 mm, normalized 
        cvShowImage("Depth_Kinect_2", kinectDepthImage2);
		// rgb with tracking points
        cvShowImage("RGB_Kinect", &iplImage);
		// colour detector 
        cvShowImage("RGB_Threshed", imgThreshed);
		// attempt to draw a face 
        cvShowImage("Face Image", faceImage);

        cvWaitKey(50);           // wait 50 ms

        if ( avi == NULL) {
            printf ("video writer was not created\n");
        }
        //cvWriteFrame (avi, &iplImage);
	}

//    cvReleaseImageHeader(kinectDepthImage);
    cvReleaseVideoWriter(&avi);
//    cvReleaseHaarClassifierCascade( &cascade );
    context.Shutdown();

	return 0;
}
Code Example #17
void WorldRenderer::drawBackground()
{
	m_rctx->orthoMatrix.PushMatrix();
	{
		//TODO: find out what this does
		//m_rctx->orthoMatrix.Translate(
		//	float(m_rng.gaussian(0.6)) * currentIntensity * 0.01f,
		//	float(m_rng.gaussian(0.6)) * currentIntensity * 0.01f,
		//	0);

		// setup shader
		m_rctx->shaderMan->UseStockShader(GLT_SHADER_SHADED, m_rctx->orthoMatrix.GetMatrix());

		// get depth buffer
		DepthMetaData dmd;
		m_depthGen->GetMetaData(dmd);
		const XnDepthPixel* dp = dmd.Data();

		// get image buffer
		ImageMetaData imd;
		m_imageGen->GetMetaData(imd);
		const XnRGB24Pixel* ip = imd.RGB24Data();

		// get working buffers
		M3DVector3f* vp = m_vertexBuf;
		M3DVector4f* cp = m_colorBuf;
		XnUInt32 numPoints = getNumPoints();

		// setup henshin-related information
		const float Z_SCALE = 10.0f;
		XnUserID userID = 0;
		const XnLabel* lp = NULL;
		XV3 headCenter, headDirection;
		getHenshinData(&userID, &lp, &headCenter, &headDirection);

		float lightRadius = 900.0f;

		bool isTracked = userID && lp;

		const int NUM_BALLS = 3;
		XV3 ball_centers[NUM_BALLS];
		bool ball_enabled_flags[NUM_BALLS];

		float ball_radius[3];
		float ball_core_radius[3];
		float ball_core_radius2[3];

		//get the ball centres and transform into projective coords
		//Also calculate an appropriate radius to make the ball scale as it moves away from the camera
		for (int j=0; j< NUM_BALLS; j++) {
			 m_ball_manager->GetBallInfo(j, &ball_enabled_flags[j],&ball_centers[j]);

			 if(!ball_enabled_flags[j]) continue;

			 XV3 ball_top(ball_centers[j]); //copy the ball center before transformation
			 
			 m_depthGen->ConvertRealWorldToProjective(1, &ball_centers[j], &ball_centers[j]);
			 normalizeProjective(&ball_centers[j]);
			 
			 //this is probably a clunky way to transform the radius into projective coords but it seems to work ok
			 ball_top.Y +=lightRadius;
			 m_depthGen->ConvertRealWorldToProjective(1, &ball_top, &ball_top);
			 normalizeProjective(&ball_top);
			 ball_radius[j] = fabs(ball_top.Y-ball_centers[j].Y);
			 ball_core_radius[j]  = ball_radius[j]*0.1f;
			 ball_core_radius2[j] = square(ball_core_radius[j]);
		}

		XnUInt32 ix = 0, iy = 0;
		float nearZ = PERSPECTIVE_Z_MIN + m_depthAdjustment;
		for (XnUInt32 i = 0; i < numPoints; i++, dp++, ip++, vp++, cp++, lp++, ix++) {

			if (ix == m_width) {
				ix = 0;
				iy++;
			}

			// (*vp)[0] (x) is already set
			// (*vp)[1] (y) is already set
			(*vp)[2] = (*dp) ? getNormalizedDepth(*dp, nearZ, PERSPECTIVE_Z_MAX) : Z_INFINITE;

			setRGB(cp, *ip);

			//highlight the tracked user
			if(isTracked) {
				if(*lp == userID) {
					(*cp)[0] *= 1.2f;
					(*cp)[1] *= 1.2f;
					(*cp)[2] *= 1.2f;
				}
			}

			// draw balls
			for(int j=0; j < NUM_BALLS; j++) {
				if(!ball_enabled_flags[j]) continue;

				XV3& lightCenter = ball_centers[j];
				//float ball_depth = (*dp) ? getNormalizedDepth(ball_radius[j], nearZ, PERSPECTIVE_Z_MAX) : 0;

				if((*vp)[2] < (lightCenter.Z - 0.001*ball_radius[j])) continue; //don't draw obscured pixels
				{
					// TODO: Should we use 3D object?
					XV3 flatCoords(*vp);
					flatCoords.Z = lightCenter.Z;
					float flatDistance2 = lightCenter.distance2(flatCoords);

					if (flatDistance2 < ball_core_radius2[j]) {
						float r = (1.0f - sqrt(flatDistance2) / ball_core_radius[j]) * (1.0f + 0.8f * ball_radius[j]);
						float r2 = r * r;
						float a = (r <= 1.0f) ? (2 * r2 - r2 * r2) : 1.0f;

						(*cp)[0] *= 1.2;
						(*cp)[1] *= 1.2;
						(*cp)[2] *= 1.2;

						//assuming we only have three balls cycle through red,green and blue for each one
						(*cp)[j] = interpolate((*cp)[j], 1.0f, a);
						//(*cp)[1] = interpolate((*cp)[1], 1.0f, a);
						//(*cp)[2] = interpolate((*cp)[2], 1.0f, a);
					}
				}
			}
		}

		glEnable(GL_POINT_SIZE);
		glPointSize(getPointSize());
		m_batch.draw(m_vertexBuf, m_colorBuf);
	}
	m_rctx->orthoMatrix.PopMatrix();
}
Code Example #18
File: NiSimpleViewer.cpp Project: 3david/OpenNI
void glutDisplay (void)
{
	XnStatus rc = XN_STATUS_OK;

	// Read a new frame
	rc = g_context.WaitAnyUpdateAll();
	if (rc != XN_STATUS_OK)
	{
		printf("Read failed: %s\n", xnGetStatusString(rc));
		return;
	}

	g_depth.GetMetaData(g_depthMD);
	g_image.GetMetaData(g_imageMD);

	const XnDepthPixel* pDepth = g_depthMD.Data();
	const XnUInt8* pImage = g_imageMD.Data();

	unsigned int nImageScale = GL_WIN_SIZE_X / g_depthMD.FullXRes();

	// Copied from SimpleViewer
	// Clear the OpenGL buffers
	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();
	glOrtho(0, GL_WIN_SIZE_X, GL_WIN_SIZE_Y, 0, -1.0, 1.0);

	// Calculate the accumulative histogram (the yellow display...)
	xnOSMemSet(g_pDepthHist, 0, MAX_DEPTH*sizeof(float));

	unsigned int nNumberOfPoints = 0;
	for (XnUInt y = 0; y < g_depthMD.YRes(); ++y)
	{
		for (XnUInt x = 0; x < g_depthMD.XRes(); ++x, ++pDepth)
		{
			if (*pDepth != 0)
			{
				g_pDepthHist[*pDepth]++;
				nNumberOfPoints++;
			}
		}
	}
	for (int nIndex=1; nIndex<MAX_DEPTH; nIndex++)
	{
		g_pDepthHist[nIndex] += g_pDepthHist[nIndex-1];
	}
	if (nNumberOfPoints)
	{
		for (int nIndex=1; nIndex<MAX_DEPTH; nIndex++)
		{
			g_pDepthHist[nIndex] = (unsigned int)(256 * (1.0f - (g_pDepthHist[nIndex] / nNumberOfPoints)));
		}
	}

	xnOSMemSet(g_pTexMap, 0, g_nTexMapX*g_nTexMapY*sizeof(XnRGB24Pixel));

	// check if we need to draw image frame to texture
	if (g_nViewState == DISPLAY_MODE_OVERLAY ||
		g_nViewState == DISPLAY_MODE_IMAGE)
	{
		const XnRGB24Pixel* pImageRow = g_imageMD.RGB24Data();
		XnRGB24Pixel* pTexRow = g_pTexMap + g_imageMD.YOffset() * g_nTexMapX;

		for (XnUInt y = 0; y < g_imageMD.YRes(); ++y)
		{
			const XnRGB24Pixel* pImage = pImageRow;
			XnRGB24Pixel* pTex = pTexRow + g_imageMD.XOffset();

			for (XnUInt x = 0; x < g_imageMD.XRes(); ++x, ++pImage, ++pTex)
			{
				*pTex = *pImage;
			}

			pImageRow += g_imageMD.XRes();
			pTexRow += g_nTexMapX;
		}
	}

	// check if we need to draw depth frame to texture
	if (g_nViewState == DISPLAY_MODE_OVERLAY ||
		g_nViewState == DISPLAY_MODE_DEPTH)
	{
		const XnDepthPixel* pDepthRow = g_depthMD.Data();
		XnRGB24Pixel* pTexRow = g_pTexMap + g_depthMD.YOffset() * g_nTexMapX;

		for (XnUInt y = 0; y < g_depthMD.YRes(); ++y)
		{
			const XnDepthPixel* pDepth = pDepthRow;
			XnRGB24Pixel* pTex = pTexRow + g_depthMD.XOffset();

			for (XnUInt x = 0; x < g_depthMD.XRes(); ++x, ++pDepth, ++pTex)
			{
				if (*pDepth != 0)
				{
					int nHistValue = g_pDepthHist[*pDepth];
					pTex->nRed = nHistValue;
					pTex->nGreen = nHistValue;
					pTex->nBlue = 0;
				}
			}

			pDepthRow += g_depthMD.XRes();
			pTexRow += g_nTexMapX;
		}
	}

	// Create the OpenGL texture map
	glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, g_nTexMapX, g_nTexMapY, 0, GL_RGB, GL_UNSIGNED_BYTE, g_pTexMap);

	// Display the OpenGL texture map
	glColor4f(1,1,1,1);

	glBegin(GL_QUADS);

	int nXRes = g_depthMD.FullXRes();
	int nYRes = g_depthMD.FullYRes();

	// upper left
	glTexCoord2f(0, 0);
	glVertex2f(0, 0);
	// upper right
	glTexCoord2f((float)nXRes/(float)g_nTexMapX, 0);
	glVertex2f(GL_WIN_SIZE_X, 0);
	// bottom right
	glTexCoord2f((float)nXRes/(float)g_nTexMapX, (float)nYRes/(float)g_nTexMapY);
	glVertex2f(GL_WIN_SIZE_X, GL_WIN_SIZE_Y);
	// bottom left
	glTexCoord2f(0, (float)nYRes/(float)g_nTexMapY);
	glVertex2f(0, GL_WIN_SIZE_Y);

	glEnd();

	// Swap the OpenGL display buffers
	glutSwapBuffers();
}
Code Example #19
int main(int argc, char* argv[])
{
	XnStatus rc;
	EnumerationErrors errors;

        // get playback file if using 
        if (argc > 2 && strcmp(argv[2], "true") == 0) {
            rc = g_context.Init();

            rc = g_context.OpenFileRecording(RECORDING_PATH, g_player);
            CHECK_RC(rc, "Opening file");

            rc = g_player.SetRepeat(TRUE);
            CHECK_RC(rc, "Turn repeat on");
        } else {
            // get context from xml
	    rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, g_scriptNode, &errors);
        }

        // error checking
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (rc);
	}
	CHECK_RC(rc, "Context initialization");        

        // get hand and image generator from context, check errors
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	CHECK_RC(rc, "Get image generator");

        rc = g_context.FindExistingNode(XN_NODE_TYPE_HANDS, g_hands);
	CHECK_RC(rc, "Get hand generator");       
       
        rc = g_context.FindExistingNode(XN_NODE_TYPE_GESTURE, g_gesture);
        CHECK_RC(rc, "Get gesture generator");

        // create and register callbacks
        XnCallbackHandle h1, h2;
        rc = g_gesture.RegisterGestureCallbacks(Gesture_Recognized,
                                              Gesture_Process,
                                              NULL, h1);
        CHECK_RC(rc, "Register gesture callbacks");

        rc = g_hands.RegisterHandCallbacks(Hand_Create, Hand_Update,
                                           Hand_Destroy, NULL, h2);
        CHECK_RC(rc, "Register hand callbacks");

        // add gestures to the generator
        rc = g_gesture.AddGesture("Click", NULL);
        CHECK_RC(rc, " add click gesture");
        rc = g_gesture.AddGesture("RaiseHand", NULL);
        CHECK_RC(rc, "add raise gesture");
        rc = g_gesture.AddGesture("Wave", NULL);
        CHECK_RC(rc, "add wave gesture");

        
	g_image.GetMetaData(g_imageMD);

	// RGB is the only image format supported.
	if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		printf("The device image format must be RGB24\n");
		return 1;
	}

        // if argument is set true, then record the session
        if (argc > 1 && strcmp(argv[1], "true") == 0) {
            std::cout << "recording to " << RECORDING_PATH << std::endl;
            // Create Recorder
            rc = recorder.Create(g_context);
            CHECK_RC(rc, "create recorder");

            // Init it
            rc = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, RECORDING_PATH);
            CHECK_RC(rc, "init recorder");

            // Add nodes to recording
            rc = recorder.AddNodeToRecording(g_image);
            CHECK_RC(rc, "add image node");
            
            rc = recorder.AddNodeToRecording(g_hands);
            CHECK_RC(rc, "add hands node");
        }

        // initialize and run program
	glutInit(&argc, argv);                                      // GLUT initialization
	glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH );  // Display Mode
	glutInitWindowSize(WIDTH, HEIGHT);	// set window size
        glutInitWindowPosition(GL_WIN_POSITION_X, GL_WIN_POSITION_Y);
	glutCreateWindow(TITLE);	        // create Window
	glutDisplayFunc(glutDisplay);		// register Display Function
	glutIdleFunc(glutDisplay);		// register Idle Function
        glutKeyboardFunc(glutKeyboard );	// register Keyboard Handler
	initialize();
	glutMainLoop();	

        CleanUpExit();
	return 0;
}
Code Example #20
File: main.cpp Project: alfiandosengkey/as3openni
int main(int argc, char *argv[])
{	
	//--------------------------------------------------------------------//
	//------------------------- SETUP REQUIRED NODES ---------------------//
	//--------------------------------------------------------------------//
	
	// Setup the command line parameters.
	setupParams(argc, argv);
	
	// Setup all the sockets.
	setupSockets();
    
	// Setup the capture socket server for Mac.
	#if (XN_PLATFORM == XN_PLATFORM_MACOSX)
		if(_featureDepthMapCapture || _featureRGBCapture)
		{
			if(_useSockets)
			{
				g_AS3Network = network();
				g_AS3Network.init(setupServer);
			}
		}
	#endif
	
	// Setup the status.
    XnStatus _status = XN_STATUS_OK;
    EnumerationErrors _errors;
    
    // Context Init and Add license.
	_status = _context.Init();
	CHECK_RC(_status, "AS3OpenNI :: Initialize context");
	_context.SetGlobalMirror(_mirror);
	
	// Fill in the PrimeSense license credentials.
	strcpy(_license.strVendor, "PrimeSense");
	strcpy(_license.strKey, "0KOIk2JeIBYClPWVnMoRKn5cdY4=");
		
	_status = _context.AddLicense(_license);
   	CHECK_RC(_status, "AS3OpenNI :: Added license");
   	
   	// Set it to VGA maps at 30 FPS
	_depthMode.nXRes = 640;
	_depthMode.nYRes = 480;
	_depthMode.nFPS = 30;
	
	// Depth map create.
	_status = _depth.Create(_context);
	CHECK_RC(_status, "AS3OpenNI :: Create depth generator");
	_status = _depth.SetMapOutputMode(_depthMode);
	
	// Image map create.
	_status = _image.Create(_context);
	CHECK_RC(_status, "AS3OpenNI :: Create image generator");
	_status = _image.SetMapOutputMode(_depthMode);
	_status = _image.SetPixelFormat(XN_PIXEL_FORMAT_RGB24);
	
	// Create the hands generator.
	_status = _hands.Create(_context);
	CHECK_RC(_status, "AS3OpenNI :: Create hands generator");
	_hands.SetSmoothing(0.1);

	// Create the gesture generator.
	_status = _gesture.Create(_context);
	CHECK_RC(_status, "AS3OpenNI :: Create gesture generator");
	
	// Create user generator.
	_status = _userGenerator.Create(_context);
	CHECK_RC(_status, "AS3OpenNI :: Find user generator");
	
	// Create and initialize point tracker
	_sessionManager = new XnVSessionManager();
	_status = _sessionManager->Initialize(&_context, "Wave", "RaiseHand");
	
	if (_status != XN_STATUS_OK)
	{
		printf("AS3OpenNI :: Couldn't initialize the Session Manager: %s\n", xnGetStatusString(_status));
		CleanupExit();
	}
	_sessionManager->RegisterSession(NULL, &SessionStart, &SessionEnd, &SessionProgress);
	
	// Start catching signals for quit indications
	CatchSignals(&_quit);
	
	//---------------------------------------------------------------//
	//------------------------- SETUP FEATURES ---------------------//
	//--------------------------------------------------------------//
	
	// Define the Wave and SinglePoint detectors.
	_waveDetector = new XnVWaveDetector();
	
	// SinglePoint detector.
	if(_featureSinglePoint) _waveDetector->RegisterPointUpdate(NULL, &OnPointUpdate);
	
	// Feature Gesture.
	if(_featureGesture)
	{
		// Wave detector.
		_waveDetector->RegisterWave(NULL, &OnWave);
		
		// Push detector.
		_pushDetector = new XnVPushDetector();
		_pushDetector->RegisterPush(NULL, &onPush);
	
		// Swipe detector.
		_swipeDetector = new XnVSwipeDetector();
		_swipeDetector->RegisterSwipeUp(NULL, &Swipe_SwipeUp);
		_swipeDetector->RegisterSwipeDown(NULL, &Swipe_SwipeDown);
		_swipeDetector->RegisterSwipeLeft(NULL, &Swipe_SwipeLeft);
		_swipeDetector->RegisterSwipeRight(NULL, &Swipe_SwipeRight);
	
		// Steady detector.
		_steadyDetector = new XnVSteadyDetector();
		_steadyDetector->RegisterSteady(NULL, &Steady_OnSteady);
	}
	
	// Feature Circle.
	if(_featureCircle)
	{
		// Circle detector.
		_circleDetector = new XnVCircleDetector();
		_circleDetector->RegisterCircle(NULL, &CircleCB);
		_circleDetector->RegisterNoCircle(NULL, &NoCircleCB);
		_circleDetector->RegisterPrimaryPointCreate(NULL, &Circle_PrimaryCreate);
		_circleDetector->RegisterPrimaryPointDestroy(NULL, &Circle_PrimaryDestroy);
	}
	
	// Feature Slider.
	if(_featureSlider)
	{
		// Left/Right slider.
		_leftRightSlider = new XnVSelectableSlider1D(3, 0, AXIS_X);
		_leftRightSlider->RegisterActivate(NULL, &LeftRightSlider_OnActivate);
		_leftRightSlider->RegisterDeactivate(NULL, &LeftRightSlider_OnDeactivate);
		_leftRightSlider->RegisterPrimaryPointCreate(NULL, &LeftRightSlider_OnPrimaryCreate);
		_leftRightSlider->RegisterPrimaryPointDestroy(NULL, &LeftRightSlider_OnPrimaryDestroy);
		_leftRightSlider->RegisterValueChange(NULL, &LeftRightSlider_OnValueChange);
		_leftRightSlider->SetValueChangeOnOffAxis(false);
		
		// Up/Down slider.
		_upDownSlider = new XnVSelectableSlider1D(3, 0, AXIS_Y);
		_upDownSlider->RegisterActivate(NULL, &UpDownSlider_OnActivate);
		_upDownSlider->RegisterDeactivate(NULL, &UpDownSlider_OnDeactivate);
		_upDownSlider->RegisterPrimaryPointCreate(NULL, &UpDownSlider_OnPrimaryCreate);
		_upDownSlider->RegisterPrimaryPointDestroy(NULL, &UpDownSlider_OnPrimaryDestroy);
		_upDownSlider->RegisterValueChange(NULL, &UpDownSlider_OnValueChange);
		_upDownSlider->SetValueChangeOnOffAxis(false);
		
		// In/Out slider.
		_inOutSlider = new XnVSelectableSlider1D(3, 0, AXIS_Z);
		_inOutSlider->RegisterActivate(NULL, &InOutSlider_OnActivate);
		_inOutSlider->RegisterDeactivate(NULL, &InOutSlider_OnDeactivate);
		_inOutSlider->RegisterPrimaryPointCreate(NULL, &InOutSlider_OnPrimaryCreate);
		_inOutSlider->RegisterPrimaryPointDestroy(NULL, &InOutSlider_OnPrimaryDestroy);
		_inOutSlider->RegisterValueChange(NULL, &InOutSlider_OnValueChange);
		_inOutSlider->SetValueChangeOnOffAxis(false);
	}
	
	// Feature TrackPad.
	if(_featureTrackPad)
	{
		// Track Pad.
		if(trackpad_columns > 0 && trackpad_rows > 0)
		{
			_trackPad = new XnVSelectableSlider2D(trackpad_columns, trackpad_rows);
		}
		else
		{
			_trackPad = new XnVSelectableSlider2D(4, 9);
		}
		
		_trackPad->RegisterItemHover(NULL, &TrackPad_ItemHover);
		_trackPad->RegisterItemSelect(NULL, &TrackPad_ItemSelect);
	    _trackPad->RegisterPrimaryPointCreate(NULL, &TrackPad_PrimaryCreate);
	  	_trackPad->RegisterPrimaryPointDestroy(NULL, &TrackPad_PrimaryDestroy);
	}
	
	// Feature User Tracking.
	if(_featureUserTracking)
	{
		// Setup user generator callbacks.
		XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
		if (!_userGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
		{
			printf("AS3OpenNI :: Supplied user generator doesn't support skeleton\n");
			return 1;
		}
		_userGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
		
		// Setup Skeleton detection.
		_userGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);
		if (_userGenerator.GetSkeletonCap().NeedPoseForCalibration())
		{
			_needPose = true;
			if (!_userGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
			{
				printf("AS3OpenNI :: Pose required, but not supported\n");
				return 1;
			}
			_userGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);
			_userGenerator.GetSkeletonCap().GetCalibrationPose(_strPose);
		}
		_userGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
	}
	
	// Create the broadcaster manager.
	_broadcaster = new XnVBroadcaster();
	
	// Start generating all.
	_context.StartGeneratingAll();
	
	// Set the frame rate.
	_status = xnFPSInit(&xnFPS, 180);
	CHECK_RC(_status, "AS3OpenNI :: FPS Init");
	
	//----------------------------------------------------------------------//
	//------------------------- SETUP DISPLAY SUPPORT ---------------------//
	//--------------------------------------------------------------------//
	
	// Setup depth and image data.
	_depth.GetMetaData(_depthData);
	_image.GetMetaData(_imageData);
	
	// Hybrid mode isn't supported in this sample
	if (_imageData.FullXRes() != _depthData.FullXRes() || _imageData.FullYRes() != _depthData.FullYRes())
	{
		printf ("AS3OpenNI :: The device depth and image resolution must be equal!\n");
		return 1;
	}

	// RGB is the only image format supported.
	if (_imageData.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		printf("AS3OpenNI :: The device image format must be RGB24\n");
		return 1;
	}
	
	// Setup the view points to match between the depth and image maps.
	if(_snapPixels) _depth.GetAlternativeViewPointCap().SetViewPoint(_image);
	
	//-------------------------------------------------------------//
	//------------------------- MAIN LOOP ------------------------//
	//-----------------------------------------------------------//
	
	// Setup the capture socket server for PC.
	#if (XN_PLATFORM == XN_PLATFORM_WIN32)
		if(_featureDepthMapCapture || _featureRGBCapture || _featureUserTracking)
		{
			if(_useSockets)
			{
				g_AS3Network = network();
				g_AS3Network.init(setupServer);
			}
		}
	#endif
	
	// Main loop
	while ((!_kbhit()) && (!_quit))
	{
		xnFPSMarkFrame(&xnFPS);
		_context.WaitAndUpdateAll();
		_sessionManager->Update(&_context);
		if(_featureDepthMapCapture) captureDepthMap(g_ucDepthBuffer);
		if(_featureRGBCapture) captureRGB(g_ucImageBuffer);
		#if (XN_PLATFORM == XN_PLATFORM_WIN32)
			if(_featureUserTracking) getPlayers();
		#else
			if(_featureUserTracking) renderSkeleton();
		#endif
	}
	
	CleanupExit();
}
Code Example #21
File: kinectd.cpp Project: mlab-upenn/HAWK-daemons
int main(void) {
	int sockfd = network_setup();
	int framecount = 0;

	// Initialize the Kinect  
	if(kinectInit() != XN_STATUS_OK) {
		printf("Unexpected error: check that the device is connected.\n");
		return 1;
	}

	uint32_t depthsize = sizeof(uint16_t)*640*480;
	unsigned long rgbsize;
	rgbsize = sizeof(uint8_t)*3*640*480;

	uint8_t *compdepth = (uint8_t *) malloc(depthsize);
	uint8_t *comprgb = (uint8_t *) malloc(rgbsize);
	
	uint8_t * rgb_buf;
	uint8_t * depth_buf;

	uint8_t *image_data = (uint8_t *) malloc(rgbsize);
	uint8_t *depth_data = (uint8_t *) malloc(depthsize);

	uint32_t depthcompression;
	//unsigned long *rgbcompression = (unsigned long *) malloc(sizeof(long));
	unsigned long outsize;

	setup_compression();

	while(1) {
		kinectUpdate();

		//compress rgb
		rgb_buf = (uint8_t *)g_imageMD.RGB24Data();
		depth_buf = (uint8_t *)depthMD.Data();
		memcpy(image_data, rgb_buf, rgbsize);
		memcpy(depth_data, depth_buf, depthsize);
		//comprgb = (uint8_t *)g_imageMD.RGB24Data();
		//compression = rgbsize;

		compress_frame(image_data, &comprgb, &outsize, 480, 640, 3);	
		//printf("compressed rgb to size %d\n", outsize);

		//send size of compressed rgb frame
		if((sendall(sockfd, (uint8_t *)&outsize, sizeof(uint32_t))) < 0) {
			perror("sendallrgbsize");
			exit(1);
		} 
		
		if((sendall(sockfd, comprgb, outsize)) < 0) {
			perror("sendallrgb");
			exit(1);
		}
	
		//compress depth 
		depthcompression = compress_depth(depth_data, compdepth, depthsize);	
		//printf("compressed depth to size %d\n", depthcompression);

		//send size of compressed rgb frame
		if((sendall(sockfd, (uint8_t *)&depthcompression, sizeof(uint32_t))) < 0) {
			perror("sendalldepthsize");
			exit(1);
		}
 
		if((sendall(sockfd, compdepth, depthcompression)) < 0) {
			perror("sendalldepth");
			exit(1);
		}

		//printf("sent out frame %d\n", ++framecount);
	}
}
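sendall is not shown here; a plausible sketch, assuming it loops until the whole buffer has been written and returns a negative value on any socket error:

#include <sys/socket.h>

int sendall(int sockfd, uint8_t *buf, uint32_t len) {
	uint32_t sent = 0;
	while (sent < len) {
		ssize_t n = send(sockfd, buf + sent, len - sent, 0);
		if (n <= 0) return -1;	// error or closed connection
		sent += (uint32_t)n;
	}
	return 0;
}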
Code Example #22
File: main_bk1.cpp Project: horsewin/ARMicroMachines
int main(int argc, char* argv[]) {

	markerSize.width = -1; markerSize.height = -1;
	EnumerationErrors errors;
	switch (XnStatus rc = niContext.InitFromXmlFile(KINECT_CONFIG_FILENAME, &errors)) {
		case XN_STATUS_OK:
			break;
		case XN_STATUS_NO_NODE_PRESENT:
			XnChar strError[1024];	errors.ToString(strError, 1024);
			printf("%s\n", strError);
			return rc; break;
		default:
			printf("Open failed: %s\n", xnGetStatusString(rc));
			return rc;
	}

	capture = new Camera(CAPTURE_SIZE, CAMERA_PARAMS_FILENAME);

	RegistrationParams = scaleParams(capture->getParameters(), double(REGISTRATION_SIZE.width)/double(CAPTURE_SIZE.width));
	osg_init(calcProjection(RegistrationParams, capture->getDistortion(), REGISTRATION_SIZE));

	loadKinectParams(KINECT_PARAMS_FILENAME, &kinectParams, &kinectDistort);
	kinectDistort =0;
	kinectParams->data.db[2]=320.0; kinectParams->data.db[5]=240.0;

	niContext.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	niContext.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);

	g_depth.GetMirrorCap().SetMirror(false);
	g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);

	kinectReg = new RegistrationOPIRA(new OCVSurf());
	kinectReg->addResizedMarker(MARKER_FILENAME, 400);

	//physics
	m_world = new KCRPhysicsWorld();
	ground_grid = new float[19200];
	for (int i =0;i < 19200; i++) {
		ground_grid[i] = 0; 
	}
#ifdef SIM_PARTICLES
	voxel_grid = new float[1200];
	for (int i =0;i < 1200; i++) {
		voxel_grid[i] = 0;
	}
#endif

	//controls
	KeyboardController *kc = new KeyboardController(m_world);
	XboxController *xc = new XboxController(m_world);

	loadKinectTransform(KINECT_TRANSFORM_FILENAME);

#ifdef USE_ARMM_VRPN
	m_Connection = new vrpn_Connection_IP();
	ARMM_server = new ARMM_Communicator(m_Connection );
    cout << "Created VRPN server." << endl;
#endif

#ifdef USE_SKIN_SEGMENTATION	//Skin color look up
	_HandRegion.LoadSkinColorProbTable();
#endif

#ifdef USE_OPTICAL_FLOW
	prev_colourIm = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
#endif
/////////////////////////////////////////////Main Loop////////////////////////////////////////////////
	while (running) {
		// Note: assign the raw status to rc; "rc = call() != XN_STATUS_OK" would
		// store the comparison result and lose the real error code.
		XnStatus rc = niContext.WaitAnyUpdateAll();
		if (rc != XN_STATUS_OK) {
			printf("Read failed: %s\n", xnGetStatusString(rc));
			return rc;
		}
		g_depth.GetMetaData(niDepthMD);
		g_image.GetMetaData(niImageMD);

		colourIm = cvCreateImage(cvSize(niImageMD.XRes(), niImageMD.YRes()), IPL_DEPTH_8U, 3);
		memcpy(colourIm->imageData, niImageMD.Data(), colourIm->imageSize); cvCvtColor(colourIm, colourIm, CV_RGB2BGR);
		cvFlip(colourIm, colourIm, 1);

		depthIm = cvCreateImage(cvSize(niDepthMD.XRes(), niDepthMD.YRes()), IPL_DEPTH_16U, 1);
		transDepth160 = cvCreateImage(cvSize(MESH_SIZE.width, MESH_SIZE.height), IPL_DEPTH_32F, 1);
		transDepth320 = cvCreateImage(cvSize(CV_OP_SIZE.width, CV_OP_SIZE.height), IPL_DEPTH_32F, 1);
		memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);	
		cvShowImage("Kinect View", colourIm);

		IplImage *arImage = capture->getFrame();
		cvWaitKey(1); 
		kc->check_input(); xc->check_input();

#ifdef USE_OPTICAL_FLOW
		if(RunOnce) SceneOpticalFlowLK(prev_colourIm, colourIm);
#endif

		if(kinectTransform) { // kinect transform as cvmat* for use
			if( counter >= 4) {
				inpaintDepth(&niDepthMD, true); 
				memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);				
				TransformDepth(depthIm, transDepth160, MARKER_DEPTH, MESH_SIZE);
				GenerateTrimeshGroundFromDepth(transDepth160, MARKER_DEPTH); /*Trimesh generation*/
				m_world->updateTrimeshRefitTree(ground_grid);//opencl?
				osg_UpdateHeightfieldTrimesh(ground_grid);//opencl?
#ifdef SIM_PARTICLES
/*World spheres simulation*/
//				GenerateVoxelFromDepth(depthIm, MARKER_DEPTH);
//				m_world->updateWorldSphereTransform(voxel_grid);
//				osgUpdateWorldSphereTransform(voxel_grid);
#endif
				counter = 0;
			} else {
#ifdef USE_SKIN_SEGMENTATION /*Skin color segmentation*/ // maybe reduce resolution first, and cut off far depth, to make processing faster
				TransformDepth(depthIm, transDepth320, MARKER_DEPTH, CV_OP_SIZE);
				IplImage* depthTmp = cvCreateImage(cvSize(CV_OP_SIZE.width, CV_OP_SIZE.height), IPL_DEPTH_8U, 1);
				IplImage* colourImResized = cvCreateImage(cvSize(CV_OP_SIZE.width, CV_OP_SIZE.height), IPL_DEPTH_8U, 3);
				gray = cvCreateImage(cvSize(colourImResized->width, colourImResized->height),IPL_DEPTH_8U,1);
				hand_region = cvCreateImage(cvSize(colourImResized->width, colourImResized->height),IPL_DEPTH_8U,1);
				IplImage* colourIm640 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);

//				cvCmpS(transDepth, 0, depthTmp, CV_CMP_LT);//dst must b 8U
				cvThreshold(transDepth320, depthTmp, 1, 255, CV_THRESH_BINARY_INV); //thres at 1cm above marker
				cvResize(colourIm, colourImResized, CV_INTER_NN);//use nearest neighbor interpolation
//				removeNoise( depthTmp, 100 );
//				cvSet(colourImResized, cvScalar(0), depthTmp);
				cvShowImage ("Marker Thresh", colourImResized);
				cvResize(colourImResized, colourIm640,CV_INTER_NN);
				cvShowImage ("Marker Thresh 640", colourIm640);

				cvCopyImage( _HandRegion.GetHandRegion( colourImResized, gray), hand_region );
//				removeNoise( hand_region, 20 );
				cvThreshold(hand_region, depthTmp, 0, 255, CV_THRESH_BINARY_INV);
//				removeNoise( depthTmp, 100 );
//				cvShowImage ("depthTmp", depthTmp);
//				cvShowImage ("hand_region", hand_region);

				cvSet(colourImResized, cvScalar(0), depthTmp);

//				cvShowImage ("Skin Color", colourImResized);

//				cvDilate(colourImResized,colourImResized,CV_SHAPE_RECT,1);
//				cvErode(colourImResized,colourImResized,CV_SHAPE_RECT,1);
//				cvMorphologyEx(colourImResized,colourImResized,NULL,CV_SHAPE_RECT,CV_MOP_OPEN,1);
				cvResize(colourImResized, colourIm640,CV_INTER_NN);
				cvShowImage ("Color Skin Color 640", colourIm640);
				cvReleaseImage(&depthTmp);
				cvReleaseImage(&colourImResized);
				cvReleaseImage(&colourIm640);
#endif

#ifdef USE_PARTICLES
/*
				IplImage* depthTmp1 = cvCreateImage(cvSize(depthIm->width, depthIm->height), IPL_DEPTH_32F, 1);
				IplImage* depthTmp2 = cvCreateImage(cvSize(depthIm->width, depthIm->height), IPL_DEPTH_8U, 1);
				cvConvertScale(depthIm, depthTmp1, 1);
//				cvThreshold(depthTmp1, depthTmp2, MARKER_DEPTH-5, 255, CV_THRESH_TOZERO_INV);//thresh 5mm above marker
				cvThreshold(depthTmp1, depthTmp2, MARKER_DEPTH-10, 255, CV_THRESH_TOZERO);
				//cvCmpS(depthTmp1, MARKER_DEPTH, depthTmp2, CV_CMP_GT);
//				cvShowImage("DEPTH640", depthTmp2);
//				IplImage* colourTmp = cvCreateImage(cvSize(colourIm->width, colourIm->height), IPL_DEPTH_8U, 3);
//				cvCopyImage(colourIm, colourTmp);
//				cvSet(colourTmp, cvScalar(0), depthTmp2);
//				cvShowImage ("Color640", colourTmp);
				cvSet(depthTmp1, cvScalar(0), depthTmp2);
				cvShowImage("DEPTH640_32F", depthTmp1);
*/
				inpaintDepth(&niDepthMD, true); 
//				TransformDepth(depthTmp1, transDepth, MARKER_DEPTH);
				memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);
				TransformDepth(depthIm, transDepth, MARKER_DEPTH);
				IplImage* depthTmp3 = cvCreateImage(cvSize(TRACKING_SIZE.width, TRACKING_SIZE.height), IPL_DEPTH_8U, 1);
				cvThreshold(transDepth, depthTmp3, 0.5, 255, CV_THRESH_BINARY_INV);
//				cvThreshold(transDepth, depthTmp3, 0, 255, CV_THRESH_TOZERO);
//				cvConvertScale(transDepth, depthTmp3, 1);
				cvShowImage("DEPTH160", transDepth);
				IplImage* colourImResized = cvCreateImage(cvSize(TRACKING_SIZE.width, TRACKING_SIZE.height), IPL_DEPTH_8U, 3);
				cvResize(colourIm, colourImResized, CV_INTER_NN);//use nearest neighbor interpolation
				cvSet(colourImResized, cvScalar(0), depthTmp3);
				cvShowImage ("Color160", colourImResized);

//				cvReleaseImage(&depthTmp1);
//				cvReleaseImage(&depthTmp2);
//				cvReleaseImage(&colourTmp);
				cvReleaseImage(&depthTmp3);
				cvReleaseImage(&colourImResized);

/*
				IplImage* depthTmp1 = cvCreateImage(cvSize(depthIm->width, depthIm->height), IPL_DEPTH_32F, 1);
				IplImage* depthTmp2 = cvCreateImage(cvSize(depthIm->width, depthIm->height), IPL_DEPTH_8U, 1);
				cvConvertScale(depthIm, depthTmp1, 1);
//				cvThreshold(depthTmp1, depthTmp2, MARKER_DEPTH-5, 255, CV_THRESH_TOZERO_INV);//thresh 5mm above marker
				cvThreshold(depthTmp1, depthTmp2, MARKER_DEPTH-5, 255, CV_THRESH_TOZERO);
				//cvCmpS(depthTmp1, MARKER_DEPTH, depthTmp2, CV_CMP_GT);
				cvShowImage("TMP_DEPTH", depthTmp2);
				IplImage* colourTmp = cvCreateImage(cvSize(colourIm->width, colourIm->height), IPL_DEPTH_8U, 3);
				cvCopyImage(colourIm, colourTmp);
				cvSet(colourTmp, cvScalar(0), depthTmp2);
				cvShowImage ("Basic Thresh", colourTmp);
				cvReleaseImage(&depthTmp1);
				cvReleaseImage(&depthTmp2);
*/
#endif
				counter++;
//			} else {
//				counter++;
			}
			//do hand pose recognition
			m_world->Update();
			RenderScene(arImage, capture);
		}
#ifdef USE_ARMM_VRPN
		ARMM_server->mainloop();
		m_Connection->mainloop();
#endif

#ifdef USE_OPTICAL_FLOW
		if(!RunOnce) RunOnce = true;
		cvCopyImage(colourIm, prev_colourIm);
		memcpy(prev_colourIm->imageData, niImageMD.Data(), prev_colourIm->imageSize);
		cvCvtColor(prev_colourIm, prev_colourIm, CV_RGB2BGR);
#endif

		cvReleaseImage(&arImage);
		cvReleaseImage(&depthIm); cvReleaseImage(&colourIm);
		cvReleaseImage(&transDepth320);cvReleaseImage(&transDepth160);
#ifdef USE_SKIN_SEGMENTATION
		cvReleaseImage(&gray); cvReleaseImage(&hand_region);
#endif
	}

#ifdef USE_OPTICAL_FLOW
	cvReleaseImage(&prev_colourIm);	//only allocated when optical flow is enabled
#endif
	osg_uninit();
	delete m_world;
	delete kinectReg;

	cvReleaseMat(&RegistrationParams);

	delete kc;
	delete xc;

	return 0;
}
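
Both ARMicroMachines listings copy each OpenNI frame into a freshly allocated IplImage with memcpy. A minimal alternative sketch, assuming OpenNI 1.x and the same OpenCV C API used above, wraps the RGB map in an image header instead of copying it; note that the header must not outlive the frame's metadata buffer.

	// Wrap the OpenNI RGB map in an IplImage header; no pixel copy is made.
	IplImage *rgbHeader = cvCreateImageHeader(cvSize(niImageMD.XRes(), niImageMD.YRes()),
	                                          IPL_DEPTH_8U, 3);
	cvSetData(rgbHeader, (void *)niImageMD.Data(), niImageMD.XRes() * 3);

	// Convert into a separately owned BGR image for display and processing.
	IplImage *bgr = cvCreateImage(cvGetSize(rgbHeader), IPL_DEPTH_8U, 3);
	cvCvtColor(rgbHeader, bgr, CV_RGB2BGR);
	cvReleaseImageHeader(&rgbHeader);	//releases the header only, not the OpenNI buffer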
Code Example #23
0
int Init()
{
	
	XnStatus rc;

	//Make sure our image types are the same as the OpenNI image types.
	assert(sizeof(XnRGB24Pixel) == sizeof(ColorPixel));
	assert(sizeof(XnDepthPixel) == sizeof(DepthPixel));
	assert(sizeof(XnStatus) == sizeof(int));

	// Load OpenNI xml settings
	char filePath[255];
	int length = Util::Helpers::GetExeDirectory(filePath, sizeof(filePath));
	filePath[length] = '\\';
	strcpy(&filePath[length+1], SAMPLE_XML_PATH);

	EnumerationErrors errors;
	rc = deviceContext.InitFromXmlFile(filePath, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		//One possible cause: the Microsoft Kinect SDK is installed alongside the
		//PrimeSense drivers. Device Manager should list the device as PrimeSense,
		//not Microsoft Kinect.
		
		//XnChar strError[1024];
		//errors.ToString(strError, 1024);
		//LOGE("%s\n", strError);
		return -1;
	}
	else if (rc != XN_STATUS_OK)
	{
		fprintf(stderr, "%s\n", xnGetStatusString(rc));
		/*LOGE("Open failed: %s\n", xnGetStatusString(rc));*/
		return (rc);
	}

	// Retrieve colour and depth nodes, verifying both were found
	rc = deviceContext.FindExistingNode(XN_NODE_TYPE_IMAGE, colorImageGenerator);
	if (rc != XN_STATUS_OK) return rc;
	rc = deviceContext.FindExistingNode(XN_NODE_TYPE_DEPTH, depthImageGenerator);
	if (rc != XN_STATUS_OK) return rc;

	// Set mirror mode to off
	SetMirrorMode(false);

	// Get a frame to perform checks on it
	ImageMetaData colorImageMetaData;
	DepthMetaData depthImageMetaData;
	depthImageGenerator.GetMetaData(depthImageMetaData);
	colorImageGenerator.GetMetaData(colorImageMetaData);

	// Hybrid mode isn't supported in this sample
	if (colorImageMetaData.FullXRes() != depthImageMetaData.FullXRes() || colorImageMetaData.FullYRes() != depthImageMetaData.FullYRes())
	{
		/*LOGE("The device depth and image resolution must be equal!\n");*/
		return 1;
	}

	// RGB is the only image format supported.
	if (colorImageMetaData.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		/*LOGE("The device image format must be RGB24\n");*/
		return 1;
	}
	
	// Need to make sure the automatic alignment of colour and depth images is supported.
	XnBool isSupported = depthImageGenerator.IsCapabilitySupported("AlternativeViewPoint");
	if(!isSupported)
	{
		/*LOGE("Cannot set AlternativeViewPoint!\n");*/
		return 1;
	}

	
	// Set it to VGA maps at 30 FPS
	/*XnMapOutputMode mapMode;
	mapMode.nXRes = XN_VGA_X_RES;
	mapMode.nYRes = XN_VGA_Y_RES;
	mapMode.nFPS = 60;
	rc = g_depth.SetMapOutputMode(mapMode);
	if(rc)
	{
		LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));
		return 1;
	}
	mapMode.nFPS = 30;
	rc = g_image.SetMapOutputMode(mapMode);
	if(rc)
	{
		LOGE("Failed to set image map mode: %s\n", xnGetStatusString(rc));
		return 1;
	}*/


	// Set automatic alignment of the colour and depth images.
	rc = depthImageGenerator.GetAlternativeViewPointCap().SetViewPoint(colorImageGenerator);
	if(rc != XN_STATUS_OK)
	{
		/*LOGE("Failed to set alternative viewpoint: %s\n", xnGetStatusString(rc));*/
		return 1;
	}


	return XN_STATUS_OK;
}
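
A hypothetical caller for this Init() routine (the main() below is illustrative and not part of the original project) only needs to compare the return value against XN_STATUS_OK before starting its capture loop:

#include <cstdio>

int main(void)
{
	int rc = Init();
	if (rc != XN_STATUS_OK) {
		fprintf(stderr, "Sensor initialisation failed (status %d)\n", rc);
		return 1;
	}
	// ...per-frame capture and processing would go here...
	return 0;
}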
Code Example #24
0
File: main.cpp Project: horsewin/ARMicroMachines
//////////////////// Entry point //////////////////// 
int main(int argc, char* argv[]) 
{
	depthmask_for_mesh = cvCreateImage(MESH_SIZE, IPL_DEPTH_8U, 1);
	markerSize.width = -1; 
	markerSize.height = -1;

	//init OpenNI
	EnumerationErrors errors;
	switch (XnStatus rc = niContext.InitFromXmlFile(KINECT_CONFIG_FILENAME, &errors)) {
		case XN_STATUS_OK:
			break;
		case XN_STATUS_NO_NODE_PRESENT: {
			XnChar strError[1024];
			errors.ToString(strError, 1024);
			printf("%s\n", strError);
			return rc;
		}
		default:
			printf("Open failed: %s\n", xnGetStatusString(rc));
			return rc;
	}

	//set camera parameter
	capture = new Camera(0, CAPTURE_SIZE, CAMERA_PARAMS_FILENAME);
	RegistrationParams = scaleParams(capture->getParameters(), double(REGISTRATION_SIZE.width)/double(CAPTURE_SIZE.width));

	//init parameter for rendering
	osg_init(calcProjection(RegistrationParams, capture->getDistortion(), REGISTRATION_SIZE));

	//for Kinect view
  loadKinectParams(KINECT_PARAMS_FILENAME, &kinectParams, &kinectDistort);
	kinectDistort = 0;	//discard the loaded distortion coefficients
	kinectParams->data.db[2] = 320.0;	//principal point cx
	kinectParams->data.db[5] = 240.0;	//principal point cy

	//setting kinect context
	niContext.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	niContext.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	g_depth.GetMirrorCap().SetMirror(false);
	g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);

	//registration
	kinectReg = new RegistrationOPIRA(new OCVSurf());
	kinectReg->addResizedMarker(MARKER_FILENAME, 400);

	//physics
	m_world = new bt_ARMM_world();
	ground_grid = new float[GRID_SIZE];
	for (int i =0;i < GRID_SIZE; i++) {
		ground_grid[i] = 0; 
	}
#ifdef SIM_PARTICLES
	voxel_grid = new float[1200];
	for (int i =0;i < 1200; i++) {
		voxel_grid[i] = 0;
	}
#endif

	//controls
	KeyboardController *kc = new KeyboardController(m_world);
	XboxController *xc = new XboxController(m_world);

	loadKinectTransform(KINECT_TRANSFORM_FILENAME);

#ifdef USE_ARMM_VRPN
	//----->Server part
	m_Connection = new vrpn_Connection_IP();
	ARMM_server = new ARMM_Communicator(m_Connection	);

	//Open the imager server and set up channel zero to send our data.
	//if ( (ARMM_img_server = new vrpn_Imager_Server("ARMM_Image", m_Connection, MESH_SIZE.width, MESH_SIZE.height)) == NULL) {
	//	fprintf(stderr, "Could not open imager server\n");
	//	return -1;
	//}
	//if ( (channel_id = ARMM_img_server->add_channel("Grid")) == -1) {
	//	fprintf(stderr, "Could not add channel\n");
	//	return -1;
	//}
	ARMM_server->SetObjectsData(&(m_world->Objects_Body));
	ARMM_server->SetHandsData(&(m_world->HandObjectsArray));

	cout << "Created VRPN server." << endl;
	//<-----
#ifdef USE_ARMM_VRPN_RECEIVER 	//----->Receiver part
	ARMM_sever_receiver = new vrpn_Tracker_Remote (ARMM_CLIENT_IP);
	ARMM_sever_receiver->register_change_handler(NULL, handle_object);
#endif 	//<----- 

#endif

#ifdef USE_SKIN_SEGMENTATION	//Skin color look up
	_HandRegion.LoadSkinColorProbTable();
#endif

#ifdef USE_OPTICAL_FLOW
	prev_gray = cvCreateImage(cvSize(OPFLOW_SIZE.width, OPFLOW_SIZE.height), IPL_DEPTH_8U, 1);
	curr_gray = cvCreateImage(cvSize(OPFLOW_SIZE.width, OPFLOW_SIZE.height), IPL_DEPTH_8U, 1);
	flow_capture = new FlowCapture();
	flow_capture->Init();
#endif

/////////////////////////////////////////////Main Loop////////////////////////////////////////////////
	while (running) {
		//start kinect
		// Note: assign the raw status to rc; "rc = call() != XN_STATUS_OK" would
		// store the comparison result and lose the real error code.
		XnStatus rc = niContext.WaitAnyUpdateAll();
		if (rc != XN_STATUS_OK) {
			printf("Read failed: %s\n", xnGetStatusString(rc));
			return rc;
		}

		//get image and depth data from Kinect
		g_depth.GetMetaData(niDepthMD);
		g_image.GetMetaData(niImageMD);

		colourIm = cvCreateImage(cvSize(niImageMD.XRes(), niImageMD.YRes()), IPL_DEPTH_8U, 3);
		memcpy(colourIm->imageData, niImageMD.Data(), colourIm->imageSize); cvCvtColor(colourIm, colourIm, CV_RGB2BGR);
		cvFlip(colourIm, colourIm, 1);

		depthIm = cvCreateImage(cvSize(niDepthMD.XRes(), niDepthMD.YRes()), IPL_DEPTH_16U, 1);
		transDepth160 = cvCreateImage(cvSize(MESH_SIZE.width, MESH_SIZE.height), IPL_DEPTH_32F, 1);
		transDepth320 = cvCreateImage(cvSize(SKIN_SEGM_SIZE.width, SKIN_SEGM_SIZE.height), IPL_DEPTH_32F, 1);
		transColor320 = cvCreateImage(cvSize(SKIN_SEGM_SIZE.width, SKIN_SEGM_SIZE.height), IPL_DEPTH_8U, 3);
		memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);	
		//cvCircle(colourIm, cvPoint(marker_origin.x,marker_origin.y), 5, CV_BLUE, 3);
		cvShowImage("Kinect View", colourIm);
		IplImage *arImage = capture->getFrame();
		cvWaitKey(1); 

		//check input device 
		input_key = kc->check_input(); 
#ifdef USE_ARMM_VRPN_RECEIVER
		if( pass_key != 0){
			kc->check_input(pass_key);
			pass_key = 0;
		}
#endif
		xc->check_input();

		if(kinectTransform) { // kinect transform as cvmat* for use
			if( counter >= SIM_FREQUENCY) {
#ifdef UPDATE_TRIMESH
				inpaintDepth(&niDepthMD, true); 
				memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);				
				TransformImage(depthIm, transDepth160, MARKER_DEPTH, MESH_SIZE, true);
				GenerateTrimeshGroundFromDepth(transDepth160, MARKER_DEPTH); /*Trimesh generation*/
				m_world->updateTrimeshRefitTree(ground_grid);//opencl?
				osg_UpdateHeightfieldTrimesh(ground_grid);//opencl?
#endif

#ifdef SIM_PARTICLES
/*World spheres simulation*/
//				GenerateVoxelFromDepth(depthIm, MARKER_DEPTH);
//				m_world->updateWorldSphereTransform(voxel_grid);
//				osgUpdateWorldSphereTransform(voxel_grid);
#endif
				counter = 0;
			} else {
#ifdef USE_SKIN_SEGMENTATION /*Skin color segmentation*/ // maybe reduce resolution first, and cut off far depth, to make processing faster
				// (2)Sphere representation
				FindHands(depthIm, colourIm);
				UpdateAllHands();
#endif

#ifdef USE_PARTICLES
//XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif
				counter++;
			}
			//do hand pose recognition
			m_world->Update();
			//(B)normal client only rendering
			RenderScene(arImage, capture);
		}
//		TickCountAverageEnd();
#ifdef USE_ARMM_VRPN
	//Send Car position+orientation			
	ARMM_server->mainloop();
#ifdef USE_ARMM_VRPN_RECEIVER
	ARMM_sever_receiver->mainloop();
#endif
	////Copy depth info
	//for (int i = 0; i < GRID_SIZE;i++) {
	//	ARMM_img_buffer[i] = ground_grid[i];
	//}

	//Send depth grid info	
	//ARMM_img_server->send_begin_frame(0, MESH_SIZE.width-1, 0, MESH_SIZE.height-1);
 //   ARMM_img_server->mainloop();
 //   int nRowsPerRegion= ((int) vrpn_IMAGER_MAX_REGIONf32)/ MESH_SIZE.width;
 //   for(int y=0; y<MESH_SIZE.height; y+=nRowsPerRegion) {
 //     ARMM_img_server->send_region_using_base_pointer(channel_id,0,MESH_SIZE.width-1,y,min(MESH_SIZE.width,y+nRowsPerRegion)-1, ARMM_img_buffer, 1, MESH_SIZE.width, MESH_SIZE.height);
 //     ARMM_img_server->mainloop();
 //   }
 //   ARMM_img_server->send_end_frame(0, MESH_SIZE.width-1, 0, MESH_SIZE.height-1);
 //   ARMM_img_server->mainloop();
	//Exec data transmission
	m_Connection->mainloop();
#endif

#ifdef USE_OPTICAL_FLOW
		if(!RunOnce) RunOnce = true;
		cvCopyImage(curr_gray, prev_gray);
#endif

		cvReleaseImage(&arImage);
		cvReleaseImage(&depthIm); 
		cvReleaseImage(&colourIm);
		cvReleaseImage(&transDepth160);
		//transDepth320 and transColor320 are created unconditionally above,
		//so they must be released unconditionally as well
		cvReleaseImage(&transDepth320);
		cvReleaseImage(&transColor320);
	}
#ifdef USE_OPTICAL_FLOW
	cvReleaseImage(&prev_gray); cvReleaseImage(&curr_gray);
#endif

	//memory release
	osg_uninit();
	delete m_world;
	delete kinectReg;
	cvReleaseMat(&RegistrationParams);
	delete kc;
	delete xc;

	return 0;
}
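
One cost both versions of this main loop share is that the fixed-size colour and depth images are allocated and released on every iteration. A minimal sketch, assuming the 640x480 streams used throughout these projects, of hoisting those allocations out of the loop so that each frame only refills existing buffers:

	// Allocate once, before entering the while (running) loop.
	IplImage *colourIm = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
	IplImage *depthIm  = cvCreateImage(cvSize(640, 480), IPL_DEPTH_16U, 1);

	while (running) {
		// ...niContext.WaitAnyUpdateAll() and GetMetaData() calls as above...
		memcpy(colourIm->imageData, niImageMD.Data(), colourIm->imageSize);
		memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);
		// ...per-frame processing as in the listings above...
	}

	// Release once, after the loop exits.
	cvReleaseImage(&colourIm);
	cvReleaseImage(&depthIm);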