// Refreshes the cached resolution and data pointers for the enabled streams
// (depth and/or scene/user labels). When useHistogram is set, the depth
// histogram is recomputed and the depth pointer rewound afterwards.
// FIX: the function is declared XnStatus but had no return statement —
// undefined behavior for any caller that reads the result.
XnStatus prepare(char useScene, char useDepth, char useHistogram)
{
    //TODO handle possible failures!
    if (useDepth)
    {
        mDepthGen.GetMetaData(depthMD);
        nXRes = depthMD.XRes();
        nYRes = depthMD.YRes();
        pDepth = depthMD.Data();
        if (useHistogram)
        {
            calcHist();
            // rewind the pointer
            pDepth = depthMD.Data();
        }
    }
    if (useScene)
    {
        mUserGen.GetUserPixels(0, sceneMD);
        nXRes = sceneMD.XRes();
        nYRes = sceneMD.YRes();
        pLabels = sceneMD.Data();
    }
    return XN_STATUS_OK;
}
int main() { XnStatus nRetVal = XN_STATUS_OK; Context context; nRetVal = context.Init(); CHECK_RC(nRetVal, "Initialize context"); DepthGenerator depth; nRetVal = depth.Create(context); CHECK_RC(nRetVal, "Create depth generator"); nRetVal = context.StartGeneratingAll(); CHECK_RC(nRetVal, "StartGeneratingAll"); DepthMetaData depthMD; while (!xnOSWasKeyboardHit()) { nRetVal = context.WaitOneUpdateAll(depth); if (nRetVal != XN_STATUS_OK) { printf("UpdateData failed: %s\n", xnGetStatusString(nRetVal)); continue; } depth.GetMetaData(depthMD); const XnDepthPixel* pDepthMap = depthMD.Data(); printf("Frame %d Middle point is: %u.\n", depthMD.FrameID(), depthMD(depthMD.XRes() / 2, depthMD.YRes() / 2)); } context.Shutdown(); return 0; }
// Renders one depth frame as a pseudocolored OpenCV image with a time
// overlay and a playback-progress percentage. Returns the key pressed in
// the viewer window (waitKey result), or 0 when the window was just closed.
int OpencvModule::DrawDepth(DepthMetaData& g_depthMD)
{
    // If the viewer window was closed by the user, report it once (return 0)
    // and re-arm the flag on the following call.
    if (!cvGetWindowHandle("Caremedia Kinect Viewer"))
    {
        if (windowopened)
        {
            windowopened = false;
            return 0;
        }
        else
            windowopened = true;
    }
    int key = 0;

    // Wrap the OpenNI 16-bit depth buffer in a Mat (no copy), then map it to
    // an inverted 8-bit range for display and pseudocolor it.
    Mat depth16(480, 640, CV_16UC1, (unsigned short*)g_depthMD.WritableData());
    depth16.convertTo(depth8, CV_8U, -255/4096.0, 255);
    Pseudocolor->pseudocolor(depth8, rgbdepth);

    //CvFont font;
    //cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 1, CV_AA);

    // Timestamp is in microseconds; convert to seconds.
    float aux = ((float)g_depthMD.Timestamp())/1E6;
    // FIX: the millisecond part was written as (int)(aux - (int)aux) * 1000,
    // which truncates the fraction to 0 before multiplying — so it was always
    // 0 ms. Multiply first, then cast. (Also dropped the unused QVariant.)
    QTime t = videostarttime.addSecs((int)aux)
                            .addMSecs((int)((aux - (int)aux) * 1000));
    float percent = (float)100*(float)g_depthMD.FrameID() / (float)NumFrames;
    QString a;
    putText(rgbdepth,"Time:"+t.toString().toStdString(), cvPoint(460,30),5,1,cvScalar(255, 255, 255, 0),1,1);
    putText(rgbdepth, a.setNum(percent,'f',2).append("%").toStdString(), cvPoint(5,30),6,0.6,cvScalar(255, 255, 255, 0),1,1);
    imshow("Caremedia Kinect Viewer",rgbdepth);
    key = waitKey(5);
    // FIX: the function is declared int but had no return on this path.
    return key;
}
// Waits for a new frame and returns a writable pointer to the depth map.
// FIX: WritableData() makes the metadata object own a private copy of the
// frame buffer; returning it from a function-local DepthMetaData handed the
// caller a dangling pointer. A static instance keeps the buffer alive until
// the next call (note: not thread-safe, and each call invalidates the
// previously returned pointer).
XnDepthPixel* KinectControl::getDepth()
{
    XnStatus nRetVal = context.WaitAndUpdateAll();
    CHECK_RC(nRetVal, "Update Data");
    static DepthMetaData depthMD;
    depth_generator.GetMetaData(depthMD);
    return depthMD.WritableData();
}
int main() { XnStatus nRetVal = XN_STATUS_OK; Context context; EnumerationErrors errors; nRetVal = context.InitFromXmlFile(SAMPLE_XML_PATH, &errors); if (nRetVal == XN_STATUS_NO_NODE_PRESENT) { XnChar strError[1024]; errors.ToString(strError, 1024); printf("%s\n", strError); return (nRetVal); } else if (nRetVal != XN_STATUS_OK) { printf("Open failed: %s\n", xnGetStatusString(nRetVal)); return (nRetVal); } DepthGenerator depth; nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth); CHECK_RC(nRetVal, "Find depth generator"); XnFPSData xnFPS; nRetVal = xnFPSInit(&xnFPS, 180); CHECK_RC(nRetVal, "FPS Init"); DepthMetaData depthMD; while (!xnOSWasKeyboardHit()) { nRetVal = context.WaitOneUpdateAll(depth); if (nRetVal != XN_STATUS_OK) { printf("UpdateData failed: %s\n", xnGetStatusString(nRetVal)); continue; } xnFPSMarkFrame(&xnFPS); depth.GetMetaData(depthMD); const XnDepthPixel* pDepthMap = depthMD.Data(); printf("Frame %d Middle point is: %u. FPS: %f\n", depthMD.FrameID(), depthMD(depthMD.XRes() / 2, depthMD.YRes() / 2), xnFPSCalc(&xnFPS)); } context.Shutdown(); return 0; }
// ----------------------------------------------------------------------------------------------------- // convertImageDepth // ----------------------------------------------------------------------------------------------------- void convertImageDepth(const XnDepthPixel* pDepthMap, IplImage* pImgDepth) { // convert from OpenNI buffer to IplImage // Save only the Z value per pixel as an image for quick visualization of depth for(unsigned int i=0; i<g_depthMD.XRes()*g_depthMD.YRes(); i++) { // depth pixels on 16 bits (11 effective bits) //short depthValue = pDepthMap[i]/16; // for quick look only pImgDepth->imageData[3*i+0]=(unsigned char)(pDepthMap[i]>>8); pImgDepth->imageData[3*i+1]=(unsigned char)(pDepthMap[i] & 0xFF); pImgDepth->imageData[3*i+2]=0; //pImgDepth->imageData[i] = pDepthMap[i]; } }
// Gets the colour and depth data from the Kinect sensor. bool GetColorAndDepthImages(ColorImage& colorImage, DepthImage& depthImage) { XnStatus rc = XN_STATUS_OK; // Read a new frame, blocking operation rc = deviceContext.WaitAnyUpdateAll(); if (rc != XN_STATUS_OK) { /*LOGE("Read failed: %s\n", xnGetStatusString(rc));*/ throw rc; } // Get handles to new data static ImageMetaData colorImageMetaData; static DepthMetaData depthImageMetaData; colorImageGenerator.GetMetaData(colorImageMetaData); depthImageGenerator.GetMetaData(depthImageMetaData); // Validate images if (!depthImageGenerator.IsValid() || !colorImageGenerator.IsValid()) { /*LOGE("Error: Color or depth image is invalid.");*/ throw 1; } if (colorImageMetaData.Timestamp() <= mostRecentRGB) return false; // Fetch pointers to data const XnRGB24Pixel* pColorImage = colorImageMetaData.RGB24Data(); //g_depth.GetRGB24ImageMap() const XnDepthPixel* pDepthImage = depthImageMetaData.Data();// g_depth.GetDepthMap(); // Copy data over to arrays memcpy(colorImage.data, pColorImage, sizeof(colorImage.data)); memcpy(depthImage.data, pDepthImage, sizeof(depthImage.data)); colorImage.rows = colorImage.maxRows; colorImage.cols = colorImage.maxCols; depthImage.rows = depthImage.maxRows; depthImage.cols = depthImage.maxCols; mostRecentRGB = colorImageMetaData.Timestamp(); return true; }
// Loads marker/world calibration from an OpenCV file-storage file and, if a
// Kinect frame can be read, rebuilds the tracker, the ground trimesh from
// the (inpainted) depth map, and the physics world from the stored values.
// Silently does nothing when the file cannot be opened.
void loadKinectTransform(char *filename)
{
    CvFileStorage* fs = cvOpenFileStorage( filename, 0, CV_STORAGE_READ );
    if (fs!=0) {
        // Marker size is stored as a 2-element sequence (width, height).
        CvSeq *s = cvGetFileNodeByName(fs, 0, "MarkerSize")->data.seq;
        markerSize.width = cvReadInt((CvFileNode*)cvGetSeqElem(s, 0));
        markerSize.height = cvReadInt((CvFileNode*)cvGetSeqElem(s, 1));

        // Marker origin (2-element sequence), then derive the world origin.
        s = cvGetFileNodeByName(fs, 0, "MarkerOrigin")->data.seq;
        marker_origin.x = cvReadInt((CvFileNode*)cvGetSeqElem(s, 0));
        marker_origin.y = cvReadInt((CvFileNode*)cvGetSeqElem(s, 1));
        setWorldOrigin();

        // Scalar calibration values, with defaults when missing.
        WORLD_SCALE = cvReadRealByName(fs, 0, "WorldScale", 1);
        WORLD_ANGLE = cvReadRealByName(fs, 0, "WorldAngle", 0);
        MARKER_DEPTH = cvReadRealByName(fs, 0, "MARKER_DEPTH", 0);

        // Kinect-to-world transform matrix (stored CvMat) — presumably 4x4;
        // TODO confirm against the writer side.
        CvFileNode* fileparams = cvGetFileNodeByName( fs, NULL, "KinectTransform" );
        kinectTransform = (CvMat*)cvRead( fs, fileparams );
        cvReleaseFileStorage( &fs );

        if (niContext.WaitAnyUpdateAll() == XN_STATUS_OK) {
            //Load in the marker for registration
            osg_inittracker(MARKER_FILENAME, 400, markerSize.width);

            m_world->setWorldDepth(MARKER_DEPTH);
            m_world->setWorldScale(WORLD_SCALE);
            setOSGTrimeshScale(WORLD_SCALE);

            // Grab a depth frame and fill its holes before meshing.
            g_depth.GetMetaData(niDepthMD);
            inpaintDepth(&niDepthMD, true);

            depthIm = cvCreateImage(cvSize(niDepthMD.XRes(), niDepthMD.YRes()), IPL_DEPTH_16U, 1);
            transDepth160 = cvCreateImage(cvSize(MESH_SIZE.width, MESH_SIZE.height), IPL_DEPTH_32F, 1);
            memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);

            // Re-project the depth to the mesh grid and build the ground mesh.
            TransformDepth(depthIm, transDepth160, MARKER_DEPTH, MESH_SIZE);
            GenerateTrimeshGroundFromDepth(transDepth160, MARKER_DEPTH);

            m_world->updateTrimesh(ground_grid);
            m_world->setMinHeight(MinHeight);
            m_world->setMaxHeight(MaxHeight);
            m_world->initPhysics();

#ifdef SIM_PARTICLES
            CreateOSGSphereProxy();//osg spheres representation
#endif
#ifdef SIM_MICROMACHINE
            m_world->resetCarScene(0);
            m_world->resetCarScene(1);
#endif /*SIM_MICROMACHINE*/
        }
    }
}
// ----------------------------------------------------------------------------------------------------- // savePointCloud // ----------------------------------------------------------------------------------------------------- int savePointCloud( const XnRGB24Pixel* pImageMap, const XnDepthPixel* pDepthMap, IplImage* pImgDepth, int frameID, bool savePointCloud) { float focalInv = 0.001 / Config::_FocalLength; unsigned int rgb; int depth_index = 0; int ImageCenterX = g_depthMD.XRes() >> 1; // divide by 2 int ImageCenterY = g_depthMD.YRes() >> 1; for (int ind_y =0; ind_y < g_depthMD.YRes(); ind_y++) { for (int ind_x=0; ind_x < g_depthMD.XRes(); ind_x++, depth_index++) { pcl::PointXYZRGB& pt = g_cloudPointSave(ind_x,ind_y); if (pDepthMap[depth_index] == g_noSampleValue || pDepthMap[depth_index] == g_shadowValue || pDepthMap[depth_index] == 0 ){ pt.x = bad_point; pt.y = bad_point; pt.z = bad_point; } else { // locate point in meters pt.x = (ind_x - ImageCenterX) * pDepthMap[depth_index] * focalInv; pt.y = (ImageCenterY - ind_y) * pDepthMap[depth_index] * focalInv; pt.z = pDepthMap[depth_index] * 0.001 ; // depth values are given in mm rgb = (((unsigned int)pImageMap[depth_index].nRed) << 16) | (((unsigned int)pImageMap[depth_index].nGreen) << 8) | ((unsigned int)pImageMap[depth_index].nBlue); pt.rgb = *reinterpret_cast<float*>(&rgb); } } } char buf[256]; sprintf(buf, "%s/cloud%d.pcd", Config::_PathDataProd.c_str(), frameID); pcl::io::savePCDFile(buf, g_cloudPointSave, true); // bug in PCL - the binary file is not created with the good permissions! char bufsys[256]; sprintf(bufsys, "chmod a+rw %s", buf); system(bufsys); }
// Refreshes the cached resolution and data pointers for whichever streams
// are enabled (scene/user labels, depth, RGB image, IR), optionally
// recomputing the depth histogram.
// FIX: the function is declared XnStatus but had no return statement —
// undefined behavior for any caller that reads the result.
XnStatus prepare(char useScene, char useDepth, char useImage, char useIr, char useHistogram)
{
    //TODO handle possible failures! Gotcha!
    if (useDepth)
    {
        mDepthGen.GetMetaData(depthMD);
        nXRes = depthMD.XRes();
        nYRes = depthMD.YRes();
        pDepth = depthMD.Data();
        if (useHistogram)
        {
            calcHist();
            // rewind the pointer
            pDepth = depthMD.Data();
        }
    }
    if (useScene)
    {
        mUserGen.GetUserPixels(0, sceneMD);
        nXRes = sceneMD.XRes();
        nYRes = sceneMD.YRes();
        pLabels = sceneMD.Data();
    }
    if (useImage)
    {
        mImageGen.GetMetaData(imageMD);
        nXRes = imageMD.XRes();
        nYRes = imageMD.YRes();
        pRGB = imageMD.RGB24Data();
        // HISTOGRAM?????
    }
    if (useIr)
    {
        mIrGen.GetMetaData(irMD);
        nXRes = irMD.XRes();
        nYRes = irMD.YRes();
        pIR = irMD.Data();
        // HISTOGRAM????
    }
    return XN_STATUS_OK;
}
void takePhoto() { static int index = 1; char fname[256] = {0,}; sprintf(fname, "kinect%03d.txt", index++); g_depth.GetMetaData(g_depthMD); g_image.GetMetaData(g_imageMD); int const nx = g_depthMD.XRes(); int const ny = g_depthMD.YRes(); assert(nx == g_imageMD.XRes()); assert(ny == g_imageMD.YRes()); const XnDepthPixel* pDepth = g_depthMD.Data(); const XnUInt8* pImage = g_imageMD.Data(); FILE * file = fopen(fname, "wb"); fprintf(file, "%d\n%d\n\n", nx, ny); for (int y = 0, di = 0, ri = 0, gi = 1, bi = 2; y < ny; y++) { for (int x = 0; x < nx; x++, di++, ri += 3, gi += 3, bi += 3) { int const r = pImage[ri]; int const g = pImage[gi]; int const b = pImage[bi]; int const d = pDepth[di]; assert(r >= 0); assert(g >= 0); assert(b >= 0); assert(d >= 0); assert(r <= 0xFF); assert(g <= 0xFF); assert(b <= 0xFF); assert(d <= 0xFFFF); fprintf(file, "%3d %3d %3d %5d\n", r, g, b, d); } fprintf(file, "\n"); } fflush(file); fclose(file); }
// Converts the raw depth map into a real-world 3D point cloud and stores it
// (as packed floats) in pData.pointCloud, stamping pData with the frame time.
void GeneratePointCloud(DepthGenerator& rDepthGen, const XnDepthPixel* pDepth, VISION_DATA &pData)
{
    DepthMetaData metaData;
    rDepthGen.GetMetaData(metaData);
    pData.timeStamp = metaData.Timestamp();

    const unsigned int width = metaData.FullXRes();
    const unsigned int height = metaData.FullYRes();
    const unsigned int totalPoints = width * height;

    // Build projective coordinates: (pixel x, pixel y, raw depth).
    XnPoint3D* projective = new XnPoint3D[totalPoints];
    for (unsigned int row = 0; row < height; ++row)
    {
        const unsigned int rowStart = row * width;
        for (unsigned int col = 0; col < width; ++col)
        {
            XnPoint3D& p = projective[rowStart + col];
            p.X = col;
            p.Y = row;
            p.Z = pDepth[rowStart + col];
        }
    }

    // Let OpenNI convert the whole set to real-world coordinates at once,
    // then copy the result into the caller's buffer (3 floats per point).
    XnPoint3D* realWorld = new XnPoint3D[totalPoints];
    rDepthGen.ConvertProjectiveToRealWorld(totalPoints, projective, realWorld);
    memcpy(pData.pointCloud, realWorld, totalPoints * 3 * sizeof(float));

    delete[] projective;
    delete[] realWorld;
}
// ----------------------------------------------------------------------------------------------------- // generateFrame // ----------------------------------------------------------------------------------------------------- bool CameraDevice::generateFrame(IplImage* imgRGB, IplImage* imgDepth) { XnStatus nRetVal = XN_STATUS_OK; const XnDepthPixel* pDepthMap = NULL; const XnRGB24Pixel* pImageMap = NULL; xnFPSMarkFrame(&g_xnFPS); nRetVal = g_context.WaitAndUpdateAll(); if (nRetVal==XN_STATUS_OK) { g_depth.GetMetaData(g_depthMD); g_image.GetMetaData(g_imageMD); pDepthMap = g_depthMD.Data(); pImageMap = g_image.GetRGB24ImageMap(); printf("Frame %02d (%dx%d) Depth at middle point: %u. FPS: %f\r", g_depthMD.FrameID(), g_depthMD.XRes(), g_depthMD.YRes(), g_depthMD(g_depthMD.XRes()/2, g_depthMD.YRes()/2), xnFPSCalc(&g_xnFPS)); // convert to OpenCV buffers convertImageRGB(pImageMap, imgRGB); convertImageDepth(pDepthMap, imgDepth); return true; } return false; }
// Builds a renderer over the given depth/image generators: caches the depth
// resolution, allocates one vertex and one color entry per depth pixel, and
// pre-fills them (vertices at their normalized screen position, colors black
// with alpha 1).
WorldRenderer::WorldRenderer(RenderingContext* rctx, DepthGenerator* depthGen, ImageGenerator* imageGen, BallManager* ball_manager) : AbstractOpenGLRenderer(rctx)
{
    m_depthGen = depthGen;
    m_imageGen = imageGen;
    //m_henshinDetector = henshinDetector;
    m_ball_manager = ball_manager;

    // Cache the depth map resolution.
    DepthMetaData metaData;
    m_depthGen->GetMetaData(metaData);
    m_width = metaData.XRes();
    m_height = metaData.YRes();

    // Allocate one vertex and one RGBA color per point.
    XnUInt32 pointCount = getNumPoints();
    m_vertexBuf = new M3DVector3f[pointCount];
    m_colorBuf = new M3DVector4f[pointCount];

    // Pre-set values on the working buffers, row by row.
    M3DVector3f* vertexCursor = m_vertexBuf;
    M3DVector4f* colorCursor = m_colorBuf;
    for (XnUInt32 row = 0; row < m_height; row++) {
        for (XnUInt32 col = 0; col < m_width; col++) {
            (*vertexCursor)[0] = normalizeX(float(col));
            (*vertexCursor)[1] = normalizeY(float(row));
            (*vertexCursor)[2] = 0;
            ++vertexCursor;

            (*colorCursor)[0] = 0;
            (*colorCursor)[1] = 0;
            (*colorCursor)[2] = 0;
            (*colorCursor)[3] = 1; // alpha is always 1.0
            ++colorCursor;
        }
    }

    m_batch.init(pointCount);
    m_depthAdjustment = DEFAULT_DEPTH_ADJUSTMENT;
}
// Punches vertical cut lines into the depth image by zeroing every
// even-numbered column of the writable depth map.
void transformDepthMD(DepthMetaData& depthMD)
{
    DepthMap& map = depthMD.WritableDepthMap();
    const XnUInt32 width = map.XRes();
    const XnUInt32 height = map.YRes();
    for (XnUInt32 row = 0; row < height; ++row)
    {
        // Step directly over the even columns instead of testing x % 2.
        for (XnUInt32 col = 0; col < width; col += 2)
        {
            map(col, row) = 0;
        }
    }
}
//-------------------------------------------------------------- void testApp::setup(){ XnStatus rc; EnumerationErrors errors; rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, g_scriptNode, &errors); if (rc == XN_STATUS_NO_NODE_PRESENT) { XnChar strError[1024]; errors.ToString(strError, 1024); printf("%s\n", strError); return ; } else if (rc != XN_STATUS_OK) { printf("Open failed: %s\n", xnGetStatusString(rc)); return; } rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth); if (rc != XN_STATUS_OK) { printf("No depth node exists! Check your XML."); return; } g_depth.GetMetaData(g_depthMD); // Texture map init g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes()-1) / 512) + 1) * 512; g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes()-1) / 512) + 1) * 512; g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel)); std::cout << " w:" << g_depthMD.FullXRes() << " h:" << g_depthMD.FullYRes() << std::endl; pixels = (unsigned char*)malloc(640*480*3*sizeof(unsigned char)); tex.allocate(640, 480, GL_RGB); }
// Per-frame GLUT display callback: waits for new sensor data, then draws the
// depth map (with user labels when available) and the RGB image.
void glutDisplay (void){
    glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Setup the OpenGL viewpoint
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();

    // Per-frame metadata holders for user labels, depth and RGB image.
    SceneMetaData sceneMD;
    DepthMetaData depthMD;
    ImageMetaData imageMD;
    g_DepthGenerator.GetMetaData(depthMD);

    // Map the window to depth-pixel coordinates (y axis flipped).
    glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);

    glDisable(GL_TEXTURE_2D);

    //XnStatus rc = g_Context.WaitOneUpdateAll(g_DepthGenerator);
    // Block until any generator has new data.
    XnStatus rc = g_Context.WaitAnyUpdateAll();
    // NOTE(review): argument order here is (message, status), unlike the
    // usual CHECK_RC(status, message) seen elsewhere — presumably this
    // project's macro takes the message first; confirm against its definition.
    CHECK_RC("Wait Data",rc);

    g_DepthGenerator.GetMetaData(depthMD);
    if(g_UserGenerator.IsValid())
        g_UserGenerator.GetUserPixels(0, sceneMD); // per-pixel user labels
    g_ImageGenerator.GetMetaData(imageMD);

    DrawDepthMap(depthMD, sceneMD);
    DrawImage(imageMD);

    glutSwapBuffers();
}//glutdisplay
// Overwrites the entire writable depth map with the 16-bit values taken from
// FrameDepth (note the row/column swap: Mat is indexed (row, col), the depth
// map is indexed (x, y)).
void transformDepthMD(Mat FrameDepth,DepthMetaData& depthMD)
{
    DepthMap& map = depthMD.WritableDepthMap();
    for (XnUInt32 row = 0; row < map.YRes(); ++row)
    {
        for (XnUInt32 col = 0; col < map.XRes(); ++col)
        {
            map(col, row) = FrameDepth.at<XnUInt16>(row, col);
        }
    }
}
//---------------------------------------------------- // マウスのクリック処理 //---------------------------------------------------- void glutMouse(int button, int state, int _x, int _y){ int x = _x, y = _y; XnPoint3D pt[2] = {{0,0,0},{0,0,0}}; // サイズが違う場合,680*480に標準化する if(!(g_currentWindowSizeX == KINECT_IMAGE_WIDTH && g_currentWindowSizeY == KINECT_IMAGE_HEIGHT)){ x = 640 * _x / g_currentWindowSizeX; y = 480 * _y / g_currentWindowSizeY; } if(state == GLUT_DOWN){ if(button == GLUT_LEFT_BUTTON){ // 左クリック cout << "click! (" << _x << ", " << _y << ")->(" << x << ", " << y << "), depth = " << *(g_depthMD.Data() + y * KINECT_IMAGE_WIDTH + x) << endl; pt[0].X = _x; pt[0].Y = _y; pt[0].Z = *(g_depthMD.Data() + y * KINECT_IMAGE_WIDTH + x); //cout << "(_x, _y) -> (x, y) = (" << _x << ", " << _y << ") -> (" << x << ", " << y << ")" << endl; g_depth.ConvertProjectiveToRealWorld(2, pt, pt); cout << "change pt[0] => (" << pt[0].X << ", " << pt[0].Y << ", " << pt[0].Z << ")" << endl; }else if(button == GLUT_RIGHT_BUTTON){ // 右クリック cout << "click! back = (" << x << ", " << y << ") depth = " << *(g_pBackDepth + y * KINECT_IMAGE_WIDTH + x) << endl; pt[1].X = _x; pt[1].Y = _y; pt[1].Z = *(g_pBackDepth + y * KINECT_IMAGE_WIDTH + x); g_depth.ConvertProjectiveToRealWorld(2, pt, pt); cout << "change pt[1] => (" << pt[1].X << ", " << pt[1].Y << ", " << pt[1].Z << ")" << endl; } //g_depth.ConvertProjectiveToRealWorld(2, pt, pt); //cout << "change pt[0] => (" << pt[0].X << ", " << pt[0].Y << ", " << pt[0].Z << ")" << endl; //cout << "change pt[1] => (" << pt[1].X << ", " << pt[1].Y << ", " << pt[1].Z << ")" << endl; } }
// Fills holes (zero-depth pixels) in the OpenNI depth map in place using
// OpenCV Navier-Stokes inpainting. When halfSize is true, inpainting runs on
// a quarter-resolution copy for speed and the result is resized back and
// copied only into the hole pixels of the full-size map.
// NOTE(review): this definition is truncated in this view — the final else
// branch continues beyond the visible chunk.
void inpaintDepth(DepthMetaData *niDepthMD, bool halfSize)
{
    IplImage *depthIm, *depthImFull;
    if (halfSize) {
        // Wrap the OpenNI buffer (no copy), then downscale for inpainting.
        depthImFull = cvCreateImage(cvSize(niDepthMD->XRes(), niDepthMD->YRes()), IPL_DEPTH_16U, 1);
        depthImFull->imageData = (char*)niDepthMD->WritableData();
        depthIm = cvCreateImage(cvSize(depthImFull->width/4.0, depthImFull->height/4.0), IPL_DEPTH_16U, 1);
        cvResize(depthImFull, depthIm, 0);
    } else {
        // Full resolution: operate directly on the OpenNI buffer.
        depthIm = cvCreateImage(cvSize(niDepthMD->XRes(), niDepthMD->YRes()), IPL_DEPTH_16U, 1);
        depthIm->imageData = (char*)niDepthMD->WritableData();
    }

    // Hole mask: 255 where depth == 0, else 0.
    IplImage *depthImMask = cvCreateImage(cvGetSize(depthIm), IPL_DEPTH_8U, 1);
    for (int y=0; y<depthIm->height; y++) {
        for (int x=0; x<depthIm->width; x++) {
            CV_IMAGE_ELEM(depthImMask, char, y, x)=CV_IMAGE_ELEM(depthIm, unsigned short,y,x)==0?255:0;
        }
    }

    // Min/max over the valid (non-hole) depth values only.
    IplImage *depthImMaskInv = cvCreateImage(cvGetSize(depthIm), IPL_DEPTH_8U, 1);
    cvNot(depthImMask, depthImMaskInv);
    double min, max;
    cvMinMaxLoc(depthIm, &min, &max, 0, 0, depthImMaskInv);

    // Compress 16-bit depth into 8 bits (cvInpaint requires 8-bit input).
    IplImage *depthIm8 = cvCreateImage(cvGetSize(depthIm), IPL_DEPTH_8U, 1);
    float scale = 255.0/(max-min);
    cvConvertScale(depthIm, depthIm8, scale, -(min*scale));

    // Inpaint the holes, then expand back to the 16-bit depth range.
    IplImage *depthPaint = cvCreateImage(cvGetSize(depthIm8), IPL_DEPTH_8U, 1);
    cvInpaint(depthIm8, depthImMask, depthPaint, 3, CV_INPAINT_NS);
    IplImage *depthIm16 = cvCreateImage(cvGetSize(depthIm), IPL_DEPTH_16U, 1);
    cvConvertScale(depthPaint, depthIm16, 1/scale, min);

    if (halfSize) {
        // Upscale the inpainted map and copy it only where the full-size map
        // had holes, leaving measured depth untouched.
        IplImage *depthPaintedFull = cvCreateImage(cvGetSize(depthImFull), IPL_DEPTH_16U, 1);
        cvResize(depthIm16, depthPaintedFull,0);
        IplImage *depthImMaskFull = cvCreateImage(cvGetSize(depthImFull), IPL_DEPTH_8U, 1);
        for (int y=0; y<depthImFull->height; y++)
            for (int x=0; x<depthImFull->width; x++)
                CV_IMAGE_ELEM(depthImMaskFull, char, y, x)=CV_IMAGE_ELEM(depthImFull, unsigned short,y,x)==0?255:0;
        cvCopy(depthPaintedFull, depthImFull, depthImMaskFull);
        cvReleaseImage(&depthPaintedFull);
        cvReleaseImage(&depthImMaskFull);
        cvReleaseImage(&depthImFull);
    } else {
//---------------------------------------------------- // OpenNI関連の初期化 //---------------------------------------------------- void xnInit(void){ XnStatus rc; EnumerationErrors errors; rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors); if (rc == XN_STATUS_NO_NODE_PRESENT){ XnChar strError[1024]; errors.ToString(strError, 1024); printf("%s\n", strError); exit(1); }else if (rc != XN_STATUS_OK){ printf("Open failed: %s\n", xnGetStatusString(rc)); exit(1); } //playerInit(); rc = xnFPSInit(&g_xnFPS, 180); // FPSの初期化 //CHECK_RC(rc, "FPS Init"); // デプス・イメージ・ユーザジェネレータの作成 rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth); errorCheck(rc, "g_depth"); // エラーチェック rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image); errorCheck(rc, "g_image"); rc = g_context.FindExistingNode(XN_NODE_TYPE_USER, g_user); //rc = g_user.Create(g_context); errorCheck(rc, "g_user"); // ユーザー検出機能をサポートしているか確認 if (!g_user.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) { //throw std::runtime_error("ユーザー検出をサポートしてません"); cout << "ユーザー検出をサポートしてません" << endl; exit(1); } // レコーダーの設定 //rc = setRecorder(g_recorder, rc); // ユーザコールバックの登録 XnCallbackHandle userCallbacks; g_user.RegisterUserCallbacks(UserDetected, UserLost, NULL, userCallbacks); // デプス・イメージ・ユーザデータの取得 g_depth.GetMetaData(g_depthMD); g_image.GetMetaData(g_imageMD); g_user.GetUserPixels(0, g_sceneMD); // Hybrid mode isn't supported in this sample // イメージとデプスの大きさが違うとエラー if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes()){ printf ("The device depth and image resolution must be equal!\n"); exit(1); } // RGB is the only image format supported. 
// フォーマットの確認 if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24){ printf("The device image format must be RGB24\n"); exit(1); } // Texture map init // フルスクリーン画面の大きさ調整 g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes() - 1) / 512) + 1) * 512; // 大きさによって512の倍数に調整(1024) g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes() - 1) / 512) + 1) * 512; // 512 g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel)); // スクリーンの大きさ分の色情報の容量を確保 // 座標ポインタの初期化 g_pPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D)); // 座標を入れるポインタを作成 g_pBackTex = (XnRGB24Pixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnRGB24Pixel)); // 背景画像を入れるポインタを作成 g_pBackPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D)); // 背景座標を入れるポインタを作成 g_pBackDepth = (XnDepthPixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnDepthPixel)); // 背景座標を入れるポインタを作成 }
int main() { XnStatus nRetVal = XN_STATUS_OK; Context context; ScriptNode scriptNode; EnumerationErrors errors; const char *fn = NULL; if (fileExists(SAMPLE_XML_PATH)) fn = SAMPLE_XML_PATH; else if (fileExists(SAMPLE_XML_PATH_LOCAL)) fn = SAMPLE_XML_PATH_LOCAL; else { printf("Could not find '%s' nor '%s'. Aborting.\n" , SAMPLE_XML_PATH, SAMPLE_XML_PATH_LOCAL); return XN_STATUS_ERROR; } printf("Reading config from: '%s'\n", fn); nRetVal = context.InitFromXmlFile(fn, scriptNode, &errors); if (nRetVal == XN_STATUS_NO_NODE_PRESENT) { XnChar strError[1024]; errors.ToString(strError, 1024); printf("%s\n", strError); return (nRetVal); } else if (nRetVal != XN_STATUS_OK) { printf("Open failed: %s\n", xnGetStatusString(nRetVal)); return (nRetVal); } DepthGenerator depth; nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth); CHECK_RC(nRetVal, "Find depth generator"); XnFPSData xnFPS; nRetVal = xnFPSInit(&xnFPS, 180); CHECK_RC(nRetVal, "FPS Init"); DepthMetaData depthMD; while (!xnOSWasKeyboardHit()) { nRetVal = context.WaitOneUpdateAll(depth); if (nRetVal != XN_STATUS_OK) { printf("UpdateData failed: %s\n", xnGetStatusString(nRetVal)); continue; } xnFPSMarkFrame(&xnFPS); depth.GetMetaData(depthMD); printf("Frame %d Middle point is: %u. FPS: %f\n", depthMD.FrameID(), depthMD(depthMD.XRes() / 2, depthMD.YRes() / 2), xnFPSCalc(&xnFPS)); } depth.Release(); scriptNode.Release(); context.Release(); return 0; }
// Reads a recorded .oni file, transforms every depth frame via
// transformDepthMD, and writes the result to a new recording through a mock
// depth node. Usage: <program> <inputFile> <outputFile>.
int main(int argc, char* argv[])
{
    XnStatus nRetVal = XN_STATUS_OK;

    // Optional logging — keep running even if the log can't be opened.
    nRetVal = xnLogInitFromXmlFile(SAMPLE_XML_PATH);
    if (nRetVal != XN_STATUS_OK)
    {
        printf("Log couldn't be opened: %s. Running without log", xnGetStatusString(nRetVal));
    }
    if (argc < 3)
    {
        printf("usage: %s <inputFile> <outputFile>\n", argv[0]);
        return -1;
    }
    const char* strInputFile = argv[1];
    const char* strOutputFile = argv[2];

    Context context;
    nRetVal = context.Init();
    CHECK_RC(nRetVal, "Init");

    // open input file
    Player player;
    nRetVal = context.OpenFileRecording(strInputFile, player);
    CHECK_RC(nRetVal, "Open input file");

    // Get depth node from recording
    DepthGenerator depth;
    nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
    CHECK_RC(nRetVal, "Find depth generator");

    // Create mock node based on depth node from recording
    MockDepthGenerator mockDepth;
    nRetVal = mockDepth.CreateBasedOn(depth);
    CHECK_RC(nRetVal, "Create mock depth node");

    // create recorder
    Recorder recorder;
    nRetVal = recorder.Create(context);
    CHECK_RC(nRetVal, "Create recorder");
    nRetVal = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, strOutputFile);
    CHECK_RC(nRetVal, "Set recorder destination file");

    // add depth node to recorder
    nRetVal = recorder.AddNodeToRecording(mockDepth);
    CHECK_RC(nRetVal, "Add node to recording");

    // Play the recording through exactly once.
    nRetVal = player.SetRepeat(FALSE);
    XN_IS_STATUS_OK(nRetVal);

    XnUInt32 nNumFrames = 0;
    nRetVal = player.GetNumFrames(depth.GetName(), nNumFrames);
    CHECK_RC(nRetVal, "Get player number of frames");

    DepthMetaData depthMD;
    // Pull depth frames one by one until the recording is exhausted.
    while ((nRetVal = depth.WaitAndUpdateData()) != XN_STATUS_EOF)
    {
        CHECK_RC(nRetVal, "Read next frame");
        // Get depth meta data
        depth.GetMetaData(depthMD);

        //-----------------------------------------------//
        // Transform depth! This is the interesting part //
        //-----------------------------------------------//

        /* Enable the depth data to be modified. This is done implicitly by
           depthMD.WritableDepthMap(), but we're calling it just to be clear. */
        nRetVal = depthMD.MakeDataWritable();
        CHECK_RC(nRetVal, "Make depth data writable");

        transformDepthMD(depthMD);

        // Pass the transformed data to the mock depth generator
        nRetVal = mockDepth.SetData(depthMD);
        CHECK_RC(nRetVal, "Set mock node new data");

        /* We need to call recorder.Record explicitly because we're not using
           WaitAndUpdateAll(). */
        nRetVal = recorder.Record();
        CHECK_RC(nRetVal, "Record");

        printf("Recorded: frame %u out of %u\r", depthMD.FrameID(), nNumFrames);
    }

    printf("\n");
    return 0;
}
void glutDisplay (void) { XnStatus rc = XN_STATUS_OK; // Read a new frame rc = g_context.WaitAnyUpdateAll(); if (rc != XN_STATUS_OK) { printf("Read failed: %s\n", xnGetStatusString(rc)); return; } g_depth.GetMetaData(g_depthMD); g_image.GetMetaData(g_imageMD); const XnDepthPixel* pDepth = g_depthMD.Data(); const XnUInt8* pImage = g_imageMD.Data(); unsigned int nImageScale = GL_WIN_SIZE_X / g_depthMD.FullXRes(); // Copied from SimpleViewer // Clear the OpenGL buffers glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Setup the OpenGL viewpoint glMatrixMode(GL_PROJECTION); glPushMatrix(); glLoadIdentity(); glOrtho(0, GL_WIN_SIZE_X, GL_WIN_SIZE_Y, 0, -1.0, 1.0); // Calculate the accumulative histogram (the yellow display...) xnOSMemSet(g_pDepthHist, 0, MAX_DEPTH*sizeof(float)); unsigned int nNumberOfPoints = 0; for (XnUInt y = 0; y < g_depthMD.YRes(); ++y) { for (XnUInt x = 0; x < g_depthMD.XRes(); ++x, ++pDepth) { if (*pDepth != 0) { g_pDepthHist[*pDepth]++; nNumberOfPoints++; } } } for (int nIndex=1; nIndex<MAX_DEPTH; nIndex++) { g_pDepthHist[nIndex] += g_pDepthHist[nIndex-1]; } if (nNumberOfPoints) { for (int nIndex=1; nIndex<MAX_DEPTH; nIndex++) { g_pDepthHist[nIndex] = (unsigned int)(256 * (1.0f - (g_pDepthHist[nIndex] / nNumberOfPoints))); } } xnOSMemSet(g_pTexMap, 0, g_nTexMapX*g_nTexMapY*sizeof(XnRGB24Pixel)); // check if we need to draw image frame to texture if (g_nViewState == DISPLAY_MODE_OVERLAY || g_nViewState == DISPLAY_MODE_IMAGE) { const XnRGB24Pixel* pImageRow = g_imageMD.RGB24Data(); XnRGB24Pixel* pTexRow = g_pTexMap + g_imageMD.YOffset() * g_nTexMapX; for (XnUInt y = 0; y < g_imageMD.YRes(); ++y) { const XnRGB24Pixel* pImage = pImageRow; XnRGB24Pixel* pTex = pTexRow + g_imageMD.XOffset(); for (XnUInt x = 0; x < g_imageMD.XRes(); ++x, ++pImage, ++pTex) { *pTex = *pImage; } pImageRow += g_imageMD.XRes(); pTexRow += g_nTexMapX; } } // check if we need to draw depth frame to texture if (g_nViewState == DISPLAY_MODE_OVERLAY || g_nViewState == 
DISPLAY_MODE_DEPTH) { const XnDepthPixel* pDepthRow = g_depthMD.Data(); XnRGB24Pixel* pTexRow = g_pTexMap + g_depthMD.YOffset() * g_nTexMapX; for (XnUInt y = 0; y < g_depthMD.YRes(); ++y) { const XnDepthPixel* pDepth = pDepthRow; XnRGB24Pixel* pTex = pTexRow + g_depthMD.XOffset(); for (XnUInt x = 0; x < g_depthMD.XRes(); ++x, ++pDepth, ++pTex) { if (*pDepth != 0) { int nHistValue = g_pDepthHist[*pDepth]; pTex->nRed = nHistValue; pTex->nGreen = nHistValue; pTex->nBlue = 0; } } pDepthRow += g_depthMD.XRes(); pTexRow += g_nTexMapX; } } // Create the OpenGL texture map glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, g_nTexMapX, g_nTexMapY, 0, GL_RGB, GL_UNSIGNED_BYTE, g_pTexMap); // Display the OpenGL texture map glColor4f(1,1,1,1); glBegin(GL_QUADS); int nXRes = g_depthMD.FullXRes(); int nYRes = g_depthMD.FullYRes(); // upper left glTexCoord2f(0, 0); glVertex2f(0, 0); // upper right glTexCoord2f((float)nXRes/(float)g_nTexMapX, 0); glVertex2f(GL_WIN_SIZE_X, 0); // bottom right glTexCoord2f((float)nXRes/(float)g_nTexMapX, (float)nYRes/(float)g_nTexMapY); glVertex2f(GL_WIN_SIZE_X, GL_WIN_SIZE_Y); // bottom left glTexCoord2f(0, (float)nYRes/(float)g_nTexMapY); glVertex2f(0, GL_WIN_SIZE_Y); glEnd(); // Swap the OpenGL display buffers glutSwapBuffers(); }
int main(int argc, char* argv[]) { XnStatus rc; EnumerationErrors errors; rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors); if (rc == XN_STATUS_NO_NODE_PRESENT) { XnChar strError[1024]; errors.ToString(strError, 1024); printf("%s\n", strError); return (rc); } else if (rc != XN_STATUS_OK) { printf("Open failed: %s\n", xnGetStatusString(rc)); return (rc); } rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth); rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image); g_depth.GetMetaData(g_depthMD); g_image.GetMetaData(g_imageMD); // Hybrid mode isn't supported in this sample if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes()) { printf ("The device depth and image resolution must be equal!\n"); return 1; } // RGB is the only image format supported. if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24) { printf("The device image format must be RGB24\n"); return 1; } // Texture map init g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes()-1) / 512) + 1) * 512; g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes()-1) / 512) + 1) * 512; g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel)); // OpenGL init glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH); glutInitWindowSize(GL_WIN_SIZE_X, GL_WIN_SIZE_Y); glutCreateWindow ("OpenNI Simple Viewer"); glutFullScreen(); glutSetCursor(GLUT_CURSOR_NONE); glutKeyboardFunc(glutKeyboard); glutDisplayFunc(glutDisplay); glutIdleFunc(glutIdle); glDisable(GL_DEPTH_TEST); glEnable(GL_TEXTURE_2D); // Per frame code is in glutDisplay glutMainLoop(); return 0; }
//----------------------------------------------------
// Image drawing
//----------------------------------------------------
// Renders the current frame according to g_nViewState:
//  - overlay/depth/image modes: draw g_pTexMap as a full-window textured quad
//    under a 2D orthographic projection.
//  - chroma/point-cloud modes: draw the scene as 3D points under a depth-aware
//    orthographic projection with a movable eye position.
void drawImage(void){
	switch(g_nViewState){
	case DISPLAY_MODE_OVERLAY:		// normal (textured quad) drawing modes
	case DISPLAY_MODE_DEPTH:
	case DISPLAY_MODE_IMAGE:
		glMatrixMode(GL_PROJECTION);	// set up the projection matrix
		glLoadIdentity();				// clear the matrix stack
		gluOrtho2D(0, GL_WIN_SIZE_X, GL_WIN_SIZE_Y, 0);
		// Parallel projection of world coordinates onto normalized device
		// coordinates (left, right, bottom, top). With a parallel projection
		// the point cloud can also be projected onto a plane, which suits
		// chroma keying. The Kinect range is usable from roughly 500 to 9000
		// (configured limit 10000).

		glMatrixMode(GL_MODELVIEW);		// set up the modelview matrix
		glLoadIdentity();

		glEnable(GL_TEXTURE_2D);		// enable texture mapping

		// Texture parameter setup and upload of the prepared texture map.
		glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, g_nTexMapX, g_nTexMapY, 0, GL_RGB, GL_UNSIGNED_BYTE, g_pTexMap);	// upload the image data

		// Display the OpenGL texture map
		glColor4f(1,1,1,1);

		// Paste the image data onto a full-window quad. Texture coordinates
		// are scaled by FullXRes/TexMapX because the texture is padded to a
		// multiple of 512.
		glBegin(GL_QUADS);
		{
			int nXRes = g_depthMD.FullXRes();
			int nYRes = g_depthMD.FullYRes();
			// upper left
			glTexCoord2f(0, 0);
			glVertex2f(0, 0);
			// upper right
			glTexCoord2f((float)nXRes/(float)g_nTexMapX, 0);
			glVertex2f(GL_WIN_SIZE_X, 0);
			// bottom right
			glTexCoord2f((float)nXRes/(float)g_nTexMapX, (float)nYRes/(float)g_nTexMapY);
			glVertex2f(GL_WIN_SIZE_X, GL_WIN_SIZE_Y);
			// bottom left
			glTexCoord2f(0, (float)nYRes/(float)g_nTexMapY);
			glVertex2f(0, GL_WIN_SIZE_Y);
		}
		glEnd();

		glDisable(GL_TEXTURE_2D);		// disable texture mapping
		break;

	case DISPLAY_MODE_CHROMA:			// point-cloud drawing modes
	case DISPLAY_MODE_POINT_CLOUD:
		// Projection transform
		glMatrixMode(GL_PROJECTION);	// set up the projection matrix
		glLoadIdentity();				// clear the matrix stack
		glOrtho(0, KINECT_IMAGE_WIDTH, KINECT_IMAGE_HEIGHT, 0, -1.0, -KINECT_MAX_DEPTH - KINECT_VISIBLE_DELTA);
		// Parallel projection (left, right, bottom, top, near, far); see note
		// above about chroma keying and the Kinect depth range.

		// Viewing transform
		gluLookAt(
			g_lokEyeX, g_lokEyeY, g_lokEyeZ,	// eye position (initial: (0,0,-1))
			g_lokDirX, g_lokDirY, g_lokDirZ,	// look-at target (initial: (0,0,-2))
			0.0, 1.0, 0.0);						// up vector

		// Modeling transform
		glMatrixMode(GL_MODELVIEW);		// set up the modelview matrix
		glLoadIdentity();				// clear the matrix stack

		glEnable(GL_DEPTH_TEST);		// enable hidden-surface removal

		// Point-cloud display
		glPointSize(g_pointSize);		// point size
		drawPointCloud(g_pBackTex, g_pBackDepth, g_pPoint);	// draw the background image
		//drawPointCloud(g_imageMD.RGB24Data(), g_depthMD.Data(), 10, g_chromaThresh);	// person extraction (depth threshold)
		drawPointCloudHuman(g_imageMD.RGB24Data(), g_depthMD.Data(), g_sceneMD.Data(), g_pPoint);	// person extraction (motion/user labels)

		glDisable(GL_DEPTH_TEST);		// disable hidden-surface removal
		break;
	}
}
//----------------------------------------------------
// Texture setup
//----------------------------------------------------
// Fills g_pTexMap for the current view mode:
//  - overlay/image: copy the RGB camera frame row by row.
//  - overlay/depth: color each depth pixel — user-label colors for people,
//    otherwise a histogram-scaled color keyed to 1m depth bands.
// Rows advance by the source XRes on the input side and by g_nTexMapX on the
// texture side, because the texture is padded to a multiple of 512.
void setTexture(void){
	xnOSMemSet(g_pTexMap, 0, g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));	// zero the whole texture map

	// View modes 1 or 3: copy the camera image into the texture.
	if (g_nViewState == DISPLAY_MODE_OVERLAY || g_nViewState == DISPLAY_MODE_IMAGE){
		const XnRGB24Pixel* pImageRow = g_imageMD.RGB24Data();	// pointer to the image data
		XnRGB24Pixel* pTexRow = g_pTexMap + g_imageMD.YOffset() * g_nTexMapX;

		for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; ++ y){
			const XnRGB24Pixel* pImage = pImageRow;
			XnRGB24Pixel* pTex = pTexRow + g_imageMD.XOffset();

			for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; ++ x, ++ pImage, ++ pTex){
				*pTex = *pImage;
			}
			pImageRow += g_imageMD.XRes();
			pTexRow += g_nTexMapX;
		}
	}

	// View modes 1 or 2: render the depth map (with user labels) into the texture.
	if (g_nViewState == DISPLAY_MODE_OVERLAY || g_nViewState == DISPLAY_MODE_DEPTH){
		const XnDepthPixel* pDepthRow = g_depthMD.Data();
		XnRGB24Pixel* pTexRow = g_pTexMap + g_depthMD.YOffset() * g_nTexMapX;
		const XnLabel* pLabel = g_sceneMD.Data();	// per-pixel user labels (0 = no user)

		for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; ++ y){
			const XnDepthPixel* pDepth = pDepthRow;
			XnRGB24Pixel* pTex = pTexRow + g_depthMD.XOffset();

			for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; ++ x, ++ pDepth, ++ pTex, ++ pLabel){
				int nHistValue = g_pDepthHist[*pDepth];	// histogram-equalized intensity for this depth
				if(*pLabel){						// pixel belongs to a tracked user
					*pTex = userColor[*pLabel];
				}else if (*pDepth != 0){			// depth 0 means "no reading" — leave black
					if(*pDepth < 1000){
						*pTex = xnRGB24Pixel(nHistValue, 0, 0);				// red
					}else if(*pDepth < 2000){
						*pTex = xnRGB24Pixel(0, nHistValue, 0);				// green
					}else if(*pDepth < 3000){
						*pTex = xnRGB24Pixel(0, 0, nHistValue);				// blue
					}else if(*pDepth < 4000){
						*pTex = xnRGB24Pixel(nHistValue, nHistValue, 0);	// yellow (R+G)
					}else if(*pDepth < 5000){
						*pTex = xnRGB24Pixel(0, nHistValue, nHistValue);	// cyan (G+B)
					}else{
						*pTex = xnRGB24Pixel(nHistValue, 0, nHistValue);	// magenta (R+B)
					}
				}
			}
			pDepthRow += g_depthMD.XRes();
			pTexRow += g_nTexMapX;
		}
	}

	// View mode 4 (chroma key) — disabled legacy implementation kept for reference:
	//if (g_nViewState == DISPLAY_MODE_CHROMA){
	//	// Paste the camera image.
	//	const XnRGB24Pixel* pImageRow = g_imageMD.RGB24Data();	// pointer to the image data
	//	XnRGB24Pixel* pTexRow = g_pTexMap + g_imageMD.YOffset() * g_nTexMapX;
	//	for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; ++ y){	// 480
	//		const XnRGB24Pixel* pImage = pImageRow;
	//		XnRGB24Pixel* pTex = pTexRow + g_imageMD.XOffset();
	//		for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; ++ x, ++ pImage, ++ pTex){	// 640
	//			*pTex = *pImage;
	//		}
	//		pImageRow += g_imageMD.XRes();
	//		pTexRow += g_nTexMapX;
	//	}

	//	// Person extraction via depth data + background compositing.
	//	const XnDepthPixel* pDepthRow = g_depthMD.Data();	// pointer to the depth data
	//	pTexRow = g_pTexMap + g_depthMD.YOffset() * g_nTexMapX;
	//	GLuint g_backWidth = g_back.GetWidth();				// background image width
	//	GLubyte* pBackData = g_back.GetData() + g_back.GetImageSize() - 3 * g_backWidth;	// background pointer (walked from the last row)
	//	for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; ++ y){	// 480
	//		const XnDepthPixel* pDepth = pDepthRow;			// depth pointer for this row
	//		XnRGB24Pixel* pTex = pTexRow + g_depthMD.XOffset();
	//		for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; ++ x, ++ pDepth, ++ pTex){	// 640
	//			// Draw the background image where depth is 0 or beyond the
	//			// threshold (pixels below the threshold keep the camera image).
	//			if (*pDepth == 0 || *pDepth >= g_chromaThresh){
	//				pTex->nRed = *pBackData;
	//				pTex->nGreen = *(pBackData + 1);
	//				pTex->nBlue = *(pBackData + 2);
	//			}
	//			pBackData += 3;
	//		}
	//		pDepthRow += g_depthMD.XRes();
	//		pTexRow += g_nTexMapX;
	//		pBackData -= 2 * 3 * g_backWidth;
	//	}
	//}
}
//---------------------------------------------------- // 描画処理 //---------------------------------------------------- void glutDisplay (void){ xnFPSMarkFrame(&g_xnFPS); // FPSの計測開始? XnStatus rc = XN_STATUS_OK; // 更新されたノードを待つ(どれでもいい) rc = g_context.WaitAnyUpdateAll(); if (rc != XN_STATUS_OK){ printf("Read failed: %s\n", xnGetStatusString(rc)); printf("test\n"); return; } // イメージ・デプス・ユーザのデータを取得 g_image.GetMetaData(g_imageMD); g_depth.GetMetaData(g_depthMD); g_user.GetUserPixels(0, g_sceneMD); // カラー・デプスバッファをクリア glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // 設定 setDepthHistgram(g_depth, g_depthMD, g_pDepthHist); // ヒストグラムの計算・作成 setTexture(); // テクスチャ設定 // 描画 drawImage(); // イメージデータの描画 // デバッグモードの文字は描画の行列と隔離 glMatrixMode(GL_PROJECTION); // 射影変換の行列の設定 //glLoadIdentity(); // スタックのクリア glMatrixMode(GL_MODELVIEW); // モデルビュー変換の行列の設定 glLoadIdentity(); if(g_debugMode) glDebug(); // デバッグモード // 一度だけスクリーンショットをとる if(g_screenShotImageMode){ ostringstream fname; fname << OUT_IMAGE_PATH ;//出力ファイル名 std::string name = fname.str(); g_glScreenShot.screenshot(name.c_str(), 24); g_screenShotImageMode = !g_screenShotImageMode; // トグル } // 一度だけ深さデータを取得する if(g_screenShotDepthMode){ ofstream ofs(OUT_DEPTH_PATH); const XnDepthPixel* pDepth = g_depthMD.Data(); for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; y ++){ for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; x ++, pDepth ++){ if(*pDepth < 2000){ ofs << (int)((*pDepth) * 2) << ','; }else{ ofs << (*pDepth) << ','; } } ofs << endl; } g_screenShotDepthMode = !g_screenShotDepthMode; // トグル } // Swap the OpenGL display buffers glutSwapBuffers(); }
// ----------------------------------------------------------------------------------------------------- // saveHistogramImage // ----------------------------------------------------------------------------------------------------- int saveHistogramImage( const XnRGB24Pixel* pImageMap, const XnDepthPixel* pDepthMap, IplImage* pImgDepth, int frameID) { static float depthHistogram[MAX_DEPTH_HISTOGRAM]; // Calculate the accumulative histogram (the yellow display...) const XnDepthPixel* pDepth = g_depthMD.Data(); xnOSMemSet(depthHistogram, 0, MAX_DEPTH_HISTOGRAM*sizeof(float)); unsigned int nNumberOfPoints = 0; // count depth values for (XnUInt y = 0; y < g_depthMD.YRes(); ++y) { for (XnUInt x = 0; x < g_depthMD.XRes(); ++x, ++pDepth) { if (*pDepth != 0) { depthHistogram[*pDepth]++; nNumberOfPoints++; } } } // cumulative sum for (int nIndex=1; nIndex<MAX_DEPTH_HISTOGRAM; nIndex++) { depthHistogram[nIndex] += depthHistogram[nIndex-1]; } // rescale to 0..256 if (nNumberOfPoints) { for (int nIndex=1; nIndex<MAX_DEPTH_HISTOGRAM; nIndex++) { depthHistogram[nIndex] = (unsigned int)(256 * (1.0f - (depthHistogram[nIndex] / nNumberOfPoints))); } } // generate histogram depth image int i = 0; pDepth = g_depthMD.Data(); for (XnUInt y = 0; y < g_depthMD.YRes(); ++y) { for (XnUInt x = 0; x < g_depthMD.XRes(); ++x, ++pDepth, ++i) { unsigned char nHistValue = 0; if (*pDepth != 0) nHistValue = depthHistogram[*pDepth]; // yellow pixels pImgDepth->imageData[3*i+0] = 0; //Blue pImgDepth->imageData[3*i+1] = nHistValue; //Green pImgDepth->imageData[3*i+2] = nHistValue; //Red } } if (frameID<0) frameID = g_depthMD.FrameID(); // use ID given by Kinect char bufFilename[256]; sprintf(bufFilename,"%s/frame_%d_histo.bmp", Config::_PathFrameSequence.c_str(), frameID); cvSaveImage(bufFilename, pImgDepth); }
int Init() { XnStatus rc; //Make sure our image types are the same as the OpenNI image types. assert(sizeof(XnRGB24Pixel) == sizeof(ColorPixel)); assert(sizeof(XnDepthPixel) == sizeof(DepthPixel)); assert(sizeof(XnStatus) == sizeof(int)); // Load OpenNI xml settings char filePath[255]; int length = Util::Helpers::GetExeDirectory(filePath, sizeof(filePath)); filePath[length] = '\\'; strcpy(&filePath[length+1], SAMPLE_XML_PATH); EnumerationErrors errors; rc = deviceContext.InitFromXmlFile(filePath, &errors); if (rc == XN_STATUS_NO_NODE_PRESENT) { //One reason would be if Microsoft SDK is installed beside PrimeSense. Device manager should say PrimeSense instead of Microsoft Kinect. //XnChar strError[1024]; //errors.ToString(strError, 1024); //LOGE("%s\n", strError); return -1; } else if (rc != XN_STATUS_OK) { fprintf(stderr, "%s\n", xnGetStatusString(rc)); /*LOGE("Open failed: %s\n", xnGetStatusString(rc));*/ return (rc); } // Retrieve colour and depth nodes rc = deviceContext.FindExistingNode(XN_NODE_TYPE_IMAGE, colorImageGenerator); rc = deviceContext.FindExistingNode(XN_NODE_TYPE_DEPTH, depthImageGenerator); // Set mirror mode to off SetMirrorMode(false); // Get a frame to perform checks on it ImageMetaData colorImageMetaData; DepthMetaData depthImageMetaData; depthImageGenerator.GetMetaData(depthImageMetaData); colorImageGenerator.GetMetaData(colorImageMetaData); // Hybrid mode isn't supported in this sample if (colorImageMetaData.FullXRes() != depthImageMetaData.FullXRes() || colorImageMetaData.FullYRes() != depthImageMetaData.FullYRes()) { /*LOGE("The device depth and image resolution must be equal!\n");*/ return 1; } // RGB is the only image format supported. if (colorImageMetaData.PixelFormat() != XN_PIXEL_FORMAT_RGB24) { /*LOGE("The device image format must be RGB24\n");*/ return 1; } // Need to make sure the automatic alignment of colour and depth images is supported. 
XnBool isSupported = depthImageGenerator.IsCapabilitySupported("AlternativeViewPoint"); if(!isSupported) { /*LOGE("Cannot set AlternativeViewPoint!\n");*/ return 1; } // Set it to VGA maps at 30 FPS /*XnMapOutputMode mapMode; mapMode.nXRes = XN_VGA_X_RES; mapMode.nYRes = XN_VGA_Y_RES; mapMode.nFPS = 60; rc = g_depth.SetMapOutputMode(mapMode); if(rc) { LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc)); return 1; } mapMode.nFPS = 30; rc = g_image.SetMapOutputMode(mapMode); if(rc) { LOGE("Failed to set image map mode: %s\n", xnGetStatusString(rc)); return 1; }*/ // Set automatic alignment of the colour and depth images. rc = depthImageGenerator.GetAlternativeViewPointCap().SetViewPoint(colorImageGenerator); if(rc) { /*LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));*/ return 1; } return XN_STATUS_OK; }