// ----------------------------------------------------------------------------------------------------- // generateFrame // ----------------------------------------------------------------------------------------------------- bool CameraDevice::generateFrame(IplImage* imgRGB, IplImage* imgDepth) { XnStatus nRetVal = XN_STATUS_OK; const XnDepthPixel* pDepthMap = NULL; const XnRGB24Pixel* pImageMap = NULL; xnFPSMarkFrame(&g_xnFPS); nRetVal = g_context.WaitAndUpdateAll(); if (nRetVal==XN_STATUS_OK) { g_depth.GetMetaData(g_depthMD); g_image.GetMetaData(g_imageMD); pDepthMap = g_depthMD.Data(); pImageMap = g_image.GetRGB24ImageMap(); printf("Frame %02d (%dx%d) Depth at middle point: %u. FPS: %f\r", g_depthMD.FrameID(), g_depthMD.XRes(), g_depthMD.YRes(), g_depthMD(g_depthMD.XRes()/2, g_depthMD.YRes()/2), xnFPSCalc(&g_xnFPS)); // convert to OpenCV buffers convertImageRGB(pImageMap, imgRGB); convertImageDepth(pDepthMap, imgDepth); return true; } return false; }
void ImageGolographicThread :: run (void) { ImageGenerator* generator = new ImageGenerator(imageData);//, this); generator->loadModel(); connect (generator, SIGNAL (imageVal (int)), this, SLOT (setPVal (int)) ); resD = generator->generateImages(); delete generator; emit getImagesData (resD); }
int main() { FramesTransmitter framesTransmitter; ImageGenerator imageGenerator; int imageWidth, imageHeight, imageBPP; imageGenerator.GetImageParams(imageWidth, imageHeight, imageBPP); framesTransmitter.Init("127.0.0.1", "5541", imageWidth, imageHeight, imageBPP); //for (int i = 0; i < 100; ++i) //{ // unsigned char * buff = imageGenerator.GenerateImage(); // framesTransmitter.Transmit(buff); // // save image on HDD // bitmap_image image(imageWidth, imageHeight); // int sizeToCopy = sizeof(char) * imageWidth * imageHeight * imageBPP / 8; // std::copy(buff, buff + sizeToCopy, image.data()); // char filePath[200]; // sprintf(filePath, "D:/eclipse_workspace/FramesTransmitter/FramesToSend/img%04d.bmp", i); // image.save_image(filePath); //} int frameCounter = 0; int channels = imageBPP / 8; IplImage * image = cvCreateImage(cvSize(imageWidth, imageHeight), IPL_DEPTH_8U, channels); while(cvWaitKey(40) != 113) // press 'q' { unsigned char * buff = imageGenerator.GenerateImage(); int sizeToCopy = sizeof(char) * imageWidth * imageHeight * channels; std::copy(buff, buff + sizeToCopy, image -> imageData); char str[100]; sprintf(str,"[%04d]", frameCounter++ ); CvFont font; cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8); cvPutText(image, str, cvPoint(10,40), &font, cvScalar(255,255,255)); cvShowImage("Display window", image); framesTransmitter.Transmit((unsigned char*) image -> imageData); } cvDestroyWindow("Display window"); cvReleaseImage(&image); return 0; }
void seekFrame(int nDiff) { XnStatus nRetVal = XN_STATUS_OK; if (isPlayerOn()) { const XnChar* strNodeName = NULL; if (g_pPrimary != NULL) { strNodeName = g_pPrimary->GetName(); } else if (g_Depth.IsValid()) { strNodeName = g_Depth.GetName(); } else if (g_Image.IsValid()) { strNodeName = g_Image.GetName(); } else if (g_IR.IsValid()) { strNodeName = g_IR.GetName(); } else if (g_Audio.IsValid()) { strNodeName = g_Audio.GetName(); } nRetVal = g_Player.SeekToFrame(strNodeName, nDiff, XN_PLAYER_SEEK_CUR); if (nRetVal != XN_STATUS_OK) { displayMessage("Failed to seek: %s", xnGetStatusString(nRetVal)); return; } XnUInt32 nFrame = 0; XnUInt32 nNumFrames = 0; nRetVal = g_Player.TellFrame(strNodeName, nFrame); if (nRetVal != XN_STATUS_OK) { displayMessage("Failed to tell frame: %s", xnGetStatusString(nRetVal)); return; } nRetVal = g_Player.GetNumFrames(strNodeName, nNumFrames); if (nRetVal != XN_STATUS_OK) { displayMessage("Failed to get number of frames: %s", xnGetStatusString(nRetVal)); return; } displayMessage("Seeked %s to frame %u/%u", strNodeName, nFrame, nNumFrames); } }
// Gets the colour and depth data from the Kinect sensor. bool GetColorAndDepthImages(ColorImage& colorImage, DepthImage& depthImage) { XnStatus rc = XN_STATUS_OK; // Read a new frame, blocking operation rc = deviceContext.WaitAnyUpdateAll(); if (rc != XN_STATUS_OK) { /*LOGE("Read failed: %s\n", xnGetStatusString(rc));*/ throw rc; } // Get handles to new data static ImageMetaData colorImageMetaData; static DepthMetaData depthImageMetaData; colorImageGenerator.GetMetaData(colorImageMetaData); depthImageGenerator.GetMetaData(depthImageMetaData); // Validate images if (!depthImageGenerator.IsValid() || !colorImageGenerator.IsValid()) { /*LOGE("Error: Color or depth image is invalid.");*/ throw 1; } if (colorImageMetaData.Timestamp() <= mostRecentRGB) return false; // Fetch pointers to data const XnRGB24Pixel* pColorImage = colorImageMetaData.RGB24Data(); //g_depth.GetRGB24ImageMap() const XnDepthPixel* pDepthImage = depthImageMetaData.Data();// g_depth.GetDepthMap(); // Copy data over to arrays memcpy(colorImage.data, pColorImage, sizeof(colorImage.data)); memcpy(depthImage.data, pDepthImage, sizeof(depthImage.data)); colorImage.rows = colorImage.maxRows; colorImage.cols = colorImage.maxCols; depthImage.rows = depthImage.maxRows; depthImage.cols = depthImage.maxCols; mostRecentRGB = colorImageMetaData.Timestamp(); return true; }
// Updates to the latest image obtained from the Kinect int kinectUpdate(void) { XnStatus nRetVal = context.WaitAndUpdateAll(); g_image.GetMetaData(g_imageMD); //nRetVal = context.WaitOneUpdateAll(depth); depth.GetMetaData(depthMD); return nRetVal; }
// Releases every OpenNI handle held by the application: the player, the
// device, all generators, the script node, and finally the context itself.
void closeDevice()
{
	g_Player.Release();
	g_Device.Release();
	g_Depth.Release();
	g_Image.Release();
	g_IR.Release();
	g_Audio.Release();
	g_scriptNode.Release();
	// Context released last, after everything created through it.
	g_Context.Release();
}
// Releases all OpenNI handles and the texture buffer, then terminates the
// process. Note: always exits with status 1, even on a normal shutdown path.
void CleanUpExit()
{
	recorder.Release();
	g_player.Release();
	g_image.Release();
	g_scriptNode.Release();
	g_context.Release();
	g_hands.Release();
	g_gesture.Release();
	// g_pTexMap was malloc'ed elsewhere; freed here before exiting.
	free(g_pTexMap);

	exit(1);
}
void readFrame() { if (!g_Depth.IsValid() && !g_Image.IsValid() && !g_IR.IsValid() && !g_Audio.IsValid()) // @@@dded return; XnStatus rc = XN_STATUS_OK; if (g_pPrimary != NULL) { rc = g_Context.WaitOneUpdateAll(*g_pPrimary); } else { rc = g_Context.WaitAnyUpdateAll(); } if (rc != XN_STATUS_OK) { printf("Error: %s\n", xnGetStatusString(rc)); } if (g_Depth.IsValid()) { g_Depth.GetMetaData(g_DepthMD); } if (g_Image.IsValid()) { g_Image.GetMetaData(g_ImageMD); } if (g_IR.IsValid()) { g_IR.GetMetaData(g_irMD); } if (g_Audio.IsValid()) { g_Audio.GetMetaData(g_AudioMD); } }
// Set up OpenNI to obtain 8-bit mono images from the Kinect's RGB camera int kinectInit(void) { XnStatus nRetVal = XN_STATUS_OK; ScriptNode scriptNode; EnumerationErrors errors; printf("Reading config from: '%s'\n", SAMPLE_XML_PATH_LOCAL); nRetVal = context.InitFromXmlFile(SAMPLE_XML_PATH_LOCAL, scriptNode, &errors); nRetVal = context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image); //g_image.SetPixelFormat(XN_PIXEL_FORMAT_GRAYSCALE_8_BIT); g_image.SetPixelFormat(XN_PIXEL_FORMAT_RGB24); g_image.GetMetaData(g_imageMD); nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth); depth.GetMetaData(depthMD); // nRetVal = depth.GetAlternativeViewPointCap().SetViewPoint(g_image); //nRetVal = depth.GetFrameSyncCap().FrameSyncWith(g_image); return nRetVal; }
void captureRGB(unsigned char* g_ucImageBuffer) { ImageMetaData imd; _image.GetMetaData(imd); unsigned int nValue = 0; unsigned int nX = 0; unsigned int nY = 0; XnUInt16 g_nXRes = imd.XRes(); XnUInt16 g_nYRes = imd.YRes(); const XnRGB24Pixel * pImageMap = _image.GetRGB24ImageMap(); for (nY=0; nY<g_nYRes; nY++) { for (nX=0; nX < g_nXRes; nX++) { ((unsigned char*)g_ucImageBuffer)[(nY*g_nXRes+nX)*4+0] = pImageMap[nY*g_nXRes+nX].nBlue; ((unsigned char*)g_ucImageBuffer)[(nY*g_nXRes+nX)*4+1] = pImageMap[nY*g_nXRes+nX].nGreen; ((unsigned char*)g_ucImageBuffer)[(nY*g_nXRes+nX)*4+2] = pImageMap[nY*g_nXRes+nX].nRed; ((unsigned char*)g_ucImageBuffer)[(nY*g_nXRes+nX)*4+3] = 0x00; } } }
// Toggles depth-to-image registration: nValue==0 resets the depth viewpoint,
// any other value aligns depth to the image generator (when one is valid).
// No-op when depth is invalid or the capability is unsupported.
void changeRegistration(int nValue)
{
	const bool canRegister = g_Depth.IsValid()
		&& g_Depth.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT);
	if (!canRegister)
		return;

	if (nValue == 0)
	{
		g_Depth.GetAlternativeViewPointCap().ResetViewPoint();
	}
	else if (g_Image.IsValid())
	{
		g_Depth.GetAlternativeViewPointCap().SetViewPoint(g_Image);
	}
}
// Snapshots the requested generator outputs (depth / scene / image / IR) into
// the module-level data pointers and the nXRes/nYRes globals; optionally
// recomputes the depth histogram (which consumes pDepth, so it is rewound).
// FIX: the function is declared to return XnStatus but had no return
// statement at all (undefined behaviour); it now returns XN_STATUS_OK.
//TODO handle possible failures! Gotcha!
XnStatus prepare(char useScene, char useDepth, char useImage, char useIr, char useHistogram)
{
	if (useDepth)
	{
		mDepthGen.GetMetaData(depthMD);
		nXRes = depthMD.XRes();
		nYRes = depthMD.YRes();

		pDepth = depthMD.Data();

		if (useHistogram)
		{
			calcHist();

			// rewind the pointer
			pDepth = depthMD.Data();
		}
	}
	if (useScene)
	{
		mUserGen.GetUserPixels(0, sceneMD);
		nXRes = sceneMD.XRes();
		nYRes = sceneMD.YRes();

		pLabels = sceneMD.Data();
	}
	if (useImage)
	{
		mImageGen.GetMetaData(imageMD);
		nXRes = imageMD.XRes();
		nYRes = imageMD.YRes();

		pRGB = imageMD.RGB24Data();
		// HISTOGRAM?????
	}
	if (useIr)
	{
		mIrGen.GetMetaData(irMD);
		nXRes = irMD.XRes();
		nYRes = irMD.YRes();

		pIR = irMD.Data();
		// HISTOGRAM????
	}

	return XN_STATUS_OK;
}
void takePhoto() { static int index = 1; char fname[256] = {0,}; sprintf(fname, "kinect%03d.txt", index++); g_depth.GetMetaData(g_depthMD); g_image.GetMetaData(g_imageMD); int const nx = g_depthMD.XRes(); int const ny = g_depthMD.YRes(); assert(nx == g_imageMD.XRes()); assert(ny == g_imageMD.YRes()); const XnDepthPixel* pDepth = g_depthMD.Data(); const XnUInt8* pImage = g_imageMD.Data(); FILE * file = fopen(fname, "wb"); fprintf(file, "%d\n%d\n\n", nx, ny); for (int y = 0, di = 0, ri = 0, gi = 1, bi = 2; y < ny; y++) { for (int x = 0; x < nx; x++, di++, ri += 3, gi += 3, bi += 3) { int const r = pImage[ri]; int const g = pImage[gi]; int const b = pImage[bi]; int const d = pDepth[di]; assert(r >= 0); assert(g >= 0); assert(b >= 0); assert(d >= 0); assert(r <= 0xFF); assert(g <= 0xFF); assert(b <= 0xFF); assert(d <= 0xFFFF); fprintf(file, "%3d %3d %3d %5d\n", r, g, b, d); } fprintf(file, "\n"); } fflush(file); fclose(file); }
bool getImageCoordinatesForDepthPixel(int x, int y, int& imageX, int& imageY) { if (!g_Depth.IsValid()) return false; // no depth if (!g_Image.IsValid()) return false; // no image if (!g_Depth.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT)) return false; XnUInt32 altX; XnUInt32 altY; if (XN_STATUS_OK != g_Depth.GetAlternativeViewPointCap().GetPixelCoordinatesInViewPoint(g_Image, x, y, altX, altY)) return false; imageX = (int)altX; imageY = (int)altY; return true; }
/*
 * Class:     org_OpenNI_Samples_Assistant_NativeMethods
 * Method:    dispose
 * Signature: ()I
 */
// Releases every generator and the OpenNI context, clearing the matching
// has* flag after each release so the JNI layer knows nothing is live.
JNIEXPORT jint JNICALL Java_org_OpenNI_Samples_Assistant_NativeMethods_dispose
  (JNIEnv *, jclass)
{
	LOGD("dispose_start");
	disposeGraphics();

	mUserGen.Release();
	hasUserGen = 0;
	mDepthGen.Release();
	hasDepthGen = 0;
	mImageGen.Release();
	// FIX: copy-paste bug — this line previously reset hasUserGen a second
	// time, leaving the image-generator flag stale after release.
	hasImageGen = 0;
	mIrGen.Release();
	hasIrGen = 0;

	mContext->Release();
	delete mContext;
	mContext = 0;

	LOGD("dispose_end");
	return XN_STATUS_OK;
}
void glutDisplay (void){ glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Setup the OpenGL viewpoint glMatrixMode(GL_PROJECTION); glPushMatrix(); glLoadIdentity(); SceneMetaData sceneMD; DepthMetaData depthMD; ImageMetaData imageMD; g_DepthGenerator.GetMetaData(depthMD); glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0); glDisable(GL_TEXTURE_2D); //XnStatus rc = g_Context.WaitOneUpdateAll(g_DepthGenerator); XnStatus rc = g_Context.WaitAnyUpdateAll(); CHECK_RC("Wait Data",rc); g_DepthGenerator.GetMetaData(depthMD); if(g_UserGenerator.IsValid()) g_UserGenerator.GetUserPixels(0, sceneMD); g_ImageGenerator.GetMetaData(imageMD); DrawDepthMap(depthMD, sceneMD); DrawImage(imageMD); glutSwapBuffers(); }//glutdisplay
// Initializes the OpenNI context from XML, looks up every generator node this
// plugin needs, caches FPS/resolution info, and wires the event-out nodes for
// skeleton (skltn), hands (hnz) and floor (flr) data.
// Returns XN_STATUS_OK on success, XN_STATUS_ERROR on bad inputs/resolution.
XnStatus Init_Kinect(EventOutSFNode* skltn,EventOutSFNode* hnz,EventOutSFNode* flr){
	XnStatus rc=XN_STATUS_OK;
	EnumerationErrors errors;
	DepthMetaData g_depthMD;	// local snapshots, despite the g_ prefix
	ImageMetaData g_imageMD;

	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	CHECK_RC(rc, "InitFromXml");
	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	CHECK_RC(rc,"XN_NODE_TYPE_DEPTH");
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	CHECK_RC(rc,"XN_NODE_TYPE_IMAGE");
	rc= g_context.FindExistingNode(XN_NODE_TYPE_USER,g_user);
	CHECK_RC(rc,"XN_NODE_TYPE_USER");
	rc=g_context.FindExistingNode(XN_NODE_TYPE_SCENE,g_scene);
	CHECK_RC(rc,"XN_NODE_TYPE_SCENE");
	rc=g_context.FindExistingNode(XN_NODE_TYPE_HANDS,g_hands);
	CHECK_RC(rc,"XN_NODE_TYPE_HANDS");
	rc=g_context.FindExistingNode(XN_NODE_TYPE_GESTURE,g_gesture);
	CHECK_RC(rc,"XN_NODE_TYPE_GESTURE");

	g_depth.GetMetaData(g_depthMD);
	g_fps=g_depthMD.FPS();
	g_image.GetMetaData(g_imageMD);

	rc=init_skeleton();
	CHECK_RC(rc,"INIT SKELETON");
	rc=init_hands();
	CHECK_RC(rc,"INIT HANDS");

	pix_w=g_depthMD.FullXRes();
	pix_h=g_depthMD.FullYRes();
	if(pix_h==0||pix_w==0){return XN_STATUS_ERROR;}

	g_skltn=skltn;
	g_hnz=hnz;
	g_flr=flr;
	// FIX: the NULL check previously tested g_hands (the hands generator)
	// instead of g_hnz (the hands event-out parameter assigned just above).
	if(NULL==g_skltn||NULL==g_hnz||NULL==g_flr)return XN_STATUS_ERROR;

	isInit=true;
	return rc;
}
// Per-frame GLUT callback: waits for new Kinect data, builds a cumulative
// depth histogram, composites the RGB and/or depth maps into the shared
// texture according to g_nViewState, and draws it as a full-window quad.
void glutDisplay (void)
{
	XnStatus rc = XN_STATUS_OK;

	// Read a new frame
	rc = g_context.WaitAnyUpdateAll();
	if (rc != XN_STATUS_OK)
	{
		printf("Read failed: %s\n", xnGetStatusString(rc));
		return;
	}

	g_depth.GetMetaData(g_depthMD);
	g_image.GetMetaData(g_imageMD);

	const XnDepthPixel* pDepth = g_depthMD.Data();
	const XnUInt8* pImage = g_imageMD.Data();

	// NOTE(review): nImageScale is computed but never used below.
	unsigned int nImageScale = GL_WIN_SIZE_X / g_depthMD.FullXRes();

	// Copied from SimpleViewer
	// Clear the OpenGL buffers
	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();
	glOrtho(0, GL_WIN_SIZE_X, GL_WIN_SIZE_Y, 0, -1.0, 1.0);

	// Calculate the accumulative histogram (the yellow display...)
	xnOSMemSet(g_pDepthHist, 0, MAX_DEPTH*sizeof(float));

	// Count how many pixels fall at each depth value (0 = no reading).
	unsigned int nNumberOfPoints = 0;
	for (XnUInt y = 0; y < g_depthMD.YRes(); ++y)
	{
		for (XnUInt x = 0; x < g_depthMD.XRes(); ++x, ++pDepth)
		{
			if (*pDepth != 0)
			{
				g_pDepthHist[*pDepth]++;
				nNumberOfPoints++;
			}
		}
	}
	// Turn the per-depth counts into a cumulative distribution...
	for (int nIndex=1; nIndex<MAX_DEPTH; nIndex++)
	{
		g_pDepthHist[nIndex] += g_pDepthHist[nIndex-1];
	}
	// ...and map it to 0..255 brightness (nearer depths come out brighter).
	if (nNumberOfPoints)
	{
		for (int nIndex=1; nIndex<MAX_DEPTH; nIndex++)
		{
			g_pDepthHist[nIndex] = (unsigned int)(256 * (1.0f - (g_pDepthHist[nIndex] / nNumberOfPoints)));
		}
	}

	xnOSMemSet(g_pTexMap, 0, g_nTexMapX*g_nTexMapY*sizeof(XnRGB24Pixel));

	// check if we need to draw image frame to texture
	if (g_nViewState == DISPLAY_MODE_OVERLAY || g_nViewState == DISPLAY_MODE_IMAGE)
	{
		// Copy the RGB map row by row, honouring the map's X/Y offsets
		// inside the (larger, power-of-512-sized) texture.
		const XnRGB24Pixel* pImageRow = g_imageMD.RGB24Data();
		XnRGB24Pixel* pTexRow = g_pTexMap + g_imageMD.YOffset() * g_nTexMapX;

		for (XnUInt y = 0; y < g_imageMD.YRes(); ++y)
		{
			const XnRGB24Pixel* pImage = pImageRow;
			XnRGB24Pixel* pTex = pTexRow + g_imageMD.XOffset();

			for (XnUInt x = 0; x < g_imageMD.XRes(); ++x, ++pImage, ++pTex)
			{
				*pTex = *pImage;
			}

			pImageRow += g_imageMD.XRes();
			pTexRow += g_nTexMapX;
		}
	}

	// check if we need to draw depth frame to texture
	if (g_nViewState == DISPLAY_MODE_OVERLAY || g_nViewState == DISPLAY_MODE_DEPTH)
	{
		const XnDepthPixel* pDepthRow = g_depthMD.Data();
		XnRGB24Pixel* pTexRow = g_pTexMap + g_depthMD.YOffset() * g_nTexMapX;

		for (XnUInt y = 0; y < g_depthMD.YRes(); ++y)
		{
			const XnDepthPixel* pDepth = pDepthRow;
			XnRGB24Pixel* pTex = pTexRow + g_depthMD.XOffset();

			// Valid depths are painted via the histogram value (red+green
			// equal, blue zero → shades of yellow); zero depth stays black.
			for (XnUInt x = 0; x < g_depthMD.XRes(); ++x, ++pDepth, ++pTex)
			{
				if (*pDepth != 0)
				{
					int nHistValue = g_pDepthHist[*pDepth];
					pTex->nRed = nHistValue;
					pTex->nGreen = nHistValue;
					pTex->nBlue = 0;
				}
			}

			pDepthRow += g_depthMD.XRes();
			pTexRow += g_nTexMapX;
		}
	}

	// Create the OpenGL texture map
	glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, g_nTexMapX, g_nTexMapY, 0, GL_RGB, GL_UNSIGNED_BYTE, g_pTexMap);

	// Display the OpenGL texture map
	glColor4f(1,1,1,1);

	glBegin(GL_QUADS);

	int nXRes = g_depthMD.FullXRes();
	int nYRes = g_depthMD.FullYRes();

	// upper left
	glTexCoord2f(0, 0);
	glVertex2f(0, 0);
	// upper right
	glTexCoord2f((float)nXRes/(float)g_nTexMapX, 0);
	glVertex2f(GL_WIN_SIZE_X, 0);
	// bottom right
	glTexCoord2f((float)nXRes/(float)g_nTexMapX, (float)nYRes/(float)g_nTexMapY);
	glVertex2f(GL_WIN_SIZE_X, GL_WIN_SIZE_Y);
	// bottom left
	glTexCoord2f(0, (float)nYRes/(float)g_nTexMapY);
	glVertex2f(0, GL_WIN_SIZE_Y);

	glEnd();

	// Swap the OpenGL display buffers
	glutSwapBuffers();
}
int main(int argc, char* argv[]) { XnStatus rc; EnumerationErrors errors; rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors); if (rc == XN_STATUS_NO_NODE_PRESENT) { XnChar strError[1024]; errors.ToString(strError, 1024); printf("%s\n", strError); return (rc); } else if (rc != XN_STATUS_OK) { printf("Open failed: %s\n", xnGetStatusString(rc)); return (rc); } rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth); rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image); g_depth.GetMetaData(g_depthMD); g_image.GetMetaData(g_imageMD); // Hybrid mode isn't supported in this sample if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes()) { printf ("The device depth and image resolution must be equal!\n"); return 1; } // RGB is the only image format supported. if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24) { printf("The device image format must be RGB24\n"); return 1; } // Texture map init g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes()-1) / 512) + 1) * 512; g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes()-1) / 512) + 1) * 512; g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel)); // OpenGL init glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH); glutInitWindowSize(GL_WIN_SIZE_X, GL_WIN_SIZE_Y); glutCreateWindow ("OpenNI Simple Viewer"); glutFullScreen(); glutSetCursor(GLUT_CURSOR_NONE); glutKeyboardFunc(glutKeyboard); glutDisplayFunc(glutDisplay); glutIdleFunc(glutIdle); glDisable(GL_DEPTH_TEST); glEnable(GL_TEXTURE_2D); // Per frame code is in glutDisplay glutMainLoop(); return 0; }
// Returns the cached image metadata, or NULL when no image generator exists.
const ImageMetaData* getImageMetaData()
{
	if (!g_Image.IsValid())
		return NULL;
	return &g_ImageMD;
}
// Returns a pointer to the image generator, or NULL when it is not valid.
ImageGenerator* getImageGenerator()
{
	if (!g_Image.IsValid())
		return NULL;
	return &g_Image;
}
int main(int argc, char *argv[]) { //--------------------------------------------------------------------// //------------------------- SETUP REQUIRED NODES ---------------------// //--------------------------------------------------------------------// // Setup the command line parameters. setupParams(argc, argv); // Setup all the sockets. setupSockets(); // Setup the capture socket server for Mac. #if (XN_PLATFORM == XN_PLATFORM_MACOSX) if(_featureDepthMapCapture || _featureRGBCapture) { if(_useSockets) { g_AS3Network = network(); g_AS3Network.init(setupServer); } } #endif // Setup the status. XnStatus _status = XN_STATUS_OK; EnumerationErrors _errors; // Context Init and Add license. _status = _context.Init(); CHECK_RC(_status, "AS3OpenNI :: Initialize context"); _context.SetGlobalMirror(_mirror); XnChar vendor[XN_MAX_NAME_LENGTH]; XnChar license[XN_MAX_LICENSE_LENGTH]; _license.strVendor[XN_MAX_NAME_LENGTH] = strcmp(vendor, "PrimeSense"); _license.strKey[XN_MAX_LICENSE_LENGTH] = strcmp(license, "0KOIk2JeIBYClPWVnMoRKn5cdY4="); _status = _context.AddLicense(_license); CHECK_RC(_status, "AS3OpenNI :: Added license"); // Set it to VGA maps at 30 FPS _depthMode.nXRes = 640; _depthMode.nYRes = 480; _depthMode.nFPS = 30; // Depth map create. _status = _depth.Create(_context); CHECK_RC(_status, "AS3OpenNI :: Create depth generator"); _status = _depth.SetMapOutputMode(_depthMode); // Depth map create. _status = _image.Create(_context); CHECK_RC(_status, "AS3OpenNI :: Create image generator"); _status = _image.SetMapOutputMode(_depthMode); _status = _image.SetPixelFormat(XN_PIXEL_FORMAT_RGB24); // Create the hands generator. _status = _hands.Create(_context); CHECK_RC(_status, "AS3OpenNI :: Create hands generator"); _hands.SetSmoothing(0.1); // Create the gesture generator. _status = _gesture.Create(_context); CHECK_RC(_status, "AS3OpenNI :: Create gesture generator"); // Create user generator. 
_status = _userGenerator.Create(_context); CHECK_RC(_status, "AS3OpenNI :: Find user generator"); // Create and initialize point tracker _sessionManager = new XnVSessionManager(); _status = _sessionManager->Initialize(&_context, "Wave", "RaiseHand"); if (_status != XN_STATUS_OK) { printf("AS3OpenNI :: Couldn't initialize the Session Manager: %s\n", xnGetStatusString(_status)); CleanupExit(); } _sessionManager->RegisterSession(NULL, &SessionStart, &SessionEnd, &SessionProgress); // Start catching signals for quit indications CatchSignals(&_quit); //---------------------------------------------------------------// //------------------------- SETUP FEATURES ---------------------// //--------------------------------------------------------------// // Define the Wave and SinglePoint detectors. _waveDetector = new XnVWaveDetector(); // SinglePoint detector. if(_featureSinglePoint) _waveDetector->RegisterPointUpdate(NULL, &OnPointUpdate); // Feature Gesture. if(_featureGesture) { // Wave detector. _waveDetector->RegisterWave(NULL, &OnWave); // Push detector. _pushDetector = new XnVPushDetector(); _pushDetector->RegisterPush(NULL, &onPush); // Swipe detector. _swipeDetector = new XnVSwipeDetector(); _swipeDetector->RegisterSwipeUp(NULL, &Swipe_SwipeUp); _swipeDetector->RegisterSwipeDown(NULL, &Swipe_SwipeDown); _swipeDetector->RegisterSwipeLeft(NULL, &Swipe_SwipeLeft); _swipeDetector->RegisterSwipeRight(NULL, &Swipe_SwipeRight); // Steady detector. _steadyDetector = new XnVSteadyDetector(); _steadyDetector->RegisterSteady(NULL, &Steady_OnSteady); } // Feature Circle. if(_featureCircle) { // Circle detector. _circleDetector = new XnVCircleDetector(); _circleDetector->RegisterCircle(NULL, &CircleCB); _circleDetector->RegisterNoCircle(NULL, &NoCircleCB); _circleDetector->RegisterPrimaryPointCreate(NULL, &Circle_PrimaryCreate); _circleDetector->RegisterPrimaryPointDestroy(NULL, &Circle_PrimaryDestroy); } // Feature Slider. if(_featureSlider) { // Left/Right slider. 
_leftRightSlider = new XnVSelectableSlider1D(3, 0, AXIS_X); _leftRightSlider->RegisterActivate(NULL, &LeftRightSlider_OnActivate); _leftRightSlider->RegisterDeactivate(NULL, &LeftRightSlider_OnDeactivate); _leftRightSlider->RegisterPrimaryPointCreate(NULL, &LeftRightSlider_OnPrimaryCreate); _leftRightSlider->RegisterPrimaryPointDestroy(NULL, &LeftRightSlider_OnPrimaryDestroy); _leftRightSlider->RegisterValueChange(NULL, &LeftRightSlider_OnValueChange); _leftRightSlider->SetValueChangeOnOffAxis(false); // Up/Down slider. _upDownSlider = new XnVSelectableSlider1D(3, 0, AXIS_Y); _upDownSlider->RegisterActivate(NULL, &UpDownSlider_OnActivate); _upDownSlider->RegisterDeactivate(NULL, &UpDownSlider_OnDeactivate); _upDownSlider->RegisterPrimaryPointCreate(NULL, &UpDownSlider_OnPrimaryCreate); _upDownSlider->RegisterPrimaryPointDestroy(NULL, &UpDownSlider_OnPrimaryDestroy); _upDownSlider->RegisterValueChange(NULL, &UpDownSlider_OnValueChange); _upDownSlider->SetValueChangeOnOffAxis(false); // In/Out slider. _inOutSlider = new XnVSelectableSlider1D(3, 0, AXIS_Z); _inOutSlider->RegisterActivate(NULL, &InOutSlider_OnActivate); _inOutSlider->RegisterDeactivate(NULL, &InOutSlider_OnDeactivate); _inOutSlider->RegisterPrimaryPointCreate(NULL, &InOutSlider_OnPrimaryCreate); _inOutSlider->RegisterPrimaryPointDestroy(NULL, &InOutSlider_OnPrimaryDestroy); _inOutSlider->RegisterValueChange(NULL, &InOutSlider_OnValueChange); _inOutSlider->SetValueChangeOnOffAxis(false); } // Feature TrackPad. if(_featureTrackPad) { // Track Pad. if(trackpad_columns > 0 && trackpad_rows > 0) { _trackPad = new XnVSelectableSlider2D(trackpad_columns, trackpad_rows); } else { _trackPad = new XnVSelectableSlider2D(4, 9); } _trackPad->RegisterItemHover(NULL, &TrackPad_ItemHover); _trackPad->RegisterItemSelect(NULL, &TrackPad_ItemSelect); _trackPad->RegisterPrimaryPointCreate(NULL, &TrackPad_PrimaryCreate); _trackPad->RegisterPrimaryPointDestroy(NULL, &TrackPad_PrimaryDestroy); } // Feature User Tracking. 
if(_featureUserTracking) { // Setup user generator callbacks. XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks; if (!_userGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) { printf("AS3OpenNI :: Supplied user generator doesn't support skeleton\n"); return 1; } _userGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks); // Setup Skeleton detection. _userGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks); if (_userGenerator.GetSkeletonCap().NeedPoseForCalibration()) { _needPose = true; if (!_userGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)) { printf("AS3OpenNI :: Pose required, but not supported\n"); return 1; } _userGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks); _userGenerator.GetSkeletonCap().GetCalibrationPose(_strPose); } _userGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL); } // Create the broadcaster manager. _broadcaster = new XnVBroadcaster(); // Start generating all. _context.StartGeneratingAll(); // Set the frame rate. _status = xnFPSInit(&xnFPS, 180); CHECK_RC(_status, "AS3OpenNI :: FPS Init"); //----------------------------------------------------------------------// //------------------------- SETUP DISPLAY SUPPORT ---------------------// //--------------------------------------------------------------------// // Setup depth and image data. _depth.GetMetaData(_depthData); _image.GetMetaData(_imageData); // Hybrid mode isn't supported in this sample if (_imageData.FullXRes() != _depthData.FullXRes() || _imageData.FullYRes() != _depthData.FullYRes()) { printf ("AS3OpenNI :: The device depth and image resolution must be equal!\n"); return 1; } // RGB is the only image format supported. 
if (_imageData.PixelFormat() != XN_PIXEL_FORMAT_RGB24) { printf("AS3OpenNI :: The device image format must be RGB24\n"); return 1; } // Setup the view points to match between the depth and image maps. if(_snapPixels) _depth.GetAlternativeViewPointCap().SetViewPoint(_image); //-------------------------------------------------------------// //------------------------- MAIN LOOP ------------------------// //-----------------------------------------------------------// // Setup the capture socket server for PC. #if (XN_PLATFORM == XN_PLATFORM_WIN32) if(_featureDepthMapCapture || _featureRGBCapture || _featureUserTracking) { if(_useSockets) { g_AS3Network = network(); g_AS3Network.init(setupServer); } } #endif // Main loop while ((!_kbhit()) && (!_quit)) { xnFPSMarkFrame(&xnFPS); _context.WaitAndUpdateAll(); _sessionManager->Update(&_context); if(_featureDepthMapCapture) captureDepthMap(g_ucDepthBuffer); if(_featureRGBCapture) captureRGB(g_ucImageBuffer); #if (XN_PLATFORM == XN_PLATFORM_WIN32) if(_featureUserTracking) getPlayers(); #else if(_featureUserTracking) renderSkeleton(); #endif } CleanupExit(); }
//---------------------------------------------------- // 描画処理 //---------------------------------------------------- void glutDisplay (void){ xnFPSMarkFrame(&g_xnFPS); // FPSの計測開始? XnStatus rc = XN_STATUS_OK; // 更新されたノードを待つ(どれでもいい) rc = g_context.WaitAnyUpdateAll(); if (rc != XN_STATUS_OK){ printf("Read failed: %s\n", xnGetStatusString(rc)); printf("test\n"); return; } // イメージ・デプス・ユーザのデータを取得 g_image.GetMetaData(g_imageMD); g_depth.GetMetaData(g_depthMD); g_user.GetUserPixels(0, g_sceneMD); // カラー・デプスバッファをクリア glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // 設定 setDepthHistgram(g_depth, g_depthMD, g_pDepthHist); // ヒストグラムの計算・作成 setTexture(); // テクスチャ設定 // 描画 drawImage(); // イメージデータの描画 // デバッグモードの文字は描画の行列と隔離 glMatrixMode(GL_PROJECTION); // 射影変換の行列の設定 //glLoadIdentity(); // スタックのクリア glMatrixMode(GL_MODELVIEW); // モデルビュー変換の行列の設定 glLoadIdentity(); if(g_debugMode) glDebug(); // デバッグモード // 一度だけスクリーンショットをとる if(g_screenShotImageMode){ ostringstream fname; fname << OUT_IMAGE_PATH ;//出力ファイル名 std::string name = fname.str(); g_glScreenShot.screenshot(name.c_str(), 24); g_screenShotImageMode = !g_screenShotImageMode; // トグル } // 一度だけ深さデータを取得する if(g_screenShotDepthMode){ ofstream ofs(OUT_DEPTH_PATH); const XnDepthPixel* pDepth = g_depthMD.Data(); for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; y ++){ for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; x ++, pDepth ++){ if(*pDepth < 2000){ ofs << (int)((*pDepth) * 2) << ','; }else{ ofs << (*pDepth) << ','; } } ofs << endl; } g_screenShotDepthMode = !g_screenShotDepthMode; // トグル } // Swap the OpenGL display buffers glutSwapBuffers(); }
//---------------------------------------------------- // OpenNI関連の初期化 //---------------------------------------------------- void xnInit(void){ XnStatus rc; EnumerationErrors errors; rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors); if (rc == XN_STATUS_NO_NODE_PRESENT){ XnChar strError[1024]; errors.ToString(strError, 1024); printf("%s\n", strError); exit(1); }else if (rc != XN_STATUS_OK){ printf("Open failed: %s\n", xnGetStatusString(rc)); exit(1); } //playerInit(); rc = xnFPSInit(&g_xnFPS, 180); // FPSの初期化 //CHECK_RC(rc, "FPS Init"); // デプス・イメージ・ユーザジェネレータの作成 rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth); errorCheck(rc, "g_depth"); // エラーチェック rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image); errorCheck(rc, "g_image"); rc = g_context.FindExistingNode(XN_NODE_TYPE_USER, g_user); //rc = g_user.Create(g_context); errorCheck(rc, "g_user"); // ユーザー検出機能をサポートしているか確認 if (!g_user.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) { //throw std::runtime_error("ユーザー検出をサポートしてません"); cout << "ユーザー検出をサポートしてません" << endl; exit(1); } // レコーダーの設定 //rc = setRecorder(g_recorder, rc); // ユーザコールバックの登録 XnCallbackHandle userCallbacks; g_user.RegisterUserCallbacks(UserDetected, UserLost, NULL, userCallbacks); // デプス・イメージ・ユーザデータの取得 g_depth.GetMetaData(g_depthMD); g_image.GetMetaData(g_imageMD); g_user.GetUserPixels(0, g_sceneMD); // Hybrid mode isn't supported in this sample // イメージとデプスの大きさが違うとエラー if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes()){ printf ("The device depth and image resolution must be equal!\n"); exit(1); } // RGB is the only image format supported. 
// フォーマットの確認 if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24){ printf("The device image format must be RGB24\n"); exit(1); } // Texture map init // フルスクリーン画面の大きさ調整 g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes() - 1) / 512) + 1) * 512; // 大きさによって512の倍数に調整(1024) g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes() - 1) / 512) + 1) * 512; // 512 g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel)); // スクリーンの大きさ分の色情報の容量を確保 // 座標ポインタの初期化 g_pPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D)); // 座標を入れるポインタを作成 g_pBackTex = (XnRGB24Pixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnRGB24Pixel)); // 背景画像を入れるポインタを作成 g_pBackPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D)); // 背景座標を入れるポインタを作成 g_pBackDepth = (XnDepthPixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnDepthPixel)); // 背景座標を入れるポインタを作成 }
void mixRGB_Depth() { bool bShouldRun = true; int c; XnStatus nRetVal = XN_STATUS_OK; Context context; // Initialize context object nRetVal = context.Init(); // Check error code if (nRetVal) printf("Error: %s", xnGetStatusString(nRetVal)); context.SetGlobalMirror(true); //Create Depth generator node DepthGenerator depth; nRetVal = depth.Create(context); // Check error code if (nRetVal) printf("Error: %s", xnGetStatusString(nRetVal)); // Create an ImageGenetor node ImageGenerator image; nRetVal = image.Create(context); if (nRetVal) printf("Error: %s", xnGetStatusString(nRetVal)); // Sync the DepthGenerator with the ImageGenerator nRetVal = depth.GetAlternativeViewPointCap().SetViewPoint(image); if (nRetVal) printf("Error: %s", xnGetStatusString(nRetVal)); //Set it to VGA maps at 30 fps XnMapOutputMode mapMode; mapMode.nXRes = XN_VGA_X_RES; mapMode.nYRes = XN_VGA_Y_RES; mapMode.nFPS = 30; nRetVal = depth.SetMapOutputMode(mapMode); // Make it start generating data nRetVal = context.StartGeneratingAll(); if (nRetVal) printf("Error: %s", xnGetStatusString(nRetVal)); // Create an OpenCv matrix CvMat* depthMetersMat = cvCreateMat(480, 640, CV_16UC1); IplImage *kinectDepthImage; kinectDepthImage = cvCreateImage(cvSize(640,480), 16, 1); IplImage *rgbimg = cvCreateImageHeader(cvSize(640,480), 8,3); // Main loop while (bShouldRun) { //wait for new data to be available nRetVal = context.WaitOneUpdateAll(depth); if (nRetVal) { printf("Error: %s", xnGetStatusString(nRetVal)); continue; } //Take current depth map const XnDepthPixel* pDepthMap = depth.GetDepthMap(); for (int y=0; y<XN_VGA_Y_RES; y++) { for (int x=0; x<XN_VGA_X_RES; x++) { depthMetersMat->data.s[y*XN_VGA_X_RES+x]=10*pDepthMap[y*XN_VGA_X_RES+x]; } } cvGetImage(depthMetersMat, kinectDepthImage); //take current image const XnRGB24Pixel* pImage = image.GetRGB24ImageMap(); //process image data XnRGB24Pixel* ucpImage = const_cast<XnRGB24Pixel*>(pImage); cvSetData(rgbimg, ucpImage, 640*3); cvShowImage("RGB", 
kinectDepthImage); c = cvWaitKey(1); if (c == 27) bShouldRun = false; } cvReleaseImageHeader(&kinectDepthImage); context.Shutdown(); }
// Thin module wrapping an OpenNI device: initialization, synchronized
// colour/depth frame grabbing, depth-validity masking and depth smoothing.
namespace KinectSensor
{
//---------------------------------------------------------------------------
// Globals
//---------------------------------------------------------------------------
// OpenNI
#define SAMPLE_XML_PATH "Data\\SamplesConfig.xml"
Context deviceContext;                  // single shared OpenNI context
ImageGenerator colorImageGenerator;     // RGB stream node
DepthGenerator depthImageGenerator;     // depth stream node

//---------------------------------------------------------------------------
// Private Method Declarations
//---------------------------------------------------------------------------
void GetColorAndDepthImages(ColorImage& colorImage, DepthImage& depthImage);
void GetValidPixelMap(const DepthImage& depthImage, BinaryImage& validityImage);
void PreProcessDepthData(const DepthImage& rawDepth, DepthImage& processedDepth);

//---------------------------------------------------------------------------
// Public Methods
//---------------------------------------------------------------------------

// Initializes the OpenNI context from the XML file next to the executable,
// looks up the colour/depth nodes and validates resolution, pixel format and
// viewpoint-alignment support.
// Returns XN_STATUS_OK (0) on success; -1, 1 or an XnStatus code on failure.
int Init()
{
	XnStatus rc;

	//Make sure our image types are the same as the OpenNI image types.
	assert(sizeof(XnRGB24Pixel) == sizeof(ColorPixel));
	assert(sizeof(XnDepthPixel) == sizeof(DepthPixel));
	assert(sizeof(XnStatus) == sizeof(int));

	// Load OpenNI xml settings: "<exe dir>\" + SAMPLE_XML_PATH
	char filePath[255];
	int length = Util::Helpers::GetExeDirectory(filePath, sizeof(filePath));
	// NOTE(review): no overflow guard before this strcpy — assumes the exe
	// path plus SAMPLE_XML_PATH fits in 255 bytes; verify.
	filePath[length] = '\\';
	strcpy(&filePath[length+1], SAMPLE_XML_PATH);

	EnumerationErrors errors;
	rc = deviceContext.InitFromXmlFile(filePath, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		//One reason would be if Microsoft SDK is installed beside PrimeSense. Device manager should say PrimeSense instead of Microsoft Kinect.
		//XnChar strError[1024];
		//errors.ToString(strError, 1024);
		//LOGE("%s\n", strError);
		return -1;
	}
	else if (rc != XN_STATUS_OK)
	{
		fprintf(stderr, "%s\n", xnGetStatusString(rc));
		/*LOGE("Open failed: %s\n", xnGetStatusString(rc));*/
		return (rc);
	}

	// Retrieve colour and depth nodes
	// NOTE(review): both return codes are overwritten without being checked;
	// a missing node will only surface later. Consider checking each rc.
	rc = deviceContext.FindExistingNode(XN_NODE_TYPE_IMAGE, colorImageGenerator);
	rc = deviceContext.FindExistingNode(XN_NODE_TYPE_DEPTH, depthImageGenerator);

	// Set mirror mode to off (SetMirrorMode throws XnStatus on failure)
	SetMirrorMode(false);

	// Get a frame to perform checks on it
	ImageMetaData colorImageMetaData;
	DepthMetaData depthImageMetaData;
	depthImageGenerator.GetMetaData(depthImageMetaData);
	colorImageGenerator.GetMetaData(colorImageMetaData);

	// Hybrid mode isn't supported in this sample
	if (colorImageMetaData.FullXRes() != depthImageMetaData.FullXRes() || colorImageMetaData.FullYRes() != depthImageMetaData.FullYRes())
	{
		/*LOGE("The device depth and image resolution must be equal!\n");*/
		return 1;
	}

	// RGB is the only image format supported.
	if (colorImageMetaData.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		/*LOGE("The device image format must be RGB24\n");*/
		return 1;
	}

	// Need to make sure the automatic alignment of colour and depth images is supported.
	XnBool isSupported = depthImageGenerator.IsCapabilitySupported("AlternativeViewPoint");
	if(!isSupported)
	{
		/*LOGE("Cannot set AlternativeViewPoint!\n");*/
		return 1;
	}

	// Set it to VGA maps at 30 FPS
	/*XnMapOutputMode mapMode;
	mapMode.nXRes = XN_VGA_X_RES;
	mapMode.nYRes = XN_VGA_Y_RES;
	mapMode.nFPS = 60;
	rc = g_depth.SetMapOutputMode(mapMode);
	if(rc)
	{
		LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));
		return 1;
	}
	mapMode.nFPS = 30;
	rc = g_image.SetMapOutputMode(mapMode);
	if(rc)
	{
		LOGE("Failed to set image map mode: %s\n", xnGetStatusString(rc));
		return 1;
	}*/

	// Set automatic alignment of the colour and depth images.
	rc = depthImageGenerator.GetAlternativeViewPointCap().SetViewPoint(colorImageGenerator);
	if(rc)
	{
		/*LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));*/
		return 1;
	}

	return XN_STATUS_OK;
}

// Shuts the module down. Context shutdown is intentionally disabled.
void DeInit()
{
	//deviceContext.Shutdown();
	return;
}

//NOTE: Based on tests, data range seems to be valid between 511 (0x1FF) and 8191 (0x1FFF).
// Fills colorImage/depthImage with the next frame and validityImage with a
// mask of valid depth pixels, then smooths the depth in place.
// Throws XnStatus (read failure) or int 1 (invalid generators) — see callees.
void GetData(ColorImage& colorImage, DepthImage& depthImage, BinaryImage& validityImage)
{
	// NOTE(review): the raw/processed split below was disabled (the "!!"
	// lines); depthImage is now used for raw data, validity AND in-place
	// filtering, so rawDepthImage is currently unused.
	static DepthImage rawDepthImage;

	//Get the colour and depth data
	//!!GetColorAndDepthImages(colorImage, rawDepthImage);
	GetColorAndDepthImages(colorImage, depthImage);

	// Get an image which specifies the valid depth pixels
	//!!GetValidPixelMap(rawDepthImage, validityImage);
	GetValidPixelMap(depthImage, validityImage);

	// Process the depth data
	//!!PreProcessDepthData(rawDepthImage, depthImage);
	PreProcessDepthData(depthImage, depthImage);

	return;
}

// Returns the context's global mirror flag as a bool.
bool GetMirrorMode()
{
	return (deviceContext.GetGlobalMirror() == 0) ? false : true;
}

// Sets the context's global mirror flag; throws the XnStatus on failure.
void SetMirrorMode(bool mirrorMode)
{
	XnStatus rc = deviceContext.SetGlobalMirror(mirrorMode);
	if(rc != XN_STATUS_OK)
		throw rc;
	return;
}

//---------------------------------------------------------------------------
// Private Methods
//---------------------------------------------------------------------------

// Gets the colour and depth data from the Kinect sensor.
// Blocks until a frame arrives; copies the frame buffers into the fixed-size
// arrays inside colorImage/depthImage and stamps rows/cols to the maxima.
// Throws XnStatus on read failure, int 1 if either generator is invalid.
void GetColorAndDepthImages(ColorImage& colorImage, DepthImage& depthImage)
{
	XnStatus rc = XN_STATUS_OK;

	// Read a new frame, blocking operation
	rc = deviceContext.WaitAnyUpdateAll();
	if (rc != XN_STATUS_OK)
	{
		/*LOGE("Read failed: %s\n", xnGetStatusString(rc));*/
		throw rc;
	}

	// Get handles to new data
	// NOTE(review): static metadata objects make this function non-reentrant;
	// presumably single-threaded use — confirm.
	static ImageMetaData colorImageMetaData;
	static DepthMetaData depthImageMetaData;
	colorImageGenerator.GetMetaData(colorImageMetaData);
	depthImageGenerator.GetMetaData(depthImageMetaData);

	// Validate images
	if (!depthImageGenerator.IsValid() || !colorImageGenerator.IsValid())
	{
		/*LOGE("Error: Color or depth image is invalid.");*/
		throw 1;
	}

	// Fetch pointers to data
	const XnRGB24Pixel* pColorImage = colorImageMetaData.RGB24Data(); //g_depth.GetRGB24ImageMap()
	const XnDepthPixel* pDepthImage = depthImageMetaData.Data();// g_depth.GetDepthMap();

	// Copy data over to arrays; sizeof(...data) is the full fixed-size array,
	// so the device frame is assumed to match maxRows x maxCols exactly.
	memcpy(colorImage.data, pColorImage, sizeof(colorImage.data));
	memcpy(depthImage.data, pDepthImage, sizeof(depthImage.data));
	colorImage.rows = colorImage.maxRows;
	colorImage.cols = colorImage.maxCols;
	depthImage.rows = depthImage.maxRows;
	depthImage.cols = depthImage.maxCols;

	return;
}

// Gets a binary image of valid pixels in depth map. True corresponds to valid pixels.
// A pixel is valid iff its depth differs from XN_DEPTH_NO_SAMPLE_VALUE.
void GetValidPixelMap(const DepthImage& depthImage, BinaryImage& validityImage)
{
	const DepthImage::ArrayType& depthData = depthImage.data;
	BinaryImage::ArrayType& validityData = validityImage.data;

	// Loop through all pixels and mark whether they are valid or not
	for(size_t y=0; y<depthImage.rows; y++)
	{
		for(size_t x=0; x<depthImage.cols; x++)
		{
			validityData[y][x] = depthData[y][x] != XN_DEPTH_NO_SAMPLE_VALUE;
		}
	}
	validityImage.rows = depthImage.rows;
	validityImage.cols = depthImage.cols;

	return;
}

// Cleans up the depth data
// Median-filters rawDepth into processedDepth (5x5 box). The cv::Mat wrappers
// alias the callers' buffers, so no pixel data is copied here.
// NOTE(review): GetData passes the same object for both parameters, making
// this an in-place medianBlur — confirm the OpenCV build handles src==dst.
void PreProcessDepthData(const DepthImage& rawDepth, DepthImage& processedDepth)
{
	// Constants
	const int medianBoxSize = 5;

	// Create filtering matrices
	// quick opencv structs (does not copy! so ultra fast)
	cv::Mat input(rawDepth.maxRows, rawDepth.maxCols, CV_16U, (void*)rawDepth.data);
	cv::Mat output(processedDepth.maxRows, processedDepth.maxCols, CV_16U, processedDepth.data);

	// Fill all the invalid pixels
	{
		// simple blur
		//cv::blur(src, dest, cv::Size(3,3));

		// box filter
		//cv::boxFilter(src, dest, -1, cv::Size(5, 5), cv::Point(-1, -1), true);

		cv::medianBlur(input, output, medianBoxSize); // median blur
		//cv::Mat foo(dest);
		//cv::medianBlur(dest, foo, 5);

		//cv::GaussianBlur(src, dest, cv::Size(3,3), 3);
	}

	return;
}
}
int main(int argc, char* argv[]) { int nRetVal; XnStatus rc; EnumerationErrors errors; // get playback file if using if (argc > 2 && strcmp(argv[2], "true") == 0) { rc = g_context.Init(); rc = g_context.OpenFileRecording(RECORDING_PATH, g_player); CHECK_RC(rc, "Opening file"); rc = g_player.SetRepeat(TRUE); CHECK_RC(rc, "Turn repeat off"); } else { // get context from xml rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, g_scriptNode, &errors); } // error checking if (rc == XN_STATUS_NO_NODE_PRESENT) { XnChar strError[1024]; errors.ToString(strError, 1024); printf("%s\n", strError); return (rc); } CHECK_RC(rc, "Context initialization"); // get hand and image generator from context, check errors rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image); CHECK_RC(rc, "Get image generator"); rc = g_context.FindExistingNode(XN_NODE_TYPE_HANDS, g_hands); CHECK_RC(rc, "Get hand generator"); rc = g_context.FindExistingNode(XN_NODE_TYPE_GESTURE, g_gesture); CHECK_RC(rc, "Get gesture generator"); // create and register callbacks XnCallbackHandle h1, h2; g_gesture.RegisterGestureCallbacks(Gesture_Recognized, Gesture_Process, NULL, h1); CHECK_RC(rc, "Get register gesture callback"); g_hands.RegisterHandCallbacks(Hand_Create, Hand_Update, Hand_Destroy, NULL, h2); CHECK_RC(rc, "Get hand callback"); // add gestures to the generator rc = g_gesture.AddGesture("Click", NULL); CHECK_RC(rc, " add click gesture"); rc = g_gesture.AddGesture("RaiseHand", NULL); CHECK_RC(rc, "add raise gesture"); rc = g_gesture.AddGesture("Wave", NULL); CHECK_RC(rc, "add wave gesture"); g_image.GetMetaData(g_imageMD); // RGB is the only image format supported. 
if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24) { printf("The device image format must be RGB24\n"); return 1; } // if argument is set true, then record the session if (argc > 1 && strcmp(argv[1], "true") == 0) { std::cout << "recording to " << RECORDING_PATH << std::endl; // Create Recorder rc = recorder.Create(g_context); CHECK_RC(rc, "create recorder"); // Init it rc = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, RECORDING_PATH); CHECK_RC(rc, "init recorder"); // Add nodes to recording rc = recorder.AddNodeToRecording(g_image); CHECK_RC(rc, "add image node"); rc = recorder.AddNodeToRecording(g_hands); CHECK_RC(rc, "add hands node"); } // initialize and run program glutInit(&argc, argv); // GLUT initialization glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH ); // Display Mode glutInitWindowSize(WIDTH, HEIGHT); // set window size glutInitWindowPosition(GL_WIN_POSITION_X, GL_WIN_POSITION_Y); glutCreateWindow(TITLE); // create Window glutDisplayFunc(glutDisplay); // register Display Function glutIdleFunc(glutDisplay); // register Idle Function glutKeyboardFunc(glutKeyboard ); // register Keyboard Handler initialize(); glutMainLoop(); CleanUpExit(); return 0; }
int Init() { XnStatus rc; //Make sure our image types are the same as the OpenNI image types. assert(sizeof(XnRGB24Pixel) == sizeof(ColorPixel)); assert(sizeof(XnDepthPixel) == sizeof(DepthPixel)); assert(sizeof(XnStatus) == sizeof(int)); // Load OpenNI xml settings char filePath[255]; int length = Util::Helpers::GetExeDirectory(filePath, sizeof(filePath)); filePath[length] = '\\'; strcpy(&filePath[length+1], SAMPLE_XML_PATH); EnumerationErrors errors; rc = deviceContext.InitFromXmlFile(filePath, &errors); if (rc == XN_STATUS_NO_NODE_PRESENT) { //One reason would be if Microsoft SDK is installed beside PrimeSense. Device manager should say PrimeSense instead of Microsoft Kinect. //XnChar strError[1024]; //errors.ToString(strError, 1024); //LOGE("%s\n", strError); return -1; } else if (rc != XN_STATUS_OK) { fprintf(stderr, "%s\n", xnGetStatusString(rc)); /*LOGE("Open failed: %s\n", xnGetStatusString(rc));*/ return (rc); } // Retrieve colour and depth nodes rc = deviceContext.FindExistingNode(XN_NODE_TYPE_IMAGE, colorImageGenerator); rc = deviceContext.FindExistingNode(XN_NODE_TYPE_DEPTH, depthImageGenerator); // Set mirror mode to off SetMirrorMode(false); // Get a frame to perform checks on it ImageMetaData colorImageMetaData; DepthMetaData depthImageMetaData; depthImageGenerator.GetMetaData(depthImageMetaData); colorImageGenerator.GetMetaData(colorImageMetaData); // Hybrid mode isn't supported in this sample if (colorImageMetaData.FullXRes() != depthImageMetaData.FullXRes() || colorImageMetaData.FullYRes() != depthImageMetaData.FullYRes()) { /*LOGE("The device depth and image resolution must be equal!\n");*/ return 1; } // RGB is the only image format supported. if (colorImageMetaData.PixelFormat() != XN_PIXEL_FORMAT_RGB24) { /*LOGE("The device image format must be RGB24\n");*/ return 1; } // Need to make sure the automatic alignment of colour and depth images is supported. 
XnBool isSupported = depthImageGenerator.IsCapabilitySupported("AlternativeViewPoint"); if(!isSupported) { /*LOGE("Cannot set AlternativeViewPoint!\n");*/ return 1; } // Set it to VGA maps at 30 FPS /*XnMapOutputMode mapMode; mapMode.nXRes = XN_VGA_X_RES; mapMode.nYRes = XN_VGA_Y_RES; mapMode.nFPS = 60; rc = g_depth.SetMapOutputMode(mapMode); if(rc) { LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc)); return 1; } mapMode.nFPS = 30; rc = g_image.SetMapOutputMode(mapMode); if(rc) { LOGE("Failed to set image map mode: %s\n", xnGetStatusString(rc)); return 1; }*/ // Set automatic alignment of the colour and depth images. rc = depthImageGenerator.GetAlternativeViewPointCap().SetViewPoint(colorImageGenerator); if(rc) { /*LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));*/ return 1; } return XN_STATUS_OK; }
/*
 * Class:     org_OpenNI_Samples_Assistant_NativeMethods
 * Method:    initFromContext
 * Signature: (JZZ)I
 *
 * Adopts an XnContext created on the Java side and looks up the production
 * nodes selected by the boolean flags (user / depth / image / IR), caching
 * their initial metadata. Returns XN_STATUS_OK on success,
 * XN_STATUS_BAD_PARAM if no node was requested, and 1 if a requested node
 * is missing from the context.
 */
JNIEXPORT jint JNICALL Java_org_OpenNI_Samples_Assistant_NativeMethods_initFromContext
  (JNIEnv *env, jclass cls, jlong pContext, jboolean _hasUserGen, jboolean _hasDepthGen, jboolean _hasImageGen, jboolean _hasIrGen)
{
	LOGD("init_start");

	// Remember which generators the Java caller asked for.
	hasUserGen  = _hasUserGen;
	hasDepthGen = _hasDepthGen;
	hasImageGen = _hasImageGen;
	hasIrGen    = _hasIrGen;

	// Wrap the native context handle passed down from Java.
	mContext = new Context((XnContext*) pContext);

	// At least one generator must be requested.
	if (!(hasUserGen || hasDepthGen || hasImageGen || hasIrGen))
	{
		LOGD(" All booleans are false");
		return XN_STATUS_BAD_PARAM;
	}

	int status;

	// User generator: also grab the initial scene (user-pixel) metadata.
	if (hasUserGen)
	{
		status = mContext->FindExistingNode(XN_NODE_TYPE_USER, mUserGen);
		if (XN_STATUS_OK != status)
		{
			LOGD("No user node exists!");
			return 1;
		}
		mUserGen.GetUserPixels(0, sceneMD);
	}

	// Depth generator and its metadata.
	if (hasDepthGen)
	{
		status = mContext->FindExistingNode(XN_NODE_TYPE_DEPTH, mDepthGen);
		if (XN_STATUS_OK != status)
		{
			LOGD("No depth node exists! Check your XML.");
			return 1;
		}
		mDepthGen.GetMetaData(depthMD);
	}

	// Image (RGB) generator and its metadata.
	if (hasImageGen)
	{
		status = mContext->FindExistingNode(XN_NODE_TYPE_IMAGE, mImageGen);
		if (XN_STATUS_OK != status)
		{
			LOGD("No image node exists! Check your XML.");
			return 1;
		}
		mImageGen.GetMetaData(imageMD);
	}

	// IR generator and its metadata.
	if (hasIrGen)
	{
		status = mContext->FindExistingNode(XN_NODE_TYPE_IR, mIrGen);
		if (XN_STATUS_OK != status)
		{
			LOGD("No IR node exists! Check your XML");
			return 1;
		}
		LOGD("Ir Node created");
		mIrGen.GetMetaData(irMD);
	}

	initGraphics();

	LOGD("init_end");
	return XN_STATUS_OK;
}