ImageProvider::ImageProvider(Context* pContext) : AbstractImageStreamProvider(pContext) {
    CALL_XN( pContext->FindExistingNode(XN_NODE_TYPE_IMAGE, m_imageGen) );

    ImageMetaData md;
    m_imageGen.GetMetaData(md);
    CHECK_ERROR(md.PixelFormat() == XN_PIXEL_FORMAT_RGB24, "This camera's data format is not supported.");
    CHECK_ERROR(md.XRes() == 640 && md.YRes() == 480, "This camera's resolution is not supported.");
}
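// A minimal sketch (not part of the original snippet) of how the mode validated above could be
// requested explicitly before streaming starts. It assumes OpenNI 1.x, an xn::ImageGenerator
// named m_imageGen, and reuses the snippet's own CALL_XN error-checking macro.
XnMapOutputMode mode;
mode.nXRes = 640;
mode.nYRes = 480;
mode.nFPS  = 30;
CALL_XN( m_imageGen.SetMapOutputMode(mode) );                 // request VGA resolution
CALL_XN( m_imageGen.SetPixelFormat(XN_PIXEL_FORMAT_RGB24) );  // request RGB24 pixels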
// -----------------------------------------------------------------------------------------------------
// convertImageRGB
// -----------------------------------------------------------------------------------------------------
void convertImageRGB(const XnRGB24Pixel* pImageMap, IplImage* pImgRGB) {
    // Convert the OpenNI RGB24 buffer into a 24-bit, 3-channel IplImage.
    // OpenCV expects BGR channel order, hence the swapped assignments below.
    // Uses the destination image's own dimensions (assumed to match the OpenNI map, with no row padding).
    const unsigned int nPixels = pImgRGB->width * pImgRGB->height;
    for (unsigned int i = 0; i < nPixels; i++) {
        pImgRGB->imageData[3*i+0] = pImageMap[i].nBlue;
        pImgRGB->imageData[3*i+1] = pImageMap[i].nGreen;
        pImgRGB->imageData[3*i+2] = pImageMap[i].nRed;
    }
}
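// Hedged usage sketch (not from the original source): grabs the current image metadata from a
// hypothetical global xn::ImageGenerator g_image, converts it with convertImageRGB, and displays it.
// Assumes the stream is RGB24.
xn::ImageMetaData md;
g_image.GetMetaData(md);
IplImage* pImgRGB = cvCreateImage(cvSize(md.XRes(), md.YRes()), IPL_DEPTH_8U, 3);
convertImageRGB(md.RGB24Data(), pImgRGB);   // fills the IplImage in BGR order
cvShowImage("RGB", pImgRGB);
cvWaitKey(1);
cvReleaseImage(&pImgRGB);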
XnStatus prepare(char useScene, char useDepth, char useImage, char useIr, char useHistogram) {
    // TODO: handle possible failures from the generator calls below.
    if (useDepth) {
        mDepthGen.GetMetaData(depthMD);
        nXRes = depthMD.XRes();
        nYRes = depthMD.YRes();
        pDepth = depthMD.Data();
        if (useHistogram) {
            calcHist();
            // rewind the pointer
            pDepth = depthMD.Data();
        }
    }
    if (useScene) {
        mUserGen.GetUserPixels(0, sceneMD);
        nXRes = sceneMD.XRes();
        nYRes = sceneMD.YRes();
        pLabels = sceneMD.Data();
    }
    if (useImage) {
        mImageGen.GetMetaData(imageMD);
        nXRes = imageMD.XRes();
        nYRes = imageMD.YRes();
        pRGB = imageMD.RGB24Data();
        // TODO: histogram support for the image map?
    }
    if (useIr) {
        mIrGen.GetMetaData(irMD);
        nXRes = irMD.XRes();
        nYRes = irMD.YRes();
        pIR = irMD.Data();
        // TODO: histogram support for the IR map?
    }
    return XN_STATUS_OK;   // the function is declared XnStatus, so return a status explicitly
}
void takePhoto() {
    static int index = 1;
    char fname[256] = {0,};
    sprintf(fname, "kinect%03d.txt", index++);

    g_depth.GetMetaData(g_depthMD);
    g_image.GetMetaData(g_imageMD);

    int const nx = g_depthMD.XRes();
    int const ny = g_depthMD.YRes();
    assert(nx == g_imageMD.XRes());
    assert(ny == g_imageMD.YRes());

    const XnDepthPixel* pDepth = g_depthMD.Data();
    const XnUInt8*      pImage = g_imageMD.Data();

    FILE* file = fopen(fname, "wb");
    fprintf(file, "%d\n%d\n\n", nx, ny);

    for (int y = 0, di = 0, ri = 0, gi = 1, bi = 2; y < ny; y++) {
        for (int x = 0; x < nx; x++, di++, ri += 3, gi += 3, bi += 3) {
            int const r = pImage[ri];
            int const g = pImage[gi];
            int const b = pImage[bi];
            int const d = pDepth[di];
            assert(r >= 0); assert(g >= 0); assert(b >= 0); assert(d >= 0);
            assert(r <= 0xFF); assert(g <= 0xFF); assert(b <= 0xFF); assert(d <= 0xFFFF);
            fprintf(file, "%3d %3d %3d %5d\n", r, g, b, d);
        }
        fprintf(file, "\n");
    }

    fflush(file);
    fclose(file);
}
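// Hedged sketch (not from the original) of reading one frame back from the text format written by
// takePhoto(): an "nx ny" header followed by "r g b d" rows. The filename is illustrative only.
FILE* in = fopen("kinect001.txt", "r");
int nx = 0, ny = 0;
fscanf(in, "%d %d", &nx, &ny);
for (int y = 0; y < ny; y++) {
    for (int x = 0; x < nx; x++) {
        int r, g, b, d;
        fscanf(in, "%d %d %d %d", &r, &g, &b, &d);
        // ... use the RGB-D sample here ...
    }
}
fclose(in);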
void transformImageMD(Mat FrameImage, ImageMetaData& imageMD) {
    // Copy an OpenCV BGR frame into the writable RGB24 map of the OpenNI image metadata.
    RGB24Map& imageMap = imageMD.WritableRGB24Map();
    for (XnUInt32 y = 0; y < imageMD.YRes(); y++) {
        for (XnUInt32 x = 0; x < imageMD.XRes(); x++) {
            XnRGB24Pixel imagePixel;
            imagePixel.nBlue  = FrameImage.at<Vec3b>(y, x)[0];
            imagePixel.nGreen = FrameImage.at<Vec3b>(y, x)[1];
            imagePixel.nRed   = FrameImage.at<Vec3b>(y, x)[2];
            imageMap(x, y) = imagePixel;
        }
    }
}
void captureRGB(unsigned char* g_ucImageBuffer) {
    ImageMetaData imd;
    _image.GetMetaData(imd);

    XnUInt16 g_nXRes = imd.XRes();
    XnUInt16 g_nYRes = imd.YRes();
    const XnRGB24Pixel* pImageMap = _image.GetRGB24ImageMap();

    // Pack the RGB24 map into a 4-byte-per-pixel BGRA buffer (alpha left at 0).
    for (unsigned int nY = 0; nY < g_nYRes; nY++) {
        for (unsigned int nX = 0; nX < g_nXRes; nX++) {
            const unsigned int i = nY * g_nXRes + nX;
            g_ucImageBuffer[4*i + 0] = pImageMap[i].nBlue;
            g_ucImageBuffer[4*i + 1] = pImageMap[i].nGreen;
            g_ucImageBuffer[4*i + 2] = pImageMap[i].nRed;
            g_ucImageBuffer[4*i + 3] = 0x00;
        }
    }
}
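// Hedged usage sketch (not from the original): wraps the BGRA buffer filled by captureRGB() in an
// OpenCV header without copying. Assumes a 640x480 stream; the buffer is heap-allocated because a
// 1.2 MB stack array would be risky.
std::vector<unsigned char> buffer(640 * 480 * 4);
captureRGB(&buffer[0]);
cv::Mat bgra(480, 640, CV_8UC4, &buffer[0]);   // shares memory with 'buffer'
cv::Mat bgr;
cv::cvtColor(bgra, bgr, CV_BGRA2BGR);
cv::imshow("RGB", bgr);
cv::waitKey(1);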
void glutDisplay (void) {
    XnStatus rc = XN_STATUS_OK;

    // Read a new frame
    rc = g_context.WaitAnyUpdateAll();
    if (rc != XN_STATUS_OK) {
        printf("Read failed: %s\n", xnGetStatusString(rc));
        return;
    }

    g_depth.GetMetaData(g_depthMD);
    g_image.GetMetaData(g_imageMD);

    const XnDepthPixel* pDepth = g_depthMD.Data();
    const XnUInt8* pImage = g_imageMD.Data();

    unsigned int nImageScale = GL_WIN_SIZE_X / g_depthMD.FullXRes(); // Copied from SimpleViewer

    // Clear the OpenGL buffers
    glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Setup the OpenGL viewpoint
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();
    glOrtho(0, GL_WIN_SIZE_X, GL_WIN_SIZE_Y, 0, -1.0, 1.0);

    // Calculate the accumulative histogram (the yellow display...)
    xnOSMemSet(g_pDepthHist, 0, MAX_DEPTH*sizeof(float));

    unsigned int nNumberOfPoints = 0;
    for (XnUInt y = 0; y < g_depthMD.YRes(); ++y) {
        for (XnUInt x = 0; x < g_depthMD.XRes(); ++x, ++pDepth) {
            if (*pDepth != 0) {
                g_pDepthHist[*pDepth]++;
                nNumberOfPoints++;
            }
        }
    }
    for (int nIndex = 1; nIndex < MAX_DEPTH; nIndex++) {
        g_pDepthHist[nIndex] += g_pDepthHist[nIndex-1];
    }
    if (nNumberOfPoints) {
        for (int nIndex = 1; nIndex < MAX_DEPTH; nIndex++) {
            g_pDepthHist[nIndex] = (unsigned int)(256 * (1.0f - (g_pDepthHist[nIndex] / nNumberOfPoints)));
        }
    }

    xnOSMemSet(g_pTexMap, 0, g_nTexMapX*g_nTexMapY*sizeof(XnRGB24Pixel));

    // check if we need to draw image frame to texture
    if (g_nViewState == DISPLAY_MODE_OVERLAY || g_nViewState == DISPLAY_MODE_IMAGE) {
        const XnRGB24Pixel* pImageRow = g_imageMD.RGB24Data();
        XnRGB24Pixel* pTexRow = g_pTexMap + g_imageMD.YOffset() * g_nTexMapX;

        for (XnUInt y = 0; y < g_imageMD.YRes(); ++y) {
            const XnRGB24Pixel* pImage = pImageRow;
            XnRGB24Pixel* pTex = pTexRow + g_imageMD.XOffset();

            for (XnUInt x = 0; x < g_imageMD.XRes(); ++x, ++pImage, ++pTex) {
                *pTex = *pImage;
            }

            pImageRow += g_imageMD.XRes();
            pTexRow += g_nTexMapX;
        }
    }

    // check if we need to draw depth frame to texture
    if (g_nViewState == DISPLAY_MODE_OVERLAY || g_nViewState == DISPLAY_MODE_DEPTH) {
        const XnDepthPixel* pDepthRow = g_depthMD.Data();
        XnRGB24Pixel* pTexRow = g_pTexMap + g_depthMD.YOffset() * g_nTexMapX;

        for (XnUInt y = 0; y < g_depthMD.YRes(); ++y) {
            const XnDepthPixel* pDepth = pDepthRow;
            XnRGB24Pixel* pTex = pTexRow + g_depthMD.XOffset();

            for (XnUInt x = 0; x < g_depthMD.XRes(); ++x, ++pDepth, ++pTex) {
                if (*pDepth != 0) {
                    int nHistValue = g_pDepthHist[*pDepth];
                    pTex->nRed = nHistValue;
                    pTex->nGreen = nHistValue;
                    pTex->nBlue = 0;
                }
            }

            pDepthRow += g_depthMD.XRes();
            pTexRow += g_nTexMapX;
        }
    }

    // Create the OpenGL texture map
    glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, g_nTexMapX, g_nTexMapY, 0, GL_RGB, GL_UNSIGNED_BYTE, g_pTexMap);

    // Display the OpenGL texture map
    glColor4f(1,1,1,1);

    glBegin(GL_QUADS);

    int nXRes = g_depthMD.FullXRes();
    int nYRes = g_depthMD.FullYRes();

    // upper left
    glTexCoord2f(0, 0);
    glVertex2f(0, 0);
    // upper right
    glTexCoord2f((float)nXRes/(float)g_nTexMapX, 0);
    glVertex2f(GL_WIN_SIZE_X, 0);
    // bottom right
    glTexCoord2f((float)nXRes/(float)g_nTexMapX, (float)nYRes/(float)g_nTexMapY);
    glVertex2f(GL_WIN_SIZE_X, GL_WIN_SIZE_Y);
    // bottom left
    glTexCoord2f(0, (float)nYRes/(float)g_nTexMapY);
    glVertex2f(0, GL_WIN_SIZE_Y);

    glEnd();

    // Swap the OpenGL display buffers
    glutSwapBuffers();
}
int main(int argc, char* argv[]) {
    markerSize.width = -1;
    markerSize.height = -1;

    EnumerationErrors errors;
    switch (XnStatus rc = niContext.InitFromXmlFile(KINECT_CONFIG_FILENAME, &errors)) {
    case XN_STATUS_OK:
        break;
    case XN_STATUS_NO_NODE_PRESENT:
        XnChar strError[1024];
        errors.ToString(strError, 1024);
        printf("%s\n", strError);
        return rc;
    default:
        printf("Open failed: %s\n", xnGetStatusString(rc));
        return rc;
    }

    capture = new Camera(CAPTURE_SIZE, CAMERA_PARAMS_FILENAME);
    RegistrationParams = scaleParams(capture->getParameters(), double(REGISTRATION_SIZE.width)/double(CAPTURE_SIZE.width));
    osg_init(calcProjection(RegistrationParams, capture->getDistortion(), REGISTRATION_SIZE));

    loadKinectParams(KINECT_PARAMS_FILENAME, &kinectParams, &kinectDistort);
    kinectDistort = 0;
    kinectParams->data.db[2] = 320.0;
    kinectParams->data.db[5] = 240.0;

    niContext.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
    niContext.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
    g_depth.GetMirrorCap().SetMirror(false);
    g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);

    kinectReg = new RegistrationOPIRA(new OCVSurf());
    kinectReg->addResizedMarker(MARKER_FILENAME, 400);

    //physics
    m_world = new KCRPhysicsWorld();
    ground_grid = new float[19200];
    for (int i = 0; i < 19200; i++) { ground_grid[i] = 0; }
#ifdef SIM_PARTICLES
    voxel_grid = new float[1200];
    for (int i = 0; i < 1200; i++) { voxel_grid[i] = 0; }
#endif

    //controls
    KeyboardController *kc = new KeyboardController(m_world);
    XboxController *xc = new XboxController(m_world);

    loadKinectTransform(KINECT_TRANSFORM_FILENAME);

#ifdef USE_ARMM_VRPN
    m_Connection = new vrpn_Connection_IP();
    ARMM_server = new ARMM_Communicator(m_Connection);
    cout << "Created VRPN server." << endl;
#endif
#ifdef USE_SKIN_SEGMENTATION
    //Skin color look up
    _HandRegion.LoadSkinColorProbTable();
#endif
#ifdef USE_OPTICAL_FLOW
    prev_colourIm = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
#endif

    /////////////////////////////////////////////Main Loop////////////////////////////////////////////////
    while (running) {
        XnStatus rc = niContext.WaitAnyUpdateAll();
        if (rc != XN_STATUS_OK) {
            printf("Read failed: %s\n", xnGetStatusString(rc));
            return rc;
        }

        g_depth.GetMetaData(niDepthMD);
        g_image.GetMetaData(niImageMD);

        colourIm = cvCreateImage(cvSize(niImageMD.XRes(), niImageMD.YRes()), IPL_DEPTH_8U, 3);
        memcpy(colourIm->imageData, niImageMD.Data(), colourIm->imageSize);
        cvCvtColor(colourIm, colourIm, CV_RGB2BGR);
        cvFlip(colourIm, colourIm, 1);

        depthIm = cvCreateImage(cvSize(niDepthMD.XRes(), niDepthMD.YRes()), IPL_DEPTH_16U, 1);
        transDepth160 = cvCreateImage(cvSize(MESH_SIZE.width, MESH_SIZE.height), IPL_DEPTH_32F, 1);
        transDepth320 = cvCreateImage(cvSize(CV_OP_SIZE.width, CV_OP_SIZE.height), IPL_DEPTH_32F, 1);
        memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);
        cvShowImage("Kinect View", colourIm);

        IplImage *arImage = capture->getFrame();
        cvWaitKey(1);

        kc->check_input();
        xc->check_input();

#ifdef USE_OPTICAL_FLOW
        if (RunOnce) SceneOpticalFlowLK(prev_colourIm, colourIm);
#endif

        if (kinectTransform) { // kinect transform as cvmat* for use
            if (counter >= 4) {
                inpaintDepth(&niDepthMD, true);
                memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);
                TransformDepth(depthIm, transDepth160, MARKER_DEPTH, MESH_SIZE);
                GenerateTrimeshGroundFromDepth(transDepth160, MARKER_DEPTH); /*Trimesh generation*/
                m_world->updateTrimeshRefitTree(ground_grid);   //opencl?
                osg_UpdateHeightfieldTrimesh(ground_grid);      //opencl?
#ifdef SIM_PARTICLES
                /*World spheres simulation*/
//              GenerateVoxelFromDepth(depthIm, MARKER_DEPTH);
//              m_world->updateWorldSphereTransform(voxel_grid);
//              osgUpdateWorldSphereTransform(voxel_grid);
#endif
                counter = 0;
            } else {
#ifdef USE_SKIN_SEGMENTATION
                /*Skin color segmentation*/
                // maybe reduce resolution first, and cut off depth, to make processing faster
                TransformDepth(depthIm, transDepth320, MARKER_DEPTH, CV_OP_SIZE);
                IplImage* depthTmp = cvCreateImage(cvSize(CV_OP_SIZE.width, CV_OP_SIZE.height), IPL_DEPTH_8U, 1);
                IplImage* colourImResized = cvCreateImage(cvSize(CV_OP_SIZE.width, CV_OP_SIZE.height), IPL_DEPTH_8U, 3);
                gray = cvCreateImage(cvSize(colourImResized->width, colourImResized->height), IPL_DEPTH_8U, 1);
                hand_region = cvCreateImage(cvSize(colourImResized->width, colourImResized->height), IPL_DEPTH_8U, 1);
                IplImage* colourIm640 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);

//              cvCmpS(transDepth, 0, depthTmp, CV_CMP_LT);     //dst must be 8U
                cvThreshold(transDepth320, depthTmp, 1, 255, CV_THRESH_BINARY_INV); //thresh at 1cm above marker
                cvResize(colourIm, colourImResized, CV_INTER_NN);                   //use nearest neighbor interpolation
//              removeNoise( depthTmp, 100 );
//              cvSet(colourImResized, cvScalar(0), depthTmp);
                cvShowImage("Marker Thresh", colourImResized);
                cvResize(colourImResized, colourIm640, CV_INTER_NN);
                cvShowImage("Marker Thresh 640", colourIm640);

                cvCopyImage(_HandRegion.GetHandRegion(colourImResized, gray), hand_region);
//              removeNoise( hand_region, 20 );
                cvThreshold(hand_region, depthTmp, 0, 255, CV_THRESH_BINARY_INV);
//              removeNoise( depthTmp, 100 );
//              cvShowImage ("depthTmp", depthTmp);
//              cvShowImage ("hand_region", hand_region);
                cvSet(colourImResized, cvScalar(0), depthTmp);
//              cvShowImage ("Skin Color", colourImResized);
//              cvDilate(colourImResized,colourImResized,CV_SHAPE_RECT,1);
//              cvErode(colourImResized,colourImResized,CV_SHAPE_RECT,1);
//              cvMorphologyEx(colourImResized,colourImResized,NULL,CV_SHAPE_RECT,CV_MOP_OPEN,1);
                cvResize(colourImResized, colourIm640, CV_INTER_NN);
                cvShowImage("Color Skin Color 640", colourIm640);

                cvReleaseImage(&depthTmp);
                cvReleaseImage(&colourImResized);
                cvReleaseImage(&colourIm640);
#endif
#ifdef USE_PARTICLES
/*
                IplImage* depthTmp1 = cvCreateImage(cvSize(depthIm->width, depthIm->height), IPL_DEPTH_32F, 1);
                IplImage* depthTmp2 = cvCreateImage(cvSize(depthIm->width, depthIm->height), IPL_DEPTH_8U, 1);
                cvConvertScale(depthIm, depthTmp1, 1);
//              cvThreshold(depthTmp1, depthTmp2, MARKER_DEPTH-5, 255, CV_THRESH_TOZERO_INV); //thresh 5mm above marker
                cvThreshold(depthTmp1, depthTmp2, MARKER_DEPTH-10, 255, CV_THRESH_TOZERO);
                //cvCmpS(depthTmp1, MARKER_DEPTH, depthTmp2, CV_CMP_GT);
//              cvShowImage("DEPTH640", depthTmp2);
//              IplImage* colourTmp = cvCreateImage(cvSize(colourIm->width, colourIm->height), IPL_DEPTH_8U, 3);
//              cvCopyImage(colourIm, colourTmp);
//              cvSet(colourTmp, cvScalar(0), depthTmp2);
//              cvShowImage ("Color640", colourTmp);
                cvSet(depthTmp1, cvScalar(0), depthTmp2);
                cvShowImage("DEPTH640_32F", depthTmp1);
*/
                inpaintDepth(&niDepthMD, true);
//              TransformDepth(depthTmp1, transDepth, MARKER_DEPTH);
                memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);
                TransformDepth(depthIm, transDepth, MARKER_DEPTH);

                IplImage* depthTmp3 = cvCreateImage(cvSize(TRACKING_SIZE.width, TRACKING_SIZE.height), IPL_DEPTH_8U, 1);
                cvThreshold(transDepth, depthTmp3, 0.5, 255, CV_THRESH_BINARY_INV);
//              cvThreshold(transDepth, depthTmp3, 0, 255, CV_THRESH_TOZERO);
//              cvConvertScale(transDepth, depthTmp3, 1);
                cvShowImage("DEPTH160", transDepth);

                IplImage* colourImResized = cvCreateImage(cvSize(TRACKING_SIZE.width, TRACKING_SIZE.height), IPL_DEPTH_8U, 3);
                cvResize(colourIm, colourImResized, CV_INTER_NN); //use nearest neighbor interpolation
                cvSet(colourImResized, cvScalar(0), depthTmp3);
                cvShowImage("Color160", colourImResized);
//              cvReleaseImage(&depthTmp1);
//              cvReleaseImage(&depthTmp2);
//              cvReleaseImage(&colourTmp);
                cvReleaseImage(&depthTmp3);
                cvReleaseImage(&colourImResized);
/*
                IplImage* depthTmp1 = cvCreateImage(cvSize(depthIm->width, depthIm->height), IPL_DEPTH_32F, 1);
                IplImage* depthTmp2 = cvCreateImage(cvSize(depthIm->width, depthIm->height), IPL_DEPTH_8U, 1);
                cvConvertScale(depthIm, depthTmp1, 1);
//              cvThreshold(depthTmp1, depthTmp2, MARKER_DEPTH-5, 255, CV_THRESH_TOZERO_INV); //thresh 5mm above marker
                cvThreshold(depthTmp1, depthTmp2, MARKER_DEPTH-5, 255, CV_THRESH_TOZERO);
                //cvCmpS(depthTmp1, MARKER_DEPTH, depthTmp2, CV_CMP_GT);
                cvShowImage("TMP_DEPTH", depthTmp2);
                IplImage* colourTmp = cvCreateImage(cvSize(colourIm->width, colourIm->height), IPL_DEPTH_8U, 3);
                cvCopyImage(colourIm, colourTmp);
                cvSet(colourTmp, cvScalar(0), depthTmp2);
                cvShowImage ("Basic Thresh", colourTmp);
                cvReleaseImage(&depthTmp1);
                cvReleaseImage(&depthTmp2);
*/
#endif
                counter++;
//          } else {
//              counter++;
            }

            //do hand pose recognition
            m_world->Update();
            RenderScene(arImage, capture);
        }

#ifdef USE_ARMM_VRPN
        ARMM_server->mainloop();
        m_Connection->mainloop();
#endif
#ifdef USE_OPTICAL_FLOW
        if (!RunOnce) RunOnce = true;
        cvCopyImage(colourIm, prev_colourIm);
        memcpy(prev_colourIm->imageData, niImageMD.Data(), prev_colourIm->imageSize);
        cvCvtColor(prev_colourIm, prev_colourIm, CV_RGB2BGR);
#endif

        cvReleaseImage(&arImage);
        cvReleaseImage(&depthIm);
        cvReleaseImage(&colourIm);
        cvReleaseImage(&transDepth320);
        cvReleaseImage(&transDepth160);
#ifdef USE_SKIN_SEGMENTATION
        cvReleaseImage(&gray);
        cvReleaseImage(&hand_region);
#endif
    }

    cvReleaseImage(&prev_colourIm);
    osg_uninit();
    delete m_world;
    delete kinectReg;
    cvReleaseMat(&RegistrationParams);
    delete kc;
    return 0;
}
//////////////////// Entry point ////////////////////
int main(int argc, char* argv[]) {
    depthmask_for_mesh = cvCreateImage(MESH_SIZE, IPL_DEPTH_8U, 1);
    markerSize.width = -1;
    markerSize.height = -1;

    //init OpenNI
    EnumerationErrors errors;
    switch (XnStatus rc = niContext.InitFromXmlFile(KINECT_CONFIG_FILENAME, &errors)) {
    case XN_STATUS_OK:
        break;
    case XN_STATUS_NO_NODE_PRESENT:
        XnChar strError[1024];
        errors.ToString(strError, 1024);
        printf("%s\n", strError);
        return rc;
    default:
        printf("Open failed: %s\n", xnGetStatusString(rc));
        return rc;
    }

    //set camera parameter
    capture = new Camera(0, CAPTURE_SIZE, CAMERA_PARAMS_FILENAME);
    RegistrationParams = scaleParams(capture->getParameters(), double(REGISTRATION_SIZE.width)/double(CAPTURE_SIZE.width));

    //init parameter for rendering
    osg_init(calcProjection(RegistrationParams, capture->getDistortion(), REGISTRATION_SIZE));

    //for Kinect view
    loadKinectParams(KINECT_PARAMS_FILENAME, &kinectParams, &kinectDistort);
    kinectDistort = 0;
    kinectParams->data.db[2] = 320.0;
    kinectParams->data.db[5] = 240.0;

    //setting kinect context
    niContext.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
    niContext.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
    g_depth.GetMirrorCap().SetMirror(false);
    g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);

    //registration
    kinectReg = new RegistrationOPIRA(new OCVSurf());
    kinectReg->addResizedMarker(MARKER_FILENAME, 400);

    //physics
    m_world = new bt_ARMM_world();
    ground_grid = new float[GRID_SIZE];
    for (int i = 0; i < GRID_SIZE; i++) { ground_grid[i] = 0; }
#ifdef SIM_PARTICLES
    voxel_grid = new float[1200];
    for (int i = 0; i < 1200; i++) { voxel_grid[i] = 0; }
#endif

    //controls
    KeyboardController *kc = new KeyboardController(m_world);
    XboxController *xc = new XboxController(m_world);

    loadKinectTransform(KINECT_TRANSFORM_FILENAME);

#ifdef USE_ARMM_VRPN
    //----->Server part
    m_Connection = new vrpn_Connection_IP();
    ARMM_server = new ARMM_Communicator(m_Connection);
    //Open the imager server and set up channel zero to send our data.
    //if ( (ARMM_img_server = new vrpn_Imager_Server("ARMM_Image", m_Connection, MESH_SIZE.width, MESH_SIZE.height)) == NULL) {
    //    fprintf(stderr, "Could not open imager server\n");
    //    return -1;
    //}
    //if ( (channel_id = ARMM_img_server->add_channel("Grid")) == -1) {
    //    fprintf(stderr, "Could not add channel\n");
    //    return -1;
    //}
    ARMM_server->SetObjectsData(&(m_world->Objects_Body));
    ARMM_server->SetHandsData(&(m_world->HandObjectsArray));
    cout << "Created VRPN server." << endl;
    //<-----
#ifdef USE_ARMM_VRPN_RECEIVER
    //----->Receiver part
    ARMM_sever_receiver = new vrpn_Tracker_Remote(ARMM_CLIENT_IP);
    ARMM_sever_receiver->register_change_handler(NULL, handle_object);
    //<-----
#endif
#endif

#ifdef USE_SKIN_SEGMENTATION
    //Skin color look up
    _HandRegion.LoadSkinColorProbTable();
#endif
#ifdef USE_OPTICAL_FLOW
    prev_gray = cvCreateImage(cvSize(OPFLOW_SIZE.width, OPFLOW_SIZE.height), IPL_DEPTH_8U, 1);
    curr_gray = cvCreateImage(cvSize(OPFLOW_SIZE.width, OPFLOW_SIZE.height), IPL_DEPTH_8U, 1);
    flow_capture = new FlowCapture();
    flow_capture->Init();
#endif

    /////////////////////////////////////////////Main Loop////////////////////////////////////////////////
    while (running) {
        //start kinect
        XnStatus rc = niContext.WaitAnyUpdateAll();
        if (rc != XN_STATUS_OK) {
            printf("Read failed: %s\n", xnGetStatusString(rc));
            return rc;
        }

        //get image and depth data from Kinect
        g_depth.GetMetaData(niDepthMD);
        g_image.GetMetaData(niImageMD);

        colourIm = cvCreateImage(cvSize(niImageMD.XRes(), niImageMD.YRes()), IPL_DEPTH_8U, 3);
        memcpy(colourIm->imageData, niImageMD.Data(), colourIm->imageSize);
        cvCvtColor(colourIm, colourIm, CV_RGB2BGR);
        cvFlip(colourIm, colourIm, 1);

        depthIm = cvCreateImage(cvSize(niDepthMD.XRes(), niDepthMD.YRes()), IPL_DEPTH_16U, 1);
        transDepth160 = cvCreateImage(cvSize(MESH_SIZE.width, MESH_SIZE.height), IPL_DEPTH_32F, 1);
        transDepth320 = cvCreateImage(cvSize(SKIN_SEGM_SIZE.width, SKIN_SEGM_SIZE.height), IPL_DEPTH_32F, 1);
        transColor320 = cvCreateImage(cvSize(SKIN_SEGM_SIZE.width, SKIN_SEGM_SIZE.height), IPL_DEPTH_8U, 3);
        memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);
        //cvCircle(colourIm, cvPoint(marker_origin.x,marker_origin.y), 5, CV_BLUE, 3);
        cvShowImage("Kinect View", colourIm);

        IplImage *arImage = capture->getFrame();
        cvWaitKey(1);

        //check input device
        input_key = kc->check_input();
#ifdef USE_ARMM_VRPN_RECEIVER
        if (pass_key != 0) {
            kc->check_input(pass_key);
            pass_key = 0;
        }
#endif
        xc->check_input();

        if (kinectTransform) { // kinect transform as cvmat* for use
            if (counter >= SIM_FREQUENCY) {
#ifdef UPDATE_TRIMESH
                inpaintDepth(&niDepthMD, true);
                memcpy(depthIm->imageData, niDepthMD.Data(), depthIm->imageSize);
                TransformImage(depthIm, transDepth160, MARKER_DEPTH, MESH_SIZE, true);
                GenerateTrimeshGroundFromDepth(transDepth160, MARKER_DEPTH); /*Trimesh generation*/
                m_world->updateTrimeshRefitTree(ground_grid);   //opencl?
                osg_UpdateHeightfieldTrimesh(ground_grid);      //opencl?
#endif
#ifdef SIM_PARTICLES
                /*World spheres simulation*/
//              GenerateVoxelFromDepth(depthIm, MARKER_DEPTH);
//              m_world->updateWorldSphereTransform(voxel_grid);
//              osgUpdateWorldSphereTransform(voxel_grid);
#endif
                counter = 0;
            } else {
#ifdef USE_SKIN_SEGMENTATION
                /*Skin color segmentation*/
                // maybe reduce resolution first, and cut off depth, to make processing faster
                // (2)Sphere representation
                FindHands(depthIm, colourIm);
                UpdateAllHands();
#endif
#ifdef USE_PARTICLES
                //XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif
                counter++;
            }

            //do hand pose recognition
            m_world->Update();
            //(B)normal client only rendering
            RenderScene(arImage, capture);
        }
        // TickCountAverageEnd();

#ifdef USE_ARMM_VRPN
        //Send Car position+orientation
        ARMM_server->mainloop();
#ifdef USE_ARMM_VRPN_RECEIVER
        ARMM_sever_receiver->mainloop();
#endif
        ////Copy depth info
        //for (int i = 0; i < GRID_SIZE; i++) {
        //    ARMM_img_buffer[i] = ground_grid[i];
        //}
        //Send depth grid info
        //ARMM_img_server->send_begin_frame(0, MESH_SIZE.width-1, 0, MESH_SIZE.height-1);
        //ARMM_img_server->mainloop();
        //int nRowsPerRegion = ((int) vrpn_IMAGER_MAX_REGIONf32) / MESH_SIZE.width;
        //for (int y = 0; y < MESH_SIZE.height; y += nRowsPerRegion) {
        //    ARMM_img_server->send_region_using_base_pointer(channel_id, 0, MESH_SIZE.width-1, y,
        //        min(MESH_SIZE.width, y+nRowsPerRegion)-1, ARMM_img_buffer, 1, MESH_SIZE.width, MESH_SIZE.height);
        //    ARMM_img_server->mainloop();
        //}
        //ARMM_img_server->send_end_frame(0, MESH_SIZE.width-1, 0, MESH_SIZE.height-1);
        //ARMM_img_server->mainloop();
        //Exec data transmission
        m_Connection->mainloop();
#endif

#ifdef USE_OPTICAL_FLOW
        if (!RunOnce) RunOnce = true;
        cvCopyImage(curr_gray, prev_gray);
#endif

        cvReleaseImage(&arImage);
        cvReleaseImage(&depthIm);
        cvReleaseImage(&colourIm);
        cvReleaseImage(&transDepth160);
#ifdef USE_SKIN_SEGMENTATION
        cvReleaseImage(&transDepth320);
        cvReleaseImage(&transColor320);
#endif
    }

#ifdef USE_OPTICAL_FLOW
    cvReleaseImage(&prev_gray);
    cvReleaseImage(&curr_gray);
#endif

    //memory release
    osg_uninit();
    delete m_world;
    delete kinectReg;
    cvReleaseMat(&RegistrationParams);
    delete kc;
    delete xc;
    return 0;
}
int main(int argc, char* argv[]) {
    EnumerationErrors errors;
    //rc = context.Init();
    rc = context.InitFromXmlFile(strPathToXML, &errors);
    if (rc == XN_STATUS_NO_NODE_PRESENT) {
        XnChar strError[1024];
        errors.ToString(strError, 1024);
        printf("%s\n", strError);
        return (rc);
    } else if (rc != XN_STATUS_OK) {
        printf("Open failed: %s\n", xnGetStatusString(rc));
        return (rc);
    }

    /* UNCOMMENT TO GET FILE READING
    //rc = context.OpenFileRecording(strInputFile);
    //CHECK_RC(rc, "Open input file");
    //rc = context.FindExistingNode(XN_NODE_TYPE_PLAYER, player);
    //CHECK_RC(rc, "Get player node");
    */

    rc = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
    CHECK_RC(rc, "Find depth generator");
    rc = context.FindExistingNode(XN_NODE_TYPE_IMAGE, image);
    CHECK_RC(rc, "Find image generator");

    depth.GetMetaData(depthMD);
    image.GetMetaData(imageMD);

    //rc = player.SetRepeat(FALSE);
    XN_IS_STATUS_OK(rc);
    //rc = player.GetNumFrames(image.GetName(), nNumFrames);
    //CHECK_RC(rc, "Get player number of frames");
    //printf("%d\n",nNumFrames);
    //rc = player.GetNumFrames(depth.GetName(), nNumFrames);
    //CHECK_RC(rc, "Get player number of frames");
    //printf("%d\n",nNumFrames);

    // Hybrid mode isn't supported
    if (imageMD.FullXRes() != depthMD.FullXRes() || imageMD.FullYRes() != depthMD.FullYRes()) {
        printf("The device depth and image resolution must be equal!\n");
        return 1;
    }
    // RGB is the only image format supported.
    if (imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24) {
        printf("The device image format must be RGB24\n");
        return 1;
    }

    avi = cvCreateVideoWriter(strOutputFile, 0, 30, cvSize(640, 480), TRUE);

    depthMetersMat = cvCreateMat(480, 640, CV_16UC1);
    kinectDepthImage = cvCreateImage(cvSize(640, 480), 16, 1);
    depthMetersMat2 = cvCreateMat(480, 640, CV_16UC1);
    kinectDepthImage2 = cvCreateImage(cvSize(640, 480), 16, 1);

    colorArr[0] = cv::Mat(imageMD.YRes(), imageMD.XRes(), CV_8U);
    colorArr[1] = cv::Mat(imageMD.YRes(), imageMD.XRes(), CV_8U);
    colorArr[2] = cv::Mat(imageMD.YRes(), imageMD.XRes(), CV_8U);

    //prepare_for_face_detection();

    int b;
    int g;
    int r;

    while ((rc = image.WaitAndUpdateData()) != XN_STATUS_EOF && (rc = depth.WaitAndUpdateData()) != XN_STATUS_EOF) {
        if (rc != XN_STATUS_OK) {
            printf("Read failed: %s\n", xnGetStatusString(rc));
            break;
        }
        depth.GetMetaData(depthMD);
        image.GetMetaData(imageMD);

        //XnUInt32 a;
        //a = g_imageMD.FPS;
        printf("%d\n", imageMD.FrameID());
        //a = g_depthMD.DataSize();
        //printf("%d\n",a);

        pDepth = depthMD.Data();
        pImageRow = imageMD.RGB24Data();

        // Split the RGB24 map into per-channel planes and copy the depth map into the CvMats.
        for (unsigned int y = 0; y < imageMD.YRes(); y++) {
            pPixel = pImageRow;
            uchar* Bptr = colorArr[0].ptr<uchar>(y);
            uchar* Gptr = colorArr[1].ptr<uchar>(y);
            uchar* Rptr = colorArr[2].ptr<uchar>(y);
            for (unsigned int x = 0; x < imageMD.XRes(); ++x, ++pPixel) {
                Bptr[x] = pPixel->nBlue;
                Gptr[x] = pPixel->nGreen;
                Rptr[x] = pPixel->nRed;
                depthMetersMat->data.s[y * XN_VGA_X_RES + x] = 7 * pDepth[y * XN_VGA_X_RES + x];
                depthMetersMat2->data.s[y * XN_VGA_X_RES + x] = pDepth[y * XN_VGA_X_RES + x];
            }
            pImageRow += imageMD.XRes();
        }
        cv::merge(colorArr, 3, colorImage);
        iplImage = colorImage;

        //cvThreshold(depthMetersMat2, depthMetersMat2, 150, 1500, THRESH_BINARY);
        cvGetImage(depthMetersMat, kinectDepthImage);
        cvGetImage(depthMetersMat2, kinectDepthImage2);

        depthImage = Bw2Image(kinectDepthImage2);
        printf("1. Middle pixel is %u millimeters away\n", depthImage[240][320]);

        rgbImage = RgbImage(&iplImage);

        // we only want to see up to 2000 mm
        int THRESH = 2000;
        for (unsigned int y = 0; y < imageMD.YRes(); y++) {
            for (unsigned int x = 0; x < imageMD.XRes(); ++x) {
                if (depthImage[y][x] >= THRESH) {
                    depthImage[y][x] = 0;
                } else {
                    float tmp = depthImage[y][x];
                    tmp = tmp / THRESH * (65536) * (-1) + 65536;
                    depthImage[y][x] = (unsigned int)tmp;
                }
            }
        }

        // THE PART ABOUT FILTERING COLOURS IN HSV TO SEE ONLY A SPECIFIC ONE,
        // FOLLOWED BY A FEW MORPHOLOGICAL OPERATIONS TO MAKE IT LOOK BETTER
        IplImage* imgHSV = cvCreateImage(cvGetSize(&iplImage), 8, 3);
        cvCvtColor(&iplImage, imgHSV, CV_BGR2HSV);
        imgThreshed = cvCreateImage(cvGetSize(&iplImage), 8, 1);
        //cvInRangeS(imgHSV, cvScalar(100, 60, 80), cvScalar(110, 255, 255), imgThreshed);  // BLUE
        cvInRangeS(imgHSV, cvScalar(29, 95, 95), cvScalar(35, 255, 255), imgThreshed);      // YELLOW
        //cvInRangeS(imgHSV, cvScalar(29, 60, 60), cvScalar(35, 255, 255), imgThreshed);    // YELLOW DARK
        //cvInRangeS(imgHSV, cvScalar(150, 70, 70), cvScalar(160, 255, 255), imgThreshed);  // PINK
        //cvInRangeS(imgHSV, cvScalar(40, 76, 76), cvScalar(70, 255, 255), imgThreshed);    // GREEN

        IplConvKernel* kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);
        //cvDilate(imgThreshed,imgThreshed,kernel);
        //cvErode(imgThreshed,imgThreshed,kernel);
        Mat mat = Mat(imgThreshed);
        blur(Mat(imgThreshed), mat, cvSize(3, 3));
        imgThreshed = &IplImage(mat);
        //cvInRangeS(imgThreshed,cvScalar(100),cvScalar(255),imgThreshed);
        //cvErode(imgThreshed,imgThreshed,kernel);
        cvDilate(imgThreshed, imgThreshed, kernel);
        cvDilate(imgThreshed, imgThreshed, kernel);
        cvErode(imgThreshed, imgThreshed, kernel);
        cvErode(imgThreshed, imgThreshed, kernel);
        mat = Mat(imgThreshed);
        blur(Mat(imgThreshed), mat, cvSize(6, 6));
        imgThreshed = &IplImage(mat);
        cvInRangeS(imgThreshed, cvScalar(100), cvScalar(255), imgThreshed);
        cvReleaseImage(&imgHSV);

        BwImage threshed = BwImage(imgThreshed);

        if (initialize == true) {
            normalizeReferenceFace();
            int currentID = 0;
            for (int y = 30; y < 480; y++) {
                for (int x = 30; x < 640; x++) {
                    bool g2g = true;
                    //printf("%d %d %d\n",ID, y,x);
                    if (threshed[y][x] != 0) {
                        for (int ID2 = 0; ID2 < nbOfPoints; ID2++) {
                            if ((abs(markers[ID2].y - y) < proximityLimit) && (abs(markers[ID2].x - x) < proximityLimit)) {
                                g2g = false;
                            }
                        }
                        if (currentID >= nbOfPoints || g2g == false) {
                            break;
                        }
                        markers[currentID].y = y;
                        markers[currentID].x = x;
                        currentID++;
                        printf("WHITE PIXEL INITIALIZED %d: %d %d\n", currentID, x, y);
                    }
                }
            }

            if (isDebugConf == true || currentID == nbOfMarkers) {
                printf("%d PIXELS INITIALIZED\n", currentID);
                initialize = false;
                //printf("%d,%d\n", currentID, nbOfPoints);
                //return 0;
            } else {
                printf("WAITING FOR %d PIXELS TO APPEAR, %d SO FAR\n", nbOfMarkers, currentID);
                continue;
            }

            // FIND TOP RIGHT AND CHIN PIXEL
            int refPixID = 0;
            int chinPixID = 0;
            for (int i = 0; i < nbOfMarkers; i++) {
                if ((markers[i].x + markers[i].y) * (markers[i].x + markers[i].y) <
                    (markers[refPixID].x + markers[refPixID].y) * (markers[refPixID].x + markers[refPixID].y)) {
                    refPixID = i;
                }
                if (markers[i].y > markers[chinPixID].y) {
                    chinPixID = i;
                }
            }
            float width = (markers[1].x - markers[0].x) * 2;
            float height = abs(markers[1].y - markers[0].y);

            // WE GOT WIDTH & HEIGHT OF THE FACE, LET'S ADJUST THE POINTS
            // SET 0 to REF, SET 1 to CHIN
            MyPoint tmp = MyPoint(markers[refPixID].x, markers[refPixID].y);
            markers[refPixID].x = markers[0].x;
            markers[refPixID].y = markers[0].y;
            markers[0].x = tmp.x;
            markers[0].y = tmp.y;
            tmp = MyPoint(markers[chinPixID].x, markers[chinPixID].y);
            markers[chinPixID].x = markers[1].x;
            markers[chinPixID].y = markers[1].y;
            markers[1].x = tmp.x;
            markers[1].y = tmp.y;

            // REST OF THE POINTS
            for (int i = 2; i < nbOfPoints; i++) {
                int cost = 0;
                int lowestCost = 0;
                int closestPixID = -1;
                for (int j = 2; j < nbOfMarkers; j++) {
                    cost = (markers[j].x - points[i].x * width) * (markers[j].x - points[i].x * width)
                         + (markers[j].y - points[i].y * height) * (markers[j].y - points[i].y * height);
                    // accept the first candidate unconditionally, then keep the cheapest one
                    if (closestPixID == -1 || cost < lowestCost) {
                        lowestCost = cost;
                        closestPixID = j;
                    }
                }
                if (closestPixID == -1) {
                    //printf("SOMETHING IS QUITE WRONG HERE, CHECK THIS\n");
                    break;
                }
                // swap the closest marker into slot i
                tmp.x = markers[i].x;
                tmp.y = markers[i].y;
                markers[i].x = markers[closestPixID].x;
                markers[i].y = markers[closestPixID].y;
                markers[closestPixID].x = tmp.x;
                markers[closestPixID].y = tmp.y;
            }
        }

        for (int currentPixelID = 0; currentPixelID < nbOfMarkers; currentPixelID++) {
            if (markers[currentPixelID].x == 0) {
                continue;
            }
            if (threshed[markers[currentPixelID].y][markers[currentPixelID].x] < 128) {
                printf("PIXEL %d LOST\n", currentPixelID);
                // search an expanding neighbourhood for the lost marker
                for (int neighbSize = 2; neighbSize < maxNeighbSize; neighbSize = neighbSize + 2) {
                    int x1 = markers[currentPixelID].x - neighbSize / 2;
                    if (x1 < intoDepthX(0)) { x1 = (int)intoDepthX(0); }
                    int y1 = (int)(markers[currentPixelID].y - neighbSize / 2);
                    if (y1 < intoDepthY(0)) { y1 = intoDepthY(0); }
                    int y2 = markers[currentPixelID].y + neighbSize / 2;
                    if (y2 > intoDepthY(480)) { y2 = intoDepthY(480); }
                    int x2 = markers[currentPixelID].x + neighbSize / 2;
                    if (x2 > intoDepthX(640)) { x2 = (int)intoDepthX(640); }

                    bool found = false;
                    for (int y = y1; y < y2; y++) {
                        for (int x = x1; x < x2; x++) {
                            bool g2g = true;
                            if (threshed[y][x] > 128) {
                                for (int ID2 = 0; ID2 < nbOfMarkers; ID2++) {
                                    if (currentPixelID == ID2) continue;
                                    if ((abs(markers[ID2].y - y) < proximityLimit) && (abs(markers[ID2].x - x) < proximityLimit)) {
                                        g2g = false;
                                        break;
                                    }
                                }
                                if (g2g) {
                                    markers[currentPixelID].x = x;
                                    markers[currentPixelID].y = y;
                                    found = true;
                                    printf("Pixel %d, FOUND\n", currentPixelID);
                                    break;
                                }
                            }
                        }
                        if (found == true) { break; }
                    }
                    if (found == true) { break; }
                }
            }
            paintMarkerOnBoth(markers[currentPixelID]);
        }

        faceImage = cvCreateImage(cvGetSize(&iplImage), 8, 1);
        paintFace();

        // normal kinect depth
        cvShowImage("Depth_Kinect", kinectDepthImage);
        // depth within 80 - 200 mm, normalized
        cvShowImage("Depth_Kinect_2", kinectDepthImage2);
        // rgb with tracking points
        cvShowImage("RGB_Kinect", &iplImage);
        // colour detector
        cvShowImage("RGB_Threshed", imgThreshed);
        // attempt to draw a face
        cvShowImage("Face Image", faceImage);
        cvWaitKey(50); // wait 50 ms

        if (avi == NULL) {
            printf("AVI writer is NULL\n");
        }
        //cvWriteFrame (avi, &iplImage);
    }

    //cvReleaseImageHeader(kinectDepthImage);
    cvReleaseVideoWriter(&avi);
    //cvReleaseHaarClassifierCascade( &cascade );
    context.Shutdown();
    return 0;
}