int readOpenNiColorAndDepth(VideoStream &color, VideoStream &depth,
                            VideoFrameRef &colorFrame, VideoFrameRef &depthFrame) {
#if USE_WAITFORANYSTREAM_TO_GRAB
 #warning "Please turn #define USE_WAITFORANYSTREAM_TO_GRAB 0"
 #warning "This is a bad idea taken from OpenNI2/Samples/SimpleViewer; we don't just want to update 'any' frame, we really want to snap BOTH, and do that sequentially"
 #warning "It is better to grab them sequentially instead of waiting for any stream a couple of times"
  openni::VideoStream** m_streams = new openni::VideoStream*[2];
  m_streams[0] = &depth;
  m_streams[1] = &color;

  unsigned char haveDepth = 0, haveColor = 0;
  int changedIndex;
  while ((!haveDepth) || (!haveColor)) {
    openni::Status rc = openni::OpenNI::waitForAnyStream(m_streams, 2, &changedIndex);
    if (rc != openni::STATUS_OK) {
      fprintf(stderr, "Wait failed\n");
      return 0;
    }
    switch (changedIndex) {
      case 0: depth.readFrame(&depthFrame); haveDepth = 1; break;
      case 1: color.readFrame(&colorFrame); haveColor = 1; break;
      default: printf("Error in wait\n"); return 0;
    }
  }
  delete [] m_streams; // array delete to match new[]
  return 1;
#else
  // Using serial frame grabbing
  readFrameBlocking(depth, depthFrame, MAX_TRIES_FOR_EACH_FRAME); // depth.readFrame(&depthFrame);
  readFrameBlocking(color, colorFrame, MAX_TRIES_FOR_EACH_FRAME); // color.readFrame(&colorFrame);

  if (depthFrame.isValid() && colorFrame.isValid()) {
    return 1;
  }
  fprintf(stderr, "Depth and color frames are wrong!\n");
#endif
  return 0;
}
void XtionDepthDriverImpl::onNewFrame(VideoStream &stream) {
  VideoFrameRef ref;
  stream.readFrame(&ref);
  _lastCaptured = XtionDepthImage(ref.getData(), ref.getDataSize(),
                                  ref.getWidth(), ref.getHeight(), 0, this);
}
virtual void onNewFrame(VideoStream& stream) {
  ros::Time ts = ros::Time::now();

  VideoFrameRef frame;
  stream.readFrame(&frame);

  sensor_msgs::Image::Ptr img(new sensor_msgs::Image);
  sensor_msgs::CameraInfo::Ptr info(new sensor_msgs::CameraInfo);

  double scale = double(frame.getWidth()) / double(1280);

  info->header.stamp = ts;
  info->header.frame_id = frame_id_;
  info->width = frame.getWidth();
  info->height = frame.getHeight();
  info->K.assign(0);
  info->K[0] = 1050.0 * scale;
  info->K[4] = 1050.0 * scale;
  info->K[2] = frame.getWidth() / 2.0 - 0.5;
  info->K[5] = frame.getHeight() / 2.0 - 0.5;
  info->P.assign(0);
  info->P[0] = 1050.0 * scale;
  info->P[5] = 1050.0 * scale;
  info->P[2] = frame.getWidth() / 2.0 - 0.5;
  info->P[6] = frame.getHeight() / 2.0 - 0.5;

  switch (frame.getVideoMode().getPixelFormat()) {
    case PIXEL_FORMAT_GRAY8:
      img->encoding = sensor_msgs::image_encodings::MONO8;
      break;
    case PIXEL_FORMAT_GRAY16:
      img->encoding = sensor_msgs::image_encodings::MONO16;
      break;
    case PIXEL_FORMAT_YUV422:
      img->encoding = sensor_msgs::image_encodings::YUV422;
      break;
    case PIXEL_FORMAT_RGB888:
      img->encoding = sensor_msgs::image_encodings::RGB8;
      break;
    case PIXEL_FORMAT_SHIFT_9_2:
    case PIXEL_FORMAT_DEPTH_1_MM:
      img->encoding = sensor_msgs::image_encodings::TYPE_16UC1;
      break;
    default:
      ROS_WARN("Unknown OpenNI pixel format!");
      break;
  }

  img->header.stamp = ts;
  img->header.frame_id = frame_id_;
  img->height = frame.getHeight();
  img->width = frame.getWidth();
  img->step = frame.getStrideInBytes();
  img->data.resize(frame.getDataSize());
  std::copy(static_cast<const uint8_t*>(frame.getData()),
            static_cast<const uint8_t*>(frame.getData()) + frame.getDataSize(),
            img->data.begin());

  publish(img, info);
}
const short unsigned int *wb_kinect_get_range_image_mm(WbDeviceTag tag) {
  if (kinectenable) {
    int i;
    int changedStreamDummy;
    VideoStream* pStream = &depth;
    rc = OpenNI::waitForAnyStream(&pStream, 1, &changedStreamDummy, SAMPLE_READ_WAIT_TIMEOUT);
    if (rc != STATUS_OK) {
      printf("Wait failed! (timeout is %d ms)\n%s\n", SAMPLE_READ_WAIT_TIMEOUT, OpenNI::getExtendedError());
      return NULL;
    }

    rc = depth.readFrame(&frame);
    if (rc != STATUS_OK) {
      printf("Read failed!\n%s\n", OpenNI::getExtendedError());
      return NULL;
    }

    if (frame.getVideoMode().getPixelFormat() != PIXEL_FORMAT_DEPTH_1_MM &&
        frame.getVideoMode().getPixelFormat() != PIXEL_FORMAT_DEPTH_100_UM) {
      printf("Unexpected frame format\n");
      return NULL;
    }

    DepthPixel* pDepth = (DepthPixel*)frame.getData();
    height_pixels = frame.getHeight();
    width_pixels = frame.getWidth();

    if (width_pixels > 160) {
      // Fix blind column
      for (i = 0; i < height_pixels; i++) {
        pDepth[i*width_pixels + 0] = pDepth[i*width_pixels + 3];
        pDepth[i*width_pixels + 1] = pDepth[i*width_pixels + 3];
        pDepth[i*width_pixels + 2] = pDepth[i*width_pixels + 3];
      }
    } else {
      for (i = 0; i < height_pixels; i++) {
        pDepth[i*width_pixels + 0] = pDepth[i*width_pixels + 1];
      }
    }
    return pDepth;
  }

  // kinect not enabled
  fprintf(stderr, "2. Please enable the Kinect before using wb_kinect_get_range_image()\n");
  return NULL;
}
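The function above relies on file-scope state that is set up elsewhere (presumably by the corresponding enable routine). A minimal sketch of what those declarations might look like; the names are taken from the function body, but the exact types and values are assumptions:

// Hypothetical file-scope declarations assumed by wb_kinect_get_range_image_mm();
// only the names come from the snippet above, the types and values are guesses.
#define SAMPLE_READ_WAIT_TIMEOUT 2000          // ms to wait for a new depth frame
static int kinectenable = 0;                   // set to 1 once the Kinect has been opened and started
static openni::Status rc;
static openni::VideoStream depth;              // created and started by the enable routine
static openni::VideoFrameRef frame;
static int height_pixels = 0, width_pixels = 0;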
int readFrameBlocking(VideoStream &stream, VideoFrameRef &frame, unsigned int max_tries) {
  unsigned int tries_for_frame = 0;
  while (tries_for_frame < max_tries) {
    stream.readFrame(&frame);
    if (frame.isValid()) {
      return 1;
    }
    ++tries_for_frame;
  }

  if (!frame.isValid()) {
    fprintf(stderr, "Could not get a valid frame even after %u tries\n", max_tries);
    return 0;
  }
  return (tries_for_frame < max_tries);
}
int main(int argc, char** argv) {
  Status rc = OpenNI::initialize();
  if (rc != STATUS_OK) {
    std::cout << "Initialize failed: " << OpenNI::getExtendedError() << std::endl;
    return 1;
  }

  Device device;
  rc = device.open(ANY_DEVICE);
  if (rc != STATUS_OK) {
    std::cout << "Couldn't open device: " << OpenNI::getExtendedError() << std::endl;
    return 2;
  }

  VideoStream stream;
  if (device.getSensorInfo(currentSensor) != NULL) {
    rc = stream.create(device, currentSensor);
    if (rc != STATUS_OK) {
      std::cout << "Couldn't create stream: " << OpenNI::getExtendedError() << std::endl;
      return 3;
    }
  }

  rc = stream.start();
  if (rc != STATUS_OK) {
    std::cout << "Couldn't start the stream: " << OpenNI::getExtendedError() << std::endl;
    return 4;
  }

  VideoFrameRef frame;

  // now open the video writer
  Size S = Size(stream.getVideoMode().getResolutionX(),
                stream.getVideoMode().getResolutionY());
  VideoWriter outputVideo;
  std::string fileName = "out.avi";
  outputVideo.open(fileName, -1, stream.getVideoMode().getFps(), S,
                   currentSensor == SENSOR_COLOR ? true : false);
  if (!outputVideo.isOpened()) {
    std::cout << "Could not open the output video for write: " << fileName << std::endl;
    return -1;
  }

  while (waitKey(50) == -1) {
    int changedStreamDummy;
    VideoStream* pStream = &stream;
    rc = OpenNI::waitForAnyStream(&pStream, 1, &changedStreamDummy, SAMPLE_READ_WAIT_TIMEOUT);
    if (rc != STATUS_OK) {
      std::cout << "Wait failed! (timeout is " << SAMPLE_READ_WAIT_TIMEOUT << "ms): "
                << OpenNI::getExtendedError() << std::endl;
      continue;
    }

    rc = stream.readFrame(&frame);
    if (rc != STATUS_OK) {
      std::cout << "Read failed: " << OpenNI::getExtendedError() << std::endl;
      continue;
    }

    Mat image;
    switch (currentSensor) {
      case SENSOR_COLOR:
        image = Mat(frame.getHeight(), frame.getWidth(), CV_8UC3, (void*)frame.getData());
        break;
      case SENSOR_DEPTH:
        image = Mat(frame.getHeight(), frame.getWidth(), DataType<DepthPixel>::type, (void*)frame.getData());
        break;
      case SENSOR_IR:
        image = Mat(frame.getHeight(), frame.getWidth(), CV_8U, (void*)frame.getData());
        break;
      default:
        break;
    }

    namedWindow("Display window", WINDOW_AUTOSIZE); // Create a window for display.
    imshow("Display window", image);                // Show our image inside it.
    outputVideo << image;
  }

  stream.stop();
  stream.destroy();
  device.close();
  OpenNI::shutdown();
  return 0;
}
int main() {
  // 2. initialize OpenNI
  Status rc = OpenNI::initialize();
  if (rc != STATUS_OK) {
    printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
    return 1;
  }

  // 3. open a device
  Device device;
  rc = device.open(ANY_DEVICE);
  if (rc != STATUS_OK) {
    printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
    return 2;
  }

  // 4. create the depth and color streams
  VideoStream depth;
  if (device.getSensorInfo(SENSOR_DEPTH) != NULL) {
    rc = depth.create(device, SENSOR_DEPTH);
    if (rc != STATUS_OK) {
      printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
      return 3;
    }
  }
  VideoStream color;
  if (device.getSensorInfo(SENSOR_COLOR) != NULL) {
    rc = color.create(device, SENSOR_COLOR);
    if (rc != STATUS_OK) {
      printf("Couldn't create color stream\n%s\n", OpenNI::getExtendedError());
      return 4;
    }
  }

  // 5. create OpenCV windows
  cv::namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);
  cv::namedWindow("Color Image", CV_WINDOW_AUTOSIZE);

  // 6. start the streams
  rc = depth.start();
  if (rc != STATUS_OK) {
    printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
    return 5;
  }
  rc = color.start();
  if (rc != STATUS_OK) {
    printf("Couldn't start the color stream\n%s\n", OpenNI::getExtendedError());
    return 6;
  }

  VideoFrameRef colorframe;
  VideoFrameRef depthframe;
  int iMaxDepth = depth.getMaxPixelValue();
  int iColorFps = color.getVideoMode().getFps();
  cv::Size iColorFrameSize = cv::Size(color.getVideoMode().getResolutionX(),
                                      color.getVideoMode().getResolutionY());

  cv::Mat colorimageRGB;
  cv::Mat colorimageBGR;
  cv::Mat depthimage;
  cv::Mat depthimageScaled;

#ifdef F_RECORDVIDEO
  cv::VideoWriter outputvideo_color;
  cv::FileStorage outputfile_depth;

  time_t timenow = time(0);
  tm ltime;
  localtime_s(&ltime, &timenow);
  int tyear = 1900 + ltime.tm_year;
  int tmonth = 1 + ltime.tm_mon;
  int tday = ltime.tm_mday;
  int thour = ltime.tm_hour;
  int tmin = ltime.tm_min;
  int tsecond = ltime.tm_sec;

  string filename_rgb = "RGB/rgb_" + to_string(tyear) + "_" + to_string(tmonth) + "_" + to_string(tday) +
                        "_" + to_string(thour) + "_" + to_string(tmin) + "_" + to_string(tsecond) + ".avi";
  string filename_d = "D/d_" + to_string(tyear) + "_" + to_string(tmonth) + "_" + to_string(tday) +
                      "_" + to_string(thour) + "_" + to_string(tmin) + "_" + to_string(tsecond) + ".yml";

  outputvideo_color.open(filename_rgb, CV_FOURCC('I', '4', '2', '0'), iColorFps, iColorFrameSize, true);
  if (!outputvideo_color.isOpened()) {
    cout << "Could not open the output color video for write: " << endl;
    return 7;
  }
  outputfile_depth.open(filename_d, cv::FileStorage::WRITE);
  if (!outputfile_depth.isOpened()) {
    cout << "Could not open the output depth file for write: " << endl;
    return 8;
  }
#endif // F_RECORDVIDEO

  // 7. main loop, keep reading
  while (!wasKeyboardHit()) {
    // 8. check if the color stream is valid
    if (color.isValid()) {
      if (color.readFrame(&colorframe) == STATUS_OK) {
        colorimageRGB = { colorframe.getHeight(), colorframe.getWidth(), CV_8UC3, (void*)colorframe.getData() };
        cv::cvtColor(colorimageRGB, colorimageBGR, CV_RGB2BGR);
      }
    }

    // 9. check if the depth stream is valid
    if (depth.isValid()) {
      if (depth.readFrame(&depthframe) == STATUS_OK) {
        depthimage = { depthframe.getHeight(), depthframe.getWidth(), CV_16UC1, (void*)depthframe.getData() };
        depthimage.convertTo(depthimageScaled, CV_8U, 255.0 / iMaxDepth);
      }
    }

    cv::imshow("Color Image", colorimageBGR);
    cv::imshow("Depth Image", depthimageScaled);

#ifdef F_RECORDVIDEO
    outputvideo_color << colorimageBGR;
    outputfile_depth << "Mat" << depthimage;
#endif // F_RECORDVIDEO

    cv::waitKey(10);
  }

  color.stop();
  depth.stop();
  color.destroy();
  depth.destroy();
  device.close();
  OpenNI::shutdown();
  return 0;
}
void* camera_thread(void*) {
  VideoFrameRef frame;
  VideoFrameRef rgbframe;
  int changed_stream;
  VideoStream* pStream;
  VideoStream* pRgbStream;

  sensor_msgs::CameraInfo rgb_info;
  sensor_msgs::CameraInfo depth_info;
  sensor_msgs::Image image;
  image.header.frame_id = frame_id;
  image.is_bigendian = 0;

  int count = 0;
  int num_frames_stat = 64;
  struct timeval previous_time;
  gettimeofday(&previous_time, 0);
  uint64_t last_stamp = 0;

  while (run) {
    bool new_frame = false;
    uint64_t current_stamp = 0;
    openni::OpenNI::waitForAnyStream(streams, 2, &changed_stream);
    switch (changed_stream) {
      // DEPTH
      case 0:
        depth.readFrame(&frame);
        depth_info.header.stamp = ros::Time::now();
        image.header.stamp = ros::Time::now();
        if (!frame.isValid())
          break;
        current_stamp = frame.getTimestamp();
        if (current_stamp - last_stamp > 5000) {
          count++;
          new_frame = true;
        }
        last_stamp = current_stamp;
        if (!(count % (_frame_skip))) {
          image.header.seq = count;
          if (!_registration) {
            image.header.frame_id = frame_id + "_depth";
          } else {
            image.header.frame_id = frame_id + "_rgb";
          }
          depth_info.width = frame.getWidth();
          depth_info.height = frame.getHeight();
          depth_info.header.seq = count;
          depth_info.distortion_model = sensor_msgs::distortion_models::PLUMB_BOB;
          depth_info.D.resize(5, 0.0);
          depth_info.K[0] = depth_info.width / (2 * tan(depth.getHorizontalFieldOfView() / 2));  // fx
          depth_info.K[4] = depth_info.height / (2 * tan(depth.getVerticalFieldOfView() / 2));   // fy
          depth_info.K[2] = depth_info.width / 2;   // cx
          depth_info.K[5] = depth_info.height / 2;  // cy
          depth_info.K[8] = 1;
          depth_info.header.frame_id = image.header.frame_id;

          image.height = frame.getHeight();
          image.width = frame.getWidth();
          image.encoding = "mono16";
          image.step = frame.getWidth() * 2;
          image.data.resize(image.step * image.height);
          memcpy((char*)(&image.data[0]), frame.getData(), image.height * image.width * 2);
          pub_depth.publish(image);
          pub_camera_info_depth.publish(depth_info);
        }
        break;

      // RGB
      case 1:
        rgb.readFrame(&rgbframe);
        image.header.stamp = ros::Time::now();
        rgb_info.header.stamp = ros::Time::now();
        if (!rgbframe.isValid())
          break;
        current_stamp = rgbframe.getTimestamp();
        if (current_stamp - last_stamp > 5000) {
          count++;
          new_frame = true;
        }
        last_stamp = current_stamp;

        if (_gain >= 0) {
          if (count > 64 && gain_status == 0) {
            rgb.getCameraSettings()->setAutoExposureEnabled(false);
            rgb.getCameraSettings()->setAutoWhiteBalanceEnabled(false);
            rgb.getCameraSettings()->setExposure(1);
            rgb.getCameraSettings()->setGain(100);
            gain_status = 1;
          } else if (count > 128 && gain_status == 1) {
            rgb.getCameraSettings()->setExposure(_exposure);
            rgb.getCameraSettings()->setGain(_gain);
            gain_status = 2;
          }
        }

        if (!(count % (_frame_skip))) {
          rgb_info.header.frame_id = frame_id + "_rgb";
          rgb_info.width = rgbframe.getWidth();
          rgb_info.height = rgbframe.getHeight();
          rgb_info.header.seq = count;
          rgb_info.K[0] = rgb_info.width / (2 * tan(rgb.getHorizontalFieldOfView() / 2));  // fx
          rgb_info.K[4] = rgb_info.height / (2 * tan(rgb.getVerticalFieldOfView() / 2));   // fy
          rgb_info.K[2] = rgb_info.width / 2;   // cx
          rgb_info.K[5] = rgb_info.height / 2;  // cy
          rgb_info.K[8] = 1;

          image.header.seq = count;
          image.header.frame_id = frame_id + "_rgb";
          image.height = rgbframe.getHeight();
          image.width = rgbframe.getWidth();
          image.encoding = "rgb8";
          image.step = rgbframe.getWidth() * 3;
          image.data.resize(image.step * image.height);
          memcpy((char*)(&image.data[0]), rgbframe.getData(), image.height * image.width * 3);
          pub_rgb.publish(image);
          pub_camera_info_rgb.publish(rgb_info);
        }
        break;

      default:
        printf("Error in wait\n");
    }

    if (!(count % num_frames_stat) && new_frame) {
      struct timeval current_time, interval;
      gettimeofday(&current_time, 0);
      timersub(&current_time, &previous_time, &interval);
      previous_time = current_time;
      double fps = num_frames_stat / (interval.tv_sec + 1e-6 * interval.tv_usec);
      printf("running at %lf fps\n", fps);
      fflush(stdout);
    }
  }
  return NULL;
}
int _tmain(int argc, _TCHAR* argv[]) {
  DepthDetector detector(ThresholdMin, ThresholdMax);
  ScanLineSegmenter segmenter;

  OpenNI::initialize();

  Device device;
  if (device.open(ANY_DEVICE) != STATUS_OK) {
    std::cout << "could not open any device\r\n";
    return 1;
  }

  if (device.hasSensor(SENSOR_DEPTH)) {
    auto info = device.getSensorInfo(SENSOR_DEPTH);
    auto& modes = info->getSupportedVideoModes();
    std::cout << "depth sensor supported modes:\r\n";
    for (int i = 0; i < modes.getSize(); ++i) {
      auto& mode = modes[i];
      std::cout << "pixel format: " << mode.getPixelFormat()
                << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY()
                << "@" << mode.getFps() << " fps\r\n";
    }
  }

  VideoStream stream;
  stream.create(device, SENSOR_DEPTH);

  VideoMode mode;
  mode.setFps(25);
  mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
  mode.setResolution(320, 240);
  stream.setMirroringEnabled(true);
  stream.setVideoMode(mode);
  stream.start();

  std::cout << "press any key to capture background\r\n";
  std::cin.get();

  VideoFrameRef frame;
  stream.readFrame(&frame);

  DepthImage image(320, 240);
  copyFrameToImage(frame, image);
  detector.background(image);

  std::cout << "starting capture loop\r\n";

  CenterPointExtractor centerPointExtractor(MinBlobSize);

  std::chrono::high_resolution_clock timer;
  auto startTime = timer.now();
  int frameId = 0;

  while (true) {
    stream.readFrame(&frame);
    copyFrameToImage(frame, image);

    detector.detect(image);

    std::vector<LineSegment> segments;
    segmenter.segment(detector.mask(), segments);

    std::vector<std::pair<float, float>> centerPoints;
    centerPointExtractor.extract(segments, centerPoints);

    if (centerPoints.size()) {
      std::cout << "point count: " << centerPoints.size();
      std::cout << "\t points: ";
      for (auto& point : centerPoints) {
        std::cout << "(" << point.first << ", " << point.second << ") ";
      }
      std::cout << "\r\n";
    }

    ++frameId;
    if (frameId % 64 == 0) {
      auto stopTime = timer.now();
      auto elapsedTime = stopTime - startTime;
      auto elapsedMilliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(elapsedTime).count();
      // elapsedMilliseconds / 64 is the average time per frame, not frames per second
      std::cout << "\t total frames: " << frameId
                << "\t ms per frame: " << elapsedMilliseconds / 64 << std::endl;
      startTime = stopTime;
    }
  }

  openni::OpenNI::shutdown();
  return 0;
}
void DepthCallback::onNewFrame(VideoStream& stream) {
  stream.readFrame(&m_frame);
  analyzeFrame(m_frame);
}
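Callbacks like DepthCallback::onNewFrame above only fire once the listener object is registered with the stream. A minimal sketch of that wiring, assuming OpenNI2's VideoStream::addNewFrameListener() API and that DepthCallback derives from openni::VideoStream::NewFrameListener (the surrounding setup names are hypothetical):

// Minimal sketch: register a NewFrameListener so onNewFrame() is invoked per frame.
#include <OpenNI.h>
using namespace openni;

void runWithListener(Device& device) {
  VideoStream depth;
  depth.create(device, SENSOR_DEPTH);

  DepthCallback callback;                 // listener type from the snippet above (assumed)
  depth.addNewFrameListener(&callback);   // onNewFrame() now runs on every new frame
  depth.start();

  // ... do other work; frames arrive asynchronously on OpenNI's event thread ...

  depth.stop();
  depth.removeNewFrameListener(&callback);
  depth.destroy();
}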
int _tmain(int argc, _TCHAR* argv[]) {
  sdl::Application app;

  DepthDetector detector(ThresholdMin, ThresholdMax);
  ScanLineSegmenter segmenter;

  OpenNI::initialize();

  Device device;
  if (device.open(ANY_DEVICE) != STATUS_OK) {
    std::cout << "could not open any device\r\n";
    return 1;
  }

  if (device.hasSensor(SENSOR_DEPTH)) {
    auto info = device.getSensorInfo(SENSOR_DEPTH);
    auto& modes = info->getSupportedVideoModes();
    std::cout << "depth sensor supported modes:\r\n";
    for (int i = 0; i < modes.getSize(); ++i) {
      auto& mode = modes[i];
      std::cout << "pixel format: " << mode.getPixelFormat()
                << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY()
                << "@" << mode.getFps() << " fps\r\n";
    }
  }

  VideoStream stream;
  stream.create(device, SENSOR_DEPTH);

  VideoMode mode;
  mode.setFps(25);
  mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
  mode.setResolution(320, 240);
  stream.setMirroringEnabled(true);
  stream.setVideoMode(mode);
  stream.start();

  std::cout << "press any key to capture background\r\n";
  std::cin.get();

  VideoFrameRef frame;
  stream.readFrame(&frame);

  DepthImage image(320, 240);
  copyFrameToImage(frame, image);
  detector.background(image);

  std::cout << "starting capture loop\r\n";

  sdl::GLContext::setVersion(4, 3);

  ImageViewer viewer;
  viewer.add(0, 0, 320, 240);
  viewer.add(320, 0, 320, 240);
  viewer.add(0, 240, 320, 240);
  viewer.add(320, 240, 320, 240);

  CenterPointExtractor centerPointExtractor(MinBlobSize);
  MotionRecorder recorder;

  while (true) {
    stream.readFrame(&frame);
    copyFrameToImage(frame, image);

    detector.detect(image);

    std::vector<LineSegment> segments;
    segmenter.segment(detector.mask(), segments);

    std::vector<std::pair<float, float>> centerPoints;
    centerPointExtractor.extract(segments, centerPoints);

    recorder.track(centerPoints);

    viewer.crosses.clear();
    std::transform(begin(centerPoints), end(centerPoints), std::back_inserter(viewer.crosses),
                   [](std::pair<float, float>& coord) { return Cross{ coord.first, coord.second }; });

    viewer.lines.clear();
    std::transform(begin(recorder.motions()), end(recorder.motions()), std::back_inserter(viewer.lines),
                   [](const Motion& motion) { return Lines{ motion.points }; });

    viewer[0].update(detector.mask());
    viewer[1].update(image);
    viewer[2].update(detector.background());
    viewer[3].update(detector.difference());
    viewer.update();
  }

  openni::OpenNI::shutdown();
  return 0;
}
int main() {
  FILE *fptrI = fopen("C:\\Users\\Alan\\Documents\\ShapeFeatures.csv", "w");
  fprintf(fptrI, "Classtype, Area, Perimeter, Circularity, Extent\n");
  fclose(fptrI);

  Mat input  = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\Fingers.bmp", 1);
  Mat input2 = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\NotFingers.bmp", 1);
  Mat inputF = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\ImageFeaturesBinaryF.bmp", 1);

  Mat gray(input.rows, input.cols, CV_8UC3);
  Mat gray2(input.rows, input.cols, CV_8UC3);
  Mat grayF(input.rows, input.cols, CV_8UC3);
  cvtColor(input, gray, CV_BGR2GRAY);
  cvtColor(input2, gray2, CV_BGR2GRAY);
  cvtColor(inputF, grayF, CV_BGR2GRAY);

  shapeFeatures(gray, input, 1);
  shapeFeatures(gray2, input2, 2);

  namedWindow("Image");
  imshow("Image", input);
  namedWindow("Image2");
  imshow("Image2", input2);

  //------------------------------------------------------
  //--------[SVM]--------
  // Read input data from the file created above
  double parameters[5];
  vector<double> svmI, svmA, svmP, svmC, svmE;
  int size = 1;
  double index = 0;
  double area = 0;
  double perimeter = 0;
  double circularity = 0;
  char buffer[1024];
  char *record, *line;

  FILE* fptrR = fopen("C:\\Users\\Alan\\Documents\\ShapeFeatures.csv", "r");
  fscanf(fptrR, "%*[^\n]\n", NULL); // skip the header line

  svmI.resize(size);
  svmA.resize(size);
  svmP.resize(size);
  svmC.resize(size);
  while ((line = fgets(buffer, sizeof(buffer), fptrR)) != NULL) {
    size++;
    svmI.resize(size);
    svmA.resize(size);
    svmP.resize(size);
    svmC.resize(size);
    svmE.resize(size);

    record = strtok(line, ";");
    for (int i = 0; i < 5; i++) {
      double value = atoi(record);
      record = strtok(line, ";");
    }

    char *lineCopy = record;
    char *pch;
    pch = strtok(lineCopy, ",");
    parameters[0] = atoi(pch);

    int j = 1;
    while (j < 5) {
      pch = strtok(NULL, ",");
      parameters[j] = atof(pch);
      j++;
    }
    svmI[size-1] = parameters[0];
    svmA[size-1] = parameters[1];
    svmP[size-1] = parameters[2];
    svmC[size-1] = parameters[3];
    svmE[size-1] = parameters[4];
  }
  fclose(fptrR);
  //---------------------

  // Data for visual representation
  int width = 512, height = 512;
  Mat image = Mat::zeros(height, width, CV_8UC3);

  // Set up training data
  //float labels[8] = {1.0, -1.0, -1.0, -1.0};
  float labels[1000];
  for (int i = 0; i < svmI.size()-1; i++) {
    labels[i] = svmI[i+1];
  }
  Mat labelsMat(1000, 1, CV_32FC1, labels);

  float trainingData[1000][4];
  for (int i = 0; i < svmE.size()-1; i++) {
    trainingData[i][0] = svmE[i+1];
    trainingData[i][1] = svmC[i+1];
    trainingData[i][2] = svmA[i+1];
    trainingData[i][3] = svmP[i+1];
  }
  Mat trainingDataMat(1000, 4, CV_32FC1, trainingData);

  // Set up SVM's parameters
  CvSVMParams params;
  params = SVMFinger.get_params();
  //params.svm_type = CvSVM::C_SVC;
  //params.kernel_type = CvSVM::LINEAR;
  //params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);

  // Train the SVM
  SVMFinger.train_auto(trainingDataMat, labelsMat, Mat(), Mat(), params);

  // Mat sampleMat = (Mat_<float>(1,2) << 138.5, 57);
  // float response = SVMFinger.predict(sampleMat);

  waitKey();
  destroyWindow("Image");
  destroyWindow("Image2");
  //------------------------------------------

  OpenNI::initialize();
  Device devAnyDevice;
  devAnyDevice.open(ANY_DEVICE);

  //----------------[Define Video Settings]-------------------
  // Set properties of the depth stream
  VideoMode mModeDepth;
  mModeDepth.setResolution(640, 480);
  mModeDepth.setFps(30);
  mModeDepth.setPixelFormat(PIXEL_FORMAT_DEPTH_100_UM);

  // Set properties of the color stream
  VideoMode mModeColor;
  mModeColor.setResolution(640, 480);
  mModeColor.setFps(30);
  mModeColor.setPixelFormat(PIXEL_FORMAT_RGB888);
  //----------------------------------------------------------

  //----------------------[Initial Streams]---------------------
  VideoStream streamInitDepth;
  streamInitDepth.create(devAnyDevice, SENSOR_DEPTH);

  VideoStream streamInitColor;
  streamInitColor.create(devAnyDevice, SENSOR_COLOR);

  streamInitDepth.setVideoMode(mModeDepth);
  streamInitColor.setVideoMode(mModeColor);

  namedWindow("Depth Image (Init)", CV_WINDOW_AUTOSIZE);
  namedWindow("Color Image (Init)", CV_WINDOW_AUTOSIZE);
  //namedWindow("Thresholded Image (Init)", CV_WINDOW_AUTOSIZE);

  VideoFrameRef frameDepthInit;
  VideoFrameRef frameColorInit;

  streamInitDepth.start();
  streamInitColor.start();

  cv::Mat BackgroundFrame;
  int avgDist = 0;
  int iMaxDepthInit = streamInitDepth.getMaxPixelValue();

  OutX.clear();
  OutY.clear();

  vector<int> OldOutX, OldOutY;
  OldOutX.clear();
  OldOutY.clear();
  //------------------------------------------------------------

  //--------------------[Initiation Process]--------------------
  while (true) {
    streamInitDepth.readFrame(&frameDepthInit);
    streamInitColor.readFrame(&frameColorInit);

    const cv::Mat mImageDepth(frameDepthInit.getHeight(), frameDepthInit.getWidth(),
                              CV_16UC1, (void*)frameDepthInit.getData());

    cv::Mat mScaledDepth;
    mImageDepth.convertTo(mScaledDepth, CV_8U, 255.0 / iMaxDepthInit);
    cv::imshow("Depth Image (Init)", mScaledDepth);

    const cv::Mat mImageRGB(frameColorInit.getHeight(), frameColorInit.getWidth(),
                            CV_8UC3, (void*)frameColorInit.getData());
    cv::Mat cImageBGR;
    cv::cvtColor(mImageRGB, cImageBGR, CV_RGB2BGR);

    //--------------------[Get Average Distance]---------------------
    int depthVal = 0;
    int frameHeight = frameDepthInit.getHeight();
    int frameWidth = frameDepthInit.getWidth();
    //------------
    //backgroundDepth.resize(frameHeight * frameWidth);
    //---------------------------------------------------------------

    int initCount = 0;
    for (int i = 0; i < frameHeight; i++) {
      for (int j = 0; j < frameWidth; j++) {
        depthVal = mImageDepth.at<unsigned short>(i, j) + depthVal;
        initCount++;
      }
    }
    avgDist = depthVal / ((frameHeight) * (frameWidth));

    cout << "Average Distance: " << avgDist << endl;
    cv::imshow("Color Image (Init)", cImageBGR);

    if (cv::waitKey(1) == 'q') {
      mImageDepth.copyTo(BackgroundFrame);
      break;
    }
  }

  streamInitDepth.destroy();
  streamInitColor.destroy();

  destroyWindow("Depth Image (Init)");
  destroyWindow("Color Image (Init)");

  VideoStream streamDepth;
  streamDepth.create(devAnyDevice, SENSOR_DEPTH);

  VideoStream streamColor;
  streamColor.create(devAnyDevice, SENSOR_COLOR);

  streamDepth.setVideoMode(mModeDepth);
  streamColor.setVideoMode(mModeColor);

  streamDepth.start();
  streamColor.start();

  namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);
  namedWindow("Color Image", CV_WINDOW_AUTOSIZE);
  namedWindow("Thresholded Image", CV_WINDOW_AUTOSIZE);

  int iMaxDepth = streamDepth.getMaxPixelValue();

  VideoFrameRef frameColor;
  VideoFrameRef frameDepth;

  OutX.clear();
  OutY.clear();
  //------------------------------------------------------------
  //------------------------------------------------------------

  //-----------------------[Main Process]-----------------------
  while (true) {
    streamDepth.readFrame(&frameDepth);
    streamColor.readFrame(&frameColor);

    const cv::Mat mImageDepth(frameDepth.getHeight(), frameDepth.getWidth(),
                              CV_16UC1, (void*)frameDepth.getData());

    cv::Mat mScaledDepth;
    mImageDepth.convertTo(mScaledDepth, CV_8U, 255.0 / iMaxDepth);

    ////////////////////////////////////////////////////////////////////////////////////////////
    //---------------------[Downsampling]-------------------------------------------------------
    double min;
    double max;
    cv::minMaxIdx(mImageDepth, &min, &max);
    cv::Mat adjMap;

    // expand the range to 0..255, similar to histEq()
    float scale = 255 / (max - min);
    mImageDepth.convertTo(adjMap, CV_8UC1, scale, -min * scale);

    // this converts the grayscale image into a tone-mapped one,
    // much more pleasing for the eye;
    // the function is found in the contrib module, so include contrib.hpp
    // and link accordingly
    cv::Mat falseColorsMap;
    applyColorMap(adjMap, falseColorsMap, cv::COLORMAP_AUTUMN);

    cv::imshow("Out", falseColorsMap);
    //------------------------------------------------------------------------------------------
    ////////////////////////////////////////////////////////////////////////////////////////////

    cv::imshow("Depth Image", mScaledDepth);
    cv::imshow("Depth Image2", adjMap);

    const cv::Mat mImageRGB(frameColor.getHeight(), frameColor.getWidth(),
                            CV_8UC3, (void*)frameColor.getData());
    cv::Mat cImageBGR;
    cv::cvtColor(mImageRGB, cImageBGR, CV_RGB2BGR);

    //-------------[Threshold]-----------------
    cv::Mat mImageThres(frameDepth.getHeight(), frameDepth.getWidth(), CV_8UC1);

    int backgroundPxlCount = 0;
    for (int i = 0; i < 480; i++) {
      for (int j = 0; j < 640; j++) {
        int depthVal = mImageDepth.at<unsigned short>(i, j);
        avgDist = BackgroundFrame.at<unsigned short>(i, j) - 2;

        if ((depthVal > (avgDist - 14)) && (depthVal <= (avgDist - 7))) {
          //mImageThres.data[mImageThres.step[0]*i + mImageThres.step[1]*j] = 255;
          mImageThres.at<uchar>(i, j) = 255;
        } else {
          //mImageThres.data[mImageThres.step[0]*i + mImageThres.step[1]*j] = 0;
          mImageThres.at<uchar>(i, j) = 0;
        }
        backgroundPxlCount++;
      }
    }
    GaussianBlur(mImageThres, mImageThres, Size(3, 3), 0, 0);
    fingerDetection(mImageThres, cImageBGR, OldOutX, OldOutY);

    cv::imshow("Thresholded Image", mImageThres);
    //----------------------------------------

    if (cv::waitKey(1) == 'q') {
      break;
    }
    //------------------------------------------------
    cv::imshow("Color Image", cImageBGR);
    //----------------------------------

    OldOutX.clear();
    OldOutY.clear();
    OldOutX = OutX;
    OldOutY = OutY;
    OutX.clear();
    OutY.clear();
  }

  return 0;
}
void onNewFrame(VideoStream& stream) {
  stream.readFrame(&m_frame);
  analyzeFrame(m_frame);
}
void DepthSource::onNewFrame(VideoStream& stream) {
  Status status = stream.readFrame(&videoFrameRef);
  if (status != STATUS_OK) {
    ofLogError() << "DepthSource readFrame FAIL: " << OpenNI::getExtendedError();
  }

  const DepthPixel* oniDepthPixels = (const DepthPixel*)videoFrameRef.getData();

  if (doRawDepth) {
    if (doDoubleBuffering) {
      backRawPixels->setFromPixels(oniDepthPixels, width, height, 1);
      swap(backRawPixels, currentRawPixels);
    } else {
      currentRawPixels->setFromPixels(oniDepthPixels, width, height, 1);
    }
  }

  ofPixels* pixelBuffer = currentPixels;
  if (doDoubleBuffering) {
    pixelBuffer = backPixels;
  }

  float max = 255;
  unsigned char nearColor = 255;
  unsigned char farColor = 20;

  for (unsigned short y = 0; y < height; y++) {
    unsigned char* pixel = pixelBuffer->getPixels() + y * width * 4;
    for (unsigned short x = 0; x < width; x++, oniDepthPixels++, pixel += 4) {
      unsigned char red = 0;
      unsigned char green = 0;
      unsigned char blue = 0;
      unsigned char alpha = 255;
      unsigned short col_index;

      if ((*oniDepthPixels > nearClipping) && (*oniDepthPixels < farClipping)) {
        unsigned char a =
            //(unsigned char)(((*oniDepthPixels) / ( deviceMaxDepth / max)));
            ofMap(*oniDepthPixels, nearClipping, farClipping, nearColor, farColor, true);
        red = a;
        green = a;
        blue = a;
        pixel[0] = red;
        pixel[1] = green;
        pixel[2] = blue;
        if (*oniDepthPixels == 0) {
          pixel[3] = 0;
        } else {
          pixel[3] = alpha;
        }
      } else {
        pixel[0] = 0;
        pixel[1] = 0;
        pixel[2] = 0;
        pixel[3] = 255;
      }
    }
  }

  if (doDoubleBuffering) {
    swap(backPixels, currentPixels);
  }
}
int main() {
  // 2. initialize OpenNI
  Status rc = OpenNI::initialize();
  if (rc != STATUS_OK) {
    printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
    return 1;
  }

  // 3. open a device
  Device device;
  rc = device.open(ANY_DEVICE);
  if (rc != STATUS_OK) {
    printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
    return 2;
  }

  // 4. create the depth and color streams
  VideoStream depth;
  if (device.getSensorInfo(SENSOR_DEPTH) != NULL) {
    rc = depth.create(device, SENSOR_DEPTH);
    if (rc != STATUS_OK) {
      printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
      return 3;
    }
  }
  VideoStream color;
  if (device.getSensorInfo(SENSOR_COLOR) != NULL) {
    rc = color.create(device, SENSOR_COLOR);
    if (rc != STATUS_OK) {
      printf("Couldn't create color stream\n%s\n", OpenNI::getExtendedError());
      return 4;
    }
  }

  // 5. create OpenCV windows
  cv::namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);
  cv::namedWindow("Color Image", CV_WINDOW_AUTOSIZE);

  // 6. start the streams
  rc = depth.start();
  if (rc != STATUS_OK) {
    printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
    return 5;
  }
  rc = color.start();
  if (rc != STATUS_OK) {
    printf("Couldn't start the color stream\n%s\n", OpenNI::getExtendedError());
    return 6;
  }

  VideoFrameRef colorframe;
  VideoFrameRef depthframe;
  int iMaxDepth = depth.getMaxPixelValue();

  cv::Mat colorimageRGB;
  cv::Mat colorimageBGR;
  cv::Mat depthimage;
  cv::Mat depthimageScaled;

  // 7. main loop, keep reading
  while (!wasKeyboardHit()) {
    // 8. check if the color stream is valid
    if (color.isValid()) {
      if (color.readFrame(&colorframe) == STATUS_OK) {
        colorimageRGB = { colorframe.getHeight(), colorframe.getWidth(), CV_8UC3, (void*)colorframe.getData() };
        cv::cvtColor(colorimageRGB, colorimageBGR, CV_RGB2BGR);
      }
    }

    // 9. check if the depth stream is valid
    if (depth.isValid()) {
      if (depth.readFrame(&depthframe) == STATUS_OK) {
        depthimage = { depthframe.getHeight(), depthframe.getWidth(), CV_16UC1, (void*)depthframe.getData() };
        depthimage.convertTo(depthimageScaled, CV_8U, 255.0 / iMaxDepth);
      }
    }

    cv::imshow("Color Image", colorimageBGR);
    cv::imshow("Depth Image", depthimageScaled);

    cv::waitKey(10);
  }

  color.stop();
  depth.stop();
  color.destroy();
  depth.destroy();
  device.close();
  OpenNI::shutdown();
  return 0;
}