// ------------------------------------------------------ // TestCameraCaptureAsk // ------------------------------------------------------ void TestCameraCaptureAsk() { CCameraSensorPtr cam = prepareVideoSourceFromUserSelection(); if (!cam) { cerr << "The user didn't pick any camera. Exiting." << endl; return; } CDisplayWindow win("Live video"); cout << "Close the window to exit." << endl; double counter = 0; mrpt::utils::CTicTac tictac; while (win.isOpen()) { if( !counter ) tictac.Tic(); mrpt::obs::CObservationPtr obs = cam->getNextFrame(); ASSERT_(obs); if (IS_CLASS(obs,CObservationImage)) { CObservationImagePtr o=CObservationImagePtr(obs); win.showImage(o->image); } else if (IS_CLASS(obs,CObservationStereoImages)) { CObservationStereoImagesPtr o=CObservationStereoImagesPtr(obs); win.showImage(o->imageRight); } if( ++counter == 10 ) { double t = tictac.Tac(); cout << "Frame Rate: " << counter/t << " fps" << endl; counter = 0; } mrpt::system::sleep(2); } cout << "Closing..." << endl; }
// ------------------------------------------------------ // TestCameraCaptureAsk // ------------------------------------------------------ void TestCameraCaptureAsk() { CCameraSensorPtr cam = prepareVideoSourceFromUserSelection(); if (!cam) { cerr << "The user didn't pick any camera. Exiting." << endl; return; } CDisplayWindow win("Live video"); cout << "Press 's' to save frames.\nClose the window to exit.\n"; double counter = 0; mrpt::utils::CTicTac tictac; while (win.isOpen()) { if( !counter ) tictac.Tic(); mrpt::obs::CObservationPtr obs = cam->getNextFrame(); ASSERT_(obs); CImage *img = NULL; if (IS_CLASS(obs,CObservationImage)) { CObservationImagePtr o=CObservationImagePtr(obs); img = &o->image; } else if (IS_CLASS(obs,CObservationStereoImages)) { CObservationStereoImagesPtr o=CObservationStereoImagesPtr(obs); img = &o->imageRight; } else if (IS_CLASS(obs,CObservation3DRangeScan)) { CObservation3DRangeScanPtr o=CObservation3DRangeScanPtr(obs); if (o->hasIntensityImage) img = &o->intensityImage; } if (img) win.showImage(*img); if( ++counter == 10 ) { double t = tictac.Tac(); cout << "Frame Rate: " << counter/t << " fps" << endl; counter = 0; } // Process keystrokes: if (mrpt::system::os::kbhit()) { const int key_code = mrpt::system::os::getch(); switch (key_code) { case 's': case 'S': { static int cnt=0; const std::string sFile = mrpt::format("frame%05i.png",cnt++); cout << "Saving frame to: " << sFile << endl; img->saveToFile(sFile); } break; default: break; }; } mrpt::system::sleep(2); } cout << "Closing..." << endl; }
// ------------------------------------------------------ // TestCamera3DFaceDetection // ------------------------------------------------------ void TestCamera3DFaceDetection( CCameraSensorPtr cam ) { CDisplayWindow win("Live video"); CDisplayWindow win2("FaceDetected"); cout << "Close the window to exit." << endl; mrpt::gui::CDisplayWindow3D win3D("3D camera view",800,600); mrpt::gui::CDisplayWindow3D win3D2; win3D.setCameraAzimuthDeg(140); win3D.setCameraElevationDeg(20); win3D.setCameraZoom(6.0); win3D.setCameraPointingToPoint(2.5,0,0); mrpt::opengl::COpenGLScenePtr &scene = win3D.get3DSceneAndLock(); mrpt::opengl::COpenGLScenePtr scene2; mrpt::opengl::CPointCloudColouredPtr gl_points = mrpt::opengl::CPointCloudColoured::Create(); gl_points->setPointSize(4.5); mrpt::opengl::CPointCloudColouredPtr gl_points2 = mrpt::opengl::CPointCloudColoured::Create(); gl_points2->setPointSize(4.5); // Create the Opengl object for the point cloud: scene->insert( gl_points ); scene->insert( mrpt::opengl::CGridPlaneXY::Create() ); scene->insert( mrpt::opengl::stock_objects::CornerXYZ() ); win3D.unlockAccess3DScene(); if ( showEachDetectedFace ) { win3D2.setWindowTitle("3D Face detected"); win3D2.resize(400,300); win3D2.setCameraAzimuthDeg(140); win3D2.setCameraElevationDeg(20); win3D2.setCameraZoom(6.0); win3D2.setCameraPointingToPoint(2.5,0,0); scene2 = win3D2.get3DSceneAndLock(); scene2->insert( gl_points2 ); scene2->insert( mrpt::opengl::CGridPlaneXY::Create() ); win3D2.unlockAccess3DScene(); } double counter = 0; mrpt::utils::CTicTac tictac; CVectorDouble fps; while (win.isOpen()) { if( !counter ) tictac.Tic(); CObservation3DRangeScanPtr o; try{ o = CObservation3DRangeScanPtr(cam->getNextFrame()); } catch ( CExceptionEOF &) { break; } ASSERT_(o); vector_detectable_object detected; //CObservation3DRangeScanPtr o = CObservation3DRangeScanPtr(obs); faceDetector.detectObjects( o, detected ); //static int x = 0; if ( detected.size() > 0 ) { for ( unsigned int i = 0; i < 
detected.size(); i++ ) { ASSERT_( IS_CLASS(detected[i],CDetectable3D ) ) CDetectable3DPtr obj = CDetectable3DPtr( detected[i] ); if ( showEachDetectedFace ) { CObservation3DRangeScan face; o->getZoneAsObs( face, obj->m_y, obj->m_y + obj->m_height, obj->m_x, obj->m_x + obj->m_width ); win2.showImage( face.intensityImage ); if ( o->hasPoints3D ) { win3D2.get3DSceneAndLock(); CColouredPointsMap pntsMap; if ( !o->hasConfidenceImage ) { pntsMap.colorScheme.scheme = CColouredPointsMap::cmFromIntensityImage; pntsMap.loadFromRangeScan( face ); } else { vector<float> xs, ys, zs; unsigned int i = 0; for ( unsigned int j = 0; j < face.confidenceImage.getHeight(); j++ ) for ( unsigned int k = 0; k < face.confidenceImage.getWidth(); k++, i++ ) { unsigned char c = *(face.confidenceImage.get_unsafe( k, j, 0 )); if ( c > faceDetector.m_options.confidenceThreshold ) { xs.push_back( face.points3D_x[i] ); ys.push_back( face.points3D_y[i] ); zs.push_back( face.points3D_z[i] ); } } pntsMap.setAllPoints( xs, ys, zs ); } gl_points2->loadFromPointsMap(&pntsMap); win3D2.unlockAccess3DScene(); win3D2.repaint(); } } o->intensityImage.rectangle( obj->m_x, obj->m_y, obj->m_x+obj->m_width, obj->m_y + obj->m_height, TColor(255,0,0) ); //x++; //if (( showEachDetectedFace ) && ( x > 430 ) ) //system::pause(); } } win.showImage(o->intensityImage); /*if (( showEachDetectedFace ) && ( detected.size() )) system::pause();*/ win3D.get3DSceneAndLock(); CColouredPointsMap pntsMap; pntsMap.colorScheme.scheme = CColouredPointsMap::cmFromIntensityImage; pntsMap.loadFromRangeScan(*(o.pointer())); gl_points->loadFromPointsMap(&pntsMap); win3D.unlockAccess3DScene(); win3D.repaint(); if( ++counter == 10 ) { double t = tictac.Tac(); cout << "Frame Rate: " << counter/t << " fps" << endl; fps.push_back( counter/t ); counter = 0; } mrpt::system::sleep(2); } cout << "Fps mean: " << fps.sumAll() / fps.size() << endl; faceDetector.experimental_showMeasurements(); cout << "Closing..." << endl; }
// ------------------------------------------------------ // TestCameraFaceDetection // ------------------------------------------------------ void TestCameraFaceDetection() { CCameraSensorPtr cam = prepareVideoSourceFromUserSelection(); if (!cam) { cerr << "The user didn't pick any camera. Exiting." << endl; return; } mrpt::obs::CObservationPtr obs = cam->getNextFrame(); ASSERT_(obs); if ( IS_CLASS(obs, CObservation3DRangeScan) ) { TestCamera3DFaceDetection( cam ); return; } CDisplayWindow win("Live video"); cout << "Close the window to exit." << endl; double counter = 0; mrpt::utils::CTicTac tictac; while (win.isOpen()) { if( !counter ) tictac.Tic(); mrpt::obs::CObservationPtr obs; try { obs = cam->getNextFrame(); } catch (CExceptionEOF &) // Check if eof, f.i. for RawLog files { break; } ASSERT_(obs); if (IS_CLASS(obs,CObservationImage)) { vector_detectable_object detected; faceDetector.detectObjects( obs, detected ); CObservationImagePtr o = CObservationImagePtr(obs); for ( unsigned int i = 0; i < detected.size(); i++ ) { ASSERT_( IS_CLASS(detected[i],CDetectable2D ) ) CDetectable2DPtr obj = CDetectable2DPtr( detected[i] ); o->image.rectangle( obj->m_x, obj->m_y, obj->m_x+obj->m_width, obj->m_y + obj->m_height, TColor(255,0,0) ); } win.showImage(o->image); } else if (IS_CLASS(obs,CObservationStereoImages)) { vector_detectable_object detected; faceDetector.detectObjects( obs, detected ); CObservationStereoImagesPtr o=CObservationStereoImagesPtr(obs); for ( unsigned int i = 0; i < detected.size(); i++ ) { ASSERT_( IS_CLASS(detected[i],CDetectable2D ) ) CDetectable2DPtr obj = CDetectable2DPtr( detected[i] ); o->imageRight.rectangle( obj->m_x, obj->m_y, obj->m_x+obj->m_width, obj->m_y + obj->m_height, TColor(255,0,0) ); } win.showImage(o->imageRight); } if( ++counter == 10 ) { double t = tictac.Tac(); cout << "Frame Rate: " << counter/t << " fps" << endl; counter = 0; } mrpt::system::sleep(2); } cout << "Closing..." << endl; }
// ------------------------------------------------------
//				DoTrackingDemo
// ------------------------------------------------------
// KLT feature-tracking demo: grabs frames from "cam", tracks sparse
// features across consecutive frames, draws the features and their
// recent tracks on the image, and displays it in the global 3D window
// "win" (assigned here; declared elsewhere in this file).
// On exit, the whole feature-observation history is dumped to
// "./tracked_feats.txt".
// \param cam            An already-opened video source.
// \param DO_SAVE_VIDEO  If true, also writes the annotated frames to an AVI file.
// \return 0 on normal termination.
int DoTrackingDemo(CCameraSensorPtr  cam, bool  DO_SAVE_VIDEO)
{
	win = mrpt::gui::CDisplayWindow3D::Create("Tracked features",800,600);

	mrpt::vision::CVideoFileWriter  vidWritter;

	bool 		hasResolution = false;
	TCamera		cameraParams; // For now, will only hold the image resolution on the arrive of the first frame.

	TSimpleFeatureList	trackedFeats;

	unsigned int	step_num = 0;

	// Display/behavior switches for this demo:
	bool  SHOW_FEAT_IDS = true;
	bool  SHOW_RESPONSES = true;
	bool  SHOW_FEAT_TRACKS = true;

	const double SAVE_VIDEO_FPS = 30; // If DO_SAVE_VIDEO=true, the FPS of the video file
	const char* SAVE_VIDEO_CODEC = "XVID"; // "XVID", "PIM1", "MJPG"

	bool  DO_HIST_EQUALIZE_IN_GRAYSCALE = false;
	string VIDEO_OUTPUT_FILE = "./tracking_video.avi";

	const double MAX_FPS = 5000; // 5.0; // Hz (to slow down visualization).

	CGenericFeatureTrackerAutoPtr  tracker;

	// "CFeatureTracker_KL" is by far the most robust implementation for now:
	tracker = CGenericFeatureTrackerAutoPtr( new CFeatureTracker_KL );

	tracker->enableTimeLogger(true); // Do time profiling.

	// Set of parameters common to any tracker implementation:
	// -------------------------------------------------------------
	// To see all the existing params and documentation, see mrpt::vision::CGenericFeatureTracker
	tracker->extra_params["remove_lost_features"]         = 1;   // automatically remove out-of-image and badly tracked features
	tracker->extra_params["add_new_features"]             = 1;   // track, AND ALSO, add new features
	tracker->extra_params["add_new_feat_min_separation"]  = 32;
	tracker->extra_params["minimum_KLT_response_to_add"]  = 10;
	tracker->extra_params["add_new_feat_max_features"]    = 350;
	tracker->extra_params["add_new_feat_patch_size"]      = 11;

	tracker->extra_params["update_patches_every"]         = 0;  // Don't update patches.

	tracker->extra_params["check_KLT_response_every"]     = 5;	// Re-check the KLT-response to assure features are in good points.
	tracker->extra_params["minimum_KLT_response"]	      = 5;

	// Specific params for "CFeatureTracker_KL"
	// ------------------------------------------------------
	tracker->extra_params["window_width"]  = 5;
	tracker->extra_params["window_height"] = 5;
	//tracker->extra_params["LK_levels"] = 3;
	//tracker->extra_params["LK_max_iters"] = 10;
	//tracker->extra_params["LK_epsilon"] = 0.1;
	//tracker->extra_params["LK_max_tracking_error"] = 150;

	// --------------------------------
	// The main loop
	// --------------------------------
	CImage		previous_image;  // The last processed frame, needed by trackFeatures()

	TSequenceFeatureObservations    feat_track_history;
	bool							save_tracked_history = true; // Dump feat_track_history to a file at the end

	TCameraPoseID 					curCamPoseId = 0;  // Monotonic frame/pose id for the observation history

	cout << endl << "TO END THE PROGRAM: Close the window.\n";

	mrpt::opengl::COpenGLViewportPtr gl_view;
	{
		// Grab the main viewport once; it is reused to display each frame.
		mrpt::opengl::COpenGLScenePtr scene = win->get3DSceneAndLock();
		gl_view = scene->getViewport("main");
		win->unlockAccess3DScene();
	}

	// Aux data for drawing the recent track of features:
	static const size_t FEATS_TRACK_LEN = 10;  // Max trail length per feature, in frames
	std::map<TFeatureID,std::list<TPixelCoord> >  feat_tracks;

	// infinite loop, until we close the win:
	while( win->isOpen() )
	{
		CObservationPtr obs;
		try
		{
			obs= cam->getNextFrame();
		}
		catch (CExceptionEOF &)
		{	// End of a rawlog file.
			break;
		}

		if (!obs)
		{
			cerr << "*Warning* getNextFrame() returned NULL!\n";
			mrpt::system::sleep(50);
			continue;
		}

		CImage theImg;  // The grabbed image:

		if (IS_CLASS(obs,CObservationImage))
		{
			CObservationImagePtr o = CObservationImagePtr(obs);
			theImg.copyFastFrom(o->image);
		}
		else if (IS_CLASS(obs,CObservationStereoImages))
		{
			CObservationStereoImagesPtr o = CObservationStereoImagesPtr(obs);
			theImg.copyFastFrom(o->imageLeft);
		}
		else if (IS_CLASS(obs,CObservation3DRangeScan))
		{
			CObservation3DRangeScanPtr o = CObservation3DRangeScanPtr(obs);
			if (o->hasIntensityImage)
				theImg.copyFastFrom(o->intensityImage);
		}
		else
		{
			continue; // Silently ignore non-image observations.
		}

		// Make sure the image is loaded (for the case it came from a rawlog file)
		if (theImg.isExternallyStored())
			theImg.loadFromFile( theImg.getExternalStorageFileAbsolutePath());

		// Take the resolution upon first valid frame.
		if (!hasResolution)
		{
			hasResolution = true;
			// cameraParams.scaleToResolution()...
			cameraParams.ncols = theImg.getWidth();
			cameraParams.nrows = theImg.getHeight();
		}

		// Do tracking:
		if (step_num>1)  // we need "previous_image" to be valid.
		{
			// This single call makes: detection, tracking, recalculation of KLT_response, etc.
			tracker->trackFeatures(previous_image, theImg, trackedFeats);
		}

		// Save the image for the next step:
		previous_image = theImg;

		// Save history of feature observations:
		tracker->getProfiler().enter("Save history");

		for (size_t i=0;i<trackedFeats.size();++i)
		{
			TSimpleFeature &f = trackedFeats[i];

			const TPixelCoordf pxRaw(f.pt.x,f.pt.y);
			TPixelCoordf  pxUndist;
			// Undistortion is disabled here; raw pixel coords are stored as-is.
			//mrpt::vision::pinhole::undistort_point(pxRaw,pxUndist, cameraParams);
			pxUndist = pxRaw;

			feat_track_history.push_back( TFeatureObservation(f.ID,curCamPoseId, pxUndist ) );
		}
		curCamPoseId++;

		tracker->getProfiler().leave("Save history");

		// now that we're done with the image, we can directly write onto it
		//  for the display
		// ----------------------------------------------------------------
		if (DO_HIST_EQUALIZE_IN_GRAYSCALE && !theImg.isColor())
			theImg.equalizeHistInPlace();
		// Convert to color so we can draw color marks, etc.
		theImg.colorImageInPlace();

		double extra_tim_to_wait=0;

		{	// FPS:
			static CTicTac tictac;  // static: measures time between consecutive iterations
			const double T = tictac.Tac();
			tictac.Tic();
			const double fps = 1.0/(std::max(1e-5,T));

			//theImg.filledRectangle(1,1,175,25,TColor(0,0,0));

			const int current_adapt_thres = tracker->getDetectorAdaptiveThreshold();

			theImg.selectTextFont("6x13B");
			theImg.textOut(3,3,format("FPS: %.03f Hz", fps ),TColor(200,200,0) );
			theImg.textOut(3,22,format("# feats: %u - Adaptive threshold: %i", (unsigned int)trackedFeats.size(), current_adapt_thres ),TColor(200,200,0) );
			theImg.textOut(3,41, format("# raw feats: %u - Removed: %u", (unsigned int)tracker->last_execution_extra_info.raw_FAST_feats_detected, (unsigned int)tracker->last_execution_extra_info.num_deleted_feats ), TColor(200,200,0) );

			extra_tim_to_wait = 1.0/MAX_FPS - 1.0/fps;
		}

		// Draw feature tracks
		if (SHOW_FEAT_TRACKS)
		{
			// Update new feature coords:
			tracker->getProfiler().enter("drawFeatureTracks");

			std::set<TFeatureID> observed_IDs;  // Features seen this frame (for purging below)

			for (size_t i=0;i<trackedFeats.size();++i)
			{
				const TSimpleFeature &ft = trackedFeats[i];
				std::list<TPixelCoord> & seq = feat_tracks[ft.ID];

				observed_IDs.insert(ft.ID);

				// Bounded-length trail: drop the oldest point when full.
				if (seq.size()>=FEATS_TRACK_LEN) seq.erase(seq.begin());
				seq.push_back(ft.pt);

				// Draw:
				if (seq.size()>1)
				{
					const std::list<TPixelCoord>::const_iterator it_end = seq.end();

					std::list<TPixelCoord>::const_iterator it      = seq.begin();
					std::list<TPixelCoord>::const_iterator it_prev = it++;

					for (;it!=it_end;++it)
					{
						theImg.line(it_prev->x,it_prev->y,it->x,it->y, TColor(190,190,190) );
						it_prev = it;
					}
				}
			}

			tracker->getProfiler().leave("drawFeatureTracks");

			// Purge old data:
			// (erase map entries for features no longer tracked; the iterator
			//  is advanced before erasing so it is never invalidated)
			for (std::map<TFeatureID,std::list<TPixelCoord> >::iterator it=feat_tracks.begin();it!=feat_tracks.end(); )
			{
				if (observed_IDs.find(it->first)==observed_IDs.end())
				{
					std::map<TFeatureID,std::list<TPixelCoord> >::iterator next_it = it;
					next_it++;
					feat_tracks.erase(it);
					it = next_it;
				}
				else ++it;
			}
		}

		// Draw Tracked feats:
		{
			theImg.selectTextFont("5x7");
			tracker->getProfiler().enter("drawFeatures");
			theImg.drawFeatures(trackedFeats, TColor(0,0,255), SHOW_FEAT_IDS, SHOW_RESPONSES);
			tracker->getProfiler().leave("drawFeatures");
		}

		// Update window:
		win->get3DSceneAndLock();
		gl_view->setImageView(theImg);
		win->unlockAccess3DScene();
		win->repaint();

		// Save debug output video:
		// ----------------------------------
		if (DO_SAVE_VIDEO)
		{
			static bool first = true;  // Lazily open the video file on the first frame
			if (first)
			{
				first=false;
				if (vidWritter.open( VIDEO_OUTPUT_FILE, SAVE_VIDEO_FPS /* fps */, theImg.getSize(), SAVE_VIDEO_CODEC, true /* force color video */ ) )
				{
					cout << "[track-video] Saving tracking video to: " << VIDEO_OUTPUT_FILE << endl;
				}
				else cerr << "ERROR: Trying to create output video: " << VIDEO_OUTPUT_FILE << endl;
			}

			vidWritter << theImg;
		}

		// Throttle to MAX_FPS at most:
		if (extra_tim_to_wait>0)
			mrpt::system::sleep(1000*extra_tim_to_wait);

		step_num++;
	} // end infinite loop

	// Save tracked feats:
	if (save_tracked_history)
	{
		cout << "Saving tracked features to: tracked_feats.txt...";
		cout.flush();
		feat_track_history.saveToTextFile("./tracked_feats.txt");
		cout << "Done!\n";
		cout.flush();

#if 0
		// SBA:
		cout << "Saving cams.txt & pts.txt files in SBA library format...";
		cout.flush();

		feat_track_history.removeFewObservedFeatures(3);
		feat_track_history.decimateCameraFrames(20);
		feat_track_history.compressIDs();

		TLandmarkLocationsVec  locs;
		TFramePosesVec         cams;
		feat_track_history.saveAsSBAFiles(locs,"pts.txt", cams, "cams.txt");

		cout << "Done!\n";
		cout.flush();
#endif
	}

	return 0; // End ok.
}