// Searches for new graph constraints (loop closures and the odometry edge to
// the tracking parent) for a newly finished keyframe, and inserts the keyframe
// plus all accepted constraints into the keyframe graph.
//
//  newKeyFrame       keyframe to find constraints for.
//  forceParent       if true, skip the minimum-motion early-out and always try
//                    a constraint to the tracking parent (adding a hard-coded
//                    odometry edge if reciprocal tracking fails).
//  useFABMAP         forwarded to the candidate search (appearance-based
//                    loop-closure proposal).
//  closeCandidatesTH forwarded to the candidate search as its threshold for
//                    "close" candidates.
//
// Returns the number of constraints inserted into the graph.
int SlamSystem::findConstraintsForNewKeyFrames(Frame* newKeyFrame, bool forceParent, bool useFABMAP, float closeCandidatesTH)
{
	// A root keyframe (no tracking parent) cannot be constrained against
	// anything yet: just publish it into the graph, signal the optimizer,
	// and return without adding constraints.
	if(!newKeyFrame->hasTrackingParent())
	{
		newConstraintMutex.lock();
		keyFrameGraph->addKeyFrame(newKeyFrame);
		newConstraintAdded = true;
		newConstraintCreatedSignal.notify_all();
		newConstraintMutex.unlock();
		return 0;
	}

	// Skip the (expensive) search if the keyframe has barely moved since the
	// last search (sim(3) log-norm below 0.01), unless the caller forces it.
	if(!forceParent && (newKeyFrame->lastConstraintTrackedCamToWorld * newKeyFrame->getScaledCamToWorld().inverse()).log().norm() < 0.01)
		return 0;


	// Remember the pose at which this search was performed, for the check above.
	newKeyFrame->lastConstraintTrackedCamToWorld = newKeyFrame->getScaledCamToWorld();

	// =============== get all potential candidates and their initial relative pose. =================
	std::vector<KFConstraintStruct*, Eigen::aligned_allocator<KFConstraintStruct*> > constraints;
	Frame* fabMapResult = 0;	// set by findCandidates if FabMap proposes a match
	std::unordered_set<Frame*, std::hash<Frame*>, std::equal_to<Frame*>,
		Eigen::aligned_allocator< Frame* > > candidates = trackableKeyFrameSearch->findCandidates(newKeyFrame, fabMapResult, useFABMAP, closeCandidatesTH);
	std::map< Frame*, Sim3, std::less<Frame*>, Eigen::aligned_allocator<std::pair<Frame*, Sim3> > > candidateToFrame_initialEstimateMap;


	// erase the ones that are already neighbours.
	for(std::unordered_set<Frame*>::iterator c = candidates.begin(); c != candidates.end();)
	{
		if(newKeyFrame->neighbors.find(*c) != newKeyFrame->neighbors.end())
		{
			if(enablePrintDebugInfo && printConstraintSearchInfo)
				printf("SKIPPING %d on %d cause it already exists as constraint.\n", (*c)->id(), newKeyFrame->id());
			c = candidates.erase(c);
		}
		else
			++c;
	}

	// Under the shared pose lock: snapshot an initial relative-pose estimate
	// (candidate -> newKeyFrame, in sim(3)) for every remaining candidate,
	// and compute graph distances from the tracking parent.
	poseConsistencyMutex.lock_shared();
	for (Frame* candidate : candidates)
	{
		Sim3 candidateToFrame_initialEstimate = newKeyFrame->getScaledCamToWorld().inverse() * candidate->getScaledCamToWorld();
		candidateToFrame_initialEstimateMap[candidate] = candidateToFrame_initialEstimate;
	}

	std::unordered_map<Frame*, int> distancesToNewKeyFrame;
	if(newKeyFrame->hasTrackingParent())
		keyFrameGraph->calculateGraphDistancesToFrame(newKeyFrame->getTrackingParent(), &distancesToNewKeyFrame);
	poseConsistencyMutex.unlock_shared();





	// =============== distinguish between close and "far" candidates in Graph =================
	// Do a first check on trackability of close candidates.
	std::unordered_set<Frame*, std::hash<Frame*>, std::equal_to<Frame*>,
		Eigen::aligned_allocator< Frame* > > closeCandidates;
	std::vector<Frame*, Eigen::aligned_allocator<Frame*> > farCandidates;
	Frame* parent = newKeyFrame->hasTrackingParent() ? newKeyFrame->getTrackingParent() : 0;

	int closeFailed = 0;
	int closeInconsistent = 0;

	// Small rotational disturbance (0.05 rad about x) applied to the initial
	// guesses below — presumably so the two reciprocal tracking runs do not
	// start from exactly inverse poses and trivially agree; TODO confirm.
	SO3 disturbance = SO3::exp(Sophus::Vector3d(0.05,0,0));

	// Close-candidate filter: a candidate survives only if SE3 tracking
	// succeeds in BOTH directions (candidate->frame and frame->candidate)
	// and the composed rotation error stays below 0.09 rad.
	for (Frame* candidate : candidates)
	{
		if (candidate->id() == newKeyFrame->id())
			continue;
		if(!candidate->pose->isInGraph)
			continue;
		if(newKeyFrame->hasTrackingParent() && candidate == newKeyFrame->getTrackingParent())
			continue;
		// Skip keyframes created during the initialization phase.
		if(candidate->idxInKeyframes < INITIALIZATION_PHASE_COUNT)
			continue;

		SE3 c2f_init = se3FromSim3(candidateToFrame_initialEstimateMap[candidate].inverse()).inverse();
		c2f_init.so3() = c2f_init.so3() * disturbance;
		SE3 c2f = constraintSE3Tracker->trackFrameOnPermaref(candidate, newKeyFrame, c2f_init);
		if(!constraintSE3Tracker->trackingWasGood) {closeFailed++; continue;}


		SE3 f2c_init = se3FromSim3(candidateToFrame_initialEstimateMap[candidate]).inverse();
		f2c_init.so3() = disturbance * f2c_init.so3();
		SE3 f2c = constraintSE3Tracker->trackFrameOnPermaref(newKeyFrame, candidate, f2c_init);
		if(!constraintSE3Tracker->trackingWasGood) {closeFailed++; continue;}

		// Reciprocal consistency: f2c * c2f should be (near) identity.
		if((f2c.so3() * c2f.so3()).log().norm() >= 0.09) {closeInconsistent++; continue;}

		closeCandidates.insert(candidate);
	}



	// NOTE(review): farFailed / farInconsistent are never incremented in this
	// function, so the debug printout below always reports 0 for them.
	int farFailed = 0;
	int farInconsistent = 0;
	// Far-candidate selection: FabMap matches are always kept; everything else
	// must be at least 4 edges away from the tracking parent in the graph.
	for (Frame* candidate : candidates)
	{
		if (candidate->id() == newKeyFrame->id())
			continue;
		if(!candidate->pose->isInGraph)
			continue;
		if(newKeyFrame->hasTrackingParent() && candidate == newKeyFrame->getTrackingParent())
			continue;
		if(candidate->idxInKeyframes < INITIALIZATION_PHASE_COUNT)
			continue;

		if(candidate == fabMapResult)
		{
			farCandidates.push_back(candidate);
			continue;
		}

		// NOTE(review): .at() throws std::out_of_range if the candidate is not
		// in the distance map — confirm findCandidates only returns frames
		// reachable from the tracking parent.
		if(distancesToNewKeyFrame.at(candidate) < 4)
			continue;

		farCandidates.push_back(candidate);
	}




	int closeAll = closeCandidates.size();
	int farAll = farCandidates.size();

	// erase the ones that we tried already before (close):
	// a close candidate is dropped if a previous failed attempt was made from
	// an initial estimate within 0.1 (sim(3) log-norm) of the current one.
	for(std::unordered_set<Frame*>::iterator c = closeCandidates.begin(); c != closeCandidates.end();)
	{
		if(newKeyFrame->trackingFailed.find(*c) == newKeyFrame->trackingFailed.end())
		{
			++c;
			continue;
		}
		auto range = newKeyFrame->trackingFailed.equal_range(*c);

		bool skip = false;
		Sim3 f2c = candidateToFrame_initialEstimateMap[*c].inverse();
		for (auto it = range.first; it != range.second; ++it)
		{
			if((f2c * it->second).log().norm() < 0.1)
			{
				skip=true;
				break;
			}
		}

		if(skip)
		{
			if(enablePrintDebugInfo && printConstraintSearchInfo)
				printf("SKIPPING %d on %d (NEAR), cause we already have tried it.\n", (*c)->id(), newKeyFrame->id());
			c = closeCandidates.erase(c);
		}
		else
			++c;
	}

	// erase the ones that are already neighbours (far):
	// far candidates start from the identity estimate, so here a previous
	// failed attempt counts if its recorded pose is within 0.2 of identity.
	// Removal is swap-with-back + pop (order of farCandidates not preserved).
	for(unsigned int i=0;i<farCandidates.size();i++)
	{
		if(newKeyFrame->trackingFailed.find(farCandidates[i]) == newKeyFrame->trackingFailed.end())
			continue;

		auto range = newKeyFrame->trackingFailed.equal_range(farCandidates[i]);

		bool skip = false;
		for (auto it = range.first; it != range.second; ++it)
		{
			if((it->second).log().norm() < 0.2)
			{
				skip=true;
				break;
			}
		}

		if(skip)
		{
			if(enablePrintDebugInfo && printConstraintSearchInfo)
				printf("SKIPPING %d on %d (FAR), cause we already have tried it.\n", farCandidates[i]->id(), newKeyFrame->id());
			farCandidates[i] = farCandidates.back();
			farCandidates.pop_back();
			i--;
		}
	}



	if (enablePrintDebugInfo && printConstraintSearchInfo)
		printf("Final Loop-Closure Candidates: %d / %d close (%d failed, %d inconsistent) + %d / %d far (%d failed, %d inconsistent) = %d\n",
				(int)closeCandidates.size(),closeAll, closeFailed, closeInconsistent,
				(int)farCandidates.size(), farAll, farFailed, farInconsistent,
				(int)closeCandidates.size() + (int)farCandidates.size());



	// =============== limit number of close candidates ===============
	// while too many, remove the one with the highest connectivity
	// (most neighbours inside the candidate set itself).
	while((int)closeCandidates.size() > maxLoopClosureCandidates)
	{
		Frame* worst = 0;
		int worstNeighbours = 0;
		for(Frame* f : closeCandidates)
		{
			int neightboursInCandidates = 0;
			for(Frame* n : f->neighbors)
				if(closeCandidates.find(n) != closeCandidates.end())
					neightboursInCandidates++;

			if(neightboursInCandidates > worstNeighbours || worst == 0)
			{
				worst = f;
				worstNeighbours = neightboursInCandidates;
			}
		}

		closeCandidates.erase(worst);
	}


	// =============== limit number of far candidates ===============
	// delete randomly, but never delete the FabMap match.
	int maxNumFarCandidates = (maxLoopClosureCandidates +1) / 2;
	if(maxNumFarCandidates < 5) maxNumFarCandidates = 5;
	while((int)farCandidates.size() > maxNumFarCandidates)
	{
		int toDelete = rand() % farCandidates.size();
		if(farCandidates[toDelete] != fabMapResult)
		{
			farCandidates[toDelete] = farCandidates.back();
			farCandidates.pop_back();
		}
	}







	// =============== TRACK! ===============

	// make tracking reference for newKeyFrame.
	newKFTrackingReference->importFrame(newKeyFrame);


	// Close candidates: full reciprocal constraint test, seeded from the
	// initial relative-pose estimate. testConstraint returns two edges
	// (one per direction) via e1/e2, or leaves them null on failure.
	for (Frame* candidate : closeCandidates)
	{
		KFConstraintStruct* e1=0;
		KFConstraintStruct* e2=0;

		testConstraint(
				candidate, e1, e2,
				candidateToFrame_initialEstimateMap[candidate],
				loopclosureStrictness);

		if(enablePrintDebugInfo && printConstraintSearchInfo)
			printf(" CLOSE (%d)\n", distancesToNewKeyFrame.at(candidate));

		if(e1 != 0)
		{
			constraints.push_back(e1);
			constraints.push_back(e2);

			// delete from far candidates if it's in there.
			for(unsigned int k=0;k<farCandidates.size();k++)
			{
				if(farCandidates[k] == candidate)
				{
					if(enablePrintDebugInfo && printConstraintSearchInfo)
						printf(" DELETED %d from far, as close was successful!\n", candidate->id());

					farCandidates[k] = farCandidates.back();
					farCandidates.pop_back();
				}
			}
		}
	}


	// Far candidates: seeded from the identity sim(3) estimate.
	for (Frame* candidate : farCandidates)
	{
		KFConstraintStruct* e1=0;
		KFConstraintStruct* e2=0;

		testConstraint(
				candidate, e1, e2,
				Sim3(),
				loopclosureStrictness);

		if(enablePrintDebugInfo && printConstraintSearchInfo)
			printf(" FAR (%d)\n", distancesToNewKeyFrame.at(candidate));

		if(e1 != 0)
		{
			constraints.push_back(e1);
			constraints.push_back(e2);
		}
	}



	// Parent edge: tried with a much looser strictness (100). If even that
	// fails, fall back to a hand-built odometry edge so the graph stays
	// connected.
	if(parent != 0 && forceParent)
	{
		KFConstraintStruct* e1=0;
		KFConstraintStruct* e2=0;
		testConstraint(
				parent, e1, e2,
				candidateToFrame_initialEstimateMap[parent],
				100);
		if(enablePrintDebugInfo && printConstraintSearchInfo)
			printf(" PARENT (0)\n");

		if(e1 != 0)
		{
			constraints.push_back(e1);
			constraints.push_back(e2);
		}
		else
		{
			float downweightFac = 5;
			const float kernelDelta = 5 * sqrt(6000*loopclosureStrictness) / downweightFac;
			printf("warning: reciprocal tracking on new frame failed badly, added odometry edge (Hacky).\n");

			poseConsistencyMutex.lock_shared();
			constraints.push_back(new KFConstraintStruct());
			constraints.back()->firstFrame = newKeyFrame;
			constraints.back()->secondFrame = newKeyFrame->getTrackingParent();
			constraints.back()->secondToFirst = constraints.back()->firstFrame->getScaledCamToWorld().inverse() * constraints.back()->secondFrame->getScaledCamToWorld();
			// Hard-coded 7x7 information matrix for the fallback edge,
			// heavily down-weighted (divided by downweightFac^2). The values
			// appear empirically determined — origin not visible here.
			constraints.back()->information  <<
					0.8098,-0.1507,-0.0557, 0.1211, 0.7657, 0.0120, 0,
					-0.1507, 2.1724,-0.1103,-1.9279,-0.1182, 0.1943, 0,
					-0.0557,-0.1103, 0.2643,-0.0021,-0.0657,-0.0028, 0.0304,
					 0.1211,-1.9279,-0.0021, 2.3110, 0.1039,-0.0934, 0.0005,
					 0.7657,-0.1182,-0.0657, 0.1039, 1.0545, 0.0743,-0.0028,
					 0.0120, 0.1943,-0.0028,-0.0934, 0.0743, 0.4511, 0,
					0,0, 0.0304, 0.0005,-0.0028, 0, 0.0228;
			constraints.back()->information *= (1e9/(downweightFac*downweightFac));

			constraints.back()->robustKernel = new g2o::RobustKernelHuber();
			constraints.back()->robustKernel->setDelta(kernelDelta);

			constraints.back()->meanResidual = 10;
			constraints.back()->meanResidualD = 10;
			constraints.back()->meanResidualP = 10;
			constraints.back()->usage = 0;

			poseConsistencyMutex.unlock_shared();
		}
	}


	// Publish keyframe + all new constraints under the constraint mutex and
	// wake up the optimization thread.
	newConstraintMutex.lock();

	keyFrameGraph->addKeyFrame(newKeyFrame);
	for(unsigned int i=0;i<constraints.size();i++)
		keyFrameGraph->insertConstraint(constraints[i]);


	newConstraintAdded = true;
	newConstraintCreatedSignal.notify_all();
	newConstraintMutex.unlock();

	// Release the (large) tracking-reference buffers.
	newKFTrackingReference->invalidate();
	candidateTrackingReference->invalidate();



	return constraints.size();
}
// ===== Example 2 =====
// Camera-calibration tool: detects a dotted calibration grid in one or more
// video streams, runs a least-squares calibration (Calibu), visualizes
// progress in a Pangolin GUI, and writes the resulting camera rig to XML.
int main(int argc, char** argv)
{
    ////////////////////////////////////////////////////////////////////
    // Create command line options. Check if we should print usage.

    GetPot cl(argc,argv);

    if(cl.search(3, "-help", "-h", "?") || argc < 2) {
        std::cout << sUriInfo << std::endl;
        return -1;
    }

    ////////////////////////////////////////////////////////////////////
    // Default configuration values

    // Default grid printed on US Letter.
    int grid_preset = GridPresetGWUSmall;
    // Initialize with the GWU-small values so these are never read
    // uninitialized, even if an unknown -grid-preset value is supplied
    // (the switch below would otherwise leave them indeterminate).
    double grid_spacing = 0.254 / 18;   // meters
    int grid_rows = 10;                 // grid dots
    int grid_cols = 19;                 // grid dots
    uint32_t grid_seed = 71;
    double grid_large_rad = 0.00423;    // m
    double grid_small_rad = 0.00283;    // m
    std::string save_grid;

    // Use no input cameras by default
    std::vector<calibu::CameraAndPose > input_cameras;

    // Fix cameras intrinsic parameters during optimisation, changing
    // only their relative poses.
    bool fix_intrinsics = false;

    // Require user to start playing the video
    bool start_paused = false;

    // Output file for camera rig
    std::string output_filename = "cameras.xml";

    ////////////////////////////////////////////////////////////////////
    // Parse command line

    grid_preset = cl.follow(grid_preset, "-grid-preset");
    switch(grid_preset)
    {
      case GridPresetGWUSmall:
        grid_spacing = 0.254 / 18;  // meters
        grid_large_rad = 0.00423; // m
        grid_small_rad = 0.00283; // m
        grid_rows = 10; // grid dots
        grid_cols = 19; // grid dots
        grid_seed = 71;
        break;
      case GridPresetGoogleLarge:
        grid_spacing = 0.03156;  // meters
        grid_large_rad = 0.00889; // m
        grid_small_rad = 0.00635; // m
        grid_rows = 36; // grid dots
        grid_cols = 25; // grid dots
        grid_seed = 71;
        break;
      default:
        // Unknown preset: keep the GWU-small defaults set above; individual
        // values can still be overridden via the flags parsed below.
        break;
    }

    // Individual overrides take precedence over the preset.
    grid_spacing = cl.follow(grid_spacing,"-grid-spacing");
    grid_seed = cl.follow((int)grid_seed,"-grid-seed");
    grid_cols = cl.follow((int)grid_cols,"-grid-cols");
    grid_rows = cl.follow((int)grid_rows,"-grid-rows");
    grid_large_rad = cl.follow(grid_large_rad,"-grid-large-rad");
    grid_small_rad = cl.follow(grid_small_rad,"-grid-small-rad");
    fix_intrinsics = cl.search(2, "-fix-intrinsics", "-f");
    start_paused = cl.search(2, "-paused", "-p");
    output_filename = cl.follow(output_filename.c_str(), 2, "-output", "-o");
    save_grid = cl.follow("", "-save-grid");
    const Eigen::Vector2i grid_size(grid_cols, grid_rows);

    ////////////////////////////////////////////////////////////////////
    // Setup Grid pattern

    ConicFinder conic_finder;
    conic_finder.Params().conic_min_area = 4.0;
    conic_finder.Params().conic_min_density = 0.6;
    conic_finder.Params().conic_min_aspect = 0.2;

    std::unique_ptr<TargetGridDot> target;
    if(grid_preset == GridPresetGoogleLarge)
        target.reset(new TargetGridDot(grid_spacing, GoogleLargeGrid()));
    else if(grid_preset == GridPresetGWUSmall)
        target.reset(new TargetGridDot(grid_spacing, GWUSmallGrid()));
    else
        target.reset(new TargetGridDot(grid_spacing, grid_size, grid_seed));

    // Save grid and exit if required
    if(!save_grid.empty()) {
        saveGrid(*target, save_grid, grid_large_rad, grid_small_rad);
        return 0;
    }

    ////////////////////////////////////////////////////////////////////
    // Setup Video Source

    // Last argument or parameter - Video URI
    std::string cam_param = cl.follow("", 2, "-video_url", "-cam");
    std::string uri = cam_param.empty() ? argv[argc-1] : cam_param;

    hal::Camera  cam;
    try {
      cam = hal::Camera( uri );
    } catch (...) {
        if(!cam_param.empty())
          std::cerr << "Could not create camera from URI: " << uri
                    << std::endl;
        return -1;
    }
    if(cam.Empty()) return -1;

    // For the moment, assume all N cameras have same resolution
    const size_t N = cam.NumChannels();
    const size_t w = cam.Width();
    const size_t h = cam.Height();

    // Grab one frame up-front to learn the per-channel serial numbers.
    std::shared_ptr<pb::ImageArray> imageArray = pb::ImageArray::Create();
    cam.Capture(*imageArray);

    std::vector<cv::Mat> vImages;
    std::vector<uint64_t> vSerialNos;
    vImages.reserve(imageArray->Size());
    vSerialNos.reserve(imageArray->Size());
    for( int i = 0; i < imageArray->Size(); ++i ){
        pb::Image& image = *(*imageArray)[i];
        cv::Mat im = image.Mat();
        if( im.type() != CV_8UC1 ){
           std::cerr << "Input channels must be GRAY8 format. Use "
               "Convert:[fmt=MONO8]// video scheme." << std::endl;
        } else {
            vImages.emplace_back(im);
            vSerialNos.emplace_back(image.SerialNumber());
        }
    }

    // Load camera hints from command line
    cl.disable_loop();
    cl.reset_cursor();
    for(std::string filename = cl.follow("",2,"-cameras","-c");
        !filename.empty(); filename = cl.follow("",2,"-cameras","-c") ) {
        const size_t i = input_cameras.size();
        if(i < N) {
            if(filename == "fov") {
                CameraModelT<Fov> starting_cam(w, h);
                starting_cam.Params()  << 300, 300, w/2.0, h/2.0, 0.2;
                input_cameras.push_back( CameraAndPose(CameraModel(starting_cam), Sophus::SE3d() ) );
            }else if(filename == "poly2") {
                CameraModelT<Poly2> starting_cam(w, h);
                starting_cam.Params()  << 300, 300, w/2.0, h/2.0, 0.0, 0.0;
                input_cameras.push_back( CameraAndPose(CameraModel(starting_cam), Sophus::SE3d() ) );
            }else if(filename == "poly3" || filename =="poly") {
                CameraModelT<Poly3> starting_cam(w, h);
                starting_cam.Params()  << 300, 300, w/2.0, h/2.0, 0.0, 0.0, 0.0;
                input_cameras.push_back( CameraAndPose(CameraModel(starting_cam), Sophus::SE3d() ) );
            }else if(filename == "kb4") {
                CameraModelT<ProjectionKannalaBrandt> starting_cam(w, h);
                starting_cam.Params()  << 300, 300, w/2.0, h/2.0, 0.0, 0.0, 0.0, 0.0;
                input_cameras.push_back( CameraAndPose(CameraModel(starting_cam), Sophus::SE3d() ) );
            }else{
                const CameraRig rig = ReadXmlRig(filename);
                for(const CameraModelAndTransform& cop : rig.cameras ) {
                    input_cameras.push_back( CameraAndPose(cop.camera, cop.T_wc.inverse()) );
                }
            }
        }else{
            throw std::runtime_error("Too many camera files provided.");
        }
    }


    if(input_cameras.size() > 0 && input_cameras.size() != N) {
        std::cerr << "Number of cameras specified in files does not match video source" << std::endl;
        return -1;
    }

    ////////////////////////////////////////////////////////////////////
    // Setup image processing pipeline

    ImageProcessing image_processing(w,h);
    image_processing.Params().black_on_white = true;
    image_processing.Params().at_threshold = 0.9;
    image_processing.Params().at_window_ratio = 30.0;

    CVarUtils::AttachCVar("proc.adaptive.threshold", &image_processing.Params().at_threshold);
    CVarUtils::AttachCVar("proc.adaptive.window_ratio", &image_processing.Params().at_window_ratio);
    CVarUtils::AttachCVar("proc.black_on_white", &image_processing.Params().black_on_white);

    ////////////////////////////////////////////////////////////////////
    // Initialize Calibration object and tracking params

    Calibrator calibrator;
    calibrator.FixCameraIntrinsics(fix_intrinsics);

    // Per-camera state. std::vector instead of C99-style VLAs (which are not
    // standard C++), and value-initialized so tracking_good is never read
    // indeterminate (it is consulted in the draw loop before a target may
    // have been found, e.g. when a capture yields the wrong channel count).
    std::vector<int> calib_cams(N, -1);
    std::vector<bool> tracking_good(N, false);
    std::vector<Sophus::SE3d> T_hw;
    T_hw.resize(N);

    for(size_t i=0; i<N; ++i) {
        const int w_i = cam.Width();
        const int h_i = cam.Height();
        if(i < input_cameras.size() ) {
            input_cameras[i].camera.SetSerialNumber(vSerialNos[i]);
            calib_cams[i] = calibrator.AddCamera(
                        input_cameras[i].camera, input_cameras[i].T_ck
                        );
        }else{
            // Generic starting set of parameters.
            CameraModelT<Fov> starting_cam(w_i, h_i);
            starting_cam.Params()  << 300, 300, w_i/2.0, h_i/2.0, 0.2;
            starting_cam.SetSerialNumber(vSerialNos[i]);

            calib_cams[i] = calibrator.AddCamera(
                        CameraModel(starting_cam),
                        Sophus::SE3d()
                        );
        }
    }

    ////////////////////////////////////////////////////////////////////
    // Setup GUI

    const int PANEL_WIDTH = 150;
    pangolin::CreateWindowAndBind("Main",(N+1)*w/2.0+PANEL_WIDTH,h/2.0);

    // Make things look prettier...
    glEnable(GL_LINE_SMOOTH);
    glHint( GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST );
    glHint( GL_LINE_SMOOTH_HINT, GL_NICEST );
    glEnable (GL_BLEND);
    glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glDepthFunc( GL_LEQUAL );
    glEnable( GL_DEPTH_TEST );
    glLineWidth(1.7);

    // Pangolin 3D Render state
    pangolin::OpenGlRenderState stacks;
    stacks.SetProjectionMatrix(pangolin::ProjectionMatrixRDF_TopLeft(640,480,420,420,320,240,0.01,1E6));
    stacks.SetModelViewMatrix(pangolin::ModelViewLookAtRDF(0,0,-0.5, 0,0,0, 0, -1, 0) );

    // Create viewport for video with fixed aspect
    pangolin::CreatePanel("ui").SetBounds(1.0,0.0,0,pangolin::Attach::Pix(PANEL_WIDTH));

    pangolin::View& container = pangolin::CreateDisplay()
            .SetBounds(1.0,0.0, pangolin::Attach::Pix(PANEL_WIDTH),1.0)
            .SetLayout(pangolin::LayoutEqual);

    // Add view for each camera stream
    for(size_t c=0; c < N; ++c) {
        container.AddDisplay( pangolin::CreateDisplay().SetAspect(w/(float)h) );
    }

    // Add 3d view, attach input handler
    pangolin::Handler3D handler(stacks);
    pangolin::View& v3D = pangolin::CreateDisplay().SetAspect((float)w/h).SetHandler(&handler);
    container.AddDisplay(v3D);

    // OpenGl Texture for video frame
    pangolin::GlTexture tex(w,h,GL_LUMINANCE8);

    ////////////////////////////////////////////////////////////////////
    // Display Variables

    pangolin::Var<bool> run("ui.Play video", !start_paused, true);

    pangolin::Var<double> disp_mse("ui.MSE");
    pangolin::Var<int> disp_frame("ui.frame");

    pangolin::Var<bool> add("ui.Add Frames", true, true);

    pangolin::Var<bool> disp_thresh("ui.Display Thresh",false);
    pangolin::Var<bool> disp_lines("ui.Display Lines",true);
    pangolin::Var<bool> disp_cross("ui.Display crosses",true);
    pangolin::Var<bool> disp_bbox("ui.Display bbox",true);

    ////////////////////////////////////////////////////////////////////
    // Key shortcuts

    // 1,2,3,... keys hide and show viewports
    for(size_t i=0; i<container.NumChildren(); ++i) {
        pangolin::RegisterKeyPressCallback('1'+i, [&container,i](){container[i].ToggleShow();} );
    }

    pangolin::RegisterKeyPressCallback('[', [&](){calibrator.Start();} );
    pangolin::RegisterKeyPressCallback(']', [&](){calibrator.Stop();} );

    bool step = false;
    pangolin::RegisterKeyPressCallback(pangolin::PANGO_SPECIAL+ pangolin::PANGO_KEY_RIGHT, [&](){step = true;} );
    pangolin::RegisterKeyPressCallback(' ', [&](){run = !run;} );

    pangolin::RegisterKeyPressCallback('r', [&](){calibrator.PrintResults();} );
    pangolin::RegisterKeyPressCallback('q', &pangolin::Quit);

    ////////////////////////////////////////////////////////////////////
    // Main event loop

    for(int frame=0; !pangolin::ShouldQuit();){
        const bool go = (frame==0) || run || pangolin::Pushed(step);

        int calib_frame = -1;

        if( go ) {
            if( cam.Capture( vImages ) ){
                if(add) {
                    calib_frame = calibrator.AddFrame(Sophus::SE3d(Sophus::SO3d(), Eigen::Vector3d(0,0,1000)) );
                }
                ++frame;
            }else{
                run = false;
            }
        }

        glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);

        for(size_t iI = 0; iI < N; ++iI)
        {
            // Skip processing entirely if the capture did not deliver one
            // image per channel.
            if (vImages.size() != N) break;

            image_processing.Process( vImages[iI].data, vImages[iI].cols, vImages[iI].rows, vImages[iI].cols );
            conic_finder.Find( image_processing );

            const std::vector<Conic, Eigen::aligned_allocator<Conic> >& conics =
                conic_finder.Conics();
            std::vector<int> ellipse_target_map;

            tracking_good[iI] = target->FindTarget(
                        image_processing, conic_finder.Conics(),
                        ellipse_target_map
                        );

            if(tracking_good[iI]) {
                // Generate map and point structures
              std::vector<Eigen::Vector2d,
                          Eigen::aligned_allocator<Eigen::Vector2d> > ellipses;
                for( size_t i=0; i < conics.size(); ++i ) {
                    ellipses.push_back(conics[i].center);
                }

                // find camera pose given intrinsics
                PosePnPRansac(
                    calibrator.GetCamera(iI).camera, ellipses, target->Circles3D(),
                    ellipse_target_map,
                    0, 0, &T_hw[iI]
                );

                if(calib_frame >= 0) {
                    if(iI==0 || !tracking_good[0]) {
                        // Initialize pose of frame for least squares optimisation
                        calibrator.GetFrame(calib_frame) = T_hw[iI];
                    }

                    for(size_t p=0; p < ellipses.size(); ++p) {
                        const Eigen::Vector2d pc = ellipses[p];
                        const Eigen::Vector2i pg = target->Map()[p].pg;

                        if( 0<= pg(0) && pg(0) < grid_size(0) &&  0<= pg(1) && pg(1) < grid_size(1) )
                        {
                            const Eigen::Vector3d pg3d = grid_spacing * Eigen::Vector3d(pg(0), pg(1), 0);
                            // TODO: Add these correspondences in bulk to avoid
                            //       hitting mutex each time.
                            calibrator.AddObservation(calib_frame, calib_cams[iI], pg3d, pc );
                        }
                    }
                }
            }

            if(container[iI].IsShown()) {
                container[iI].ActivateScissorAndClear();
                glColor3f(1,1,1);

                // Display camera image
                if(!disp_thresh) {
                    tex.Upload(image_processing.Img(),GL_LUMINANCE,GL_UNSIGNED_BYTE);
                    tex.RenderToViewportFlipY();
                }else{
                    tex.Upload(image_processing.ImgThresh(),GL_LUMINANCE,GL_UNSIGNED_BYTE);
                    tex.RenderToViewportFlipY();
                }

                // Setup orthographic pixel drawing
                glMatrixMode(GL_PROJECTION);
                glLoadIdentity();
                glOrtho(-0.5,w-0.5,h-0.5,-0.5,0,1.0);
                glMatrixMode(GL_MODELVIEW);

                if(disp_lines) {
                    for(std::list<LineGroup>::const_iterator i = target->LineGroups().begin(); i != target->LineGroups().end(); ++i)
                    {
                        glColor3f(0.5,0.5,0.5);
                        glBegin(GL_LINE_STRIP);
                        for(std::list<size_t>::const_iterator el = i->ops.begin(); el != i->ops.end(); ++el)
                        {
                            const Eigen::Vector2d p = conics[*el].center;
                            glVertex2d(p(0), p(1));
                        }
                        glEnd();
                    }
                }

                if(disp_cross) {
                    for( size_t i=0; i < conics.size(); ++i ) {
                        const Eigen::Vector2d pc = conics[i].center;
                        pangolin::glColorBin( target->Map()[i].value, 2);
                        pangolin::glDrawCross(pc, conics[i].bbox.Width()*0.75 );
                    }
                }

                if(disp_bbox) {
                    for( size_t i=0; i < conics.size(); ++i ) {
                        const Eigen::Vector2i pg = tracking_good[iI] ? target->Map()[i].pg : Eigen::Vector2i(0,0);
                        if( 0<= pg(0) && pg(0) < grid_size(0) &&  0<= pg(1) && pg(1) < grid_size(1) ) {
                            pangolin::glColorBin(pg(1)*grid_size(0)+pg(0), grid_size(0)*grid_size(1));
                            glDrawRectPerimeter(conics[i].bbox);
                        }
                    }
                }
            }
        }

        if(v3D.IsShown()) {
            v3D.ActivateScissorAndClear(stacks);

            calibu::glDrawTarget(*target, Eigen::Vector2d(0,0), 1.0, 0.8, 1.0);

            for(size_t c=0; c< calibrator.NumCameras(); ++c) {
                const Eigen::Matrix3d Kinv = calibrator.GetCamera(c).camera.Kinv();

                const CameraAndPose cap = calibrator.GetCamera(c);
                const Sophus::SE3d T_ck = cap.T_ck;

                // Draw keyframes
                pangolin::glColorBin(c, 2, 0.2);
                for(size_t k=0; k< calibrator.NumFrames(); ++k) {
                    pangolin::glDrawAxis((T_ck * calibrator.GetFrame(k)).inverse().matrix(), 0.01);
                }

                // Draw current camera
                if(tracking_good[c]) {
                    pangolin::glColorBin(c, 2, 0.5);
                    pangolin::glDrawFrustrum(Kinv,w,h,T_hw[c].inverse().matrix(),0.05);
                }
            }
        }

        disp_mse = calibrator.MeanSquareError();
        disp_frame = frame;

        // Process window events via GLUT
        pangolin::FinishFrame();
    }

    // Finalize optimisation and persist the calibrated rig.
    calibrator.Stop();
    calibrator.PrintResults();
    calibrator.WriteCameraModels(output_filename);

    return 0;
}
  // Matches SHOT descriptors of the incoming scene against the stored
  // reference model, clusters the matches with geometric-consistency
  // grouping, and publishes the pose of the first detected instance.
  void GeometricConsistencyGrouping::recognize(
    const sensor_msgs::PointCloud2::ConstPtr& scene_cloud_msg,
    const sensor_msgs::PointCloud2::ConstPtr& scene_feature_msg)
  {
    boost::mutex::scoped_lock lock(mutex_);
    if (!reference_cloud_ || !reference_feature_) {
      NODELET_ERROR_THROTTLE(1.0, "Not yet reference is available");
      return;
    }

    // Convert the incoming ROS messages into PCL containers.
    pcl::PointCloud<pcl::PointNormal>::Ptr
      cloud (new pcl::PointCloud<pcl::PointNormal>);
    pcl::PointCloud<pcl::SHOT352>::Ptr
      features (new pcl::PointCloud<pcl::SHOT352>);
    pcl::fromROSMsg(*scene_cloud_msg, *cloud);
    pcl::fromROSMsg(*scene_feature_msg, *features);

    // For every finite scene descriptor, find its single nearest neighbour
    // among the reference descriptors and keep it as a correspondence when
    // the squared distance is below 0.25 (SHOT distances lie in [0, 1]).
    pcl::KdTreeFLANN<pcl::SHOT352> matcher;
    matcher.setInputCloud (reference_feature_);
    pcl::CorrespondencesPtr correspondences (new pcl::Correspondences ());
    for (size_t idx = 0; idx < features->size(); ++idx)
    {
      const pcl::SHOT352& descriptor = features->at(idx);
      if (!pcl_isfinite (descriptor.descriptor[0])) {
        continue;  // skip NaN descriptors
      }
      std::vector<int> nn_index(1);
      std::vector<float> nn_sq_dist(1);
      const int n_found
        = matcher.nearestKSearch(descriptor, 1, nn_index, nn_sq_dist);
      if (n_found == 1 && nn_sq_dist[0] < 0.25f) {
        correspondences->push_back(
          pcl::Correspondence(nn_index[0], static_cast<int>(idx), nn_sq_dist[0]));
      }
    }

    // Cluster the correspondences into geometrically consistent groups and
    // estimate one rigid transform per detected model instance.
    pcl::GeometricConsistencyGrouping<pcl::PointNormal, pcl::PointNormal> clusterer;
    clusterer.setGCSize(gc_size_);
    clusterer.setGCThreshold(gc_thresh_);
    clusterer.setInputCloud(reference_cloud_);
    clusterer.setSceneCloud(cloud);
    clusterer.setModelSceneCorrespondences(correspondences);

    std::vector<pcl::Correspondences> clustered_corrs;
    std::vector<Eigen::Matrix4f,
                Eigen::aligned_allocator<Eigen::Matrix4f> > rototranslations;
    clusterer.recognize(rototranslations, clustered_corrs);

    if (rototranslations.size() > 0) {
      // Publish the pose of the first instance in the scene frame.
      NODELET_INFO("detected %lu objects", rototranslations.size());
      Eigen::Matrix4f detected_mat = rototranslations[0];
      Eigen::Affine3f detected_pose(detected_mat);
      geometry_msgs::PoseStamped pose_msg;
      tf::poseEigenToMsg(detected_pose, pose_msg.pose);
      pose_msg.header = scene_cloud_msg->header;
      pub_output_.publish(pose_msg);
    }
    else {
      NODELET_WARN("Failed to find object");
    }
  }