Example #1
0
void MyGlWindow::refine_from_accepted_correspondences ()
{
	ExtrinsicParameters original_pose = _camera->getPose();

	double max_distance = _LUT.inchToModelUnit( mmToInch(_camera->_max_translation_speed) );

	CorrespondenceVector ds;
	for (int i=0;i<SM_Size();i++) {
		if ( SM_Get(i).valid() && SM_Get(i).line->status == ACCEPTED &&  SM_Get(i).eid != -1) {
			Correspondence d;
			d.first = SM_Get(i).line;
			d.second = SM_Get(i).eps[SM_Get(i).eid];
			ds.push_back( d );
		}
	}

	int n = ds.size();

	if ( n < 15 ) {
		return;
	}

	LOG(LEVEL_INFO, "refining from accepted correspondences (%d).", n );

	refineCameraPoseFromNCorrespondences( ds );

	if ( len( _camera->getTranslation() - original_pose.getTranslation()) > max_distance )
		_camera->setPose( original_pose );
}
Example #2
0
// find inliers given the current camera pose
// and refine camera pose based on it
//
void MyGlWindow::find_inliers_and_refine_camera_pose( corresVector &correspondences )
{

	CorrespondenceVector inliers;

	ExtrinsicParameters pose = _camera->getPose();

	// for each correspondence, determine whether it is an inlier
	for (int i=0;i<correspondences.size();i++) {

		corres c = correspondences[i];

		Edge *line = c.line;

		for (int j=0;j<c.eps.size();j++) {
	
			EdgePlane ep = c.eps[j];

			ep.fromCameraFrameToWorldFrame( pose );

			if ( ep.overlap( line ) > INIT_REFINEMENT_OVERLAP  && ep.angle( line ) < INIT_REFINEMENT_ANGLE ) {
				Correspondence d;
				d.first = line;
				d.second = c.eps[j];
				inliers.push_back( d );
				break;
			}
		}
	}

	// refine pose on inliers
	refineCameraPoseFromNCorrespondences( inliers );
}
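
A minimal sketch of the same inlier-gating pattern in isolation: keep at most one candidate per correspondence, accepting the first one whose overlap and angle pass the thresholds. The Candidate struct and the threshold parameters below are illustrative stand-ins, not the project's EdgePlane API.

#include <cstddef>
#include <vector>

// Illustrative stand-in: precomputed overlap/angle of one candidate edgeplane
// against a model line, plus the candidate's id.
struct Candidate { double overlap; double angle; int id; };

// For each correspondence (one vector of candidates per model line), keep the
// first candidate that passes both thresholds -- at most one inlier per line.
std::vector<int> gate_inliers(const std::vector< std::vector<Candidate> > &candidates,
                              double min_overlap, double max_angle)
{
	std::vector<int> inlier_ids;
	for (std::size_t i = 0; i < candidates.size(); i++) {
		for (std::size_t j = 0; j < candidates[i].size(); j++) {
			const Candidate &c = candidates[i][j];
			if (c.overlap > min_overlap && c.angle < max_angle) {
				inlier_ids.push_back(c.id);
				break;
			}
		}
	}
	return inlier_ids;
}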
Example #3
0
// init the camera pose
// find the combination of sm correspondences that maximizes the score
//
void MyGlWindow::update_init_pose( std::vector< intVector > &edges_buckets )
{
	ExtrinsicParameters original_pose = _camera->getPose();

	ExtrinsicParameters best_pose = original_pose;

	double best_score = -1.0;

	int n = SM_Size();

	intVector best_eids;

	for (int run=0;run<1000;run++) {

		_camera->setPose( original_pose );

		// pick a guess randomly for each correspondence
		CorrespondenceVector set;

		intVector eids;

		for (int i=0;i<n;i++) {
			corres cc = SM_Get(i);
			Correspondence c;
			c.first = cc.line;
			int p = MIN(cc.eps.size()-1, int((double)rand()/(RAND_MAX+1.0) * (cc.eps.size()-1))); // RAND_MAX+1.0 avoids signed overflow
			c.second = cc.eps[p];
			eids.push_back( p );

			set.push_back( c );
		}

		refineCameraPoseFromNCorrespondences( set );

		double score = score_camera_pose( _camera->getPose(), edges_buckets );

		if ( score > best_score ) {

			best_score = score;

			best_pose = _camera->getPose();

			best_eids = eids;

		}
	}

	if ( !best_eids.empty() ) {
		for (int i=0;i<n;i++)
			SM_SetId( i, best_eids[i] );
	}

	_camera->setPose( best_pose );
}
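
The (double)rand()/(RAND_MAX+1) idiom used here and in the later examples needs a floating-point denominator (RAND_MAX+1.0) to avoid signed overflow when RAND_MAX == INT_MAX. A sketch of an alternative that avoids rand() entirely, using <random>; the function name and rng parameter are illustrative, not part of the project.

#include <cstddef>
#include <random>

// Return a uniformly distributed index in [0, n-1]. Requires n >= 1.
inline std::size_t pick_uniform_index(std::size_t n, std::mt19937 &rng)
{
	std::uniform_int_distribution<std::size_t> dist(0, n - 1);
	return dist(rng);
}

// Illustrative usage, mirroring the random choice of an edgeplane per correspondence:
//   std::mt19937 rng(std::random_device{}());
//   int p = (int)pick_uniform_index(cc.eps.size(), rng);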
Example #4
0
  // Reject a candidate minimal set if the k-th correspondence shares a graph vertex
  // with any correspondence already selected in `indices`.
  bool IdCorrespondenceValidator::operator()(const CorrespondenceVector& correspondences, const IndexVector& indices, int k){
    if (k>minimalSetSize())
      return true;
    
    assert((int)indices.size()>k && "VALIDATION_INDEX_OUT_OF_BOUND");
    assert((int)correspondences.size()>indices[k] && "VALIDATION_CORRESPONDENCE_INDEX_OUT_OF_BOUND");
    const g2o::OptimizableGraph::Edge* edgek = correspondences[indices[k]].edge();
    int idk1=edgek->vertex(0)->id();
    int idk2=edgek->vertex(1)->id();
    
    for (int i=0; i<k; i++){
      const g2o::OptimizableGraph::Edge* edge = correspondences[indices[i]].edge();
      int id1=edge->vertex(0)->id();
      int id2=edge->vertex(1)->id();
      if (idk1==id1)
        return false;
      if (idk2==id2)
        return false;
    }
    return true;
  }
Example #5
0
bool MyGlWindow::update_correspondences()
{
	ExtrinsicParameters original_pose = _camera->getPose();

	Vec3d eps = get_expected_position(8);
	Quaternion epr = get_expected_rotation();

	//_camera->setTranslation( eps );

	// update correspondences
	SM_UpdateCorrespondences( );

	printf("correspondences updated [%d]\n", SM_Size());

	// enforce geometric constraints
	SM_VerifyCorrespondences( _camera->getPose() );

	int i,j,run;

	// quick hack for video -- remove asap
	/*for (i=0;i<pose_history.size();i++) {
		if ( pose_history[i].id == frameId ) {
			_camera->setPose( pose_history[i] );
			break;
		}
	}

	return true;*/
	// end quick hack

	//detect_edges();

	//std::vector< intVector > edges_buckets;
	//distribute_edgeplanes_into_buckets( edges_buckets );

	//ExtrinsicParameters pose = original_pose;
	write_correspondences( frameId );
	
	ExtrinsicParameters best_pose = original_pose;

	// keep only valid correspondences
	CorrespondenceVector ds;
	for (i=0;i<SM_Size();i++) {
		if ( SM_Get(i).valid() && SM_Get(i).age >= 4 &&  SM_Get(i).eid != -1) {
			Correspondence d;
			d.first = SM_Get(i).line;
			d.second = SM_Get(i).eps[SM_Get(i).eid];
			ds.push_back( d );
		}
	}

	int n = ds.size();

	if ( n < 5 ) {
		LOG(LEVEL_INFO, "too few correspondences to localize (%d).", n );

		return false;
	}

	_n_correspondences = n;

	LOG(LEVEL_INFO, "using %d correspondences for localization", n);
	
	double max_distance = 2 * _LUT.inchToModelUnit( mmToInch(_camera->_max_translation_speed) );

	// RANSAC
	_point_cloud.clear();

	double best_penalty = 1E10;

	int size = INIT_MIN_CORRESPONDENCES;

	std::vector< ExtrinsicParameters > poses;
	double wweight = 0.0;
	//double nweight = 0.0;
	Vec3d average_position = Vec3d(0,0,0);

	for (run=0;run<6000;run++) {

		_camera->setPose( original_pose );

		CorrespondenceVector set;

		intVector indices;

		selectNRandomInt( size, ds.size(), indices );

		for (i=0;i<indices.size();i++) {
			Correspondence d = ds[indices[i]];

			set.push_back( d );
		}

		refineCameraPoseFromNCorrespondences( set );

		if ( len(_camera->getTranslation() - original_pose.getTranslation()) > max_distance ) 
			continue;

		_point_cloud.push_back( _camera->getTranslation() );
		
		poses.push_back( _camera->getPose() );

		// keep pose that scores the best
		double penalty = 0.0;
		int counter = 0;

		Vec3d center = _camera->getTranslation();

		for (i=0;i<n;i++) {

			bool used = false;
			for (j=0;j<indices.size();j++) {
				if ( indices[j] == i ) {
					used = true;
					break;
				}
			}

			if ( used )
				continue;

			counter++;

			Edge *line = ds[i].first;
			Vec3d a = line->getA()-center;
			Vec3d b = line->getB()-center;
			EdgePlane edgeplane = EdgePlane( a, b, center, 0, 0, 0 );
			edgeplane.fromWorldFrameToCameraFrame( _camera->getPose() );
		
			penalty += fabs( edgeplane.angle( ds[i].second ) );
		}

		if ( counter > 0 ) {
			penalty /= counter;
		} else {
			continue;
		}

		if ( penalty > EPS ) {
			// update the average
			average_position += 1.0 / penalty * _camera->getTranslation();
			wweight += 1.0 / penalty;
			//printf("weight = %f\n", wweight);
		}

		if ( penalty < best_penalty ) {

			best_penalty = penalty;

			best_pose = _camera->getPose();
		}

		//_camera->getPose().print();
	}

	_camera->setPose( best_pose );
	
	//if ( wweight > EPS ) {
	//	printf("weight = %f\n", wweight);
	//	_camera->setTranslation( average_position / wweight );
	//}

	double dd = len(_camera->getTranslation() - original_pose.getTranslation());

	if ( dd > max_distance ) {
		_camera->setPose( original_pose );
		return true;
	}
			
	return true;
}
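
The held-out scoring step above (average the angular residual over the correspondences that were not part of the random sample, and skip the hypothesis when nothing is left to score) can be written in isolation as follows. The residual callback is a placeholder for the EdgePlane angle computation; none of these names are the project's API.

#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

// Average a residual over all items whose index is NOT in `used`.
// Returns a negative value when every item was used (no held-out data).
double heldout_penalty(std::size_t n,
                       const std::vector<int> &used,
                       const std::function<double(std::size_t)> &residual)
{
	double penalty = 0.0;
	std::size_t counter = 0;
	for (std::size_t i = 0; i < n; i++) {
		if (std::find(used.begin(), used.end(), (int)i) != used.end())
			continue;                 // part of the RANSAC sample -- skip it
		penalty += residual(i);       // e.g. fabs(edgeplane.angle(ds[i].second))
		counter++;
	}
	return counter > 0 ? penalty / counter : -1.0;
}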
Example #6
0
// relock
void MyGlWindow::relock ( double max_dihedral_angle, int nruns )
{
	// lookup LUT
	_LUT.lookup( _camera->getTranslation(), MIN_SUBTENDED_ANGLE, 0.0, 100, 0);

	int i;

	printf("detecting edges\n");

	// detect edges
	detect_edges();

	// distribute visible edges into buckets
	std::vector< intVector > edges_buckets;
	distribute_edgeplanes_into_buckets( edges_buckets );

	// compute the region size on the tessellation
	printf("computing region\n");

	int level = 1 + max_dihedral_angle / SPHERE_TESSELLATION_RESOLUTION;
	LOG(LEVEL_INFO, "region size: %d", level);

	int N = _LUT._lines.size();

	printf("converting lines\n");
	// convert the model lines into edgeplanes in the camera coordinate frame
	edgePlaneVector lines_edgeplanes;
	intVector line_ids;
	for (i=0;i<N;i++) {
		Vec3d a = _camera->fromWorldFrameToCameraFrame( _LUT._lines[i]->getA() );
		Vec3d b = _camera->fromWorldFrameToCameraFrame( _LUT._lines[i]->getB() );
		Vec3d s = Vec3d(0,0,0);
		lines_edgeplanes.push_back( EdgePlane( a, b, s, -1, _LUT._lines[i]->_id, i ) );
		line_ids.push_back( _LUT._lines[i]->_id );
	}

	corresVector correspondences;

	// clear the state machine
	SM_Clear();

	// for each model line, search the possible matches
	for (i=0;i<N;i++) {

		EdgePlane line = lines_edgeplanes[i];

		intVector cells;
		get_edgeplane_buckets( line, cells );

		// create a correspondence
		corres c ( _LUT._lines[i] );

		for (int m=0;m<cells.size();m++) {

			int bucket_id = cells[m];

			for (int j=0;j<edges_buckets[bucket_id].size();j++) {

				int edge_id = edges_buckets[bucket_id][j];

				EdgePlane edge;
				if (!_camera->_frame.get_edgeplane_chained( edge_id, edge ) ) {
					LOG(LEVEL_ERROR, "error accessing edgeplane %d out of %d edgeplanes.", edge_id, _camera->_frame.n_edgeplanes_chained );
					continue;
				}

				double angle = line.angle( edge );

				if ( angle > max_dihedral_angle ) 
					continue;

				c.eps.push_back( edge );
			}
		}

		// insert the correspondence once, after scanning all candidate buckets
		if ( c.valid() ) {
			correspondences.push_back( c );
			SM_InsertItem( c );
		}
	}

	ExtrinsicParameters original_pose = _camera->getPose();
	ExtrinsicParameters best_pose = original_pose;
	double best_score = score_camera_pose( original_pose, edges_buckets );
	
	LOG(LEVEL_INFO, "start score: %f", best_score );
	
	// find the best camera pose possible
	for (int run=0; run < nruns; run++) {

		// reset camera pose
		_camera->setPose( original_pose );

		// draw random correspondences
		CorrespondenceVector cs;

		intVector indices;
		selectNRandomInt( 5/*INIT_MIN_CORRESPONDENCES*/, SM_Size(), indices);

		// for each correspondence, pick a choice randomly
		for (i=0;i<indices.size();i++) {
			Correspondence d;
			d.first = SM_Get(indices[i]).line;
			int p = MIN( SM_Get(indices[i]).eps.size()-1, (double)rand() / (RAND_MAX+1.0) * (SM_Get(indices[i]).eps.size()-1));
			d.second = SM_Get(indices[i]).eps[p];
			cs.push_back( d );
		}

		refineCameraPoseFromNCorrespondences( cs );

		double score = score_camera_pose( _camera->getPose(), edges_buckets );

		if ( score > best_score ) {

			best_score = score;
			best_pose = _camera->getPose();
		}	
	}

	// keep the best camera pose found so far
	_camera->setPose( best_pose );

	LOG(LEVEL_INFO, "new score: %f", best_score );

	// clear the state machine
	SM_Clear();

	// populate correspondences again
	correspondences.clear();
	init_correspondences( correspondences, edges_buckets, MAINTENANCE_DIHEDRAL_ANGLE, MAINTENANCE_MIN_OVERLAP, true );
}
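
A stripped-down sketch of the bucket lookup pattern used in relock(): candidates for a model line are gathered only from the buckets the line falls into and then filtered by a dihedral-angle threshold. The Plane struct and the angle computation are simplified stand-ins for EdgePlane; the container layout mirrors edges_buckets.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Simplified stand-in for an edgeplane: just the unit normal of the plane.
struct Plane { double nx, ny, nz; };

// Dihedral angle between two planes from the dot product of their unit normals,
// folded so that parallel planes give an angle of zero.
inline double angle_between(const Plane &a, const Plane &b)
{
	double d = a.nx * b.nx + a.ny * b.ny + a.nz * b.nz;
	d = std::max(-1.0, std::min(1.0, d));
	return std::acos(std::fabs(d));
}

// Gather candidate edge ids for one model line: walk only the buckets the line
// projects into and keep the edges whose dihedral angle stays under the threshold.
std::vector<int> candidates_for_line(const Plane &line,
                                     const std::vector<int> &cells,
                                     const std::vector< std::vector<int> > &edges_buckets,
                                     const std::vector<Plane> &edges,
                                     double max_dihedral_angle)
{
	std::vector<int> result;
	for (std::size_t m = 0; m < cells.size(); m++) {
		const std::vector<int> &bucket = edges_buckets[cells[m]];
		for (std::size_t j = 0; j < bucket.size(); j++) {
			int edge_id = bucket[j];
			if (angle_between(line, edges[edge_id]) <= max_dihedral_angle)
				result.push_back(edge_id);
		}
	}
	return result;
}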
Example #7
0
// RANSAC-based refinement of the camera pose from a set of correspondences
//
double MyGlWindow::refine_camera_pose( corresVector &correspondences, std::vector< intVector > &edges_buckets )
{
	ExtrinsicParameters best_pose = _camera->getPose();

	double best_score = score_camera_pose( best_pose, edges_buckets );

	int n = correspondences.size();

	int i,j,k;

	_line_matching_history.clear();
	_line_matching_pose_history.clear();

	if ( n < INIT_MIN_CORRESPONDENCES )
		return best_score;

	// for each run, draw a random subset of correspondences and compute the camera pose based on it
	for (int run = 0; run < 1000; run++) {

		CorrespondenceVector cs;

		intVector indices;

		selectNRandomInt( INIT_MIN_CORRESPONDENCES, n, indices ); // select random correspondence indices in [0, n)

		for (i=0;i<indices.size();i++) {

			corres c = correspondences[indices[i]];

			int index = MIN(c.eps.size()-1, int( (double)rand()/(RAND_MAX+1.0) * (c.eps.size()-1) )); // pick a random edge in each correspondence

			Correspondence d;
			d.first = c.line;
			d.second = c.eps[index];
			cs.push_back( d );
		}

		if ( cs.size() < INIT_MIN_CORRESPONDENCES )
			LOG(LEVEL_ERROR, "warning: number of correspondences mismatch: %d instead of %d", (int)cs.size(), INIT_MIN_CORRESPONDENCES);

		// compute the coarse camera pose from three lines
		bool success = false;

		for (i=0;i<cs.size();i++) {
			for (j=i+1;j<cs.size();j++) {
				for (k=j+1;k<cs.size();k++) {

					ExtrinsicParameters pose = _camera->getPose();
					success = cameraPoseFromThreeCorrespondences( cs[i].second, cs[j].second, cs[k].second, cs[i].first, cs[j].first, cs[k].first, pose );
					if ( success ) {
						_camera->setPose( pose );
						break;
					}
				}

				if ( success ) break;
			}

			if ( success ) break;
		}

		// update the history (for debugging only)
		intVector lines_id_history;
		edgePlaneVector edgeplanes_history;

		for (k=0;k<cs.size();k++) {
			lines_id_history.push_back( cs[k].first->_id );
			edgeplanes_history.push_back( cs[k].second );
		}

		_line_matching_history.push_back( std::pair< intVector, edgePlaneVector > (lines_id_history, edgeplanes_history) );
		// end debugging

		find_inliers_and_refine_camera_pose( correspondences );

		_line_matching_pose_history.push_back( _camera->getPose() );

		double score = score_camera_pose( _camera->getPose(), edges_buckets );

		if ( score > best_score  ) {

			if ( _camera->getTranslation()[2] > 0.0 && _camera->getTranslation()[2] < INIT_MAX_CAMERA_HEIGHT && _LUT.valid_pose( _camera->getPose() ) ) {
				best_pose = _camera->getPose();
				best_score = score;
			}
		}
	}

	// refine the translation
	//refine_camera_translation( best_pose, _camera->getHeight()-_LUT.inchToModelUnit(20.0), _camera->getHeight()+_LUT.inchToModelUnit(20.0), edges_buckets);

	// set camera pose to best solution found so far
	_camera->setPose( best_pose );

	return best_score;

}
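
The minimal-solver step above tries every unordered triple from the sampled correspondences until cameraPoseFromThreeCorrespondences succeeds. The same control flow in isolation; solve3 is a placeholder callback for the project's three-line pose solver.

#include <cstddef>
#include <functional>

// Try every unordered triple (i, j, k) of n sampled correspondences until the
// minimal solver reports success. Returns true and writes the winning triple.
bool first_valid_triple(std::size_t n,
                        const std::function<bool(std::size_t, std::size_t, std::size_t)> &solve3,
                        std::size_t out[3])
{
	for (std::size_t i = 0; i < n; i++)
		for (std::size_t j = i + 1; j < n; j++)
			for (std::size_t k = j + 1; k < n; k++)
				if (solve3(i, j, k)) {
					out[0] = i; out[1] = j; out[2] = k;
					return true;
				}
	return false;
}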
Example #8
0
// RANSAC computation of the camera pose from a set of correspondences KS
// each correspondence consists of a line and a set of possible image matches (edgeplanes)
// at each step:
// - select a random set of correspondences S
// - within the subset S, run a RANSAC inner loop to select the top inliers S2
// - compute the camera pose from S2 and score the remaining correspondences (KS - S2)
// - if the score is higher than some threshold, accept; otherwise, continue.
// correspondences which are BLACKLISTED are not taken into account for this calculation
// no change is made to the correspondence state at the end of the computation - that is left to the state machine
// note that the size of S should be about 15 correspondences
// note that, for performance reasons, correspondences are not all updated at the beginning
// instead, they are updated on the fly
// note: the body currently reads the POSE_* constants rather than the pose_* parameters
bool SM_ComputePose(int pose_max_elements, int pose_min_elements, int pose_max_trials, int pose_threshold_score, corresVector &cs, CorrespondenceVector &inliers)
{
    // save the current camera pose
    ExtrinsicParameters pose = w->_camera->getPose();

    PerfTimer timer;

    // initialize a list of light correspondence structures
    std::vector<cr_pose> correspondences;
    int i,k;
    int counter=0;
    for (corresVector::iterator iter = cs.begin(); iter != cs.end(); iter++,counter++) {
        if ( iter->status() != _BLACKLISTED ) {
            cr_pose item = cr_pose( counter, 0, (iter->status() == _CONNECTED) ? 0 : 1, 0, iter->age);
            correspondences.push_back( item );
        }
    }

    int N = correspondences.size();
    int n = MIN( N, POSE_MAX_ELEMENTS);

    if ( N < POSE_MIN_ELEMENTS ) // if too few correspondences, quit
        return false;

    // iterate until (1) ransac success or (2) max number of trials reached or (3) all possible solutions tried
    int trial = 0;
    double best_score = -1.0;
    ExtrinsicParameters best_pose = pose;

    while ( 1 ) {

        LOG(LEVEL_INFO,"trial %d", trial);

        // exit if max number of trials reached
        if ( trial > POSE_MAX_TRIALS )
            break;

        // reset the camera pose (to be done before any call to PROJ_TEST!)
        w->_camera->setPose( pose );

        // shuffle randomly
        std::random_shuffle( correspondences.begin(), correspondences.end() );

        // select the first n valid elements
        corresVector subset; // this is the subset of correspondences that has been selected

        int marker = 0;
        while ( subset.size() < n && marker < correspondences.size() ) {
            Correspondence c;
            if ( correspondences[marker].d == 0 ) { // if the correspondence has not been computed yet, compute it
                corres cc = cs[correspondences[marker].i];
                assert ( cc.line != NULL );
                correspondences[marker].k = w->PROJ_TEST( cc, true, false, CLUSTER_MAX_ANGLE_THRESHOLD ) ? 0 : 1;
                cs[correspondences[marker].i] = cc;
                correspondences[marker].j = cs[correspondences[marker].i].eps.size();
                correspondences[marker].d = 1;
            }
            corres d = cs[correspondences[marker].i];

            if ( d.valid() ) { // update the subset with this correspondence
                subset.push_back( d );
            }
            marker++;
        }

        // exit if the table is not complete -- it means that not many correspondences are available so we quit
        //if ( subset.size() < n )  {
        //	LOG(LEVEL_INFO,"table incomplete ( %d ) . exiting.", subset.size());
        //	break;
        //}

        // from the subset S of n correspondences, run a RANSAC inner loop to find the top inliers

        // initialize the counters
        intVector counters, max_counters;
        double max_score = -1.0;
        intVector best_indices;

        for (i=0; i<subset.size(); i++) {
            counters.push_back(0);
            max_counters.push_back(subset[i].eps.size());
        }

        inliers.clear();

        for (k=0; k<300; k++) {

            // randomly select the number of correspondences to take
            int p = MIN( subset.size(), POSE_MIN_ELEMENTS - 1 + (double)rand() / (RAND_MAX+1.0) * (n - POSE_MIN_ELEMENTS + 1));
            //LOG(LEVEL_INFO, "selecting %d elements ( %d elements in the subset)\n", p, subset.size());

            // select a random subset of one-to-one correspondences
            CorrespondenceVector v;
            intVector indices;
            selectNRandomInt( p, subset.size(), indices);
            //LOG(LEVEL_INFO, "selected elements out of %d:", subset.size());
            //for (int pp=0;pp<indices.size();pp++)
            //LOG(LEVEL_INFO,"%d",indices[pp]);

            for (i=0; i<indices.size(); i++) {
                Correspondence d;
                d.first = subset[indices[i]].line;
                int q = MIN( subset[indices[i]].eps.size()-1, (double)rand() / (RAND_MAX+1.0) * subset[indices[i]].eps.size() ); // size()-1 keeps q in bounds
                //LOG(LEVEL_INFO, "element %d: selected %d edgeplane / %d", indices[i], q, subset[indices[i]].eps.size());

                d.second = subset[indices[i]].eps[q];
                v.push_back( d );
            }

            // refine camera pose from selected correspondences
            //LOG(LEVEL_INFO, "refining camera pose...");
            w->_camera->setPose( pose );
            w->refineCameraPoseFromNCorrespondences( v );

            // IDEA: inject ideal position to check!!!!!!!!!!!!!!
            //if ( k == 0 ) {
            //	w->_camera->setPose( w->GetSyntheticPose( w->frameId );
            //	v.clear();
            //	for (i=0;i<subset.size();i++) {
            //LOG(LEVEL_INFO, "done.");

            // compute the residuals
            double score = 0.0;
            for (i=0; i<indices.size(); i++) {
                EdgePlane ep = v[i].second;
                score += subset[indices[i]].computeScore( w->_camera->getPose() );
                //ep.fromCameraFrameToWorldFrame( w->_camera->getPose() );
                //double angle = ep.angle( v[i].first );
                //penalty += angle;
            }

            //penalty /= indices.size();
            score /= indices.size();
            //LOG(LEVEL_INFO, "score = %f", score);

            // if better, keep camera pose
            if ( score > max_score ) {
                best_pose = w->_camera->getPose();
                max_score = score;
                inliers = v;
                best_indices = indices;
            }
        }

        // keep the best pose
        if ( max_score > 0.0 )
            w->_camera->setPose( best_pose );
        else
            continue;

        // for each inlier, find the best match
        inliers.clear();
        for (i=0; i<best_indices.size(); i++) {
            Correspondence d;
            d.first = subset[best_indices[i]].line;
            subset[best_indices[i]].computeScore( w->_camera->getPose() ); // find the best match
            if ( subset[best_indices[i]].best_ep != -1 ) {
                d.second = subset[best_indices[i]].eps[subset[best_indices[i]].best_ep];
                inliers.push_back( d );
            }
        }

        w->_camera->setPose( pose );
        w->refineCameraPoseFromNCorrespondences( inliers );

        // update the trial counter
        trial++;
        //timer.print("pose: refine pose");

        // sanity check on speed (rotation and translation)
        if ( !w->_camera->testCameraMotion( pose ) ) {
            //LOG(LEVEL_INFO,"camera motion test failed.");
            continue;
        }

        // score the corresponding pose according to the rest of the correspondences
        double score = (marker >= correspondences.size()) ? 1.0 : 0.0;

        for (i=marker; i<correspondences.size(); i++) {
            corres d = cs[correspondences[i].i];
            assert( d.line != NULL );

            if ( w->PROJ_TEST( d, true, false, POSE_MAX_ANGLE_THRESHOLD ) ) // project the line and look for a match
                score += 1.0 / (correspondences.size() - marker);

            if ( ( score > POSE_THRESHOLD_SCORE ) && (score > best_score) ) // if beat the threshold, exit the loop
                break;
        }
        //LOG(LEVEL_INFO, "score: %f", score);
        //timer.print("pose: scoring");

        // keep score if better
        if ( score > best_score ) {
            best_score = score;
            best_pose = w->_camera->getPose();
        }

        // exit if threshold reached
        if ( score > POSE_THRESHOLD_SCORE )
            break;

        // cleanup the correspondence table
        correspondences.erase( std::remove_if(correspondences.begin(), correspondences.end(), cr_pose_invalid), correspondences.end() );

    }

    w->_camera->setPose( best_pose );
    //timer.print("finish");

    return best_score > POSE_THRESHOLD_SCORE; // report whether the acceptance threshold was reached
}
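
The outer-loop acceptance test described in the header comment -- score the pose against the correspondences outside the subset and stop early once both the acceptance threshold and the best score so far are beaten -- reduced to its skeleton. passes_projection_test stands in for PROJ_TEST; the names are illustrative only.

#include <cstddef>
#include <functional>

// Fraction of the held-out correspondences [first, count) that pass the projection
// test, with early exit as soon as the running score is already good enough.
double score_remaining(std::size_t first, std::size_t count,
                       const std::function<bool(std::size_t)> &passes_projection_test,
                       double threshold, double best_score)
{
	if (first >= count)
		return 1.0;                       // nothing left to score against
	double score = 0.0;
	const double step = 1.0 / (double)(count - first);
	for (std::size_t i = first; i < count; i++) {
		if (passes_projection_test(i))
			score += step;
		if (score > threshold && score > best_score)
			break;                        // already beats the acceptance threshold
	}
	return score;
}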