Example #1
void NRICP::calculateNonRigidTransformation()
{
   double previous_seconds = glfwGetTime();

   // Previous value of the stacked 4n-by-3 deformation matrix X, used to
   // measure how much the solution changes between iterations.
   MatrixXf X_prev(4 * m_templateVertCount, 3);
   X_prev.setZero();
   m_stiffnessChanged = true;

   // Initial pass, which also folds in the landmark constraints.
   addLandmarkInformation();
   findCorrespondences();
   determineNonRigidOptimalDeformation();
   deformTemplate();
   GLfloat changes = normedDifference(&X_prev, m_X);

   // Iterate until the deformation stops changing significantly.
   while (changes > m_epsilon)
   {
      X_prev = (*m_X);
      findCorrespondences();
      determineNonRigidOptimalDeformation();
      deformTemplate();
      changes = normedDifference(&X_prev, m_X);
   }

   // glfwGetTime() returns a double; keep full precision for the timing.
   double current_seconds = glfwGetTime();
   double elapsed_seconds = current_seconds - previous_seconds;
   printf("NRICP takes %f seconds\n", elapsed_seconds);
}
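
The convergence test normedDifference() is not shown in this example. Given that m_X is a MatrixXf*, it presumably returns some norm of the change in X between iterations; a minimal sketch assuming the Frobenius norm (an assumption, not taken from the source):

GLfloat NRICP::normedDifference(MatrixXf* X_prev, MatrixXf* X)
{
    // Eigen's norm() on a dynamic matrix is the Frobenius norm of the
    // element-wise difference.
    return (*X_prev - *X).norm();
}
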
Example #2
template<typename FeatureType>
ICCVTutorial<FeatureType>::ICCVTutorial(boost::shared_ptr<pcl::Keypoint<pcl::PointXYZRGB, pcl::PointXYZI> > keypoint_detector,
                                        typename pcl::Feature<pcl::PointXYZRGB, FeatureType>::Ptr feature_extractor,
                                        boost::shared_ptr<pcl::PCLSurfaceBase<pcl::PointXYZRGBNormal> > surface_reconstructor,
                                        typename pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr source,
                                        typename pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr target)
: source_keypoints_ (new pcl::PointCloud<pcl::PointXYZI> ())
, target_keypoints_ (new pcl::PointCloud<pcl::PointXYZI> ())
, keypoint_detector_ (keypoint_detector)
, feature_extractor_ (feature_extractor)
, surface_reconstructor_ (surface_reconstructor)
, source_ (source)
, target_ (target)
, source_segmented_ (new pcl::PointCloud<pcl::PointXYZRGB>)
, target_segmented_ (new pcl::PointCloud<pcl::PointXYZRGB>)
, source_transformed_ (new pcl::PointCloud<pcl::PointXYZRGB>)
, source_registered_ (new pcl::PointCloud<pcl::PointXYZRGB>)
, source_features_ (new pcl::PointCloud<FeatureType>)
, target_features_ (new pcl::PointCloud<FeatureType>)
, correspondences_ (new pcl::Correspondences)
, show_source2target_ (false)
, show_target2source_ (false)
, show_correspondences (false)
{
  // visualizer_.registerKeyboardCallback(&ICCVTutorial::keyboard_callback, *this, 0);
  
  segmentation (source_, source_segmented_);
  segmentation (target_, target_segmented_);  
  
  detectKeypoints (source_segmented_, source_keypoints_);
  detectKeypoints (target_segmented_, target_keypoints_);
  
  extractDescriptors (source_segmented_, source_keypoints_, source_features_);
  extractDescriptors (target_segmented_, target_keypoints_, target_features_);
  
  findCorrespondences (source_features_, target_features_, source2target_);
  findCorrespondences (target_features_, source_features_, target2source_);
  
  filterCorrespondences ();
  
  determineInitialTransformation ();
  determineFinalTransformation ();
  
  // reconstructSurface ();
}
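
The findCorrespondences() overload called here takes two feature clouds and fills an index list, matching each source descriptor to its nearest neighbor in the target cloud. A minimal sketch of how such a helper is commonly written in PCL (the signature is inferred from the call sites above; the body is an assumption and needs <pcl/kdtree/kdtree_flann.h>):

template<typename FeatureType>
void ICCVTutorial<FeatureType>::findCorrespondences (typename pcl::PointCloud<FeatureType>::Ptr source,
                                                     typename pcl::PointCloud<FeatureType>::Ptr target,
                                                     std::vector<int>& correspondences) const
{
  // For every source descriptor, store the index of its nearest
  // neighbor in the target descriptor cloud.
  correspondences.resize (source->size ());
  pcl::KdTreeFLANN<FeatureType> descriptor_kdtree;
  descriptor_kdtree.setInputCloud (target);

  const int k = 1;
  std::vector<int> k_indices (k);
  std::vector<float> k_squared_distances (k);
  for (size_t i = 0; i < source->size (); ++i)
  {
    descriptor_kdtree.nearestKSearch (*source, static_cast<int> (i), k, k_indices, k_squared_distances);
    correspondences[i] = k_indices[0];
  }
}
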
Example #3
void OpticalFlow::computeFlow() {
  CHECK(frame_added_ && features_computed_ && !flow_computed_,
        "Optical Flow function called out of order!");

  if (num_frames_ < 2) {
    LOGV("Frame index was %d, skipping computation.", num_frames_);
    return;
  }

  FramePair* const curr_change = &frame_pairs_[geNthIndexFromEnd(0)];

  findCorrespondences(curr_change);

  flow_computed_ = true;
}
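
The accessor geNthIndexFromEnd() is not shown (the name appears verbatim in the call above). Assuming frame_pairs_ is a fixed-size circular buffer written at num_frames_ % kMaxFrames, the accessor might look like the following; kMaxFrames is an invented constant and the whole body is hypothetical:

// Hypothetical: index of the n-th most recent frame pair in a circular
// buffer of kMaxFrames entries, given num_frames_ frames seen so far.
inline int OpticalFlow::geNthIndexFromEnd(const int n) const {
  return (num_frames_ - 1 - n) % kMaxFrames;
}
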
Example #4
	void callback(const sensor_msgs::ImageConstPtr& image,
			const sensor_msgs::ImageConstPtr& imageDepth,
			const sensor_msgs::CameraInfoConstPtr& camInfo)
	{
		// Accept only mono8/bgr8/rgb8 intensity images paired with a
		// 32FC1 depth image; reject the pair if either check fails.
		if(!(image->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
			 image->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
			 image->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0) ||
			 imageDepth->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1) != 0)
		{
			ROS_ERROR("Input type must be image=mono8,rgb8,bgr8 and image_depth=32FC1");
			return;
		}

		cv_bridge::CvImageConstPtr imgPtr = cv_bridge::toCvShare(image);
		cv::Mat imageMono2;
		// convert to grayscale
		if(image->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0)
		{
			cv::cvtColor(imgPtr->image, imageMono2, cv::COLOR_BGR2GRAY);
		}
		else if(image->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0)
		{
			cv::cvtColor(imgPtr->image, imageMono2, cv::COLOR_RGB2GRAY);
		}
		else
		{
			imageMono2 = imgPtr->image.clone();
		}
		cv::Mat imageDepth2 = cv_bridge::toCvShare(imageDepth)->image.clone();
		if(!lastImages_.first.empty())
		{
			cv::Mat imageMono1 = lastImages_.first;
			cv::Mat imageDepth1 = lastImages_.second;

			if( imageMono1.cols != imageMono2.cols || imageMono1.rows != imageMono2.rows ||
				imageDepth1.cols != imageDepth2.cols || imageDepth1.rows != imageDepth2.rows)
			{
				lastImages_.first = imageMono2;
				lastImages_.second = imageDepth2;
				ROS_ERROR("clouds must be the same size!\n");
				return;
			}

			ros::WallTime time = ros::WallTime::now();

			// Find 2d correspondences
			std::list<std::pair<cv::Point2f, cv::Point2f> > correspondences = findCorrespondences(imageMono1, imageMono2);

			// Extract XYZ point clouds from correspondences
			pcl::PointCloud<pcl::PointXYZ>::Ptr inliers1(new pcl::PointCloud<pcl::PointXYZ>);
			pcl::PointCloud<pcl::PointXYZ>::Ptr inliers2(new pcl::PointCloud<pcl::PointXYZ>);
			extractXYZCorrespondences(
				correspondences,
				imageDepth1,
				imageDepth2,
				*camInfo,
				*inliers1,
				*inliers2);

			if(correspondences.size() < 15)
			{
				ROS_WARN("Low correspondences -> %d", correspondences.size());
			}

			pcl::PointCloud<pcl::PointXYZ>::Ptr inliersTransformed1;
			pcl::PointCloud<pcl::PointXYZ>::Ptr inliersTransformed2;
			if(frameId_.size())
			{
				inliers1->header.frame_id = image->header.frame_id;
				inliers2->header.frame_id = image->header.frame_id;
				inliersTransformed1.reset(new pcl::PointCloud<pcl::PointXYZ>);
				inliersTransformed2.reset(new pcl::PointCloud<pcl::PointXYZ>);
				pcl_ros::transformPointCloud(frameId_, *inliers1, *inliersTransformed1, tfListener_);
				pcl_ros::transformPointCloud(frameId_, *inliers2, *inliersTransformed2, tfListener_);
			}
			else
			{
				inliersTransformed1 = inliers1;
				inliersTransformed2 = inliers2;
			}

			// Compute transform
			tf::Transform transform = transformFromXYZCorrespondences(inliersTransformed1, inliersTransformed2);

			if(std::isfinite(transform.getOrigin().x()))
			{
				// from optical frame to world frame
				tf::Transform motion = transform;

				//Update pose
				pose_ *= motion;

				double roll, pitch, yaw;
				pose_.getBasis().getEulerYPR(yaw,pitch,roll);
				ROS_INFO("Odom xyz(%f,%f,%f) rpy(%f,%f,%f) update time(%f s)",
						pose_.getOrigin().x(),
						pose_.getOrigin().y(),
						pose_.getOrigin().z(),
						roll,
						pitch,
						yaw,
						(ros::WallTime::now()-time).toSec());
			}
			else
			{
				ROS_WARN("transform nan");
			}
		}
		lastImages_.first = imageMono2;
		lastImages_.second = imageDepth2;

		//*********************
		// Publish odometry
		//*********************
		//first, we'll publish the transform over tf
		geometry_msgs::TransformStamped trans;
		trans.header.stamp = image->header.stamp;
		trans.header.frame_id = "odom";
		trans.child_frame_id = frameId_;
		tf::transformTFToMsg(pose_, trans.transform);

		//send the transform
		tfBroadcaster_.sendTransform(trans);
		if(odomPub_.getNumSubscribers())
		{
			//next, we'll publish the odometry message over ROS
			nav_msgs::Odometry odom;
			odom.header.stamp = image->header.stamp; // use the input image's time stamp
			odom.header.frame_id = "odom";

			//set the position
			odom.pose.pose.position.x = pose_.getOrigin().x();
			odom.pose.pose.position.y = pose_.getOrigin().y();
			odom.pose.pose.position.z = pose_.getOrigin().z();
			tf::quaternionTFToMsg(pose_.getRotation(), odom.pose.pose.orientation);

			//publish the message
			odomPub_.publish(odom);
		}
	}
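
The 2-D matching helper findCorrespondences(imageMono1, imageMono2) is not shown in this example. Below is a minimal sketch of one way to write it with OpenCV, using ORB features and cross-checked brute-force matching; the detector choice and everything in the body are assumptions, not the project's actual implementation:

#include <opencv2/features2d.hpp>
#include <list>
#include <utility>
#include <vector>

// Hypothetical sketch: match ORB descriptors between two grayscale
// frames and return the matched point pairs.
std::list<std::pair<cv::Point2f, cv::Point2f> > findCorrespondences(
		const cv::Mat & image1, const cv::Mat & image2)
{
	std::vector<cv::KeyPoint> kpts1, kpts2;
	cv::Mat desc1, desc2;
	cv::Ptr<cv::ORB> orb = cv::ORB::create(500);
	orb->detectAndCompute(image1, cv::noArray(), kpts1, desc1);
	orb->detectAndCompute(image2, cv::noArray(), kpts2, desc2);

	std::list<std::pair<cv::Point2f, cv::Point2f> > correspondences;
	if(desc1.empty() || desc2.empty())
	{
		return correspondences;
	}

	// Cross-checking keeps only mutual nearest neighbors, which removes
	// many spurious matches before any geometric verification.
	cv::BFMatcher matcher(cv::NORM_HAMMING, true);
	std::vector<cv::DMatch> matches;
	matcher.match(desc1, desc2, matches);

	for(size_t i = 0; i < matches.size(); ++i)
	{
		correspondences.push_back(std::make_pair(
				kpts1[matches[i].queryIdx].pt,
				kpts2[matches[i].trainIdx].pt));
	}
	return correspondences;
}
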
Example #5
double ObjectAspect::matchAspects(ObjectAspect &other, Eigen::Matrix4f &transform) {
    std::vector<int> o_pcl2, t_pcl2;
    std::vector<cv::Point2i> correspondences;
    findCorrespondences(other, correspondences);

#ifdef DEBUG_VIS
    cv::Mat3b image_stack(std::max(this->image.size().height,other.image.size().height), this->image.size().width+other.image.size().width );

    cv::Mat3b img1 = image_stack(cv::Rect(0,0,this->image.size().width,this->image.size().height));
    cv::Mat3b img2 = image_stack(cv::Rect(this->image.size().width,0,other.image.size().width,other.image.size().height));

    this->image.copyTo(img1);
    other.image.copyTo(img2);

    this->plotKeypoints(img1);
    other.plotKeypoints(img2);
#endif

    for (size_t i = 0; i < correspondences.size(); i++) {

        int kpidx = correspondences[i].x;
        int lkpidx = correspondences[i].y;

        //cv::line(image, keypoints.at(kpidx ).pt,other.keypoints.at(lkpidx).pt,cv::Scalar(205,0,255),3);

        if ( map2D3D.find(kpidx) != map2D3D.end() && other.map2D3D.find(lkpidx) != other.map2D3D.end() ) {
            t_pcl2.push_back(kpidx);
            o_pcl2.push_back(lkpidx);
#ifdef DEBUG_VIS
            cv::line(image_stack,this->keypoints[kpidx].pt,other.keypoints[lkpidx].pt+cv::Point2f(this->image.size().width,0),cv::Scalar(255,0,0));
#endif
        }
    }

//    ROS_INFO("found %d correspondences.",correspondences.size());
//    ROS_INFO("found %d 3D correspondences.",t_pcl2.size());

    // distances that differ by <= EPSILON are considered equal
    const float EPSILON = 0.001;
    // minimum 3-D separation between two keypoints; pairs closer than
    // this are treated as duplicates and skipped
    const float DISTANCE_THRESHOLD = 0.02;

    PointCloud o_pcl, t_pcl;
    if (t_pcl2.size()>=3) {
        for (size_t i = 0; i < t_pcl2.size()-2; i ++ ){
            PointType npt1 = this->keypoints3D->points[this->map2D3D[t_pcl2.at(i)]];
            PointType lpt1 = other.keypoints3D->points[other.map2D3D[o_pcl2.at(i)]];
            cv::Point3f np1 = cv::Point3f(npt1.x,npt1.y,npt1.z);
            cv::Point3f lp1 = cv::Point3f(lpt1.x,lpt1.y,lpt1.z);
            for (size_t j = i+1; j < t_pcl2.size()-1; j++) {
                PointType npt2 = this->keypoints3D->points[this->map2D3D[t_pcl2.at(j)]];
                PointType lpt2 = other.keypoints3D->points[other.map2D3D[o_pcl2.at(j)]];
                cv::Point3f np2 = cv::Point3f(npt2.x,npt2.y,npt2.z);
                cv::Point3f lp2 = cv::Point3f(lpt2.x,lpt2.y,lpt2.z);
                if ( Utils3D::distPoints(np1,np2) < DISTANCE_THRESHOLD || fabs(Utils3D::distPoints(np1,np2) - Utils3D::distPoints(lp1,lp2)) > EPSILON )
                    continue;
                for (size_t k = j+1; k < t_pcl2.size(); k++) {
                    PointType npt3 = this->keypoints3D->points[this->map2D3D[t_pcl2.at(k)]];
                    PointType lpt3 = other.keypoints3D->points[other.map2D3D[o_pcl2.at(k)]];
                    cv::Point3f np3 = cv::Point3f(npt3.x,npt3.y,npt3.z);
                    cv::Point3f lp3 = cv::Point3f(lpt3.x,lpt3.y,lpt3.z);
                    if ( Utils3D::distPoints(np1,np3) < DISTANCE_THRESHOLD ||  fabs(Utils3D::distPoints(np1,np3) - Utils3D::distPoints(lp1,lp3)) > EPSILON )
                        continue;

                    if (Utils3D::distPoints(np3,np2) < DISTANCE_THRESHOLD ||  fabs(Utils3D::distPoints(np3,np2) - Utils3D::distPoints(lp3,lp2)) > EPSILON )
                        continue;

                    cv::Point3f v1 = np3-np2;
                    cv::Point3f v2 = np1-np2;
                    cv::Point3f v3 = np3-np1;

                    double angle = acos(v1.ddot(v2)/sqrt(v1.ddot(v1))/sqrt(v2.ddot(v2)))/CV_PI*180.;
                    v1 = -1*v1;
                    double angle2 = acos(v1.ddot(v3)/sqrt(v1.ddot(v1))/sqrt(v3.ddot(v3)))/CV_PI*180.;

//                    cerr << "angles: " << angle << ", " << angle2 << endl;

                    // reject (near-)degenerate triples: since v1 = v2 + v3, the three
                    // vectors are never linearly independent; what matters is that the
                    // triangle angles stay well away from 0 and 180 degrees
                    const double ANGLE_THRESH = 30;
                    if ((fabs(angle) < ANGLE_THRESH) || (fabs(angle2) < ANGLE_THRESH) || (fabs(angle) > 180-ANGLE_THRESH) || (fabs(angle2) > 180-ANGLE_THRESH))
                        continue;

//                    std::cerr << "distances: " << Utils3D::distPoints(np1,np3)
//                         << "\t" << Utils3D::distPoints(np3,np2)
//                         << "\t" << Utils3D::distPoints(np1,np2) << " i,j,k: " << i << ","<< j << "," << k <<  std::endl;
#ifdef DEBUG_VIS
                    cv::circle(image_stack, this->keypoints[t_pcl2.at(i)].pt, 3, cv::Scalar(255,255,0),-1);
                    cv::circle(image_stack, this->keypoints[t_pcl2.at(j)].pt, 3, cv::Scalar(255,255,0),-1);
                    cv::circle(image_stack, this->keypoints[t_pcl2.at(k)].pt, 3, cv::Scalar(255,255,0),-1);

                    cv::circle(image_stack, other.keypoints[o_pcl2.at(i)].pt+cv::Point2f(this->image.size().width,0), 3, cv::Scalar(255,255,0),-1);
                    cv::circle(image_stack, other.keypoints[o_pcl2.at(j)].pt+cv::Point2f(this->image.size().width,0), 3, cv::Scalar(255,255,0),-1);
                    cv::circle(image_stack, other.keypoints[o_pcl2.at(k)].pt+cv::Point2f(this->image.size().width,0), 3, cv::Scalar(255,255,0),-1);

                    cv::line(image_stack,this->keypoints[t_pcl2.at(i)].pt,other.keypoints[o_pcl2.at(i)].pt+cv::Point2f(this->image.size().width,0),cv::Scalar(0,255,255),2);
                    cv::line(image_stack,this->keypoints[t_pcl2.at(j)].pt,other.keypoints[o_pcl2.at(j)].pt+cv::Point2f(this->image.size().width,0),cv::Scalar(0,255,255),2);
                    cv::line(image_stack,this->keypoints[t_pcl2.at(k)].pt,other.keypoints[o_pcl2.at(k)].pt+cv::Point2f(this->image.size().width,0),cv::Scalar(0,255,255),2);
#endif
                    o_pcl.push_back(lpt1);
                    o_pcl.push_back(lpt2);
                    o_pcl.push_back(lpt3);

                    t_pcl.push_back(npt1);
                    t_pcl.push_back(npt2);
                    t_pcl.push_back(npt3);
                    break;
                }
                if (t_pcl.size() != 0)
                    break;
            }
            if (t_pcl.size() != 0)
                break;
        }
    }


    if (t_pcl.points.size() == 3) {
        // get transformation
        Eigen::Matrix4f t;
        pcl::registration::TransformationEstimationSVD<PointType, PointType> te_svd;
        te_svd.estimateRigidTransformation(t_pcl, o_pcl, t);

#ifdef DEBUG_VIS
        cv::namedWindow("correspondences");
        cv::imshow("correspondences", image_stack);
#endif
        transform = t;
        return 1;
    }
    return DBL_MAX;
}
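
Utils3D::distPoints() is not shown; from its use above it is presumably the Euclidean distance between two cv::Point3f values. A minimal sketch under that assumption:

#include <cmath>

// Hypothetical sketch of Utils3D::distPoints: straight-line distance
// between two 3-D points.
static float distPoints(const cv::Point3f &a, const cv::Point3f &b)
{
    const cv::Point3f d = a - b;
    return (float)std::sqrt(d.ddot(d));
}

The surrounding triple loop is a brute-force search for three correspondence pairs whose pairwise distances agree between the two views and whose points form a well-conditioned triangle; those three pairs are then enough for TransformationEstimationSVD to recover the rigid transform.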