Example #1
openMVG::image::Image<openMVG::image::RGBColor> OpenCVHelper::createThumbnail(
	const openMVG::image::Image<openMVG::image::RGBColor> &orig, int thWidth, int thHeight)
{
	const int width = orig.Width();
	const int height = orig.Height();
	const float image_aspect = static_cast<float>(width) / static_cast<float>(height);
	const float thumb_aspect = static_cast<float>(thWidth) / static_cast<float>(thHeight);

	int rescale_width, rescale_height;
	if(image_aspect > thumb_aspect)
	{
		// Image is wider than the thumbnail: match the height, overshoot the width
		rescale_width = static_cast<int>(std::ceil(thHeight * image_aspect));
		rescale_height = thHeight;
	}
	else
	{
		// Image is taller than the thumbnail: match the width, overshoot the height
		rescale_width = thWidth;
		rescale_height = static_cast<int>(std::ceil(thWidth / image_aspect));
	}

	// Convert image to OpenCV data
	cv::Mat cvimg(height, width, CV_8UC3);
	cv::Vec3b *cvPtr = cvimg.ptr<cv::Vec3b>(0);
	for(int y = 0; y < height; y++)
	{
		for(int x = 0; x < width; x++)
		{
			(*cvPtr)[2] = orig(y, x).r();	// OpenCV usually stores BGR, not RGB
			(*cvPtr)[1] = orig(y, x).g();
			(*cvPtr)[0] = orig(y, x).b();
			cvPtr++;
		}
	}

	// Resize to the aspect-preserving dimensions computed above, then
	// center-crop to the requested thumbnail size
	cv::Mat cvresized;
	cv::resize(cvimg, cvresized, cv::Size(rescale_width, rescale_height), 0, 0, cv::INTER_AREA);
	const int offX = (rescale_width - thWidth) / 2;
	const int offY = (rescale_height - thHeight) / 2;
	cv::Mat cvoutimg = cvresized(cv::Rect(offX, offY, thWidth, thHeight)).clone();	// clone() keeps the data contiguous for the pointer walk below

	// Convert back to openMVG::image
	cvPtr = cvoutimg.ptr<cv::Vec3b>(0);
	openMVG::image::Image<openMVG::image::RGBColor> outImg;
	outImg.resize(thWidth, thHeight);
	for(int y = 0; y < thHeight; y++)
	{
		for(int x = 0; x < thWidth; x++)
		{
			outImg(y, x).r() = (*cvPtr)[2];	// OpenCV usually stores BGR, not RGB
			outImg(y, x).g() = (*cvPtr)[1];
			outImg(y, x).b() = (*cvPtr)[0];
			cvPtr++;
		}
	}

	return outImg;
}
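
A minimal usage sketch for the helper above (file names are hypothetical; it assumes openMVG's ReadImage/WriteImage from openMVG/image/image_io.hpp and that createThumbnail is callable as a static member):

#include "openMVG/image/image_io.hpp"

// Hypothetical driver: load an image, build a 256x256 thumbnail, write it out.
int makeThumbnail()
{
	openMVG::image::Image<openMVG::image::RGBColor> src;
	if (!openMVG::image::ReadImage("input.jpg", &src))
		return 1;
	const openMVG::image::Image<openMVG::image::RGBColor> thumb =
		OpenCVHelper::createThumbnail(src, 256, 256);
	openMVG::image::WriteImage("thumb.png", thumb);
	return 0;
}
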
Example #2
QImage CpuFilter::operator()(const QImage& img, std::size_t dim, const std::vector<float>& filter)
{
	const int halfDim = static_cast<int>(dim / 2);
	const int idim = static_cast<int>(dim);
	const int width = img.width() - halfDim;
	const int height = img.height() - halfDim;
	// Pixels in the first halfDim rows/columns are never written
	QImage outImg(width, height, QImage::Format_RGB32);

	for (int y = halfDim; y < height; y++)	// y runs over rows (bounded by height)
	{
		for (int x = halfDim; x < width; x++)	// x runs over columns (bounded by width)
		{
			float sumR = 0.0f, sumG = 0.0f, sumB = 0.0f;
			for (int i = -halfDim; i <= halfDim; i++)
			{
				for (int j = -halfDim; j <= halfDim; j++)
				{
					const float w = filter[((j + halfDim) * idim) + (i + halfDim)];
					const QRgb p = img.pixel(x - i, y - j);
					sumR += w * qRed(p);
					sumG += w * qGreen(p);
					sumB += w * qBlue(p);
				}
			}
			// Clamp to the valid 8-bit range before writing the pixel
			outImg.setPixel(x, y, qRgb(qBound(0, static_cast<int>(sumR), 255),
			                           qBound(0, static_cast<int>(sumG), 255),
			                           qBound(0, static_cast<int>(sumB), 255)));
		}
	}

	return outImg;
}
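
A minimal sketch of applying the filter with a normalized 3x3 box-blur kernel (file names are hypothetical; it assumes CpuFilter is default-constructible):

#include <QImage>
#include <vector>

int main()
{
	QImage src("input.png");	// hypothetical input image
	const std::size_t dim = 3;
	const std::vector<float> box(dim * dim, 1.0f / 9.0f);	// box kernel; weights sum to 1

	CpuFilter filter;
	QImage blurred = filter(src, dim, box);
	return blurred.save("blurred.png") ? 0 : 1;
}
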
Example #3
template <typename T1, typename T2> // T1: scalar type of F; T2: scalar type of the points
static void myDrawEpipolarLinesDouble(const std::string& title, const cv::Matx<T1, 3, 3> F, const cv::Mat& img1, const cv::Mat& img2,
    const std::vector<cv::Point_<T2> > points1, const std::vector<cv::Point_<T2> > points2) {
  cv::Mat outImg(img1.rows, img1.cols * 2, CV_8UC3);
  cv::Mat outImgInit(img1.rows, img1.cols * 2, CV_8UC3);
  cv::Rect rect1(0, 0, img1.cols, img1.rows);
  cv::Rect rect2(img1.cols, 0, img1.cols, img1.rows);
  /*
   * Allow color drawing
   */
  if (img1.type() == CV_8U) {
    cv::cvtColor(img1, outImg(rect1), CV_GRAY2BGR);
    cv::cvtColor(img2, outImg(rect2), CV_GRAY2BGR);
  } else {
    img1.copyTo(outImg(rect1));
    img2.copyTo(outImg(rect2));
  }
  outImg.copyTo(outImgInit);
  std::vector<cv::Vec<T2, 3> > epilines1, epilines2;
  cv::computeCorrespondEpilines(points1, 1, F, epilines1); //Index starts with 1
  cv::computeCorrespondEpilines(points2, 2, F, epilines2);

  CV_Assert(points1.size() == points2.size() && points2.size() == epilines1.size() && epilines1.size() == epilines2.size());

  for (size_t i = 0; i < points1.size(); i++) {

    outImg.copyTo(outImgInit);
    cv::Mat tmp2 = outImgInit(rect2);
    cv::Mat tmp1 = outImgInit(rect1);

    // The epipolar line of points1[i] lies in the second image: draw it in the
    // right half together with the matching point points2[i]
    cv::line(tmp2, cv::Point(0, -epilines1[i][2] / epilines1[i][1]),
        cv::Point(img1.cols, -(epilines1[i][2] + epilines1[i][0] * img1.cols) / epilines1[i][1]), cv::Scalar(0, 255, 0));
    cv::circle(tmp2, points2[i], 3, cv::Scalar(255, 0, 0), -1, CV_AA);

    // ...and vice versa: the epipolar line of points2[i] lies in the first image
    cv::line(tmp1, cv::Point(0, -epilines2[i][2] / epilines2[i][1]),
        cv::Point(img2.cols, -(epilines2[i][2] + epilines2[i][0] * img2.cols) / epilines2[i][1]), cv::Scalar(0, 255, 0));
    cv::circle(tmp1, points1[i], 3, cv::Scalar(255, 0, 0), -1, CV_AA);

    cv::imshow(title, outImgInit);
    cv::waitKey(0);
  }
}
Example #4
void testSkinRecognition(const BayesianClassifier &classifier, ImageType &image, ImageType &ref, std::string out, bool YCbCr){

    int height, width, levels;
    image.getImageInfo(height,width,levels);
    ImageType outImg(height,width,levels);

    RGB val1, val2;
    int label;
    std::vector<double> color(2);
    int TP = 0, TN = 0, FN = 0, FP = 0;
    RGB white(255,255,255);
    RGB black(0,0,0);

    for(int row = 0; row < height; row++){
      for(int col = 0; col < width; col++){
        image.getPixelVal(row, col, val1);
        ref.getPixelVal(row, col, val2);

        if(YCbCr == true){
          // Cb and Cr components (without the usual +128 offset)
          color[0] = -0.169*val1.r - 0.332*val1.g + 0.500*val1.b;
          color[1] = 0.500*val1.r - 0.419*val1.g - 0.081*val1.b;
        }
        else{
          // normalized rg chromaticity (undefined for pure black pixels)
          color[0] = val1.r/float(val1.r+val1.g+val1.b);
          color[1] = val1.g/float(val1.r+val1.g+val1.b);
        }

        label = classifier.predict(color);

        if(label == 0){  // class 0 = skin
          outImg.setPixelVal(row, col, white);
          if(val2 != black){ TP++; } else{ FP++; }
        }
        else{
          outImg.setPixelVal(row, col, black);
          if(val2 == black){ TN++; } else{ FN++; }
        }
      }
    }  // end outer for loop

    std::cout << std::endl
              << "TP: " << TP << std::endl
              << "TN: " << TN << std::endl
              << "FP: " << FP << std::endl
              << "FN: " << FN << std::endl;

    /*std::stringstream ss;
    ss << FP << " " << FN;
    Debugger debugger("Data_Prog2/errors3a.txt",true);
    debugger.debug(ss.str());
    */

    writeImage(out.c_str(), outImg);
}
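
A small helper one might add to turn the printed counts into the usual summary rates (hypothetical; not part of the original program):

#include <iostream>

// Hypothetical helper: summarize the confusion-matrix counts printed above.
void printSkinMetrics(int TP, int TN, int FP, int FN){
    const double precision = TP / static_cast<double>(TP + FP);
    const double recall    = TP / static_cast<double>(TP + FN);
    const double accuracy  = (TP + TN) / static_cast<double>(TP + TN + FP + FN);
    std::cout << "precision: " << precision
              << "  recall: " << recall
              << "  accuracy: " << accuracy << std::endl;
}
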
Example #5
cv::Mat ImageGenerator::convertToMat(const CharImage &img) const {
  // cv::Mat takes (rows, cols), i.e. (height, width)
  cv::Mat outImg(img.height, img.width, CV_8UC1);

  for (int y = 0; y < outImg.rows; y++) {
    for (int x = 0; x < outImg.cols; x++) {
      // CharImage stores row-major values in [0, 1]; scale them to 8-bit
      outImg.at<unsigned char>(y, x) =
          static_cast<unsigned char>(img.pixels[x + y * img.width] * 255);
    }
  }
  }

  return outImg;
}
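
A minimal usage sketch (it assumes an ImageGenerator instance and a populated CharImage; the output file name is hypothetical):

#include <opencv2/highgui/highgui.hpp>

// Hypothetical driver: convert a CharImage and write it out for inspection.
void dumpCharImage(const ImageGenerator &gen, const CharImage &img) {
  cv::Mat m = gen.convertToMat(img);
  cv::imwrite("char_image.png", m);
}
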
Example #6
Mat inPutimgs(int _N, int _M, int _rows, int _cols)
{
    char pathNames[100]; // path of the image to be read
    Mat imagergb(_rows, _cols, CV_8UC3, Scalar(0, 0, 0)); // RGB image as read from disk
    Mat imagegray(_rows, _cols, CV_8U, Scalar(0)); // grayscale conversion of the RGB image
    Mat imagegrayt(_rows, _cols, CV_8U, Scalar(0)); // transposed image, so the data order matches MATLAB; may be removed
    Mat imagegray_f(_rows, _cols, CV_32F, Scalar(0.0)); // float copy of the uchar image, to keep computational precision
    Mat outImg(_N * _M, _rows * _cols, CV_32F, Scalar(0.0)); // output matrix, one flattened image per row
    Mat imgROI; // region of interest inside the output matrix

    for (int i = 1; i <= _N; i++)
    {
        for (int j = 1; j <= _M; j++)
        {
            sprintf_s(pathNames, "D:\\Document\\vidpic\\人脸图像\\训练\\orl_%d_%d.bmp", i, j);
            imagergb = imread(pathNames); // read the RGB image
            if(!imagergb.data)
            {
                cout << "The specified image was not found!" << endl;
                Sleep(2000);
                exit(0);
            }
            cvtColor(imagergb, imagegray, CV_BGR2GRAY); // rgb2gray
            imagegrayt = imagegray.t(); // transpose so the data order matches MATLAB; may be removed
            imagegrayt.convertTo(imagegray_f, CV_32F, 1.0, 0.0); // uchar to float
            normalize(imagegray_f, imagegray_f, 1.0, 0.0, NORM_INF); // normalize by the maximum value

            // flatten each image into a single row and store the rows in outImg in order
            imgROI = outImg(Rect(0, (i - 1) * _M + j - 1, _rows * _cols, 1)); // select the target row as a region of interest
            imagegray_f.reshape(1, 1).copyTo(imgROI); // reshape the image to one row and copy it into the ROI

            //cout << i << "_" << j << endl;
            //namedWindow("test");
            //imshow("test",imagegray);
            //waitKey(10);
        }
    }
    return outImg;
}
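
A minimal sketch of consuming the returned matrix, e.g. an eigenfaces-style decomposition with cv::PCA (the counts below are hypothetical; the ORL face database has 40 subjects and 112x92-pixel images):

    Mat data = inPutimgs(40, 5, 112, 92); // 40 subjects, 5 training images each, 112x92 pixels
    PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, 20); // keep the 20 strongest components
    Mat projected = pca.project(data); // each row becomes a 20-dimensional coefficient vector
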
Example #7
template <typename T1, typename T2> // T1: scalar type of F; T2: scalar type of the points
static void drawEpipolarLines(const std::string& title, const cv::Matx<T1, 3, 3> F, const cv::Mat& img1, const cv::Mat& img2,
    const std::vector<cv::Point_<T2> > points1, const std::vector<cv::Point_<T2> > points2, const float inlierDistance = -1) {

  CV_Assert(img1.size() == img2.size() && img1.type() == img2.type());
  cv::Mat outImg(img1.rows, img1.cols * 2, CV_8UC3);
  cv::Rect rect1(0, 0, img1.cols, img1.rows);
  cv::Rect rect2(img1.cols, 0, img1.cols, img1.rows);
  /*
   * Allow color drawing
   */
  if (img1.type() == CV_8U) {
    cv::cvtColor(img1, outImg(rect1), CV_GRAY2BGR);
    cv::cvtColor(img2, outImg(rect2), CV_GRAY2BGR);
  } else {
    img1.copyTo(outImg(rect1));
    img2.copyTo(outImg(rect2));
  }
  std::vector<cv::Vec<T2, 3> > epilines1, epilines2;
  cv::computeCorrespondEpilines(points1, 1, F, epilines1); //Index starts with 1
  cv::computeCorrespondEpilines(points2, 2, F, epilines2);

  CV_Assert(points1.size() == points2.size() && points2.size() == epilines1.size() && epilines1.size() == epilines2.size());

  cv::RNG rng(0);
  for (size_t i = 0; i < points1.size(); i++) {
    if (inlierDistance > 0) {
      if (distancePointLine(points1[i], epilines2[i]) > inlierDistance || distancePointLine(points2[i], epilines1[i]) > inlierDistance) {
        //The point match is no inlier
        continue;
      }
    }
    /*
     * Epipolar lines of the 1st point set are drawn in the 2nd image and vice-versa
     */
    cv::Scalar color(rng(256), rng(256), rng(256));
    cv::Mat tmp2 = outImg(rect2);
    cv::Mat tmp1 = outImg(rect1);

    cv::line(tmp2, cv::Point(0, -epilines1[i][2] / epilines1[i][1]),
        cv::Point(img1.cols, -(epilines1[i][2] + epilines1[i][0] * img1.cols) / epilines1[i][1]), color);
    cv::circle(tmp1, points1[i], 3, color, -1, CV_AA);

    cv::line(tmp1, cv::Point(0, -epilines2[i][2] / epilines2[i][1]),
        cv::Point(img2.cols, -(epilines2[i][2] + epilines2[i][0] * img2.cols) / epilines2[i][1]), color);
    cv::circle(tmp2, points2[i], 3, color, -1, CV_AA);
  }
  cv::imshow(title, outImg);
  cv::waitKey(1);
}
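
A minimal sketch of driving the helper above: estimate F from matched point pairs with cv::findFundamentalMat, then visualize (file names and the inlier threshold are hypothetical):

  cv::Mat img1 = cv::imread("left.png", CV_LOAD_IMAGE_GRAYSCALE);
  cv::Mat img2 = cv::imread("right.png", CV_LOAD_IMAGE_GRAYSCALE);
  std::vector<cv::Point2f> pts1, pts2; // to be filled by a feature matcher beforehand
  cv::Matx33d F = cv::findFundamentalMat(pts1, pts2, cv::FM_RANSAC, 3.0, 0.99);
  drawEpipolarLines<double, float>("epipolar lines", F, img1, img2, pts1, pts2, 3.0f);
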
Example #8
template <typename T1, typename T2> // T1: scalar type of F; T2: scalar type of the points
static void myDrawEpipolarLines(const std::string& title, const cv::Matx<T1, 3, 3> F, const cv::Mat& img1, const cv::Mat& img2,
    const std::vector<cv::Point_<T2> > points1, const std::vector<cv::Point_<T2> > points2) {

  cv::Mat outImg(img1.rows, img1.cols, CV_8UC3);

  cv::Mat outImgInit(img1.rows, img1.cols, CV_8UC3);

  if (img1.type() == CV_8U) {
    cv::cvtColor(img1, outImg, CV_GRAY2BGR);
  } else {
    img1.copyTo(outImg);
  }

  outImg.copyTo(outImgInit);
  std::vector<cv::Vec<T2, 3> > epilines;
  cv::computeCorrespondEpilines(points1, 1, F, epilines); // lines corresponding to points1; they lie in the second view but are overlaid on img1 here for inspection

  for (size_t i = 0; i < points1.size(); i++) {

    cv::Mat outImg2 = outImgInit.clone(); // fresh copy so only the current line and point are highlighted

    cv::line(outImg2, cv::Point(0, -epilines[i][2] / epilines[i][1]),
        cv::Point(img1.cols, -(epilines[i][2] + epilines[i][0] * img1.cols) / epilines[i][1]), cv::Scalar(255, 0, 0));
    cv::circle(outImg2, points2[i], 3, cv::Scalar(0, 255, 255), 2);
    cv::line(outImg, cv::Point(0, -epilines[i][2] / epilines[i][1]),
        cv::Point(img1.cols, -(epilines[i][2] + epilines[i][0] * img1.cols) / epilines[i][1]), cv::Scalar(255, 0, 0));
    cv::circle(outImg, points2[i], 3, cv::Scalar(0, 255, 255), 2);

    std::cout << "Current Distance to point" << points2[i] << ": " << distancePointLine(points2[i], epilines[i]) << std::endl;
    cv::imshow(title, outImg2);
    cv::waitKey(0);
  }
  cv::imshow(title, outImg);
  cv::waitKey(0);
}
Example #9
  void imageCallback(
      const sensor_msgs::ImageConstPtr& l_image_msg,
      const sensor_msgs::ImageConstPtr& r_image_msg,
      const sensor_msgs::CameraInfoConstPtr& l_info_msg,
      const sensor_msgs::CameraInfoConstPtr& r_info_msg)
  {
 
    bool first_run = false;
    // create odometer if not exists
    if (!visual_odometer_)
    {
      first_run = true;
#if(DEBUG)
      cv_bridge::CvImageConstPtr l_cv_ptr, r_cv_ptr;
      l_cv_ptr = cv_bridge::toCvShare(l_image_msg, sensor_msgs::image_encodings::MONO8);
      r_cv_ptr = cv_bridge::toCvShare(r_image_msg, sensor_msgs::image_encodings::MONO8);
      last_l_image_ = l_cv_ptr->image;
      last_r_image_ = r_cv_ptr->image;
#endif
      initOdometer(l_info_msg, r_info_msg);
    }

    // convert images if necessary
    uint8_t *l_image_data, *r_image_data;
    int l_step, r_step;
    cv_bridge::CvImageConstPtr l_cv_ptr, r_cv_ptr;
    l_cv_ptr = cv_bridge::toCvShare(l_image_msg, sensor_msgs::image_encodings::MONO8);
    l_image_data = l_cv_ptr->image.data;
    l_step = l_cv_ptr->image.step[0];
    r_cv_ptr = cv_bridge::toCvShare(r_image_msg, sensor_msgs::image_encodings::MONO8);
    r_image_data = r_cv_ptr->image.data;
    r_step = r_cv_ptr->image.step[0];

    ROS_ASSERT(l_step == r_step);
    ROS_ASSERT(l_image_msg->width == r_image_msg->width);
    ROS_ASSERT(l_image_msg->height == r_image_msg->height);

    int32_t dims[] = {l_image_msg->width, l_image_msg->height, l_step};
    // on first run or when odometer got lost, only feed the odometer with 
    // images without retrieving data
    if (first_run || got_lost_)
    {
      visual_odometer_->process(l_image_data, r_image_data, dims);
      got_lost_ = false;
    }
    else
    {
      bool success = visual_odometer_->process(
          l_image_data, r_image_data, dims, last_motion_small_);
      if (success)
      {
        Matrix motion = Matrix::inv(visual_odometer_->getMotion());
        ROS_DEBUG("Found %i matches with %i inliers.", 
                  visual_odometer_->getNumberOfMatches(),
                  visual_odometer_->getNumberOfInliers());
        ROS_DEBUG_STREAM("libviso2 returned the following motion:\n" << motion);
        Matrix camera_motion;
        // if image was replaced due to small motion we have to subtract the 
        // last motion to get the increment
        if (last_motion_small_)
        {
          camera_motion = Matrix::inv(reference_motion_) * motion;
        }
        else
        {
          // image was not replaced, report full motion from odometer
          camera_motion = motion;
        }
        reference_motion_ = motion; // store last motion as reference
        std::cout << camera_motion << "\n" << std::endl;
        // deltaPitch is also used below to build rot_mat, so it must exist
        // even when the movement constraint is compiled out
        double deltaPitch = 0.0;
#if (USE_MOVEMENT_CONSTRAIN)
        double deltaRoll = atan2(camera_motion.val[2][1], camera_motion.val[2][2]);
        deltaPitch = asin(-camera_motion.val[2][0]);
        double deltaYaw = atan2(camera_motion.val[1][0], camera_motion.val[0][0]);
        double tanRoll = camera_motion.val[2][1] / camera_motion.val[2][2];
        double tanPitch = tan(deltaPitch);
        printf("deltaroll is %lf\n", deltaRoll);
        printf("deltaPitch is %lf\n", deltaPitch);
        printf("deltaYaw is %lf\n", deltaYaw);
        double deltaX = camera_motion.val[0][3];
        double deltaY = camera_motion.val[1][3];
        double deltaZ = camera_motion.val[2][3];
        printf("dx %lf, dy %lf, dz %lf, tanRoll %lf tanPitch %lf\n",deltaX, deltaY, deltaZ, tanRoll, tanPitch);
        if(deltaY > 0 && deltaY > tanRoll * deltaZ)
        {
           camera_motion.val[1][3] = tanRoll * deltaZ;
          //printf("dy %lf deltaY, dynew %lf\n", deltaY,camera_motion.val[2][3]);
        }
        else if(deltaY < 0 && deltaY < -tanRoll * deltaZ)
        {
           camera_motion.val[1][3] = -tanRoll * deltaZ;
          //printf("dy %lf deltaY, dynew %lf\n", deltaY,camera_motion.val[2][3]);
        }

        /*if(deltaX > 0 && deltaX > tanPitch * deltaZ)
        {
           camera_motion.val[0][3] = tanPitch * deltaZ;
          printf("dx %lf, dxnew %lf\n", deltaX,camera_motion.val[1][3]);
        }
        else if(deltaX < 0 && deltaX < -tanPitch * deltaZ)
        {
           camera_motion.val[0][3] = -tanPitch * deltaZ;
          printf("dx %lf, dxnew %lf\n", deltaX,camera_motion.val[1][3]);
        }*/
        /*
        if(deltaPitch > 0)
        {
          deltaPitch = deltaPitch+fabs(deltaRoll)+fabs(deltaYaw);
        }
        else
        {
          deltaPitch = deltaPitch-fabs(deltaRoll)-fabs(deltaYaw);
        }*/
        deltaPitch = deltaPitch + deltaYaw;
#endif
        // calculate current feature flow
        std::vector<Matcher::p_match> matches = visual_odometer_->getMatches();
        std::vector<int> inlier_indices = visual_odometer_->getInlierIndices();
#if(DEBUG)
        cv::Mat img1 = l_cv_ptr->image;
        cv::Mat img2 = r_cv_ptr->image;
        cv::Size size(last_l_image_.cols, last_l_image_.rows+last_r_image_.rows);
        cv::Mat outImg(size,CV_MAKETYPE(img1.depth(), 3));
        cv::Mat outImg1 = outImg( cv::Rect(0, 0, last_l_image_.cols, last_l_image_.rows) );
        cv::Mat outImg2 = outImg( cv::Rect(0, last_l_image_.rows, img1.cols, img1.rows) );
        
        if( last_l_image_.type() == CV_8U )
          cvtColor( last_l_image_, outImg1, CV_GRAY2BGR );
        else
          last_l_image_.copyTo( outImg1 );
        
        if( img1.type() == CV_8U )
          cvtColor( img1, outImg2, CV_GRAY2BGR );
        else
          img1.copyTo( outImg2 );
        for (size_t i = 0; i < matches.size(); ++i)
        {
          cv::Point pt1(matches[i].u1p,matches[i].v1p);
          cv::Point pt2(matches[i].u1c,matches[i].v1c + last_l_image_.rows);
          if(pt1.y > 239) // only draw matches whose previous feature lies below row 239
            cv::line(outImg, pt1, pt2, cv::Scalar(255,0,0));
          //else
            //cv::line(outImg, pt1, pt2, cv::Scalar(0,255,0));
        }
        cv::imshow("matching image", outImg);
    cv::waitKey(10);
        



        last_l_image_ = img1;
        last_r_image_ = img2;
#endif
        double feature_flow = computeFeatureFlow(matches);
        last_motion_small_ = (feature_flow < motion_threshold_);
        ROS_DEBUG_STREAM("Feature flow is " << feature_flow 
            << ", marking last motion as " 
            << (last_motion_small_ ? "small." : "normal."));

        // pure pitch rotation (about the camera's y axis)
        btMatrix3x3 rot_mat(
          cos(deltaPitch),  0, sin(deltaPitch),
          0,                1, 0,
          -sin(deltaPitch), 0, cos(deltaPitch));
        /*btMatrix3x3 rot_mat(
          camera_motion.val[0][0], camera_motion.val[0][1], camera_motion.val[0][2],
          camera_motion.val[1][0], camera_motion.val[1][1], camera_motion.val[1][2],
          camera_motion.val[2][0], camera_motion.val[2][1], camera_motion.val[2][2]);*/
        btVector3 t(camera_motion.val[0][3], camera_motion.val[1][3], camera_motion.val[2][3]);
   //rotation
   /*double delta_yaw = 0;
   double delta_pitch = 0;
   double delta_roll = 0;
   delta_yaw = delta_yaw*M_PI/180.0;
   delta_pitch = delta_pitch*M_PI/180.0;
   delta_roll = delta_roll*M_PI/180.0;
   //btMatrix3x3 initialTrans;
    Eigen::Matrix4f initialTrans = Eigen::Matrix4f::Identity();
   initialTrans(0,0) = cos(delta_pitch)*cos(delta_yaw); 
   initialTrans(0,1) = -cos(delta_roll)*sin(delta_yaw) + sin(delta_roll)*sin(delta_pitch)*cos(delta_yaw);
   initialTrans(0,2) = sin(delta_roll)*sin(delta_yaw) + cos(delta_roll)*sin(delta_pitch)*cos(delta_yaw);
   initialTrans(1,0) = cos(delta_pitch)*sin(delta_yaw);
   initialTrans(1,1) = cos(delta_roll)*cos(delta_yaw) + sin(delta_roll)*sin(delta_pitch)*sin(delta_yaw);
   initialTrans(1,2) = -sin(delta_roll)*cos(delta_yaw) + cos(delta_roll)*sin(delta_pitch)*sin(delta_yaw);
   initialTrans(2,0) = -sin(delta_pitch);
   initialTrans(2,1) = sin(delta_roll)*cos(delta_pitch);
   initialTrans(2,2) = cos(delta_roll)*cos(delta_pitch);
        btMatrix3x3 rot_mat(
          initialTrans(0,0), initialTrans(0,1), initialTrans(0,2),
          initialTrans(1,0), initialTrans(1,1), initialTrans(1,2),
          initialTrans(2,0), initialTrans(2,1), initialTrans(2,2));
        btVector3 t(0.0, 0.00, 0.01);*/
        tf::Transform delta_transform(rot_mat, t);

        setPoseCovariance(STANDARD_POSE_COVARIANCE);
        setTwistCovariance(STANDARD_TWIST_COVARIANCE);

        integrateAndPublish(delta_transform, l_image_msg->header.stamp);

        if (point_cloud_pub_.getNumSubscribers() > 0)
        {
          computeAndPublishPointCloud(l_info_msg, l_image_msg, r_info_msg, matches, inlier_indices);
        }
      }
      else
      {
        setPoseCovariance(BAD_COVARIANCE);
        setTwistCovariance(BAD_COVARIANCE);
        tf::Transform delta_transform;
        delta_transform.setIdentity();
        integrateAndPublish(delta_transform, l_image_msg->header.stamp);

        ROS_DEBUG("Call to VisualOdometryStereo::process() failed.");
        ROS_WARN_THROTTLE(1.0, "Visual Odometer got lost!");
        got_lost_ = true;
      }
    }
  }
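
A minimal sketch of how such a callback is typically wired up (topic names and the StereoOdometerNode class are hypothetical; message_filters is the standard ROS mechanism for synchronizing stereo image and camera-info topics):

#include <ros/ros.h>
#include <boost/bind.hpp>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>
#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>

// Hypothetical wiring, normally done in the node's constructor; the
// subscribers and synchronizer are static so they outlive this call.
void wireStereoCallback(ros::NodeHandle& nh, StereoOdometerNode& node)
{
  using namespace message_filters;
  static Subscriber<sensor_msgs::Image> l_img(nh, "left/image_rect", 1);
  static Subscriber<sensor_msgs::Image> r_img(nh, "right/image_rect", 1);
  static Subscriber<sensor_msgs::CameraInfo> l_info(nh, "left/camera_info", 1);
  static Subscriber<sensor_msgs::CameraInfo> r_info(nh, "right/camera_info", 1);
  static TimeSynchronizer<sensor_msgs::Image, sensor_msgs::Image,
                          sensor_msgs::CameraInfo, sensor_msgs::CameraInfo>
      sync(l_img, r_img, l_info, r_info, 5);
  sync.registerCallback(boost::bind(&StereoOdometerNode::imageCallback, &node, _1, _2, _3, _4));
}
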