Example #1
  void imageCallback(
      const sensor_msgs::ImageConstPtr& l_image_msg,
      const sensor_msgs::ImageConstPtr& r_image_msg,
      const sensor_msgs::CameraInfoConstPtr& l_info_msg,
      const sensor_msgs::CameraInfoConstPtr& r_info_msg)
  {
 
    bool first_run = false;
    // create odometer if not exists
    if (!visual_odometer_)
    {
      first_run = true;
#if(DEBUG)
      // cache the first image pair for the DEBUG match visualization below
      cv_bridge::CvImageConstPtr l_cv_ptr, r_cv_ptr;
      l_cv_ptr = cv_bridge::toCvShare(l_image_msg, sensor_msgs::image_encodings::MONO8);
      r_cv_ptr = cv_bridge::toCvShare(r_image_msg, sensor_msgs::image_encodings::MONO8);
      last_l_image_ = l_cv_ptr->image;
      last_r_image_ = r_cv_ptr->image;
#endif
      initOdometer(l_info_msg, r_info_msg);
    }

    // convert images if necessary
    uint8_t *l_image_data, *r_image_data;
    int l_step, r_step;
    cv_bridge::CvImageConstPtr l_cv_ptr, r_cv_ptr;
    l_cv_ptr = cv_bridge::toCvShare(l_image_msg, sensor_msgs::image_encodings::MONO8);
    l_image_data = l_cv_ptr->image.data;
    l_step = l_cv_ptr->image.step[0];
    r_cv_ptr = cv_bridge::toCvShare(r_image_msg, sensor_msgs::image_encodings::MONO8);
    r_image_data = r_cv_ptr->image.data;
    r_step = r_cv_ptr->image.step[0];

    ROS_ASSERT(l_step == r_step);
    ROS_ASSERT(l_image_msg->width == r_image_msg->width);
    ROS_ASSERT(l_image_msg->height == r_image_msg->height);

    int32_t dims[] = {l_image_msg->width, l_image_msg->height, l_step};
    // on first run or when odometer got lost, only feed the odometer with 
    // images without retrieving data
    if (first_run || got_lost_)
    {
      visual_odometer_->process(l_image_data, r_image_data, dims);
      got_lost_ = false;
    }
    else
    {
      bool success = visual_odometer_->process(
          l_image_data, r_image_data, dims, last_motion_small_);
      if (success)
      {
        Matrix motion = Matrix::inv(visual_odometer_->getMotion());
        ROS_DEBUG("Found %i matches with %i inliers.", 
                  visual_odometer_->getNumberOfMatches(),
                  visual_odometer_->getNumberOfInliers());
        ROS_DEBUG_STREAM("libviso2 returned the following motion:\n" << motion);
        Matrix camera_motion;
        // if image was replaced due to small motion we have to subtract the 
        // last motion to get the increment
        if (last_motion_small_)
        {
          camera_motion = Matrix::inv(reference_motion_) * motion;
        }
        else
        {
          // image was not replaced, report full motion from odometer
          camera_motion = motion;
        }
        reference_motion_ = motion; // store last motion as reference
        std::cout << camera_motion << "\n" << std::endl;
#if (USE_MOVEMENT_CONSTRAIN)
        // recover Euler angles from the rotation part of the camera motion
        double deltaRoll = atan2(camera_motion.val[2][1], camera_motion.val[2][2]);
        double deltaPitch = asin(-camera_motion.val[2][0]);
        double deltaYaw = atan2(camera_motion.val[1][0], camera_motion.val[0][0]);
        double tanRoll = camera_motion.val[2][1] / camera_motion.val[2][2];
        double tanPitch = tan(deltaPitch);
        printf("deltaRoll is %lf\n", deltaRoll);
        printf("deltaPitch is %lf\n", deltaPitch);
        printf("deltaYaw is %lf\n", deltaYaw);
        double deltaX = camera_motion.val[0][3];
        double deltaY = camera_motion.val[1][3];
        double deltaZ = camera_motion.val[2][3];
        printf("dx %lf, dy %lf, dz %lf, tanRoll %lf, tanPitch %lf\n",
               deltaX, deltaY, deltaZ, tanRoll, tanPitch);
        // clamp the lateral translation so it stays consistent with the
        // measured roll angle
        if (deltaY > 0 && deltaY > tanRoll * deltaZ)
        {
          camera_motion.val[1][3] = tanRoll * deltaZ;
          //printf("dy %lf deltaY, dynew %lf\n", deltaY, camera_motion.val[1][3]);
        }
        else if (deltaY < 0 && deltaY < -tanRoll * deltaZ)
        {
          camera_motion.val[1][3] = -tanRoll * deltaZ;
          //printf("dy %lf deltaY, dynew %lf\n", deltaY, camera_motion.val[1][3]);
        }

        /*if(deltaX > 0 && deltaX > tanPitch * deltaZ)
        {
           camera_motion.val[0][3] = tanPitch * deltaZ;
          printf("dx %lf, dxnew %lf\n", deltaX,camera_motion.val[1][3]);
        }
        else if(deltaX < 0 && deltaX < -tanPitch * deltaZ)
        {
           camera_motion.val[0][3] = -tanPitch * deltaZ;
          printf("dx %lf, dxnew %lf\n", deltaX,camera_motion.val[1][3]);
        }*/
        /*
        if(deltaPitch > 0)
        {
          deltaPitch = deltaPitch+fabs(deltaRoll)+fabs(deltaYaw);
        }
        else
        {
          deltaPitch = deltaPitch-fabs(deltaRoll)-fabs(deltaYaw);
        }*/
        // fold the yaw estimate into the pitch increment used for the
        // rotation below
        deltaPitch = deltaPitch + deltaYaw;
#endif
        // calculate current feature flow
        std::vector<Matcher::p_match> matches = visual_odometer_->getMatches();
        std::vector<int> inlier_indices = visual_odometer_->getInlierIndices();
#if(DEBUG)
        // stack the previous and the current left image vertically and draw
        // the feature matches between them
        cv::Mat img1 = l_cv_ptr->image;
        cv::Mat img2 = r_cv_ptr->image;
        cv::Size size(last_l_image_.cols, last_l_image_.rows + last_r_image_.rows);
        cv::Mat outImg(size, CV_MAKETYPE(img1.depth(), 3));
        cv::Mat outImg1 = outImg(cv::Rect(0, 0, last_l_image_.cols, last_l_image_.rows));
        cv::Mat outImg2 = outImg(cv::Rect(0, last_l_image_.rows, img1.cols, img1.rows));
        
        if( last_l_image_.type() == CV_8U )
          cvtColor( last_l_image_, outImg1, CV_GRAY2BGR );
        else
          last_l_image_.copyTo( outImg1 );
        
        if( img1.type() == CV_8U )
          cvtColor( img1, outImg2, CV_GRAY2BGR );
        else
          img1.copyTo( outImg2 );
        for (size_t i = 0; i < matches.size(); ++i)
        {
          cv::Point pt1(matches[i].u1p, matches[i].v1p);
          cv::Point pt2(matches[i].u1c, matches[i].v1c + last_l_image_.rows);
          // only draw matches whose previous position lies below row 239
          // (presumably the lower half of a 480-pixel-high image)
          if (pt1.y > 239)
            cv::line(outImg, pt1, pt2, cv::Scalar(255, 0, 0));
          //else
          //  cv::line(outImg, pt1, pt2, cv::Scalar(0, 255, 0));
        }
        cv::imshow("matching image", outImg);
        cv::waitKey(10);

        last_l_image_ = img1;
        last_r_image_ = img2;
#endif
        double feature_flow = computeFeatureFlow(matches);
        last_motion_small_ = (feature_flow < motion_threshold_);
        ROS_DEBUG_STREAM("Feature flow is " << feature_flow 
            << ", marking last motion as " 
            << (last_motion_small_ ? "small." : "normal."));

        // pure pitch rotation about the camera y-axis
        btMatrix3x3 rot_mat(
           cos(deltaPitch), 0, sin(deltaPitch),
           0,               1, 0,
          -sin(deltaPitch), 0, cos(deltaPitch));
        /*btMatrix3x3 rot_mat(
          camera_motion.val[0][0], camera_motion.val[0][1], camera_motion.val[0][2],
          camera_motion.val[1][0], camera_motion.val[1][1], camera_motion.val[1][2],
          camera_motion.val[2][0], camera_motion.val[2][1], camera_motion.val[2][2]);*/
        btVector3 t(camera_motion.val[0][3], camera_motion.val[1][3], camera_motion.val[2][3]);
   //rotation
   /*double delta_yaw = 0;
   double delta_pitch = 0;
   double delta_roll = 0;
   delta_yaw = delta_yaw*M_PI/180.0;
   delta_pitch = delta_pitch*M_PI/180.0;
   delta_roll = delta_roll*M_PI/180.0;
   //btMatrix3x3 initialTrans;
    Eigen::Matrix4f initialTrans = Eigen::Matrix4f::Identity();
   initialTrans(0,0) = cos(delta_pitch)*cos(delta_yaw); 
   initialTrans(0,1) = -cos(delta_roll)*sin(delta_yaw) + sin(delta_roll)*sin(delta_pitch)*cos(delta_yaw);
   initialTrans(0,2) = sin(delta_roll)*sin(delta_yaw) + cos(delta_roll)*sin(delta_pitch)*cos(delta_yaw);
   initialTrans(1,0) = cos(delta_pitch)*sin(delta_yaw);
   initialTrans(1,1) = cos(delta_roll)*cos(delta_yaw) + sin(delta_roll)*sin(delta_pitch)*sin(delta_yaw);
   initialTrans(1,2) = -sin(delta_roll)*cos(delta_yaw) + cos(delta_roll)*sin(delta_pitch)*sin(delta_yaw);
   initialTrans(2,0) = -sin(delta_pitch);
   initialTrans(2,1) = sin(delta_roll)*cos(delta_pitch);
   initialTrans(2,2) = cos(delta_roll)*cos(delta_pitch);
        btMatrix3x3 rot_mat(
          initialTrans(0,0), initialTrans(0,1), initialTrans(0,2),
          initialTrans(1,0), initialTrans(1,1), initialTrans(1,2),
          initialTrans(2,0), initialTrans(2,1), initialTrans(2,2));
        btVector3 t(0.0, 0.00, 0.01);*/
        tf::Transform delta_transform(rot_mat, t);

        setPoseCovariance(STANDARD_POSE_COVARIANCE);
        setTwistCovariance(STANDARD_TWIST_COVARIANCE);

        integrateAndPublish(delta_transform, l_image_msg->header.stamp);

        if (point_cloud_pub_.getNumSubscribers() > 0)
        {
          computeAndPublishPointCloud(l_info_msg, l_image_msg, r_info_msg, matches, inlier_indices);
        }
      }
      else
      {
        setPoseCovariance(BAD_COVARIANCE);
        setTwistCovariance(BAD_COVARIANCE);
        tf::Transform delta_transform;
        delta_transform.setIdentity();
        integrateAndPublish(delta_transform, l_image_msg->header.stamp);

        ROS_DEBUG("Call to VisualOdometryStereo::process() failed.");
        ROS_WARN_THROTTLE(1.0, "Visual Odometer got lost!");
        got_lost_ = true;
      }
    }
  }
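
The callback above relies on a computeFeatureFlow() helper that is not part of this listing. A minimal sketch of what such a helper could look like, assuming it returns the mean pixel displacement between the previous (u1p, v1p) and current (u1c, v1c) left-image positions of each match:

  #include <cmath>
  #include <vector>

  // Hedged sketch, not the listing's actual helper: mean feature displacement
  // between consecutive left images, used above as the "motion is small" test.
  double computeFeatureFlow(const std::vector<Matcher::p_match>& matches)
  {
    if (matches.empty()) return 0.0;
    double total_flow = 0.0;
    for (size_t i = 0; i < matches.size(); ++i)
    {
      double dx = matches[i].u1c - matches[i].u1p;
      double dy = matches[i].v1c - matches[i].v1p;
      total_flow += std::sqrt(dx * dx + dy * dy);
    }
    return total_flow / matches.size();
  }
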
Example #2
  void imageCallback(
      const sensor_msgs::ImageConstPtr& l_image_msg,
      const sensor_msgs::ImageConstPtr& r_image_msg,
      const sensor_msgs::CameraInfoConstPtr& l_info_msg,
      const sensor_msgs::CameraInfoConstPtr& r_info_msg)
  {
    ros::WallTime start_time = ros::WallTime::now();
    bool first_run = false;
    // create odometer if not exists
    if (!visual_odometer_)
    {
      first_run = true;
      initOdometer(l_info_msg, r_info_msg);
    }

    // convert images if necessary
    uint8_t *l_image_data, *r_image_data;
    int l_step, r_step;
    cv_bridge::CvImageConstPtr l_cv_ptr, r_cv_ptr;
    l_cv_ptr = cv_bridge::toCvShare(l_image_msg, sensor_msgs::image_encodings::MONO8);
    l_image_data = l_cv_ptr->image.data;
    l_step = l_cv_ptr->image.step[0];
    r_cv_ptr = cv_bridge::toCvShare(r_image_msg, sensor_msgs::image_encodings::MONO8);
    r_image_data = r_cv_ptr->image.data;
    r_step = r_cv_ptr->image.step[0];

    ROS_ASSERT(l_step == r_step);
    ROS_ASSERT(l_image_msg->width == r_image_msg->width);
    ROS_ASSERT(l_image_msg->height == r_image_msg->height);

    int32_t dims[] = {l_image_msg->width, l_image_msg->height, l_step};
    // on first run or when odometer got lost, only feed the odometer with 
    // images without retrieving data
    if (first_run || got_lost_)
    {
      visual_odometer_->process(l_image_data, r_image_data, dims);
      got_lost_ = false;
      // on first run publish zero once
      if (first_run)
      {
        tf::Transform delta_transform;
        delta_transform.setIdentity();
        integrateAndPublish(delta_transform, l_image_msg->header.stamp);
      }
    }
    else
    {
      bool success = visual_odometer_->process(
          l_image_data, r_image_data, dims, last_motion_small_);
      if (success)
      {
        Matrix motion = Matrix::inv(visual_odometer_->getMotion());
        ROS_DEBUG("Found %i matches with %i inliers.", 
                  visual_odometer_->getNumberOfMatches(),
                  visual_odometer_->getNumberOfInliers());
        ROS_DEBUG_STREAM("libviso2 returned the following motion:\n" << motion);
        Matrix camera_motion;
        // if image was replaced due to small motion we have to subtract the 
        // last motion to get the increment
        if (last_motion_small_)
        {
          camera_motion = Matrix::inv(reference_motion_) * motion;
        }
        else
        {
          // image was not replaced, report full motion from odometer
          camera_motion = motion;
        }
        reference_motion_ = motion; // store last motion as reference

        // calculate current feature flow
        std::vector<Matcher::p_match> matches = visual_odometer_->getMatches();
        std::vector<int> inlier_indices = visual_odometer_->getInlierIndices();
        double feature_flow = computeFeatureFlow(matches);
        last_motion_small_ = (feature_flow < motion_threshold_);
        ROS_DEBUG_STREAM("Feature flow is " << feature_flow 
            << ", marking last motion as " 
            << (last_motion_small_ ? "small." : "normal."));

        tf::Matrix3x3 rot_mat(
          camera_motion.val[0][0], camera_motion.val[0][1], camera_motion.val[0][2],
          camera_motion.val[1][0], camera_motion.val[1][1], camera_motion.val[1][2],
          camera_motion.val[2][0], camera_motion.val[2][1], camera_motion.val[2][2]);
        tf::Vector3 t(camera_motion.val[0][3], camera_motion.val[1][3], camera_motion.val[2][3]);
        tf::Transform delta_transform(rot_mat, t);

        setPoseCovariance(STANDARD_POSE_COVARIANCE);
        setTwistCovariance(STANDARD_TWIST_COVARIANCE);

        integrateAndPublish(delta_transform, l_image_msg->header.stamp);

        if (point_cloud_pub_.getNumSubscribers() > 0)
        {
          computeAndPublishPointCloud(l_info_msg, l_image_msg, r_info_msg, matches, inlier_indices);
        }
      }
      else
      {
        setPoseCovariance(BAD_COVARIANCE);
        setTwistCovariance(BAD_COVARIANCE);
        tf::Transform delta_transform;
        delta_transform.setIdentity();
        integrateAndPublish(delta_transform, l_image_msg->header.stamp);

        ROS_DEBUG("Call to VisualOdometryStereo::process() failed.");
        ROS_WARN_THROTTLE(1.0, "Visual Odometer got lost!");
        got_lost_ = true;
      }

      {
        // create and publish viso2 info msg
        VisoInfo info_msg;
        info_msg.header.stamp = l_image_msg->header.stamp;
        info_msg.got_lost = !success;
        info_msg.num_matches = visual_odometer_->getNumberOfMatches();
        info_msg.num_inliers = visual_odometer_->getNumberOfInliers();
        ros::WallDuration time_elapsed = ros::WallTime::now() - start_time;
        info_msg.runtime = time_elapsed.toSec();
        info_pub_.publish(info_msg);
      }
    }
  }
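
For context, a four-argument stereo callback like the one above is normally invoked through message_filters so that both images and both camera infos arrive time-synchronized. A minimal wiring sketch; the topic names, queue sizes, and free-function form are assumptions, not taken from this listing:

  #include <boost/bind.hpp>
  #include <message_filters/subscriber.h>
  #include <message_filters/synchronizer.h>
  #include <message_filters/sync_policies/approximate_time.h>

  // Hedged sketch: synchronize left/right image and camera-info topics and
  // hand the grouped messages to the callback above. Call once at startup.
  typedef message_filters::sync_policies::ApproximateTime<
      sensor_msgs::Image, sensor_msgs::Image,
      sensor_msgs::CameraInfo, sensor_msgs::CameraInfo> StereoSyncPolicy;

  void setupStereoSubscription(ros::NodeHandle& nh)
  {
    static message_filters::Subscriber<sensor_msgs::Image>
        left_sub(nh, "left/image_rect", 1), right_sub(nh, "right/image_rect", 1);
    static message_filters::Subscriber<sensor_msgs::CameraInfo>
        left_info_sub(nh, "left/camera_info", 1), right_info_sub(nh, "right/camera_info", 1);
    static message_filters::Synchronizer<StereoSyncPolicy>
        sync(StereoSyncPolicy(5), left_sub, right_sub, left_info_sub, right_info_sub);
    sync.registerCallback(boost::bind(&imageCallback, _1, _2, _3, _4));
  }
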
Example #3
  void imageCallback(
      const sensor_msgs::ImageConstPtr& image_msg,
      const sensor_msgs::CameraInfoConstPtr& info_msg)
  {
    ros::WallTime start_time = ros::WallTime::now();
 
    bool first_run = false;
    // create odometer if not exists
    if (!visual_odometer_)
    {
      first_run = true;
      // read calibration info from camera info message
      // to fill remaining odometer parameters
      image_geometry::PinholeCameraModel model;
      model.fromCameraInfo(info_msg);
      visual_odometer_params_.calib.f  = model.fx();
      visual_odometer_params_.calib.cu = model.cx();
      visual_odometer_params_.calib.cv = model.cy();
      visual_odometer_.reset(new VisualOdometryMono(visual_odometer_params_));
      if (image_msg->header.frame_id != "") setSensorFrameId(image_msg->header.frame_id);
      ROS_INFO_STREAM("Initialized libviso2 mono odometry "
                      "with the following parameters:" << std::endl << 
                      visual_odometer_params_);
    }

    // convert image if necessary
    uint8_t *image_data;
    int step;
    cv_bridge::CvImageConstPtr cv_ptr;
    if (image_msg->encoding == sensor_msgs::image_encodings::MONO8)
    {
      image_data = const_cast<uint8_t*>(&(image_msg->data[0]));
      step = image_msg->step;
    }
    else
    {
      cv_ptr = cv_bridge::toCvShare(image_msg, sensor_msgs::image_encodings::MONO8);
      image_data = cv_ptr->image.data;
      step = cv_ptr->image.step[0];
    }

    // run the odometer
    int32_t dims[] = {image_msg->width, image_msg->height, step};
    // on first run, only feed the odometer with first image pair without
    // retrieving data
    if (first_run)
    {
      visual_odometer_->process(image_data, dims);
      tf::Transform delta_transform;
      delta_transform.setIdentity();
      integrateAndPublish(delta_transform, image_msg->header.stamp);
    }
    else
    {
      bool success = visual_odometer_->process(image_data, dims);
      if (success)
      {
        replace_ = false;
        Matrix camera_motion = Matrix::inv(visual_odometer_->getMotion());
        ROS_DEBUG("Found %i matches with %i inliers.", 
                  visual_odometer_->getNumberOfMatches(),
                  visual_odometer_->getNumberOfInliers());
        ROS_DEBUG_STREAM("libviso2 returned the following motion:\n" << camera_motion);

        tf::Matrix3x3 rot_mat(
          camera_motion.val[0][0], camera_motion.val[0][1], camera_motion.val[0][2],
          camera_motion.val[1][0], camera_motion.val[1][1], camera_motion.val[1][2],
          camera_motion.val[2][0], camera_motion.val[2][1], camera_motion.val[2][2]);
        tf::Vector3 t(camera_motion.val[0][3], camera_motion.val[1][3], camera_motion.val[2][3]);
        tf::Transform delta_transform(rot_mat, t);

        integrateAndPublish(delta_transform, image_msg->header.stamp);
      }
      else
      {
        ROS_DEBUG("Call to VisualOdometryMono::process() failed. Assuming motion too small.");
        replace_ = true;
        tf::Transform delta_transform;
        delta_transform.setIdentity();
        integrateAndPublish(delta_transform, image_msg->header.stamp);
      }

      // create and publish viso2 info msg
      VisoInfo info_msg;
      info_msg.header.stamp = image_msg->header.stamp;
      info_msg.got_lost = !success;
      info_msg.change_reference_frame = false;
      info_msg.num_matches = visual_odometer_->getNumberOfMatches();
      info_msg.num_inliers = visual_odometer_->getNumberOfInliers();
      ros::WallDuration time_elapsed = ros::WallTime::now() - start_time;
      info_msg.runtime = time_elapsed.toSec();
      info_pub_.publish(info_msg);
    }
  }
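
All three callbacks funnel their result through integrateAndPublish(), whose implementation is not shown in this listing. Its essential job is composing each delta onto an accumulated pose before publishing. A minimal sketch, written as a free function so it is self-contained; the parameter names and the reduced message content are assumptions:

  #include <nav_msgs/Odometry.h>
  #include <tf/transform_datatypes.h>

  // Hedged sketch of the accumulation step inside integrateAndPublish().
  // The real base class additionally fills covariances, frame ids, twist,
  // and broadcasts the pose on tf.
  void integrateAndPublish(tf::Transform& integrated_pose,   // accumulated pose (state)
                           ros::Publisher& odom_pub,         // nav_msgs::Odometry publisher
                           const tf::Transform& delta_transform,
                           const ros::Time& timestamp)
  {
    integrated_pose *= delta_transform;  // compose the new increment

    nav_msgs::Odometry odometry_msg;
    odometry_msg.header.stamp = timestamp;
    tf::poseTFToMsg(integrated_pose, odometry_msg.pose.pose);
    odom_pub.publish(odometry_msg);
  }
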
Example #4
File: max_pool.hpp  Project: okhat/ngraph
            void max_pool_backprop(const T* arg_forward,
                                   const T* delta,
                                   T* out,
                                   const Shape& delta_shape,
                                   const Shape& out_shape, // same as arg_forward_shape
                                   const Shape& window_shape,
                                   const Strides& window_movement_strides,
                                   const Shape& padding_below,
                                   const Shape& padding_above)
            {
                // start with a zero input-gradient buffer; gradients are
                // scattered only to window-argmax positions below
                CoordinateTransform out_transform(out_shape);

                for (const Coordinate& out_coord : out_transform)
                {
                    out[out_transform.index(out_coord)] = 0;
                }

                CoordinateTransform delta_transform(delta_shape);

                for (const Coordinate& delta_coord : delta_transform)
                {
                    size_t img_index = delta_coord[0];
                    size_t channel = delta_coord[1];

                    size_t n_image_dimensions = out_shape.size() - 2;
                    Coordinate source_window_transform_start(2 + n_image_dimensions);
                    Coordinate source_window_transform_end(2 + n_image_dimensions);
                    Strides source_window_transform_source_strides(2 + n_image_dimensions, 1);
                    AxisVector source_window_transform_source_axis_order(2 + n_image_dimensions);
                    CoordinateDiff source_window_transform_padding_below(2 + n_image_dimensions);
                    CoordinateDiff source_window_transform_padding_above(2 + n_image_dimensions);

                    source_window_transform_start[0] = img_index;
                    source_window_transform_end[0] = img_index + 1;
                    source_window_transform_start[1] = channel;
                    source_window_transform_end[1] = channel + 1;
                    source_window_transform_padding_below[0] = 0;
                    source_window_transform_padding_below[1] = 0;
                    source_window_transform_padding_above[0] = 0;
                    source_window_transform_padding_above[1] = 0;

                    for (size_t i = 2; i < n_image_dimensions + 2; i++)
                    {
                        size_t window_shape_this_dim = window_shape[i - 2];
                        size_t movement_stride = window_movement_strides[i - 2];

                        source_window_transform_start[i] = movement_stride * delta_coord[i];
                        source_window_transform_end[i] =
                            source_window_transform_start[i] + window_shape_this_dim;
                        source_window_transform_padding_below[i] = padding_below[i - 2];
                        source_window_transform_padding_above[i] = padding_above[i - 2];
                    }
                    std::iota(begin(source_window_transform_source_axis_order),
                              end(source_window_transform_source_axis_order),
                              0);

                    CoordinateTransform source_window_transform(
                        out_shape,
                        source_window_transform_start,
                        source_window_transform_end,
                        source_window_transform_source_strides,
                        source_window_transform_source_axis_order,
                        source_window_transform_padding_below,
                        source_window_transform_padding_above);

                    Coordinate argmax_coord;
                    bool argmax_coord_valid = false;
                    T max_val = 0; // just initializing to keep compiler happy, this 0 is ignored

                    for (const Coordinate& source_window_coord : source_window_transform)
                    {
                        if (source_window_transform.has_source_coordinate(source_window_coord))
                        {
                            T candidate =
                                arg_forward[source_window_transform.index(source_window_coord)];

                            if (!argmax_coord_valid || candidate > max_val)
                            {
                                max_val = candidate;
                                argmax_coord = source_window_coord;
                                argmax_coord_valid = true;
                            }
                        }
                    }

                    if (argmax_coord_valid)
                    {
                        out[source_window_transform.index(argmax_coord)] +=
                            delta[delta_transform.index(delta_coord)];
                    }
                }
            }
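
The kernel above routes each output-gradient element to the argmax of its pooling window and leaves every other input position at zero. The same routing rule in a self-contained 1-D form, assuming stride equal to window size and no padding; this simplified function is an illustration, not part of ngraph:

            #include <cstddef>
            #include <vector>

            // Hedged 1-D illustration of max-pool backprop: each delta flows
            // only to the argmax of its window; all other positions stay 0.
            std::vector<float> max_pool_backprop_1d(const std::vector<float>& arg_forward,
                                                    const std::vector<float>& delta,
                                                    size_t window)
            {
                std::vector<float> out(arg_forward.size(), 0.0f); // zero-init, as above
                for (size_t w = 0; w < delta.size(); ++w)
                {
                    size_t start = w * window; // stride == window, no padding
                    size_t argmax = start;
                    for (size_t i = start + 1; i < start + window && i < arg_forward.size(); ++i)
                    {
                        if (arg_forward[i] > arg_forward[argmax])
                        {
                            argmax = i;
                        }
                    }
                    out[argmax] += delta[w]; // accumulate the routed gradient
                }
                return out;
            }

For example, arg_forward = {1, 3, 2, 5} with window = 2 and delta = {g0, g1} yields {0, g0, 0, g1}: each window's gradient lands on the position that won the forward max.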