void StereoProcessor::processDisparity(const cv::Mat& left_rect, const cv::Mat& right_rect,
                                       const image_geometry::StereoCameraModel& model,
                                       stereo_msgs::DisparityImage& disparity) const
{
  // Fixed-point disparity is 16 times the true value: d = d_fp / 16.0 = x_l - x_r.
  static const int DPP = 16; // disparities per pixel
  static const double inv_dpp = 1.0 / DPP;

  // Block matcher produces 16-bit signed (fixed-point) disparity image
  block_matcher_(left_rect, right_rect, disparity16_);

  // Fill in DisparityImage image data, converting to 32-bit float
  sensor_msgs::Image& dimage = disparity.image;
  dimage.height   = disparity16_.rows;
  dimage.width    = disparity16_.cols;
  dimage.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
  dimage.step     = dimage.width * sizeof(float);
  dimage.data.resize(dimage.step * dimage.height);
  cv::Mat_<float> dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step);
  // We convert from fixed-point to float disparity and also adjust for any x-offset between
  // the principal points: d = d_fp*inv_dpp - (cx_l - cx_r)
  disparity16_.convertTo(dmat, dmat.type(), inv_dpp, -(model.left().cx() - model.right().cx()));
  ROS_ASSERT(dmat.data == &dimage.data[0]);
  /// @todo is_bigendian? :)

  // Stereo parameters
  disparity.f = model.right().fx();
  disparity.T = model.baseline();

  /// @todo Window of (potentially) valid disparities

  // Disparity search range
  disparity.min_disparity = getMinDisparity();
  disparity.max_disparity = getMinDisparity() + getDisparityRange() - 1;
  disparity.delta_d = inv_dpp;
}
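// A minimal standalone sketch (not part of the node) of the fixed-point
// conversion done by convertTo above: dst = src*alpha + beta, i.e.
// d = d_fp/16 - (cx_l - cx_r). The disparity values and principal points
// below are illustrative assumptions, not taken from any real calibration.
#include <opencv2/core/core.hpp>

int main()
{
  // Fixed-point disparities 16*{10, 20, 31}, as the block matcher would produce
  cv::Mat_<int16_t> disparity16 = (cv::Mat_<int16_t>(1, 3) << 160, 320, 496);

  const double inv_dpp = 1.0 / 16;          // 4 fractional bits
  const double cx_l = 320.5, cx_r = 318.5;  // hypothetical principal points

  cv::Mat_<float> dmat;
  disparity16.convertTo(dmat, CV_32F, inv_dpp, -(cx_l - cx_r));
  // dmat now holds the true disparities {8, 18, 29}
  return 0;
}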
void DisparityWideNodelet::imageCb(const ImageConstPtr& l_image_msg,
                                   const CameraInfoConstPtr& l_info_msg,
                                   const ImageConstPtr& r_image_msg,
                                   const CameraInfoConstPtr& r_info_msg)
{
  /// @todo Convert (share) with new cv_bridge
  assert(l_image_msg->encoding == sensor_msgs::image_encodings::MONO8);
  assert(r_image_msg->encoding == sensor_msgs::image_encodings::MONO8);

  // Update the camera model
  model_.fromCameraInfo(l_info_msg, r_info_msg);

  // Allocate new disparity image message
  DisparityImagePtr disp_msg = boost::make_shared<DisparityImage>();
  disp_msg->header         = l_info_msg->header;
  disp_msg->image.header   = l_info_msg->header;
  disp_msg->image.height   = l_image_msg->height;
  disp_msg->image.width    = l_image_msg->width;
  disp_msg->image.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
  disp_msg->image.step     = disp_msg->image.width * sizeof(float);
  disp_msg->image.data.resize(disp_msg->image.height * disp_msg->image.step);

  // Stereo parameters
  disp_msg->f = model_.right().fx();
  disp_msg->T = model_.baseline();

  // Compute window of (potentially) valid disparities
  cv::Ptr<CvStereoBMState> params = block_matcher_.state;
  int border = params->SADWindowSize / 2;
  int left   = params->numberOfDisparities + params->minDisparity + border - 1;
  int right_margin = (params->minDisparity >= 0) ? border + params->minDisparity
                                                 : std::max(border, -params->minDisparity);
  int right  = disp_msg->image.width - 1 - right_margin;
  int top    = border;
  int bottom = disp_msg->image.height - 1 - border;
  disp_msg->valid_window.x_offset = left;
  disp_msg->valid_window.y_offset = top;
  disp_msg->valid_window.width    = right - left;
  disp_msg->valid_window.height   = bottom - top;

  // Disparity search range
  disp_msg->min_disparity = params->minDisparity;
  disp_msg->max_disparity = params->minDisparity + params->numberOfDisparities - 1;
  disp_msg->delta_d = 1.0 / 16; // OpenCV uses 16 disparities per pixel

  // Create cv::Mat views onto all buffers
  const cv::Mat_<uint8_t> l_image(l_image_msg->height, l_image_msg->width,
                                  const_cast<uint8_t*>(&l_image_msg->data[0]),
                                  l_image_msg->step);
  const cv::Mat_<uint8_t> r_image(r_image_msg->height, r_image_msg->width,
                                  const_cast<uint8_t*>(&r_image_msg->data[0]),
                                  r_image_msg->step);
  cv::Mat_<float> disp_image(disp_msg->image.height, disp_msg->image.width,
                             reinterpret_cast<float*>(&disp_msg->image.data[0]),
                             disp_msg->image.step);

  // Perform block matching to find the disparities
  block_matcher_(l_image, r_image, disp_image, CV_32F);

  // Adjust for any x-offset between the principal points: d' = d - (cx_l - cx_r)
  double cx_l = model_.left().cx();
  double cx_r = model_.right().cx();
  if (cx_l != cx_r)
    cv::subtract(disp_image, cv::Scalar(cx_l - cx_r), disp_image);

  pub_disparity_.publish(disp_msg);
}
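// A minimal sketch of the same valid-window arithmetic as a standalone helper;
// the struct and function names are hypothetical. For example, SADWindowSize=15,
// numberOfDisparities=64, minDisparity=0 on a 640x480 image gives
// (x_offset 70, y_offset 7, width 562, height 465).
#include <algorithm>

struct ValidWindow { int x_offset, y_offset, width, height; };

ValidWindow computeValidWindow(int image_width, int image_height,
                               int sad_window_size, int num_disparities,
                               int min_disparity)
{
  int border = sad_window_size / 2;
  // Left edge: first column where the full correlation window and search range fit
  int left   = num_disparities + min_disparity + border - 1;
  // Right margin depends on the sign of minDisparity
  int margin = (min_disparity >= 0) ? border + min_disparity
                                    : std::max(border, -min_disparity);
  int right  = image_width - 1 - margin;

  ValidWindow w;
  w.x_offset = left;
  w.y_offset = border;
  w.width    = right - left;
  w.height   = (image_height - 1 - border) - border;
  return w;
}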
void PointCloud2Nodelet::imageCb(const ImageConstPtr& l_image_msg,
                                 const CameraInfoConstPtr& l_info_msg,
                                 const CameraInfoConstPtr& r_info_msg,
                                 const DisparityImageConstPtr& disp_msg)
{
  // Update the camera model
  model_.fromCameraInfo(l_info_msg, r_info_msg);

  // Calculate point cloud
  const Image& dimage = disp_msg->image;
  const cv::Mat_<float> dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step);
  model_.projectDisparityImageTo3d(dmat, points_mat_, true);
  cv::Mat_<cv::Vec3f> mat = points_mat_;

  // Fill in new PointCloud2 message (2D image-like layout)
  PointCloud2Ptr points_msg = boost::make_shared<PointCloud2>();
  points_msg->header = disp_msg->header;
  points_msg->height = mat.rows;
  points_msg->width  = mat.cols;
  points_msg->fields.resize(4);
  points_msg->fields[0].name = "x";
  points_msg->fields[0].offset = 0;
  points_msg->fields[0].count = 1;
  points_msg->fields[0].datatype = PointField::FLOAT32;
  points_msg->fields[1].name = "y";
  points_msg->fields[1].offset = 4;
  points_msg->fields[1].count = 1;
  points_msg->fields[1].datatype = PointField::FLOAT32;
  points_msg->fields[2].name = "z";
  points_msg->fields[2].offset = 8;
  points_msg->fields[2].count = 1;
  points_msg->fields[2].datatype = PointField::FLOAT32;
  points_msg->fields[3].name = "rgb";
  points_msg->fields[3].offset = 12;
  points_msg->fields[3].count = 1;
  points_msg->fields[3].datatype = PointField::FLOAT32;
  //points_msg->is_bigendian = false; ???
  static const int STEP = 16;
  points_msg->point_step = STEP;
  points_msg->row_step = points_msg->point_step * points_msg->width;
  points_msg->data.resize(points_msg->row_step * points_msg->height);
  points_msg->is_dense = false; // there may be invalid points

  float bad_point = std::numeric_limits<float>::quiet_NaN();
  int offset = 0;
  for (int v = 0; v < mat.rows; ++v)
  {
    for (int u = 0; u < mat.cols; ++u, offset += STEP)
    {
      if (isValidPoint(mat(v,u)))
      {
        // x,y,z,rgba
        memcpy(&points_msg->data[offset + 0], &mat(v,u)[0], sizeof(float));
        memcpy(&points_msg->data[offset + 4], &mat(v,u)[1], sizeof(float));
        memcpy(&points_msg->data[offset + 8], &mat(v,u)[2], sizeof(float));
      }
      else
      {
        memcpy(&points_msg->data[offset + 0], &bad_point, sizeof(float));
        memcpy(&points_msg->data[offset + 4], &bad_point, sizeof(float));
        memcpy(&points_msg->data[offset + 8], &bad_point, sizeof(float));
      }
    }
  }

  // Fill in color
  namespace enc = sensor_msgs::image_encodings;
  const std::string& encoding = l_image_msg->encoding;
  offset = 0;
  if (encoding == enc::MONO8)
  {
    const cv::Mat_<uint8_t> color(l_image_msg->height, l_image_msg->width,
                                  (uint8_t*)&l_image_msg->data[0],
                                  l_image_msg->step);
    for (int v = 0; v < mat.rows; ++v)
    {
      for (int u = 0; u < mat.cols; ++u, offset += STEP)
      {
        if (isValidPoint(mat(v,u)))
        {
          uint8_t g = color(v,u);
          int32_t rgb = (g << 16) | (g << 8) | g;
          memcpy(&points_msg->data[offset + 12], &rgb, sizeof(int32_t));
        }
        else
        {
          memcpy(&points_msg->data[offset + 12], &bad_point, sizeof(float));
        }
      }
    }
  }
  else if (encoding == enc::RGB8)
  {
    const cv::Mat_<cv::Vec3b> color(l_image_msg->height, l_image_msg->width,
                                    (cv::Vec3b*)&l_image_msg->data[0],
                                    l_image_msg->step);
    for (int v = 0; v < mat.rows; ++v)
    {
      for (int u = 0; u < mat.cols; ++u, offset += STEP)
      {
        if (isValidPoint(mat(v,u)))
        {
          const cv::Vec3b& rgb = color(v,u);
          int32_t rgb_packed = (rgb[0] << 16) | (rgb[1] << 8) | rgb[2];
          memcpy(&points_msg->data[offset + 12], &rgb_packed, sizeof(int32_t));
        }
        else
        {
          memcpy(&points_msg->data[offset + 12], &bad_point, sizeof(float));
        }
      }
    }
  }
  else if (encoding == enc::BGR8)
  {
    const cv::Mat_<cv::Vec3b> color(l_image_msg->height, l_image_msg->width,
                                    (cv::Vec3b*)&l_image_msg->data[0],
                                    l_image_msg->step);
    for (int v = 0; v < mat.rows; ++v)
    {
      for (int u = 0; u < mat.cols; ++u, offset += STEP)
      {
        if (isValidPoint(mat(v,u)))
        {
          const cv::Vec3b& bgr = color(v,u);
          int32_t rgb_packed = (bgr[2] << 16) | (bgr[1] << 8) | bgr[0];
          memcpy(&points_msg->data[offset + 12], &rgb_packed, sizeof(int32_t));
        }
        else
        {
          memcpy(&points_msg->data[offset + 12], &bad_point, sizeof(float));
        }
      }
    }
  }
  else
  {
    NODELET_WARN_THROTTLE(30, "Could not fill color channel of the point cloud, "
                          "unsupported encoding '%s'", encoding.c_str());
  }

  pub_points2_.publish(points_msg);
}
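// isValidPoint() is called throughout this file but not shown in this excerpt.
// A minimal sketch of a plausible implementation, assuming that
// projectDisparityImageTo3d(..., true) marks pixels with missing disparity by
// setting Z to image_geometry::StereoCameraModel::MISSING_Z (treat both the
// definition and that assumption as illustrative, not the canonical source):
#include <cmath>
#include <opencv2/core/core.hpp>
#include <image_geometry/stereo_camera_model.h>

inline bool isValidPoint(const cv::Vec3f& pt)
{
  // Missing disparities get Z = MISSING_Z; also reject non-finite depths so
  // NaN/inf never reaches the "valid" branch of the loops above.
  return pt[2] != image_geometry::StereoCameraModel::MISSING_Z &&
         !std::isinf(pt[2]);
}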
void StereoProcessor::processPoints(const stereo_msgs::DisparityImage& disparity,
                                    const cv::Mat& color, const std::string& encoding,
                                    const image_geometry::StereoCameraModel& model,
                                    sensor_msgs::PointCloud& points) const
{
  // Calculate dense point cloud
  const sensor_msgs::Image& dimage = disparity.image;
  const cv::Mat_<float> dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step);
  model.projectDisparityImageTo3d(dmat, dense_points_, true);

  // Fill in sparse point cloud message
  points.points.resize(0);
  points.channels.resize(3);
  points.channels[0].name = "rgb";
  points.channels[0].values.resize(0);
  points.channels[1].name = "u";
  points.channels[1].values.resize(0);
  points.channels[2].name = "v";
  points.channels[2].values.resize(0);

  // NOTE: throughout this function u indexes rows and v indexes columns.
  for (int32_t u = 0; u < dense_points_.rows; ++u)
  {
    for (int32_t v = 0; v < dense_points_.cols; ++v)
    {
      if (isValidPoint(dense_points_(u,v)))
      {
        // x,y,z
        geometry_msgs::Point32 pt;
        pt.x = dense_points_(u,v)[0];
        pt.y = dense_points_(u,v)[1];
        pt.z = dense_points_(u,v)[2];
        points.points.push_back(pt);
        // u,v
        points.channels[1].values.push_back(u);
        points.channels[2].values.push_back(v);
      }
    }
  }

  // Fill in color
  namespace enc = sensor_msgs::image_encodings;
  points.channels[0].values.reserve(points.points.size());
  if (encoding == enc::MONO8)
  {
    for (int32_t u = 0; u < dense_points_.rows; ++u)
    {
      for (int32_t v = 0; v < dense_points_.cols; ++v)
      {
        if (isValidPoint(dense_points_(u,v)))
        {
          uint8_t g = color.at<uint8_t>(u,v);
          int32_t rgb = (g << 16) | (g << 8) | g;
          points.channels[0].values.push_back(*(float*)(&rgb));
        }
      }
    }
  }
  else if (encoding == enc::RGB8)
  {
    for (int32_t u = 0; u < dense_points_.rows; ++u)
    {
      for (int32_t v = 0; v < dense_points_.cols; ++v)
      {
        if (isValidPoint(dense_points_(u,v)))
        {
          const cv::Vec3b& rgb = color.at<cv::Vec3b>(u,v);
          int32_t rgb_packed = (rgb[0] << 16) | (rgb[1] << 8) | rgb[2];
          points.channels[0].values.push_back(*(float*)(&rgb_packed));
        }
      }
    }
  }
  else if (encoding == enc::BGR8)
  {
    for (int32_t u = 0; u < dense_points_.rows; ++u)
    {
      for (int32_t v = 0; v < dense_points_.cols; ++v)
      {
        if (isValidPoint(dense_points_(u,v)))
        {
          const cv::Vec3b& bgr = color.at<cv::Vec3b>(u,v);
          int32_t rgb_packed = (bgr[2] << 16) | (bgr[1] << 8) | bgr[0];
          points.channels[0].values.push_back(*(float*)(&rgb_packed));
        }
      }
    }
  }
  else
  {
    ROS_WARN("Could not fill color channel of the point cloud, "
             "unrecognized encoding '%s'", encoding.c_str());
  }
}
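// The "rgb" channel above stores a packed 0x00RRGGBB integer whose bits are
// reinterpreted as a float, the usual PointCloud color convention. A minimal
// sketch of packing/unpacking; memcpy is the strict-aliasing-safe equivalent
// of the *(float*)(&rgb) cast used above. Helper names are illustrative.
#include <cstring>
#include <stdint.h>

inline float packRGB(uint8_t r, uint8_t g, uint8_t b)
{
  int32_t rgb = (r << 16) | (g << 8) | b;  // 0x00RRGGBB
  float f;
  std::memcpy(&f, &rgb, sizeof(f));
  return f;
}

inline void unpackRGB(float f, uint8_t& r, uint8_t& g, uint8_t& b)
{
  int32_t rgb;
  std::memcpy(&rgb, &f, sizeof(rgb));
  r = (rgb >> 16) & 0xFF;
  g = (rgb >> 8)  & 0xFF;
  b =  rgb        & 0xFF;
}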
void StereoProcessor::processPoints2(const stereo_msgs::DisparityImage& disparity,
                                     const cv::Mat& color, const std::string& encoding,
                                     const image_geometry::StereoCameraModel& model,
                                     sensor_msgs::PointCloud2& points) const
{
  // Calculate dense point cloud
  const sensor_msgs::Image& dimage = disparity.image;
  const cv::Mat_<float> dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step);
  model.projectDisparityImageTo3d(dmat, dense_points_, true);

  // Fill in PointCloud2 message (2D image-like layout)
  points.height = dense_points_.rows;
  points.width  = dense_points_.cols;
  points.fields.resize(4);
  points.fields[0].name = "x";
  points.fields[0].offset = 0;
  points.fields[0].count = 1;
  points.fields[0].datatype = sensor_msgs::PointField::FLOAT32;
  points.fields[1].name = "y";
  points.fields[1].offset = 4;
  points.fields[1].count = 1;
  points.fields[1].datatype = sensor_msgs::PointField::FLOAT32;
  points.fields[2].name = "z";
  points.fields[2].offset = 8;
  points.fields[2].count = 1;
  points.fields[2].datatype = sensor_msgs::PointField::FLOAT32;
  points.fields[3].name = "rgb";
  points.fields[3].offset = 12;
  points.fields[3].count = 1;
  points.fields[3].datatype = sensor_msgs::PointField::FLOAT32;
  //points.is_bigendian = false; ???
  points.point_step = 16;
  points.row_step = points.point_step * points.width;
  points.data.resize(points.row_step * points.height);
  points.is_dense = false; // there may be invalid points

  float bad_point = std::numeric_limits<float>::quiet_NaN();
  int i = 0;
  for (int32_t u = 0; u < dense_points_.rows; ++u)
  {
    for (int32_t v = 0; v < dense_points_.cols; ++v, ++i)
    {
      if (isValidPoint(dense_points_(u,v)))
      {
        // x,y,z,rgba
        memcpy(&points.data[i * points.point_step + 0], &dense_points_(u,v)[0], sizeof(float));
        memcpy(&points.data[i * points.point_step + 4], &dense_points_(u,v)[1], sizeof(float));
        memcpy(&points.data[i * points.point_step + 8], &dense_points_(u,v)[2], sizeof(float));
      }
      else
      {
        memcpy(&points.data[i * points.point_step + 0], &bad_point, sizeof(float));
        memcpy(&points.data[i * points.point_step + 4], &bad_point, sizeof(float));
        memcpy(&points.data[i * points.point_step + 8], &bad_point, sizeof(float));
      }
    }
  }

  // Fill in color
  namespace enc = sensor_msgs::image_encodings;
  i = 0;
  if (encoding == enc::MONO8)
  {
    for (int32_t u = 0; u < dense_points_.rows; ++u)
    {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i)
      {
        if (isValidPoint(dense_points_(u,v)))
        {
          uint8_t g = color.at<uint8_t>(u,v);
          int32_t rgb = (g << 16) | (g << 8) | g;
          memcpy(&points.data[i * points.point_step + 12], &rgb, sizeof(int32_t));
        }
        else
        {
          memcpy(&points.data[i * points.point_step + 12], &bad_point, sizeof(float));
        }
      }
    }
  }
  else if (encoding == enc::RGB8)
  {
    for (int32_t u = 0; u < dense_points_.rows; ++u)
    {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i)
      {
        if (isValidPoint(dense_points_(u,v)))
        {
          const cv::Vec3b& rgb = color.at<cv::Vec3b>(u,v);
          int32_t rgb_packed = (rgb[0] << 16) | (rgb[1] << 8) | rgb[2];
          memcpy(&points.data[i * points.point_step + 12], &rgb_packed, sizeof(int32_t));
        }
        else
        {
          memcpy(&points.data[i * points.point_step + 12], &bad_point, sizeof(float));
        }
      }
    }
  }
  else if (encoding == enc::BGR8)
  {
    for (int32_t u = 0; u < dense_points_.rows; ++u)
    {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i)
      {
        if (isValidPoint(dense_points_(u,v)))
        {
          const cv::Vec3b& bgr = color.at<cv::Vec3b>(u,v);
          int32_t rgb_packed = (bgr[2] << 16) | (bgr[1] << 8) | bgr[0];
          memcpy(&points.data[i * points.point_step + 12], &rgb_packed, sizeof(int32_t));
        }
        else
        {
          memcpy(&points.data[i * points.point_step + 12], &bad_point, sizeof(float));
        }
      }
    }
  }
  else
  {
    ROS_WARN("Could not fill color channel of the point cloud, "
             "unrecognized encoding '%s'", encoding.c_str());
  }
}
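// A minimal sketch of reading one point back out of a PointCloud2 built with
// the 16-byte layout above (x at offset 0, y at 4, z at 8, rgb at 12), using
// row_step/point_step for row-major indexing; the helper name is illustrative.
#include <cstring>
#include <sensor_msgs/PointCloud2.h>

inline void readPointXYZ(const sensor_msgs::PointCloud2& cloud, int row, int col,
                         float& x, float& y, float& z)
{
  const uint8_t* p = &cloud.data[row * cloud.row_step + col * cloud.point_step];
  std::memcpy(&x, p + 0, sizeof(float));  // fields[0] "x", offset 0
  std::memcpy(&y, p + 4, sizeof(float));  // fields[1] "y", offset 4
  std::memcpy(&z, p + 8, sizeof(float));  // fields[2] "z", offset 8
}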
void imageCb(const sensor_msgs::ImageConstPtr& l_image,
             const sensor_msgs::CameraInfoConstPtr& l_cam_info,
             const sensor_msgs::ImageConstPtr& r_image,
             const sensor_msgs::CameraInfoConstPtr& r_cam_info)
{
  ROS_INFO("In callback, seq = %u", l_cam_info->header.seq);

  // Convert ROS messages for use with OpenCV
  cv::Mat left, right;
  try
  {
    left  = l_bridge_.imgMsgToCv(l_image, "mono8");
    right = r_bridge_.imgMsgToCv(r_image, "mono8");
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("Conversion error: %s", e.what());
    return;
  }
  cam_model_.fromCameraInfo(l_cam_info, r_cam_info);

  frame_common::CamParams cam_params;
  cam_params.fx = cam_model_.left().fx();
  cam_params.fy = cam_model_.left().fy();
  cam_params.cx = cam_model_.left().cx();
  cam_params.cy = cam_model_.left().cy();
  cam_params.tx = cam_model_.baseline();

  if (vslam_system_.addFrame(cam_params, left, right))
  {
    /// @todo Not rely on broken encapsulation of VslamSystem here
    int size = vslam_system_.sba_.nodes.size();
    if (size % 2 == 0)
    {
      // publish markers
      sba::drawGraph(vslam_system_.sba_, cam_marker_pub_, point_marker_pub_);
    }

    // Publish VO tracks
    if (vo_tracks_pub_.getNumSubscribers() > 0)
    {
      frame_common::drawVOtracks(left, vslam_system_.vo_.frames, vo_display_);
      IplImage ipl = vo_display_;
      sensor_msgs::ImagePtr msg = sensor_msgs::CvBridge::cvToImgMsg(&ipl);
      msg->header = l_cam_info->header;
      vo_tracks_pub_.publish(msg, l_cam_info);
    }

    // Refine large-scale SBA.
    const int LARGE_SBA_INTERVAL = 10;
    if (size > 4 && size % LARGE_SBA_INTERVAL == 0)
    {
      ROS_INFO("Running large SBA on %d nodes", size);
      vslam_system_.refine();
    }

    if (pointcloud_pub_.getNumSubscribers() > 0 && size % 2 == 0)
      publishRegisteredPointclouds(vslam_system_.sba_, vslam_system_.frames_, pointcloud_pub_);

    // Publish odometry data to tf.
    if (0) // TODO: Change this to parameter.
    {
      ros::Time stamp = l_cam_info->header.stamp;
      std::string image_frame = l_cam_info->header.frame_id;
      Eigen::Vector4d trans = -vslam_system_.sba_.nodes.back().trans;
      Eigen::Quaterniond rot = vslam_system_.sba_.nodes.back().qrot.conjugate();
      trans.head<3>() = rot.toRotationMatrix() * trans.head<3>();

      tf_transform_.setOrigin(tf::Vector3(trans(0), trans(1), trans(2)));
      tf_transform_.setRotation(tf::Quaternion(rot.x(), rot.y(), rot.z(), rot.w()));
      tf::Transform simple_transform;
      simple_transform.setOrigin(tf::Vector3(0, 0, 0));
      simple_transform.setRotation(tf::Quaternion(.5, -.5, .5, .5));
      tf_broadcast_.sendTransform(tf::StampedTransform(tf_transform_, stamp, image_frame, "visual_odom"));
      tf_broadcast_.sendTransform(tf::StampedTransform(simple_transform, stamp, "visual_odom", "pgraph"));

      // Publish odometry data on topic.
      if (odom_pub_.getNumSubscribers() > 0)
      {
        tf::StampedTransform base_to_image;
        tf::Transform base_to_visodom;

        try
        {
          tf_listener_.lookupTransform(image_frame, "/base_footprint", stamp, base_to_image);
        }
        catch (const tf::TransformException& ex)
        {
          ROS_WARN("%s", ex.what());
          return;
        }

        base_to_visodom = tf_transform_.inverse() * base_to_image;

        geometry_msgs::PoseStamped pose;
        nav_msgs::Odometry odom;
        pose.header.frame_id = "/visual_odom";
        pose.pose.position.x = base_to_visodom.getOrigin().x();
        pose.pose.position.y = base_to_visodom.getOrigin().y();
        pose.pose.position.z = base_to_visodom.getOrigin().z();
        pose.pose.orientation.x = base_to_visodom.getRotation().x();
        pose.pose.orientation.y = base_to_visodom.getRotation().y();
        pose.pose.orientation.z = base_to_visodom.getRotation().z();
        pose.pose.orientation.w = base_to_visodom.getRotation().w();

        odom.header.stamp = stamp;
        odom.header.frame_id = "/visual_odom";
        odom.child_frame_id = "/base_footprint";
        odom.pose.pose = pose.pose;
        /*
        odom.pose.covariance[0]  = 1;
        odom.pose.covariance[7]  = 1;
        odom.pose.covariance[14] = 1;
        odom.pose.covariance[21] = 1;
        odom.pose.covariance[28] = 1;
        odom.pose.covariance[35] = 1;
        */
        odom_pub_.publish(odom);
      }
    }
  }
}
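// The disabled tf block above inverts the latest SBA node pose: for a rigid
// transform (R, t) the inverse is (R^T, -R^T t), and for a unit quaternion the
// conjugate plays the role of R^T. A minimal standalone Eigen sketch of the
// same computation, with a purely illustrative pose:
#include <Eigen/Core>
#include <Eigen/Geometry>

int main()
{
  Eigen::Quaterniond q(Eigen::AngleAxisd(0.3, Eigen::Vector3d::UnitZ()));
  Eigen::Vector3d t(1.0, 2.0, 0.5);

  // Inverse pose, matching trans = rot.conjugate() * (-trans) in the callback
  Eigen::Quaterniond q_inv = q.conjugate();
  Eigen::Vector3d t_inv = -(q_inv.toRotationMatrix() * t);

  // Sanity check: composing the pose with its inverse yields the identity,
  // so R*t_inv + t must be the zero vector.
  Eigen::Vector3d residual = q.toRotationMatrix() * t_inv + t;
  return residual.norm() < 1e-12 ? 0 : 1;
}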