void imageCallback(
    const sensor_msgs::ImageConstPtr& l_image_msg,
    const sensor_msgs::ImageConstPtr& r_image_msg,
    const sensor_msgs::CameraInfoConstPtr& l_info_msg,
    const sensor_msgs::CameraInfoConstPtr& r_info_msg)
{
  bool first_run = false;
  // create odometer if not exists
  if (!visual_odometer_)
  {
    first_run = true;
#if (DEBUG)
    // store the first image pair so the debug visualization below has a
    // "previous" frame to draw matches against
    cv_bridge::CvImageConstPtr l_cv_ptr, r_cv_ptr;
    l_cv_ptr = cv_bridge::toCvShare(l_image_msg, sensor_msgs::image_encodings::MONO8);
    r_cv_ptr = cv_bridge::toCvShare(r_image_msg, sensor_msgs::image_encodings::MONO8);
    last_l_image_ = l_cv_ptr->image;
    last_r_image_ = r_cv_ptr->image;
#endif
    initOdometer(l_info_msg, r_info_msg);
  }

  // convert images if necessary
  uint8_t *l_image_data, *r_image_data;
  int l_step, r_step;
  cv_bridge::CvImageConstPtr l_cv_ptr, r_cv_ptr;
  l_cv_ptr = cv_bridge::toCvShare(l_image_msg, sensor_msgs::image_encodings::MONO8);
  l_image_data = l_cv_ptr->image.data;
  l_step = l_cv_ptr->image.step[0];
  r_cv_ptr = cv_bridge::toCvShare(r_image_msg, sensor_msgs::image_encodings::MONO8);
  r_image_data = r_cv_ptr->image.data;
  r_step = r_cv_ptr->image.step[0];

  ROS_ASSERT(l_step == r_step);
  ROS_ASSERT(l_image_msg->width == r_image_msg->width);
  ROS_ASSERT(l_image_msg->height == r_image_msg->height);

  int32_t dims[] = {l_image_msg->width, l_image_msg->height, l_step};

  // on first run or when odometer got lost, only feed the odometer with
  // images without retrieving data
  if (first_run || got_lost_)
  {
    visual_odometer_->process(l_image_data, r_image_data, dims);
    got_lost_ = false;
  }
  else
  {
    bool success = visual_odometer_->process(
        l_image_data, r_image_data, dims, last_motion_small_);
    if (success)
    {
      Matrix motion = Matrix::inv(visual_odometer_->getMotion());
      ROS_DEBUG("Found %i matches with %i inliers.",
                visual_odometer_->getNumberOfMatches(),
                visual_odometer_->getNumberOfInliers());
      ROS_DEBUG_STREAM("libviso2 returned the following motion:\n" << motion);
      Matrix camera_motion;
      // if image was replaced due to small motion we have to subtract the
      // last motion to get the increment
      if (last_motion_small_)
      {
        camera_motion = Matrix::inv(reference_motion_) * motion;
      }
      else
      {
        // image was not replaced, report full motion from odometer
        camera_motion = motion;
      }
      reference_motion_ = motion; // store last motion as reference

      std::cout << camera_motion << "\n" << std::endl;

#if (USE_MOVEMENT_CONSTRAIN)
      // recover ZYX Euler angles from the rotation part of the motion matrix
      // (note: deltaPitch is also used below when building rot_mat, so this
      // block must be enabled for the code to compile)
      double deltaRoll  = atan2(camera_motion.val[2][1], camera_motion.val[2][2]);
      double deltaPitch = asin(-camera_motion.val[2][0]);
      double deltaYaw   = atan2(camera_motion.val[1][0], camera_motion.val[0][0]);
      double tanRoll  = camera_motion.val[2][1] / camera_motion.val[2][2];
      double tanPitch = tan(deltaPitch);
      printf("deltaRoll is %lf\n", deltaRoll);
      printf("deltaPitch is %lf\n", deltaPitch);
      printf("deltaYaw is %lf\n", deltaYaw);

      double deltaX = camera_motion.val[0][3];
      double deltaY = camera_motion.val[1][3];
      double deltaZ = camera_motion.val[2][3];
      printf("dx %lf, dy %lf, dz %lf, tanRoll %lf tanPitch %lf\n",
             deltaX, deltaY, deltaZ, tanRoll, tanPitch);

      // clamp the vertical translation so it stays consistent with the
      // measured roll
      if (deltaY > 0 && deltaY > tanRoll * deltaZ)
      {
        camera_motion.val[1][3] = tanRoll * deltaZ;
        //printf("dy %lf deltaY, dynew %lf\n", deltaY, camera_motion.val[2][3]);
      }
      else if (deltaY < 0 && deltaY < -tanRoll * deltaZ)
      {
        camera_motion.val[1][3] = -tanRoll * deltaZ;
        //printf("dy %lf deltaY, dynew %lf\n", deltaY, camera_motion.val[2][3]);
      }
      /*if (deltaX > 0 && deltaX > tanPitch * deltaZ)
      {
        camera_motion.val[0][3] = tanPitch * deltaZ;
        printf("dx %lf, dxnew %lf\n", deltaX, camera_motion.val[1][3]);
      }
      else if (deltaX < 0 && deltaX < -tanPitch * deltaZ)
      {
        camera_motion.val[0][3] = -tanPitch * deltaZ;
        printf("dx %lf, dxnew %lf\n", deltaX, camera_motion.val[1][3]);
      }*/
      /*if (deltaPitch > 0)
      {
        deltaPitch = deltaPitch + fabs(deltaRoll) + fabs(deltaYaw);
      }
      else
      {
        deltaPitch = deltaPitch - fabs(deltaRoll) - fabs(deltaYaw);
      }*/
      deltaPitch = deltaPitch + deltaYaw;
#endif

      // calculate current feature flow
      std::vector<Matcher::p_match> matches = visual_odometer_->getMatches();
      std::vector<int> inlier_indices = visual_odometer_->getInlierIndices();

#if (DEBUG)
      // stack the previous and current left images vertically and draw the
      // feature matches between them
      cv::Mat img1 = l_cv_ptr->image;
      cv::Mat img2 = r_cv_ptr->image;
      cv::Size size(last_l_image_.cols, last_l_image_.rows + last_r_image_.rows);
      cv::Mat outImg(size, CV_MAKETYPE(img1.depth(), 3));
      cv::Mat outImg1 = outImg(cv::Rect(0, 0, last_l_image_.cols, last_l_image_.rows));
      cv::Mat outImg2 = outImg(cv::Rect(0, last_l_image_.rows, img1.cols, img1.rows));
      if (last_l_image_.type() == CV_8U)
        cvtColor(last_l_image_, outImg1, CV_GRAY2BGR);
      else
        last_l_image_.copyTo(outImg1);
      if (img1.type() == CV_8U)
        cvtColor(img1, outImg2, CV_GRAY2BGR);
      else
        img1.copyTo(outImg2);

      for (size_t i = 0; i < matches.size(); ++i)
      {
        cv::Point pt1(matches[i].u1p, matches[i].v1p);
        cv::Point pt2(matches[i].u1c, matches[i].v1c + last_l_image_.rows);
        // only draw matches starting in the lower half of a 480-pixel-high image
        if (pt1.y > 239)
          cv::line(outImg, pt1, pt2, cv::Scalar(255, 0, 0));
        //else
        //  cv::line(outImg, pt1, pt2, cv::Scalar(0, 255, 0));
      }
      cv::imshow("matching image", outImg);
      cv::waitKey(10);
      last_l_image_ = img1;
      last_r_image_ = img2;
#endif

      double feature_flow = computeFeatureFlow(matches);
      last_motion_small_ = (feature_flow < motion_threshold_);
      ROS_DEBUG_STREAM("Feature flow is " << feature_flow
                       << ", marking last motion as "
                       << (last_motion_small_ ? "small." : "normal."));

      // build the delta transform from a pure pitch rotation (using the
      // constrained deltaPitch) and the possibly clamped translation
      btMatrix3x3 rot_mat(
          cos(deltaPitch), 0, sin(deltaPitch),
          0, 1, 0,
          -sin(deltaPitch), 0, cos(deltaPitch));
      /*btMatrix3x3 rot_mat(
          camera_motion.val[0][0], camera_motion.val[0][1], camera_motion.val[0][2],
          camera_motion.val[1][0], camera_motion.val[1][1], camera_motion.val[1][2],
          camera_motion.val[2][0], camera_motion.val[2][1], camera_motion.val[2][2]);*/
      btVector3 t(camera_motion.val[0][3], camera_motion.val[1][3], camera_motion.val[2][3]);
      // rotation
      /*double delta_yaw = 0;
      double delta_pitch = 0;
      double delta_roll = 0;
      delta_yaw = delta_yaw * M_PI / 180.0;
      delta_pitch = delta_pitch * M_PI / 180.0;
      delta_roll = delta_roll * M_PI / 180.0;
      //btMatrix3x3 initialTrans;
      Eigen::Matrix4f initialTrans = Eigen::Matrix4f::Identity();
      initialTrans(0,0) = cos(delta_pitch)*cos(delta_yaw);
      initialTrans(0,1) = -cos(delta_roll)*sin(delta_yaw) + sin(delta_roll)*sin(delta_pitch)*cos(delta_yaw);
      initialTrans(0,2) = sin(delta_roll)*sin(delta_yaw) + cos(delta_roll)*sin(delta_pitch)*cos(delta_yaw);
      initialTrans(1,0) = cos(delta_pitch)*sin(delta_yaw);
      initialTrans(1,1) = cos(delta_roll)*cos(delta_yaw) + sin(delta_roll)*sin(delta_pitch)*sin(delta_yaw);
      initialTrans(1,2) = -sin(delta_roll)*cos(delta_yaw) + cos(delta_roll)*sin(delta_pitch)*sin(delta_yaw);
      initialTrans(2,0) = -sin(delta_pitch);
      initialTrans(2,1) = sin(delta_roll)*cos(delta_pitch);
      initialTrans(2,2) = cos(delta_roll)*cos(delta_pitch);
      btMatrix3x3 rot_mat(
          initialTrans(0,0), initialTrans(0,1), initialTrans(0,2),
          initialTrans(1,0), initialTrans(1,1), initialTrans(1,2),
          initialTrans(2,0), initialTrans(2,1), initialTrans(2,2));
      btVector3 t(0.0, 0.00, 0.01);*/

      tf::Transform delta_transform(rot_mat, t);
      setPoseCovariance(STANDARD_POSE_COVARIANCE);
      setTwistCovariance(STANDARD_TWIST_COVARIANCE);
      integrateAndPublish(delta_transform, l_image_msg->header.stamp);

      if (point_cloud_pub_.getNumSubscribers() > 0)
      {
        computeAndPublishPointCloud(l_info_msg, l_image_msg, r_info_msg,
                                    matches, inlier_indices);
      }
    }
    else
    {
      setPoseCovariance(BAD_COVARIANCE);
      setTwistCovariance(BAD_COVARIANCE);
      tf::Transform delta_transform;
      delta_transform.setIdentity();
      integrateAndPublish(delta_transform, l_image_msg->header.stamp);
      ROS_DEBUG("Call to VisualOdometryStereo::process() failed.");
      ROS_WARN_THROTTLE(1.0, "Visual Odometer got lost!");
      got_lost_ = true;
    }
  }
}
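// The USE_MOVEMENT_CONSTRAIN block above recovers Euler angles from the 3x3
// rotation part of the motion matrix. As a standalone illustration (not part
// of the node itself), the sketch below shows why those atan2/asin
// expressions are the ZYX extraction for a rotation composed as
// R = Rz(yaw) * Ry(pitch) * Rx(roll). The struct Angles and the function
// eulerFromRotation are hypothetical helpers, assuming row-major indexing
// R[i][j] just like camera_motion.val[i][j].
//
// #include <cmath>
//
// struct Angles { double roll, pitch, yaw; };
//
// // For R = Rz(yaw) * Ry(pitch) * Rx(roll) the bottom row and first column
// // satisfy:
// //   R[2][0] = -sin(pitch)
// //   R[2][1] =  cos(pitch) * sin(roll),  R[2][2] = cos(pitch) * cos(roll)
// //   R[1][0] =  cos(pitch) * sin(yaw),   R[0][0] = cos(pitch) * cos(yaw)
// // which yields exactly the expressions used in the callback.
// inline Angles eulerFromRotation(const double R[3][3])
// {
//   Angles a;
//   a.roll  = std::atan2(R[2][1], R[2][2]);
//   a.pitch = std::asin(-R[2][0]);
//   a.yaw   = std::atan2(R[1][0], R[0][0]);
//   return a;
// }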
void imageCallback(
    const sensor_msgs::ImageConstPtr& l_image_msg,
    const sensor_msgs::ImageConstPtr& r_image_msg,
    const sensor_msgs::CameraInfoConstPtr& l_info_msg,
    const sensor_msgs::CameraInfoConstPtr& r_info_msg)
{
  ros::WallTime start_time = ros::WallTime::now();
  bool first_run = false;
  // create odometer if not exists
  if (!visual_odometer_)
  {
    first_run = true;
    initOdometer(l_info_msg, r_info_msg);
  }

  // convert images if necessary
  uint8_t *l_image_data, *r_image_data;
  int l_step, r_step;
  cv_bridge::CvImageConstPtr l_cv_ptr, r_cv_ptr;
  l_cv_ptr = cv_bridge::toCvShare(l_image_msg, sensor_msgs::image_encodings::MONO8);
  l_image_data = l_cv_ptr->image.data;
  l_step = l_cv_ptr->image.step[0];
  r_cv_ptr = cv_bridge::toCvShare(r_image_msg, sensor_msgs::image_encodings::MONO8);
  r_image_data = r_cv_ptr->image.data;
  r_step = r_cv_ptr->image.step[0];

  ROS_ASSERT(l_step == r_step);
  ROS_ASSERT(l_image_msg->width == r_image_msg->width);
  ROS_ASSERT(l_image_msg->height == r_image_msg->height);

  int32_t dims[] = {l_image_msg->width, l_image_msg->height, l_step};

  // on first run or when odometer got lost, only feed the odometer with
  // images without retrieving data
  if (first_run || got_lost_)
  {
    visual_odometer_->process(l_image_data, r_image_data, dims);
    got_lost_ = false;
    // on first run publish zero once
    if (first_run)
    {
      tf::Transform delta_transform;
      delta_transform.setIdentity();
      integrateAndPublish(delta_transform, l_image_msg->header.stamp);
    }
  }
  else
  {
    bool success = visual_odometer_->process(
        l_image_data, r_image_data, dims, last_motion_small_);
    if (success)
    {
      Matrix motion = Matrix::inv(visual_odometer_->getMotion());
      ROS_DEBUG("Found %i matches with %i inliers.",
                visual_odometer_->getNumberOfMatches(),
                visual_odometer_->getNumberOfInliers());
      ROS_DEBUG_STREAM("libviso2 returned the following motion:\n" << motion);
      Matrix camera_motion;
      // if image was replaced due to small motion we have to subtract the
      // last motion to get the increment
      if (last_motion_small_)
      {
        camera_motion = Matrix::inv(reference_motion_) * motion;
      }
      else
      {
        // image was not replaced, report full motion from odometer
        camera_motion = motion;
      }
      reference_motion_ = motion; // store last motion as reference

      // calculate current feature flow
      std::vector<Matcher::p_match> matches = visual_odometer_->getMatches();
      std::vector<int> inlier_indices = visual_odometer_->getInlierIndices();
      double feature_flow = computeFeatureFlow(matches);
      last_motion_small_ = (feature_flow < motion_threshold_);
      ROS_DEBUG_STREAM("Feature flow is " << feature_flow
                       << ", marking last motion as "
                       << (last_motion_small_ ? "small." : "normal."));

      // build the delta transform directly from the full motion matrix
      tf::Matrix3x3 rot_mat(
          camera_motion.val[0][0], camera_motion.val[0][1], camera_motion.val[0][2],
          camera_motion.val[1][0], camera_motion.val[1][1], camera_motion.val[1][2],
          camera_motion.val[2][0], camera_motion.val[2][1], camera_motion.val[2][2]);
      tf::Vector3 t(camera_motion.val[0][3], camera_motion.val[1][3], camera_motion.val[2][3]);
      tf::Transform delta_transform(rot_mat, t);

      setPoseCovariance(STANDARD_POSE_COVARIANCE);
      setTwistCovariance(STANDARD_TWIST_COVARIANCE);
      integrateAndPublish(delta_transform, l_image_msg->header.stamp);

      if (point_cloud_pub_.getNumSubscribers() > 0)
      {
        computeAndPublishPointCloud(l_info_msg, l_image_msg, r_info_msg,
                                    matches, inlier_indices);
      }
    }
    else
    {
      setPoseCovariance(BAD_COVARIANCE);
      setTwistCovariance(BAD_COVARIANCE);
      tf::Transform delta_transform;
      delta_transform.setIdentity();
      integrateAndPublish(delta_transform, l_image_msg->header.stamp);
      ROS_DEBUG("Call to VisualOdometryStereo::process() failed.");
      ROS_WARN_THROTTLE(1.0, "Visual Odometer got lost!");
      got_lost_ = true;
    }

    {
      // create and publish viso2 info msg
      VisoInfo info_msg;
      info_msg.header.stamp = l_image_msg->header.stamp;
      info_msg.got_lost = !success;
      info_msg.num_matches = visual_odometer_->getNumberOfMatches();
      info_msg.num_inliers = visual_odometer_->getNumberOfInliers();
      ros::WallDuration time_elapsed = ros::WallTime::now() - start_time;
      info_msg.runtime = time_elapsed.toSec();
      info_pub_.publish(info_msg);
    }
  }
}