void broadcastTransform(const Eigen::Matrix4f& trans) {
    tf::Transform newTF = tfFromEigen(trans);
    ros::Time stamp = ros::Time::now();
    std::cout << "Broadcasting transform at " << stamp << std::endl;
    br->sendTransform(tf::StampedTransform(newTF, stamp, "openni_camera", "my_new_shiny_tfBroadcaster"));
}
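All four snippets lean on a tfFromEigen helper that none of them defines. A minimal sketch of what that conversion could look like, assuming the standard tf and Eigen types (the body here is an illustration, not the original implementation):

#include <tf/transform_datatypes.h>
#include <Eigen/Core>

// Hypothetical implementation: copy the 3x3 rotation block and the
// translation column of a 4x4 homogeneous matrix into a tf::Transform.
tf::Transform tfFromEigen(const Eigen::Matrix4f& e)
{
  tf::Matrix3x3 basis(e(0,0), e(0,1), e(0,2),
                      e(1,0), e(1,1), e(1,2),
                      e(2,0), e(2,1), e(2,2));
  tf::Vector3 origin(e(0,3), e(1,3), e(2,3));
  return tf::Transform(basis, origin);
}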
Example #2
  /*
   * The one and only callback: takes the cloud and does everything else needed.
   */
  void ARPublisher::getTransformationCallback (const sensor_msgs::PointCloud2ConstPtr & msg)
  {
    sensor_msgs::ImagePtr image_msg(new sensor_msgs::Image);
    ARUint8 *dataPtr;
    ARMarkerInfo *marker_info;
    int marker_num;
    int i, k, j;

    /* do we need to initialize? */
    if(!configured_)
    {
      if(msg->width == 0 || msg->height == 0)
      {
        ROS_ERROR ("Deformed cloud! Size = %d, %d.", msg->width, msg->height);
        return;
      }

      cam_param_.xsize = msg->width;
      cam_param_.ysize = msg->height;

      cam_param_.dist_factor[0] = msg->width/2;         // x0 = cX from the OpenCV calibration
      cam_param_.dist_factor[1] = msg->height/2;        // y0 = cY from the OpenCV calibration
      cam_param_.dist_factor[2] = 0;                    // distortion factor; would be -100*k1 from OpenCV (the mm^2 -> m^2 unit change scales 10^8 down to 10^2), zeroed here
      cam_param_.dist_factor[3] = 1.0;                  // scale factor; should probably be > 1, but 1.0 works in practice
      
      arInit ();
    }

    /* convert cloud to PCL */
    PointCloud cloud;
    pcl::fromROSMsg(*msg, cloud);
 
    /* extract the RGB data from the cloud as a ROS image (for OpenCV) */
    pcl::toROSMsg (cloud, *image_msg);

    cv_bridge::CvImagePtr cv_ptr;
    try
    {
        cv_ptr = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR ("Could not convert from '%s' to 'bgr8'.", image_msg->encoding.c_str ());
    }
    dataPtr = (ARUint8 *) cv_ptr->image.ptr();

    /* detect the markers in the video frame */
    if (arDetectMarkerLite (dataPtr, threshold_, &marker_info, &marker_num) < 0)
    {
      argCleanup ();
      return;
    }
 
    arPoseMarkers_.markers.clear ();
    /* check for known patterns */
    for (i = 0; i < objectnum; i++)
    {
      k = -1;
      for (j = 0; j < marker_num; j++)
      {
        if (object[i].id == marker_info[j].id)
        {
          if (k == -1)
            k = j;
          else if (marker_info[k].cf < marker_info[j].cf)
            k = j;              // keep the detection with the highest confidence
        }
      }
      if (k == -1)
      {
        object[i].visible = 0;
        continue;
      }
      
      /* create a cloud for marker corners */
      int d = marker_info[k].dir;
      PointCloud marker;
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(4-d)%4][0], (int)marker_info[k].vertex[(4-d)%4][1] ) ); // upper left
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(5-d)%4][0], (int)marker_info[k].vertex[(5-d)%4][1] ) ); // upper right
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(6-d)%4][0], (int)marker_info[k].vertex[(6-d)%4][1] ) ); // lower right
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(7-d)%4][0], (int)marker_info[k].vertex[(7-d)%4][1] ) ); // lower left

      /* create an ideal cloud */
      double w = object[i].marker_width;
      PointCloud ideal;
      ideal.push_back( makeRGBPoint(-w/2,w/2,0) );
      ideal.push_back( makeRGBPoint(w/2,w/2,0) );
      ideal.push_back( makeRGBPoint(w/2,-w/2,0) );
      ideal.push_back( makeRGBPoint(-w/2,-w/2,0) );

      /* get transformation */
      Eigen::Matrix4f t;
      TransformationEstimationSVD obj;
      obj.estimateRigidTransformation( marker, ideal, t );

      
      /* get final transformation */
      tf::Transform transform = tfFromEigen(t.inverse());
   
      // reject transforms containing NaNs or rotation entries out of range
      tf::Matrix3x3  m = transform.getBasis();
      tf::Vector3    p = transform.getOrigin();
      bool invalid = false;
      for (int r = 0; r < 3; r++)
        for (int c = 0; c < 3; c++)
          invalid = (invalid || std::isnan(m[r][c]) || fabs(m[r][c]) > 1.0);

      for (int r = 0; r < 3; r++)
        invalid = (invalid || std::isnan(p[r]));


      if(invalid)
        continue; 

      /* publish the marker */
      ar_pose::ARMarker ar_pose_marker;
      ar_pose_marker.header.frame_id = msg->header.frame_id;
      ar_pose_marker.header.stamp = msg->header.stamp;
      ar_pose_marker.id = object[i].id;

      ar_pose_marker.pose.pose.position.x = transform.getOrigin().getX();
      ar_pose_marker.pose.pose.position.y = transform.getOrigin().getY();
      ar_pose_marker.pose.pose.position.z = transform.getOrigin().getZ();

      tf::Quaternion q = transform.getRotation();   // getAxis() would return the rotation axis, not the quaternion components
      ar_pose_marker.pose.pose.orientation.x = q.getX();
      ar_pose_marker.pose.pose.orientation.y = q.getY();
      ar_pose_marker.pose.pose.orientation.z = q.getZ();
      ar_pose_marker.pose.pose.orientation.w = q.getW();

      ar_pose_marker.confidence = marker_info[k].cf;   // confidence of the matched marker, not just the first one
      arPoseMarkers_.markers.push_back (ar_pose_marker);

      /* publish transform */
      if (publishTf_)
      {
        broadcaster_.sendTransform(tf::StampedTransform(transform, msg->header.stamp, msg->header.frame_id, object[i].name));
      }

      /* publish visual marker */

      if (publishVisualMarkers_)
      {
        tf::Vector3 markerOrigin (0, 0, 0.25 * object[i].marker_width * AR_TO_ROS);
        tf::Transform markerOffset (tf::Quaternion::getIdentity (), markerOrigin);
        tf::Transform markerPose = transform * markerOffset; // marker pose in the camera frame

        tf::poseTFToMsg (markerPose, rvizMarker_.pose);

        rvizMarker_.header.frame_id = msg->header.frame_id;
        rvizMarker_.header.stamp = msg->header.stamp;
        rvizMarker_.id = object[i].id;

        rvizMarker_.scale.x = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.y = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.z = 0.5 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.ns = "basic_shapes";
        rvizMarker_.type = visualization_msgs::Marker::CUBE;
        rvizMarker_.action = visualization_msgs::Marker::ADD;
        switch (i)
        {
          case 0:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 1.0f;
            rvizMarker_.color.a = 1.0;
            break;
          case 1:
            rvizMarker_.color.r = 1.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
            break;
          default:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 1.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
        }
        rvizMarker_.lifetime = ros::Duration ();

        rvizMarkerPub_.publish (rvizMarker_);
        ROS_DEBUG ("Published visual marker");
      }
    }
    arMarkerPub_.publish (arPoseMarkers_);
    ROS_DEBUG ("Published ar_multi markers");
  }
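The pose estimate in Example #2 hinges on PCL's SVD-based rigid-transform estimation: given the four measured corner points and the four ideal corners, estimateRigidTransformation returns the least-squares rotation and translation mapping one set onto the other. A self-contained sketch of that step, assuming the TransformationEstimationSVD typedef above refers to pcl::registration::TransformationEstimationSVD (the corner values are made up):

#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/registration/transformation_estimation_svd.h>

int main()
{
  pcl::PointCloud<pcl::PointXYZ> measured, ideal;

  // four corners measured in the camera frame (values made up)
  measured.push_back(pcl::PointXYZ(0.10f, 0.15f, 1.00f));
  measured.push_back(pcl::PointXYZ(0.20f, 0.15f, 1.02f));
  measured.push_back(pcl::PointXYZ(0.20f, 0.05f, 1.04f));
  measured.push_back(pcl::PointXYZ(0.10f, 0.05f, 1.02f));

  // the same corners in the marker's own frame, for a 0.1 m wide marker
  float w = 0.1f;
  ideal.push_back(pcl::PointXYZ(-w/2,  w/2, 0));
  ideal.push_back(pcl::PointXYZ( w/2,  w/2, 0));
  ideal.push_back(pcl::PointXYZ( w/2, -w/2, 0));
  ideal.push_back(pcl::PointXYZ(-w/2, -w/2, 0));

  // least-squares rigid transform (rotation + translation) via SVD
  Eigen::Matrix4f t;
  pcl::registration::TransformationEstimationSVD<pcl::PointXYZ, pcl::PointXYZ> svd;
  svd.estimateRigidTransformation(measured, ideal, t);

  // t maps measured corners onto ideal ones; the marker pose in the
  // camera frame is therefore t.inverse(), as in the callback above.
  return 0;
}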
Example #3
    /// callback for Kinect point clouds
    void kinect_cb(const sensor_msgs::PointCloud2ConstPtr& pcl_msg) {
        boost::mutex::scoped_lock lock(mutex);

        if (models.empty()) {
            ROS_WARN_ONCE("no model files loaded!");
            return;
        }

        // convert to PCL and image
        PointCloud cloud_;

        pcl::fromROSMsg(*pcl_msg, cloud_);

        sensor_msgs::ImagePtr image_(new sensor_msgs::Image);

        pcl::toROSMsg(cloud_, *image_);

        cv_bridge::CvImageConstPtr capture_;
        try {
            capture_ = cv_bridge::toCvShare(image_, "bgr8");
        } catch (cv_bridge::Exception& e) {
            ROS_ERROR("cv_bridge conversion failed: %s", e.what());
            return;
        }

        ROS_INFO("received point cloud");

        // Generate an aspect on the scene
        ObjectAspect scene;

        scene.calculate(capture_->image, cloud_.makeShared());

        Eigen::Matrix4f t;

        PointCloud feature_cloud;
        sensor_msgs::PointCloud2 feature_msg;
        int i = 0;
        re_kinect_object_detector::DetectionResult resultMsg;
        for(std::map<std::string, RecognitionModel>::iterator it=models.begin(); it!=models.end(); it++, i++) {
            RecognitionModel& model = it->second;
            re_msgs::DetectedObject detectedObjMsg;

            // Match the scene with the model.
            if (model.matchAspects(scene, t)) {

                // get transformation
                ROS_INFO("displaying %d points in frame: %s",(int)scene.match->points->size(),pcl_msg->header.frame_id.c_str());

                PointCloud cloudtransformed;
                pcl::transformPointCloud(*scene.match->points, cloudtransformed, t);

                if (i == 0)
                    feature_cloud = cloudtransformed;
                else
                    feature_cloud += cloudtransformed;

                for (size_t j=0 ; j< cloudtransformed.points.size(); j++ ){
                    PointType pt = cloudtransformed.points.at(j);
                    if (!isnan(pt.x)){
                        int y = pt.y*525.0 / pt.z + capture_->image.size().height / 2;
                        int x = pt.x*525.0 / pt.z + capture_->image.size().width / 2;

                        re_msgs::Pixel px;
                        px.x = x;
                        px.y = y;
                        detectedObjMsg.points2d.push_back(px);

                        geometry_msgs::Point threeD_pt;
                        threeD_pt.x = pt.x;
                        threeD_pt.y = pt.y;
                        threeD_pt.z = pt.z;

                        detectedObjMsg.points3d.push_back(threeD_pt);
                    }
                }
                tf::Transform object_tf = tfFromEigen(t);
                tf_broadcaster.sendTransform(tf::StampedTransform(object_tf, pcl_msg->header.stamp, pcl_msg->header.frame_id, model.model_name));

                resultMsg.ObjectNames.push_back(model.model_name);
                resultMsg.Detections.push_back(detectedObjMsg);

            }
        }

        // publish the accumulated detections once per cloud, attaching the camera image
        cv_bridge::CvImage out_msg;
        out_msg.header = pcl_msg->header;
        out_msg.encoding = "bgr8";
        out_msg.image = capture_->image;
        out_msg.toImageMsg(resultMsg.Image);
        detected_objects_pub.publish(resultMsg);
        pcl::toROSMsg(feature_cloud, feature_msg);

        feature_msg.header.frame_id = pcl_msg->header.frame_id;
        feature_msg.header.stamp = pcl_msg->header.stamp;

        features_pub.publish(feature_msg);
    }
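The pixel coordinates in Example #3 come from a bare pinhole projection, u = f*X/Z + cx and v = f*Y/Z + cy, with the Kinect's commonly used nominal focal length of 525 pixels and the principal point assumed at the image center. The same math as a standalone helper (the function name is made up; unlike the loop above, this sketch also rejects points that project outside the image):

#include <cmath>

// Hypothetical helper: project a 3-D camera-frame point to pixel coordinates
// using a pinhole model with the principal point at the image center.
bool projectToPixel(float X, float Y, float Z,
                    int image_width, int image_height,
                    int& u, int& v)
{
  const double f = 525.0;               // nominal Kinect RGB focal length, in pixels
  if (std::isnan(Z) || Z <= 0.0f)
    return false;                       // invalid depth or behind the camera
  u = static_cast<int>(X * f / Z + image_width  / 2);
  v = static_cast<int>(Y * f / Z + image_height / 2);
  return u >= 0 && u < image_width && v >= 0 && v < image_height;
}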
Example #4
void SparseTrackerAM::pointCloudCallback(const PointCloudT::ConstPtr& cloud_in_ptr)
{
  boost::mutex::scoped_lock lock(mutex_); // name the lock: an unnamed temporary is destroyed (and unlocked) immediately

  struct timeval start_callback, end_callback;
  struct timeval start_features, end_features;
  struct timeval start_model, end_model;
  struct timeval start_icp, end_icp;

  gettimeofday(&start_callback, NULL);

  // **** initialize ***********************************************************

  if (!initialized_)
  {
    initialized_ = getBaseToCameraTf(cloud_in_ptr);
    if (!initialized_) return;
  }

  // **** extract features *****************************************************

  gettimeofday(&start_features, NULL);

  // **** alpha-beta prediction

  ros::Time cur_time = cloud_in_ptr->header.stamp;
  double dt = (cur_time - prev_time_).toSec();
  prev_time_ = cur_time;

  tf::Transform predicted_f2b;

  double pr_x, pr_y, pr_z, pr_roll, pr_pitch, pr_yaw;

  if (use_alpha_beta_)
  {
    double cx, cy, cz, croll, cpitch, cyaw;
    getXYZRPY(f2b_, cx, cy, cz, croll, cpitch, cyaw);

    pr_x     = cx     + v_x_     * dt;
    pr_y     = cy     + v_y_     * dt;
    pr_z     = cz     + v_z_     * dt;
    pr_roll  = croll  + v_roll_  * dt;
    pr_pitch = cpitch + v_pitch_ * dt;
    pr_yaw   = cyaw   + v_yaw_   * dt;

    btVector3 pr_pos(pr_x, pr_y, pr_z);
    btQuaternion pr_q;
    pr_q.setRPY(pr_roll, pr_pitch, pr_yaw);

    predicted_f2b.setOrigin(pr_pos);
    predicted_f2b.setRotation(pr_q);
  }
  else
  {
    predicted_f2b = f2b_;
  }

  PointCloudOrb::Ptr orb_features_ptr = boost::make_shared<PointCloudOrb>();
  bool orb_extract_result = extractOrbFeatures(cloud_in_ptr, orb_features_ptr);
  pcl::transformPointCloud (*orb_features_ptr , *orb_features_ptr, eigenFromTf(predicted_f2b * b2c_));
  orb_features_ptr->header.frame_id = fixed_frame_;

  // FIXME - Canny feature extraction is currently disabled

  PointCloudCanny::Ptr canny_features_ptr = boost::make_shared<PointCloudCanny>();
  bool canny_extract_result = false; // = extractCannyFeatures(cloud_in_ptr, canny_features_ptr);
  pcl::transformPointCloud (*canny_features_ptr , *canny_features_ptr, eigenFromTf(predicted_f2b * b2c_));
  canny_features_ptr->header.frame_id = fixed_frame_;

  gettimeofday(&end_features, NULL);

  // **** ICP ******************************

  gettimeofday(&start_icp, NULL);

  bool orb_icp_result = false;
  bool canny_icp_result = false;

  if (orb_extract_result)
  {
    //printf("~ORB~\n");
    orb_icp_result = OrbICP(orb_features_ptr);

    double eps_roll_  = 0.3;
    double eps_pitch_ = 0.3;
    double eps_yaw_   = 0.3;
    double eps_x_     = 0.3;
    double eps_y_     = 0.3;
    double eps_z_     = 0.3;

    if (orb_icp_result)
    {
      tf::Transform corr = tfFromEigen(orb_reg_.getFinalTransformation());

      // particles add error to motion
      double c_x, c_y, c_z, c_roll, c_pitch, c_yaw;
      double e_x, e_y, e_z, e_roll, e_pitch, e_yaw;
      getXYZRPY(corr, c_x, c_y, c_z, c_roll, c_pitch, c_yaw);

      for (int i = 0; i < 20; i++)
      {
        e_roll  = getUrand() * eps_roll_  * c_roll;
        e_pitch = getUrand() * eps_pitch_ * c_pitch;
        e_yaw   = getUrand() * eps_yaw_   * c_yaw;
        e_x = getUrand() * eps_x_  * c_x;
        e_y = getUrand() * eps_y_  * c_y;
        e_z = getUrand() * eps_z_  * c_z;

        btVector3 e_pos(c_x + e_x, c_y + e_y, c_z + e_z);
        btQuaternion e_q;
        e_q.setRPY(c_roll + e_roll, c_pitch + e_pitch, c_yaw + e_yaw);

        tf::Transform e_corr;

        e_corr.setOrigin(e_pos);
        e_corr.setRotation(e_q);

        pf2b_[i] = e_corr * pf2b_[i];
      }

      // end particles

      tf::Transform measured_f2b = corr * predicted_f2b;

      // **** alpha-beta estimation
      if (use_alpha_beta_)
      {
        double m_x, m_y, m_z, m_roll, m_pitch, m_yaw;
        getXYZRPY(measured_f2b, m_x, m_y, m_z, m_roll, m_pitch, m_yaw);

        // residuals

        double r_x     = m_x     - pr_x;
        double r_y     = m_y     - pr_y;
        double r_z     = m_z     - pr_z;
        double r_roll  = m_roll  - pr_roll;
        double r_pitch = m_pitch - pr_pitch;
        double r_yaw   = m_yaw   - pr_yaw;

        fixAngleD(r_roll);
        fixAngleD(r_pitch);
        fixAngleD(r_yaw);

        // final position

        double f_x     = pr_x     + alpha_ * r_x;
        double f_y     = pr_y     + alpha_ * r_y;
        double f_z     = pr_z     + alpha_ * r_z;
        double f_roll  = pr_roll  + alpha_ * r_roll;
        double f_pitch = pr_pitch + alpha_ * r_pitch;
        double f_yaw   = pr_yaw   + alpha_ * r_yaw;

        btVector3 f_pos(f_x, f_y, f_z);
        btQuaternion f_q;
        f_q.setRPY(f_roll, f_pitch, f_yaw);

        f2b_.setOrigin(f_pos);
        f2b_.setRotation(f_q);

        // final velocity

        v_x_     = v_x_     + beta_ * r_x     / dt;
        v_y_     = v_y_     + beta_ * r_y     / dt;
        v_z_     = v_z_     + beta_ * r_z     / dt;
        v_roll_  = v_roll_  + beta_ * r_roll  / dt;
        v_pitch_ = v_pitch_ + beta_ * r_pitch / dt;
        v_yaw_   = v_yaw_   + beta_ * r_yaw   / dt;

        //printf("VEL: %f, %f, %f\n", v_x_, v_y_, v_z_);
      }
      else
      {
        f2b_ = measured_f2b;
      }
    }
  }


  if (!orb_icp_result)
    printf("ERROR: ORB feature extraction or ICP alignment failed\n");

  gettimeofday(&end_icp, NULL);

  // **** ADD features to model

  gettimeofday(&start_model, NULL);

  if (model_->points.empty())
  {
    *model_ += *orb_features_ptr;
  }
  else
  {
    pcl::KdTreeFLANN<PointOrb> tree_model;
    tree_model.setInputCloud(model_);

    std::vector<int> indices;
    std::vector<float> distances;

    indices.resize(1);
    distances.resize(1);

    for (int i = 0; i < orb_features_ptr->points.size(); ++i)
    {
      PointOrb& p = orb_features_ptr->points[i];

      int n_found = tree_model.nearestKSearch(p, 1, indices, distances);

      if (n_found == 0 || distances[0] > 0.01)
      {
        // no model point within range: insert this feature as a new model point
        model_->points.push_back(p);
        model_->width++;
      }
      else
      {
        // a close model point already exists; keep the old one
        // (averaging p into it, e.g. q.x = 0.5*(p.x + q.x), is left disabled)
      }
    }
  }

  gettimeofday(&end_model, NULL);

  // *** broadcast tf **********************************************************

  broadcastTF(cloud_in_ptr);

  // *** counter  **************************************************************

  frame_count_++;

  // **** print diagnostics ****************************************************

  gettimeofday(&end_callback, NULL);

  double features_dur  = msDuration(start_features, end_features);
  double model_dur     = msDuration(start_model,    end_model);
  double icp_dur       = msDuration(start_icp,      end_icp);
  double callback_dur  = msDuration(start_callback, end_callback);

  //int model_frames = orb_history_.getSize();
  int icp_iterations = orb_reg_.getFinalIterations();
  int orb_features_count = orb_features_ptr->points.size();
  int canny_features_count = canny_features_ptr->points.size();
  int model_size = model_->points.size();

  printf("F[%d][%d] %.1f \t M[%d] %.1f \t ICP[%d] %.1f \t TOTAL %.1f\n",
      orb_features_count, canny_features_count, features_dur,
      model_size, model_dur,
      icp_iterations,
      icp_dur, callback_dur);

}
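The use_alpha_beta_ branch above is a classic alpha-beta filter applied independently to each of the six pose components: predict forward with the current velocity estimate, take a measurement via ICP, then feed the residual back into the position (gain alpha_) and the velocity (gain beta_). The same predict/correct cycle in one dimension, as a sketch (the struct and names are illustrative, not from the source):

// One-dimensional alpha-beta filter, mirroring the per-component
// update in SparseTrackerAM::pointCloudCallback.
struct AlphaBeta {
  double x = 0.0;   // filtered position
  double v = 0.0;   // filtered velocity
  double alpha;     // position gain
  double beta;      // velocity gain

  AlphaBeta(double a, double b) : alpha(a), beta(b) {}

  double update(double measurement, double dt) {
    double predicted = x + v * dt;        // prediction step
    double residual  = measurement - predicted;
    x = predicted + alpha * residual;     // corrected position
    v = v + beta * residual / dt;         // corrected velocity
    return x;
  }
};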