Example #1
0
File: exview.c Project: SNce/ARMaze
/*
 * Compute the camera pose relative to the detected marker and display it.
 *
 * Derives the marker->camera transform (inverse of camera->marker), converts
 * it to quaternion + position, renders the scene in normal or exterior-view
 * mode depending on the global disp_mode flag, and prints the pose as text.
 *
 * marker_info: detection result for the tracked marker (from arDetectMarker).
 * Returns early (silently) if any ARToolKit conversion step fails.
 */
static void getResultQuat( ARMarkerInfo *marker_info )
{
    double      target_trans[3][4];
    double      cam_trans[3][4];
    double      quat[4], pos[3];
    char        string1[256];
    char        string2[256];

    if( arGetTransMat(marker_info, target_center, target_width, target_trans) < 0 ) return;
    if( arUtilMatInv(target_trans, cam_trans) < 0 ) return;
    if( arUtilMat2QuatPos(cam_trans, quat, pos) < 0 ) return;

    /* Bounded formatting: the two lines together can exceed 255 characters in
       the worst case, so use snprintf/strncat instead of sprintf/strcat to
       avoid overflowing string1. */
    snprintf(string1, sizeof(string1), " QUAT: Pos x: %3.1f  y: %3.1f  z: %3.1f\n",
             pos[0], pos[1], pos[2]);
    snprintf(string2, sizeof(string2), "      Quat qx: %3.2f qy: %3.2f qz: %3.2f qw: %3.2f ",
             quat[0], quat[1], quat[2], quat[3]);
    strncat( string1, string2, sizeof(string1) - strlen(string1) - 1 );

    if( disp_mode ) {
        draw( "target", target_trans, 0, 0 );
        draw_exview( a, b, r, target_trans, 1, 1 );
    }
    else {
        draw( "target", target_trans, 1, 1 );
        draw_exview( a, b, r, target_trans, 0, 0 );
    }
    print_string( string1 );

    return;
}
Example #2
0
  /**
   * Image callback: detects AR markers in the incoming frame, estimates the
   * pose of every known pattern, and publishes the results as an
   * ar_pose::ARMarkers message plus optional tf transforms and rviz markers.
   *
   * @param image_msg incoming camera image (BGR expected, see note below).
   */
  void ARMultiPublisher::getTransformationCallback (const sensor_msgs::ImageConstPtr & image_msg)
  {
    ARUint8 *dataPtr;
    ARMarkerInfo *marker_info;
    int marker_num;
    int i, k, j;

    /* Get the image from ROSTOPIC
     * NOTE: the dataPtr format is BGR because the ARToolKit library was
     * build with V4L, dataPtr format change according to the
     * ARToolKit configure option (see config.h).*/
#if ROS_VERSION_MINIMUM(1, 9, 0)
    try
    {
      capture_ = cv_bridge::toCvCopy (image_msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
    }
    dataPtr = (ARUint8 *) ((IplImage) capture_->image).imageData;
#else
    try
    {
      capture_ = bridge_.imgMsgToCv (image_msg, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException & e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
    }
    dataPtr = (ARUint8 *) capture_->imageData;
#endif

    // detect the markers in the video frame
    if (arDetectMarker (dataPtr, threshold_, &marker_info, &marker_num) < 0)
    {
      argCleanup ();
      ROS_BREAK ();
    }

    arPoseMarkers_.markers.clear ();
    // check for known patterns
    for (i = 0; i < objectnum; i++)
    {
      // k = index (into marker_info) of the best detection of pattern i, or -1.
      k = -1;
      for (j = 0; j < marker_num; j++)
      {
        if (object[i].id == marker_info[j].id)
        {
          if (k == -1)
            k = j;
          else                  // make sure you have the best pattern (highest confidence factor)
          if (marker_info[k].cf < marker_info[j].cf)
            k = j;
        }
      }
      if (k == -1)
      {
        object[i].visible = 0;
        continue;
      }

      // calculate the transform for each marker; use the history-based
      // estimator only when the marker was visible in the previous frame
      if (object[i].visible == 0)
      {
        arGetTransMat (&marker_info[k], object[i].marker_center, object[i].marker_width, object[i].trans);
      }
      else
      {
        arGetTransMatCont (&marker_info[k], object[i].trans,
                           object[i].marker_center, object[i].marker_width, object[i].trans);
      }
      object[i].visible = 1;

      double arQuat[4], arPos[3];

      //arUtilMatInv (object[i].trans, cam_trans);
      arUtilMat2QuatPos (object[i].trans, arQuat, arPos);

      // **** convert to ROS frame

      double quat[4], pos[3];

      pos[0] = arPos[0] * AR_TO_ROS;
      pos[1] = arPos[1] * AR_TO_ROS;
      pos[2] = arPos[2] * AR_TO_ROS;

      quat[0] = -arQuat[0];
      quat[1] = -arQuat[1];
      quat[2] = -arQuat[2];
      quat[3] = arQuat[3];

      ROS_DEBUG (" Object num %i ------------------------------------------------", i);
      ROS_DEBUG (" QUAT: Pos x: %3.5f  y: %3.5f  z: %3.5f", pos[0], pos[1], pos[2]);
      ROS_DEBUG ("     Quat qx: %3.5f qy: %3.5f qz: %3.5f qw: %3.5f", quat[0], quat[1], quat[2], quat[3]);

      // **** publish the marker

      ar_pose::ARMarker ar_pose_marker;
      ar_pose_marker.header.frame_id = image_msg->header.frame_id;
      ar_pose_marker.header.stamp = image_msg->header.stamp;
      ar_pose_marker.id = object[i].id;

      ar_pose_marker.pose.pose.position.x = pos[0];
      ar_pose_marker.pose.pose.position.y = pos[1];
      ar_pose_marker.pose.pose.position.z = pos[2];

      ar_pose_marker.pose.pose.orientation.x = quat[0];
      ar_pose_marker.pose.pose.orientation.y = quat[1];
      ar_pose_marker.pose.pose.orientation.z = quat[2];
      ar_pose_marker.pose.pose.orientation.w = quat[3];

      // BUGFIX: use the confidence of the matched marker k, not marker_info->cf
      // (which is the confidence of whatever marker happens to be at index 0).
      ar_pose_marker.confidence = round(marker_info[k].cf * 100);
      arPoseMarkers_.markers.push_back (ar_pose_marker);

      // **** publish transform between camera and marker

#if ROS_VERSION_MINIMUM(1, 9, 0)
      tf::Quaternion rotation (quat[0], quat[1], quat[2], quat[3]);
      tf::Vector3 origin (pos[0], pos[1], pos[2]);
      tf::Transform t (rotation, origin);
#else
// DEPRECATED: Fuerte support ends when Hydro is released
      btQuaternion rotation (quat[0], quat[1], quat[2], quat[3]);
      btVector3 origin (pos[0], pos[1], pos[2]);
      btTransform t (rotation, origin);
#endif

      if (publishTf_)
      {
        tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, object[i].name);
        broadcaster_.sendTransform(camToMarker);
      }

      // **** publish visual marker

      if (publishVisualMarkers_)
      {
#if ROS_VERSION_MINIMUM(1, 9, 0)
        tf::Vector3 markerOrigin (0, 0, 0.25 * object[i].marker_width * AR_TO_ROS);
        tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
        tf::Transform markerPose = t * m; // marker pose in the camera frame
#else
// DEPRECATED: Fuerte support ends when Hydro is released
        btVector3 markerOrigin (0, 0, 0.25 * object[i].marker_width * AR_TO_ROS);
        btTransform m (btQuaternion::getIdentity (), markerOrigin);
        btTransform markerPose = t * m; // marker pose in the camera frame
#endif

        tf::poseTFToMsg (markerPose, rvizMarker_.pose);

        rvizMarker_.header.frame_id = image_msg->header.frame_id;
        rvizMarker_.header.stamp = image_msg->header.stamp;
        rvizMarker_.id = object[i].id;

        rvizMarker_.scale.x = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.y = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.z = 0.5 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.ns = "basic_shapes";
        rvizMarker_.type = visualization_msgs::Marker::CUBE;
        rvizMarker_.action = visualization_msgs::Marker::ADD;
        // color-code the first two objects (blue, red); everything else green
        switch (i)
        {
          case 0:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 1.0f;
            rvizMarker_.color.a = 1.0;
            break;
          case 1:
            rvizMarker_.color.r = 1.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
            break;
          default:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 1.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
        }
        rvizMarker_.lifetime = ros::Duration (1.0);

        rvizMarkerPub_.publish(rvizMarker_);
        ROS_DEBUG ("Published visual marker");
      }
    }
    arMarkerPub_.publish(arPoseMarkers_);
    ROS_DEBUG ("Published ar_multi markers");
  }
Example #3
0
/*
 * Temporally smooth a marker pose estimate.
 *
 * Converts the 3x4 pose matrix m to quaternion + position, blends it with the
 * previous filtered state stored in ftmi (SLERP for orientation, linear
 * interpolation for position, blend weight ftmi->alpha), and writes the
 * filtered pose back into m.
 *
 * ftmi:  filter state (previous q/p and the smoothing factor alpha).
 * m:     in/out pose matrix; replaced by the filtered pose on success.
 * reset: non-zero to reinitialize the filter state from m (no smoothing).
 *
 * Returns 0 on success, -1 if ftmi is NULL, -2 on matrix<->quaternion
 * conversion failure.
 */
int arFilterTransMat(ARFilterTransMatInfo *ftmi, ARdouble m[3][4], const int reset)
{
    ARdouble q[4], p[3], alpha, oneminusalpha, omega, cosomega, sinomega, s0, s1;
    
    if (!ftmi) return (-1);
    
    if (arUtilMat2QuatPos(m, q, p) < 0) return (-2);
    arUtilQuatNorm(q);
    
    if (reset) {
        /* Seed the filter with the current pose; output equals input. */
        ftmi->q[0] = q[0];
        ftmi->q[1] = q[1];
        ftmi->q[2] = q[2];
        ftmi->q[3] = q[3];
        ftmi->p[0] = p[0];
        ftmi->p[1] = p[1];
        ftmi->p[2] = p[2];
    } else {
        alpha = ftmi->alpha;
#ifdef ARDOUBLE_IS_FLOAT
        oneminusalpha = 1.0f - alpha;
#else
        oneminusalpha = 1.0 - alpha;
#endif
        
        // SLERP for orientation.
        cosomega = q[0]*ftmi->q[0] + q[1]*ftmi->q[1] + q[2]*ftmi->q[2] + q[3]*ftmi->q[3]; // cos of angle between vectors.
#ifdef ARDOUBLE_IS_FLOAT
        /* Negative dot product: flip q so interpolation takes the shorter arc
           (q and -q represent the same rotation). */
        if (cosomega < 0.0f) {
            cosomega = -cosomega;
            q[0] = -q[0];
            q[1] = -q[1];
            q[2] = -q[2];
            q[3] = -q[3];
        } 
        /* Nearly parallel quaternions: fall back to linear weights to avoid
           dividing by a vanishing sin(omega). */
        if (cosomega > 0.9995f) {
            s0 = oneminusalpha;
            s1 = alpha;
        } else {
            omega = acosf(cosomega);
            sinomega = sinf(omega);
            s0 = sinf(oneminusalpha * omega) / sinomega;
            s1 = sinf(alpha * omega) / sinomega;
        }
#else
        /* Same logic as above, double-precision variant. */
        if (cosomega < 0.0) {
            cosomega = -cosomega;
            q[0] = -q[0];
            q[1] = -q[1];
            q[2] = -q[2];
            q[3] = -q[3];
        } 
        if (cosomega > 0.9995) {
            s0 = oneminusalpha;
            s1 = alpha;
        } else {
            omega = acos(cosomega);
            sinomega = sin(omega);
            s0 = sin(oneminusalpha * omega) / sinomega;
            s1 = sin(alpha * omega) / sinomega;
        }
#endif
        /* Blend new sample (weight s1) with stored state (weight s0),
           then renormalize to keep the quaternion unit length. */
        ftmi->q[0] = q[0]*s1 + ftmi->q[0]*s0;
        ftmi->q[1] = q[1]*s1 + ftmi->q[1]*s0;
        ftmi->q[2] = q[2]*s1 + ftmi->q[2]*s0;
        ftmi->q[3] = q[3]*s1 + ftmi->q[3]*s0;
        arUtilQuatNorm(ftmi->q);
        
        // Linear interpolation for position.
        ftmi->p[0] = p[0]*alpha + ftmi->p[0]*oneminusalpha;
        ftmi->p[1] = p[1]*alpha + ftmi->p[1]*oneminusalpha;
        ftmi->p[2] = p[2]*alpha + ftmi->p[2]*oneminusalpha;
    }
    
    /* Write the filtered pose back out as a 3x4 matrix. */
    if (arUtilQuatPos2Mat(ftmi->q, ftmi->p, m) < 0) return (-2);
    
    return (0);
}
Example #4
0
  /**
   * Image callback: detects the configured pattern in the frame, estimates its
   * pose, and publishes an ar_pose marker message plus optional tf and rviz
   * visualization output.
   *
   * @param image_msg incoming camera image (BGR expected, see note below).
   */
  void ARSinglePublisher::getTransformationCallback (const sensor_msgs::ImageConstPtr & image_msg)
  {
    ARUint8 *dataPtr;
    ARMarkerInfo *marker_info;
    int marker_num;
    int i, k;

    /* Get the image from ROSTOPIC
     * NOTE: the dataPtr format is BGR because the ARToolKit library was
     * build with V4L, dataPtr format change according to the 
     * ARToolKit configure option (see config.h).*/
    try
    {
      capture_ = bridge_.imgMsgToCv (image_msg, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException & e)
    {
      ROS_ERROR ("Could not convert from '%s' to 'bgr8'.", image_msg->encoding.c_str ());
    }
    //cvConvertImage(capture_,capture_,CV_CVTIMG_FLIP); //flip image
    dataPtr = (ARUint8 *) capture_->imageData;

    // detect the markers in the video frame 
    if (arDetectMarker (dataPtr, threshold_, &marker_info, &marker_num) < 0)
    {
      ROS_FATAL ("arDetectMarker failed");
      ROS_BREAK ();             // FIXME: I don't think this should be fatal... -Bill
    }

    // check for known patterns: k = index of the best (highest-confidence)
    // detection of patt_id_, or -1 if it was not seen this frame
    k = -1;
    for (i = 0; i < marker_num; i++)
    {
      if (marker_info[i].id == patt_id_)
      {
        ROS_DEBUG ("Found pattern: %d ", patt_id_);

        // make sure you have the best pattern (highest confidence factor)
        if (k == -1)
          k = i;
        else if (marker_info[k].cf < marker_info[i].cf)
          k = i;
      }
    }

    if (k != -1)
    {
      // **** get the transformation between the marker and the real camera
      double arQuat[4], arPos[3];

      // use the history-based estimator only when tracking was already active
      if (!useHistory_ || contF == 0)
        arGetTransMat (&marker_info[k], marker_center_, markerWidth_, marker_trans_);
      else
        arGetTransMatCont (&marker_info[k], marker_trans_, marker_center_, markerWidth_, marker_trans_);

      contF = 1;

      //arUtilMatInv (marker_trans_, cam_trans);
      arUtilMat2QuatPos (marker_trans_, arQuat, arPos);

      // **** convert to ROS frame

      double quat[4], pos[3];
    
      pos[0] = arPos[0] * AR_TO_ROS;
      pos[1] = arPos[1] * AR_TO_ROS;
      pos[2] = arPos[2] * AR_TO_ROS;

      quat[0] = -arQuat[0];
      quat[1] = -arQuat[1];
      quat[2] = -arQuat[2];
      quat[3] = arQuat[3];

      ROS_DEBUG (" QUAT: Pos x: %3.5f  y: %3.5f  z: %3.5f", pos[0], pos[1], pos[2]);
      ROS_DEBUG ("     Quat qx: %3.5f qy: %3.5f qz: %3.5f qw: %3.5f", quat[0], quat[1], quat[2], quat[3]);

      // **** publish the marker

      ar_pose_marker_.header.frame_id = image_msg->header.frame_id;
      ar_pose_marker_.header.stamp    = image_msg->header.stamp;
      // BUGFIX: report the id/confidence of the matched marker k, not
      // marker_info->..., which reads whatever marker sits at index 0.
      ar_pose_marker_.id              = marker_info[k].id;

      ar_pose_marker_.pose.pose.position.x = pos[0];
      ar_pose_marker_.pose.pose.position.y = pos[1];
      ar_pose_marker_.pose.pose.position.z = pos[2];

      ar_pose_marker_.pose.pose.orientation.x = quat[0];
      ar_pose_marker_.pose.pose.orientation.y = quat[1];
      ar_pose_marker_.pose.pose.orientation.z = quat[2];
      ar_pose_marker_.pose.pose.orientation.w = quat[3];

      ar_pose_marker_.confidence = marker_info[k].cf;

      arMarkerPub_.publish(ar_pose_marker_);
      ROS_DEBUG ("Published ar_single marker");

      // **** publish transform between camera and marker

      btQuaternion rotation (quat[0], quat[1], quat[2], quat[3]);
      btVector3 origin(pos[0], pos[1], pos[2]);
      btTransform t(rotation, origin);

      if(publishTf_)
      {
        if(reverse_transform_)
        {
          tf::StampedTransform markerToCam (t.inverse(), image_msg->header.stamp, markerFrame_.c_str(), image_msg->header.frame_id);
          broadcaster_.sendTransform(markerToCam);
        } else {
          tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, markerFrame_.c_str());
          broadcaster_.sendTransform(camToMarker);
        }
      }

      // **** publish visual marker

      if(publishVisualMarkers_)
      {
        btVector3 markerOrigin(0, 0, 0.25 * markerWidth_ * AR_TO_ROS);
        btTransform m(btQuaternion::getIdentity(), markerOrigin);
        btTransform markerPose = t * m; // marker pose in the camera frame
      
        tf::poseTFToMsg(markerPose, rvizMarker_.pose);

        rvizMarker_.header.frame_id = image_msg->header.frame_id;
        rvizMarker_.header.stamp = image_msg->header.stamp;
        rvizMarker_.id = 1;

        rvizMarker_.scale.x = 1.0 * markerWidth_ * AR_TO_ROS;
        rvizMarker_.scale.y = 1.0 * markerWidth_ * AR_TO_ROS;
        rvizMarker_.scale.z = 0.5 * markerWidth_ * AR_TO_ROS;
        rvizMarker_.ns = "basic_shapes";
        rvizMarker_.type = visualization_msgs::Marker::CUBE;
        rvizMarker_.action = visualization_msgs::Marker::ADD;
        rvizMarker_.color.r = 0.0f;
        rvizMarker_.color.g = 1.0f;
        rvizMarker_.color.b = 0.0f;
        rvizMarker_.color.a = 1.0;
        rvizMarker_.lifetime = ros::Duration();

        rvizMarkerPub_.publish(rvizMarker_);
        ROS_DEBUG ("Published visual marker");
      }
    }
    else
    {
      // pattern not seen: reset the history flag so the next sighting uses
      // arGetTransMat instead of the continuous estimator
      contF = 0;
      ROS_DEBUG ("Failed to locate marker");
    }
  }
/**
 * Image callback: detects the configured pattern, validates the computed
 * orientation, and publishes the marker message plus the camera->marker tf
 * (with marker_offset_ applied) and an rviz visual marker.
 *
 * @param image_msg incoming camera image (BGR expected, see note below).
 */
void ARSinglePublisher::getTransformationCallback(const sensor_msgs::ImageConstPtr& image_msg)
{
  // ROS_INFO("======================================================");
  // ROS_INFO("Callback...");

  ARMarkerInfo *marker_info;
  int marker_num;
  int i, k;

  // Get the image from ROSTOPIC
  // NOTE: the data_ptr format is BGR because the ARToolKit library was
  // build with V4L, data_ptr format change according to the
  // ARToolKit configure option (see config.h).
  try
  {
    capture_ = bridge_.imgMsgToCv(image_msg, "bgr8");
  }
  catch (sensor_msgs::CvBridgeException & e)
  {
    ROS_ERROR("Could not convert from >%s< to 'bgr8'.", image_msg->encoding.c_str ());
    return;
  }

//  const int WIDTH = 640;
//  const int HEIGHT = 480;
//  // declare a destination IplImage object with correct size, depth and channels
//  IplImage *destination = cvCreateImage(cvSize(WIDTH, HEIGHT), capture_->depth, capture_->nChannels);
//  // use cvResize to resize source to a destination image
//  cvResize(capture_, destination);
//  // save image with a name supplied with a second argument
  //  std::string filename = "/tmp/" + marker_frame_ + ".jpg";
  //  cvSaveImage(filename.c_str(), destination);
//  ROS_INFO("BEFORE: Depth = >%i<.", capture_->depth);
//  ROS_INFO("BEFORE: nChannels = >%i<.", capture_->nChannels);
//  ROS_INFO("BEFORE: Width = >%i<.", capture_->width);
//  ROS_INFO("BEFORE: WidthStep = >%i<.", capture_->widthStep);
//  ROS_INFO("BEFORE: Height = >%i<.", capture_->height);
//  ROS_INFO("BEFORE: ImageSize = >%i<.", capture_->imageSize);
//  ROS_INFO("BEFORE: nSize = >%i<.", capture_->nSize);
//  ROS_INFO("BEFORE: dataOrder = >%i<.", capture_->dataOrder);
//  ROS_INFO("BEFORE: origin = >%i<.", capture_->origin);
//  capture_ = destination;
//  // memcpy(capture_->imageData, destination->imageData, destination->imageSize);
//  ROS_INFO("AFTER:  Depth = >%i<.", capture_->depth);
//  ROS_INFO("AFTER:  nChannels = >%i<.", capture_->nChannels);
//  ROS_INFO("AFTER:  Width = >%i<.", capture_->width);
//  ROS_INFO("AFTER:  WidthStep = >%i<.", capture_->widthStep);
//  ROS_INFO("AFTER:  Height = >%i<.", capture_->height);
//  ROS_INFO("AFTER:  ImageSize = >%i<.", capture_->imageSize);
//  ROS_INFO("AFTER:  nSize = >%i<.", capture_->nSize);
//  ROS_INFO("AFTER:  dataOrder = >%i<.", capture_->dataOrder);
//  ROS_INFO("AFTER:  origin = >%i<.", capture_->origin);

  // cvConvertImage(capture_, capture_, CV_CVTIMG_FLIP); //flip image
  ARUint8 *data_ptr = (ARUint8 *)capture_->imageData;

  // detect the markers in the video frame
  if (arDetectMarker(data_ptr, threshold_, &marker_info, &marker_num) < 0)
  {
    ROS_FATAL ("arDetectMarker failed");
    ROS_BREAK (); // FIXME: I don't think this should be fatal... -Bill
  }

  // check for known patterns: k = index of the best (highest-confidence)
  // detection of patt_id_, or -1 if it was not seen this frame
  k = -1;
  for (i = 0; i < marker_num; i++)
  {
    if (marker_info[i].id == patt_id_)
    {
      ROS_DEBUG("Found pattern: %d ", patt_id_);

      // make sure you have the best pattern (highest confidence factor)
      if (k == -1)
        k = i;
      else if (marker_info[k].cf < marker_info[i].cf)
        k = i;
    }
  }

  if (k != -1)
  {
    // use the history-based estimator only when tracking was already active
    if (!use_history_ || cont_f_ == 0)
    {
      arGetTransMat(&marker_info[k], marker_center_, marker_width_, marker_trans_);
    }
    else
    {
      arGetTransMatCont(&marker_info[k], marker_trans_, marker_center_, marker_width_, marker_trans_);
    }

    cont_f_ = 1;

    // get the transformation between the marker and the real camera
    double arQuat[4], arPos[3];

    // arUtilMatInv (marker_trans_, cam_trans);
    arUtilMat2QuatPos(marker_trans_, arQuat, arPos);

    // error checking: reject frames whose orientation is not a valid unit
    // quaternion before publishing anything
    if(fabs(sqrt(arQuat[0]*arQuat[0] + arQuat[1]*arQuat[1] + arQuat[2]*arQuat[2] + arQuat[3]*arQuat[3]) - 1.0) > 0.0001)
    {
      ROS_WARN("Skipping invalid frame. Computed quaternion is invalid.");
      // BUGFIX: actually skip the frame — the warning previously fell through
      // and published the invalid pose.
      return;
    }
    if(std::isnan(arQuat[0]) || std::isnan(arQuat[1]) || std::isnan(arQuat[2]) || std::isnan(arQuat[3]))
    {
      ROS_WARN("Skipping invalid frame because computed orientation is not a number.");
      return;
    }
    if(std::isinf(arQuat[0]) || std::isinf(arQuat[1]) || std::isinf(arQuat[2]) || std::isinf(arQuat[3]))
    {
      ROS_WARN("Skipping invalid frame because computed orientation is infinite.");
      return;
    }

    // convert to ROS frame

    double quat[4], pos[3];

    pos[0] = arPos[0] * AR_TO_ROS;
    pos[1] = arPos[1] * AR_TO_ROS;
    pos[2] = arPos[2] * AR_TO_ROS;

    quat[0] = -arQuat[0];
    quat[1] = -arQuat[1];
    quat[2] = -arQuat[2];
    quat[3] = arQuat[3];

    ROS_DEBUG("  Pos x: %3.5f  y: %3.5f  z: %3.5f", pos[0], pos[1], pos[2]);
    ROS_DEBUG("  Quat qx: %3.5f qy: %3.5f qz: %3.5f qw: %3.5f", quat[0], quat[1], quat[2], quat[3]);

    // publish the marker

    // BUGFIX: report the confidence of the matched marker k, not
    // marker_info->cf, which reads whatever marker sits at index 0.
    ar_target_marker_.confidence = marker_info[k].cf;

    ar_marker_publisher_.publish(ar_target_marker_);
    ROS_DEBUG ("Published ar_single marker");

    // publish transform between camera and marker

    tf::Quaternion rotation(quat[0], quat[1], quat[2], quat[3]);
    tf::Vector3 origin(pos[0], pos[1], pos[2]);
    tf::Transform transform(rotation, origin);

    // TODO: figure out why this happens once in a while...
    if(transform.getOrigin().getZ() < 0.0)
    {
      transform.setOrigin(-transform.getOrigin());
    }

    if (publish_tf_)
    {
      if (reverse_transform_)
      {
        ROS_ASSERT_MSG(false, "Reverse transform is not debugged yet.");
        // tf::StampedTransform marker_to_cam(t.inverse(), image_msg->header.stamp, marker_frame_.c_str(), image_msg->header.frame_id);
        tf::StampedTransform marker_to_cam(transform.inverse(), image_msg->header.stamp, marker_frame_, camera_frame_);
        tf_broadcaster_.sendTransform(marker_to_cam);
      }
      else
      {
        tf::Transform offseted_transform = transform * marker_offset_;
        // tf::StampedTransform cam_to_marker(t, image_msg->header.stamp, image_msg->header.frame_id, marker_frame_.c_str());
        tf::StampedTransform cam_to_marker(offseted_transform, image_msg->header.stamp, camera_frame_, marker_frame_);
        // tf::StampedTransform cam_to_marker(transform, image_msg->header.stamp, camera_frame_, marker_frame_);
        tf_broadcaster_.sendTransform(cam_to_marker);
      }
    }

    // publish visual marker
    publishMarker(transform, image_msg->header.stamp);
  }
  else
  {
    // pattern not seen: reset the history flag so the next sighting uses
    // arGetTransMat instead of the continuous estimator
    cont_f_ = 0;
    ROS_WARN("Failed to locate marker.");
  }

}
Example #6
0
  /**
   * Image callback: detects the bundle's known patterns, computes each
   * marker's pose and the pose of the bundle master/center, optionally logs
   * the master pose to file, and publishes marker messages, the
   * camera->master tf, and rviz visual markers.
   *
   * @param image_msg incoming camera image (BGR expected, see note below).
   */
  void ARBundlePublisher::getTransformationCallback (const sensor_msgs::ImageConstPtr & image_msg)
  {
    ARUint8 *dataPtr;
    ARMarkerInfo *marker_info;
    int marker_num;
    int knownPatternCount, k, j;

    /* Get the image from ROSTOPIC
     * NOTE: the dataPtr format is BGR because the ARToolKit library was
     * build with V4L, dataPtr format change according to the 
     * ARToolKit configure option (see config.h).*/
#if ROS_VERSION_MINIMUM(1, 9, 0)
    try
    {
      capture_ = cv_bridge::toCvCopy (image_msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what()); 
    }
    dataPtr = (ARUint8 *) ((IplImage) capture_->image).imageData;
#else
    try
    {
      capture_ = bridge_.imgMsgToCv (image_msg, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException & e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what()); 
    }
    dataPtr = (ARUint8 *) capture_->imageData;
#endif

    // detect the markers in the video frame
    if (arDetectMarker (dataPtr, threshold_, &marker_info, &marker_num) < 0)
    {
      argCleanup ();
      ROS_BREAK ();
    }

    arPoseMarkers_.markers.clear ();
    // check for known patterns
    for (knownPatternCount = 0; knownPatternCount < objectnum; knownPatternCount++)
    {
      k = -1;	//haven't yet seen my pattern yet. 
	  //marker_num is how many markers were actually found
      for (j = 0; j < marker_num; j++)
      {
        if (object[knownPatternCount].id == marker_info[j].id)
        {
          if (k == -1)	//if this is the first wild sighting
            k = j;		//which marker matches my pattern?
          else                  // make sure you have the best pattern (highest confidence factor)
          if (marker_info[k].cf < marker_info[j].cf)
            k = j;
        }
      }//end for (j)
      if (k == -1)	//didn't find my pattern :(
      {
        object[knownPatternCount].visible = 0;
        continue;	//ok. so this just skips all the way to the next knownPatternCount
      }

      object[knownPatternCount].visible = 1;	//woohoo mark as found

	  getMarkerTransform(knownPatternCount, marker_info, k);	//transform is stored in object[knownPatternCount].trans

      double arQuat[4], arPos[3];	//for the marker
      double masterARQuat[4], masterARPos[3];	//for the master/center of the box 

	  //find the transform for the pattern to the center of the box
	  //updates master_trans_
	  findTransformToCenter(object[knownPatternCount].trans, knownPatternCount);
      
	  //arUtilMatInv (object[i].trans, cam_trans);
      arUtilMat2QuatPos (object[knownPatternCount].trans, arQuat, arPos);
      arUtilMat2QuatPos (master_trans_, masterARQuat, masterARPos);

      // **** convert to ROS frame
      double quat[4], pos[3];
      double masterQuat[4], masterPos[3];
	  convertToRosFrame(arQuat, arPos, quat, pos);
	  convertToRosFrame(masterARQuat, masterARPos, masterQuat, masterPos);
	
	  //write pos and quat out to text file 
	  if (outputToFile)	{
		  //need to output video tick
		  //i know this is incredibly incredibly not efficient :(
		  //TODO: output tick as well, from the camera_raw msg


		//convert nanoseconds -> seconds
		snprintf(buffer, BUFFER_SIZE, "%10.10f", 
			image_msg->header.stamp.toNSec()*1e-9);
	  	output << buffer <<  ",";	
	  	output << masterPos[0] << ",";	
	  	output << masterPos[1] << ",";	
	  	output << masterPos[2] << ",";	
	  	output << masterQuat[0] << ",";	
	  	output << masterQuat[1] << ",";	
	  	output << masterQuat[2] << ",";	
	  	output << masterQuat[3] << "\n";	
	  }

	  ROS_DEBUG (" Object num %i------------------------------------------------", knownPatternCount);
      ROS_DEBUG (" QUAT: Pos x: %3.5f  y: %3.5f  z: %3.5f", pos[0], pos[1], pos[2]);
      ROS_DEBUG ("     Quat qx: %3.5f qy: %3.5f qz: %3.5f qw: %3.5f", quat[0], quat[1], quat[2], quat[3]);

      // **** prepare to publish the marker
	  stuffARMarkerMsg(knownPatternCount, pos, quat,image_msg->header, marker_info);		
 

      // **** publish transform between camera and marker

#if ROS_VERSION_MINIMUM(1, 9, 0)
      tf::Quaternion rotation (quat[0], quat[1], quat[2], quat[3]);
      tf::Vector3 origin (pos[0], pos[1], pos[2]);
      tf::Transform t (rotation, origin);
#else
// DEPRECATED: Fuerte support ends when Hydro is released
      btQuaternion rotation (quat[0], quat[1], quat[2], quat[3]);
      btVector3 origin (pos[0], pos[1], pos[2]);
      btTransform t (rotation, origin);
#endif

	  tf::Quaternion masterRotation (masterQuat[0], masterQuat[1], masterQuat[2], masterQuat[3]);
      tf::Vector3 masterOrigin (masterPos[0], masterPos[1], masterPos[2]);
      tf::Transform masterTransform (masterRotation, masterOrigin);

      if (publishTf_)
      {
		//no need to publish tfs of markers
       // tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, object[knownPatternCount].name);
      //  broadcaster_.sendTransform(camToMarker);

		tf::StampedTransform camToMaster (masterTransform, image_msg->header.stamp, image_msg->header.frame_id, "master");
        broadcaster_.sendTransform(camToMaster);

      }

      // **** publish visual marker

      if (publishVisualMarkers_)
      {
#if ROS_VERSION_MINIMUM(1, 9, 0)
        tf::Vector3 markerOrigin (0, 0, 0.25 * object[knownPatternCount].marker_width * AR_TO_ROS);
        tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
        tf::Transform markerPose = t * m; // marker pose in the camera frame 
#else
// DEPRECATED: Fuerte support ends when Hydro is released
        // BUGFIX: this branch previously indexed object[i], but no variable
        // `i` exists in this function (compile error pre-Groovy); the loop
        // variable is knownPatternCount.
        btVector3 markerOrigin (0, 0, 0.25 * object[knownPatternCount].marker_width * AR_TO_ROS);
        btTransform m (btQuaternion::getIdentity (), markerOrigin);
        btTransform markerPose = t * m; // marker pose in the camera frame
#endif

		publishVisualMarker(knownPatternCount, markerPose, image_msg->header); 
	  }
  
	} //end outer loop of for 
    arMarkerPub_.publish(arPoseMarkers_);
  
  }
/**
 * Image callback: detects markers, estimates the multi-marker (bundle) pose
 * via arMultiGetTransMat, and publishes the camera->marker tf (when the fit
 * error is below error_threshold_) plus per-marker rviz visualization.
 *
 * @param image_msg incoming camera image (BGR expected, see note below).
 */
void ARMultiPublisher::getTransformationCallback(const sensor_msgs::ImageConstPtr & image_msg)
{
  // Get the image from ROSTOPIC
  // NOTE: the dataPtr format is BGR because the ARToolKit library was
  // build with V4L, dataPtr format change according to the
  // ARToolKit configure option (see config.h).
  try
  {
    capture_ = bridge_.imgMsgToCv(image_msg, "bgr8");
  }
  catch (sensor_msgs::CvBridgeException & e)
  {
    ROS_ERROR ("Could not convert from >%s< to 'bgr8'.", image_msg->encoding.c_str ());
  }
  // cvConvertImage(capture,capture,CV_CVTIMG_FLIP);
  ARUint8* data_ptr = (ARUint8 *)capture_->imageData;

  // detect the markers in the video frame
  if (arDetectMarker(data_ptr, threshold_, &marker_info_, &num_detected_marker_) < 0)
  {
    argCleanup();
    ROS_BREAK ();
  }
  ROS_DEBUG("Detected >%i< of >%i< markers.", num_detected_marker_, num_total_markers_);

  double error = 0.0;
  if ((error = arMultiGetTransMat(marker_info_, num_detected_marker_, multi_marker_config_)) < 0)
  {
    // ROS_ERROR("Could not get transformation. This should never happen.");
    ROS_WARN("Could not get transformation.");
    return;
  }
  ROS_DEBUG("Error is >%f<.", error);

  for (int i = 0; i < num_detected_marker_; i++)
  {
    ROS_DEBUG("multi_marker_config_->prevF: %i", multi_marker_config_->prevF);
    ROS_DEBUG("%s: (%i) pos: %f %f id: %i cf: %f", marker_frame_.c_str(), i, marker_info_[i].pos[0], marker_info_[i].pos[1], marker_info_[i].id, marker_info_[i].cf);
  }

  // for each known marker id, remember the detection with the highest
  // confidence (index into marker_info_), or -1 if it was not detected
  std::vector<double> cfs(num_total_markers_, 0.0);
  marker_indizes_.clear();
  for (int i = 0; i < num_total_markers_; ++i)
  {
    marker_indizes_.push_back(-1);
  }
  for (int i = 0; i < num_total_markers_; ++i)
  {
    for (int j = 0; j < num_detected_marker_; j++)
    {
      // BUGFIX: also reject ids >= num_total_markers_ — cfs and
      // marker_indizes_ have exactly num_total_markers_ entries, so a
      // spurious detection with a larger id would write out of bounds.
      if (marker_info_[j].id >= 0 && marker_info_[j].id < num_total_markers_)
      {
        if (marker_info_[j].cf > cfs[marker_info_[j].id])
        {
          cfs[marker_info_[j].id] = marker_info_[j].cf;
          marker_indizes_[marker_info_[j].id] = j;
        }
      }
    }
  }

  // convert the bundle pose to the ROS frame (negated x/y/z quaternion
  // components, millimeters -> meters via AR_TO_ROS)
  double ar_quat[4], ar_pos[3];
  arUtilMat2QuatPos(multi_marker_config_->trans, ar_quat, ar_pos);
  tf::Quaternion rotation(-ar_quat[0], -ar_quat[1], -ar_quat[2], ar_quat[3]);
  tf::Vector3 origin(ar_pos[0] * AR_TO_ROS, ar_pos[1] * AR_TO_ROS, ar_pos[2] * AR_TO_ROS);
  tf::Transform transform(rotation, origin);
  if (multi_marker_config_->prevF && publish_tf_)
  {
    // only broadcast tf when the multi-marker fit is good enough
    if(error < error_threshold_)
    {
      ROS_DEBUG("%f %f %f | %f %f %f %f | %f", origin.getX(), origin.getY(), origin.getZ(), rotation.getX(), rotation.getY(), rotation.getZ(), rotation.getW(), image_msg->header.stamp.toSec());
      tf::StampedTransform cam_to_marker(transform, image_msg->header.stamp, camera_frame_, marker_frame_);
      tf_broadcaster_.sendTransform(cam_to_marker);
    }
    publishErrorMarker(error, image_msg->header.stamp);
  }

  if(publish_visual_markers_)
  {
    for (int i = 0; i < num_total_markers_; i++)
    {
      if (marker_indizes_[i] >= 0)
      {
          tf::Transform marker_transform;
          getTransform(i, marker_transform);
          tf::Transform marker = transform * marker_transform;
          publishMarker(i, marker, image_msg->header.stamp);
          last_transform_ = marker;
      }
      // else
      // {
      //     publishMarker(i, last_transform_, image_msg->header.stamp);
      // }
    }
  }


}