Example #1
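Converts an XSens IMU orientation reading into the car's reference frame by composing the reading with the static xsens-to-car transform.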
static carmen_orientation_3D_t 
get_orientation_car_reference_from_message(tf::Quaternion xsens_reading)
{
	tf::StampedTransform xsens_to_car;
	transformer.lookupTransform(xsens_tf_name, car_tf_name, tf::Time(0), xsens_to_car);

	tf::Transform xsens_matrix;
	xsens_matrix.setOrigin(tf::Vector3(0.0, 0.0, 0.0));
	xsens_matrix.setRotation(xsens_reading);
	
	// Compose the IMU's pose in the global frame with the car's pose in the IMU frame to get the car's global pose.
	tf::Transform car_pose = xsens_matrix * xsens_to_car;
	
	carmen_orientation_3D_t orientation = get_carmen_orientation_from_tf_transform(car_pose);

	return orientation;
}
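For the lookup above to succeed, the xsens-to-car link must already be in the transformer's tf tree. A minimal setup sketch in the style of Example #3 below; the helper name and the xsens_pose_g parameter are hypothetical, not from the original source:

static void
initialize_xsens_transformation(carmen_pose_3D_t xsens_pose_g)	// hypothetical helper
{
	// Pose of the IMU with respect to the car (mounting offset).
	tf::Transform car_to_xsens_pose;
	car_to_xsens_pose.setOrigin(tf::Vector3(xsens_pose_g.position.x, xsens_pose_g.position.y, xsens_pose_g.position.z));
	car_to_xsens_pose.setRotation(tf::Quaternion(xsens_pose_g.orientation.yaw, xsens_pose_g.orientation.pitch, xsens_pose_g.orientation.roll));	// yaw, pitch, roll

	// Parent frame first, child frame second; tf can then answer lookups in either direction.
	tf::StampedTransform car_to_xsens_transform(car_to_xsens_pose, tf::Time(0), car_tf_name, xsens_tf_name);
	transformer.setTransform(car_to_xsens_transform, "car_to_xsens_transform");
}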
Example #2
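Computes the car's global position from an XSens XYZ message by chaining the IMU's global pose (from the message) with the static xsens-to-car transform.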
static carmen_vector_3D_t 
get_car_position_from_message(carmen_xsens_xyz_message *xsens_xyz_message)
{
	tf::StampedTransform xsens_to_car;
	transformer.lookupTransform(xsens_tf_name, car_tf_name, tf::Time(0), xsens_to_car);
	
	tf::Transform global_to_xsens;
	global_to_xsens.setOrigin(carmen_vector3_to_tf_vector3(xsens_xyz_message->position));
	global_to_xsens.setRotation(carmen_quaternion_to_tf_quaternion(xsens_xyz_message->quat));
	
	tf::Transform global_to_car = global_to_xsens * xsens_to_car;

	carmen_vector_3D_t car_position = tf_vector3_to_carmen_vector3(global_to_car.getOrigin());
	
	return car_position;
}
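Example #3
Builds the tf tree used for visual odometry (world -> car -> board -> camera -> visual_odometry) and reads back the car-to-visual-odometry transform.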
void 
initialize_pose_6d_transformation_matrix()
{
	visual_odometry_pose_6d_transformation_matrix = Matrix::eye(4);

	tf::Transform board_to_camera_pose_g;
	tf::Transform car_to_board_pose;
	tf::Transform world_to_car_pose_g;
	tf::Transform camera_to_visual_odometry_transform;

	// initial car pose with respect to the world
	world_to_car_pose_g.setOrigin(tf::Vector3(car_pose_g.position.x, car_pose_g.position.y, car_pose_g.position.z));
	world_to_car_pose_g.setRotation(tf::Quaternion(car_pose_g.orientation.yaw, car_pose_g.orientation.pitch, car_pose_g.orientation.roll));
	tf::StampedTransform world_to_car_transform(world_to_car_pose_g, tf::Time(0), "/world", "/car");
	transformer.setTransform(world_to_car_transform, "world_to_car_transform");

	// board pose with respect to the car
	car_to_board_pose.setOrigin(tf::Vector3(sensor_board_pose_g.position.x, sensor_board_pose_g.position.y, sensor_board_pose_g.position.z));
	car_to_board_pose.setRotation(tf::Quaternion(sensor_board_pose_g.orientation.yaw, sensor_board_pose_g.orientation.pitch, sensor_board_pose_g.orientation.roll)); 				// yaw, pitch, roll
	tf::StampedTransform car_to_board_transform(car_to_board_pose, tf::Time(0), "/car", "/board");
	transformer.setTransform(car_to_board_transform, "car_to_board_transform");

	// camera pose with respect to the board
	board_to_camera_pose_g.setOrigin(tf::Vector3(camera_pose_g.position.x, camera_pose_g.position.y, camera_pose_g.position.z));
	board_to_camera_pose_g.setRotation(tf::Quaternion(camera_pose_g.orientation.yaw, camera_pose_g.orientation.pitch, camera_pose_g.orientation.roll)); 				// yaw, pitch, roll
	tf::StampedTransform board_to_camera_transform(board_to_camera_pose_g, tf::Time(0), "/board", "/camera");
	transformer.setTransform(board_to_camera_transform, "board_to_camera_transform");

	/**
	 * visual odometry pose with respect to the camera
	 * (the rotation goes from the parent coordinate system (camera)
	 * to the child coordinate system (visual_odometry), following
	 * the yaw, pitch, roll angle order).
	 */
	camera_to_visual_odometry_transform.setOrigin(tf::Vector3(0.0, 0.0, 0.0)); // x, y, z;
	camera_to_visual_odometry_transform.setRotation(tf::Quaternion(-M_PI / 2.0, 0.0, -M_PI / 2.0));	// yaw, pitch, roll
	tf::StampedTransform camera_to_visual_odometry_stamped_transform(camera_to_visual_odometry_transform, tf::Time(0), "/camera", "/visual_odometry"); 	// create a time stamped transformation that defines the visual odometry position with respect to the camera
	transformer.setTransform(camera_to_visual_odometry_stamped_transform, "camera_to_visual_odometry_transform"); // add a link to the tf tree with the camera_to_visual_odometry_transform stamped transformation

	// get the pose of the visual odometry coordinate system with respect to the car coordinate system.
	transformer.lookupTransform("/car", "/visual_odometry", tf::Time(0), g_car_to_visual_odometry_transform);
}
Example #4
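Returns the velodyne pose relative to the car, initializing the CARMEN parameters and transformations once on the first call.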
carmen_pose_3D_t
get_velodyne_pose_in_relation_to_car_helper(int argc, char** argv)
{	
	static int initialized = 0;

	if(initialized == 0)
	{
		initialize_carmen_parameters(argc, argv);
		initialize_transformations();

		initialized = 1;
	}

	tf::StampedTransform car_to_velodyne;
	transformer.lookupTransform(car_tf_name, velodyne_tf_name, tf::Time(0), car_to_velodyne);
	
	carmen_pose_3D_t velodyne_pose = tf_transform_to_carmen_pose_3D(car_to_velodyne);

	return velodyne_pose;
}
Example #5
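Computes the car's global position from a GPS fix; GPS provides no orientation, so the global-to-gps transform carries an identity rotation.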
static carmen_vector_3D_t 
get_car_position_from_message(carmen_gps_xyz_message *gps_xyz_message)
{
	tf::StampedTransform gps_to_car;
	transformer.lookupTransform(gps_tf_name, car_tf_name, tf::Time(0), gps_to_car);
	
	carmen_vector_3D_t gps_position;
	gps_position.x = gps_xyz_message->x;
	gps_position.y = gps_xyz_message->y;
	gps_position.z = gps_xyz_message->z;

	tf::Transform global_to_gps;
	global_to_gps.setOrigin(carmen_vector3_to_tf_vector3(gps_position));
	tf::Quaternion zero_quat(0.0, 0.0, 0.0);	// identity rotation (yaw, pitch, roll): the GPS fix carries no orientation
	global_to_gps.setRotation(zero_quat);
	
	tf::Transform global_to_car = global_to_gps * gps_to_car;

	carmen_vector_3D_t car_position = tf_vector3_to_carmen_vector3(global_to_car.getOrigin());
	
	return car_position;
}
Example #6
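Transforms a camera ray into a ground frame and intersects it with the ground plane to obtain a world point.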
bool projectRayToGround(const tf::Transformer& listener,
      const tf::Stamped<tf::Point> camera_ray,
      Eigen::Vector4d ground_plane, std::string ground_frame,
      tf::Stamped<tf::Point>* world_point)
{
    tf::StampedTransform camera_transform;
    // This is a static link, so Time(0) should be fine
    if (!listener.canTransform(ground_frame, camera_ray.frame_id_, camera_ray.stamp_)) {
        ROS_INFO("Couldn't transform %s to %s\n",camera_ray.frame_id_.c_str(),
                ground_frame.c_str());
        return false;
    }
    listener.lookupTransform(ground_frame, camera_ray.frame_id_, ros::Time(0), camera_transform);
    tf::Stamped<tf::Point> ground_frame_ray;
    listener.transformVector(ground_frame, camera_ray, ground_frame_ray);
    Eigen::Vector3d ray, ray_origin;
    tf::vectorTFToEigen(ground_frame_ray, ray);
    tf::vectorTFToEigen(camera_transform.getOrigin(), ray_origin);
    Eigen::Vector3d ground_v = intersectRayPlane(ray, ray_origin, ground_plane); 
    tf::vectorEigenToTF(ground_v, *world_point);
    world_point->frame_id_ = ground_frame;
    world_point->stamp_ = camera_ray.stamp_;
    return true;
}
Example #7
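laser_geometry's LaserProjection: projects a LaserScan into a PointCloud2 in the target frame, compensating for motion during the scan by interpolating between the transforms at the first and last beams.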
  void LaserProjection::transformLaserScanToPointCloud_ (const std::string &target_frame, 
                                                        const sensor_msgs::LaserScan &scan_in,
                                                        sensor_msgs::PointCloud2 &cloud_out, 
                                                        tf::Transformer &tf, 
                                                        double range_cutoff,
                                                        int channel_options)
  {
    //check if the user has requested the index field
    bool requested_index = false;
    if ((channel_options & channel_option::Index))
      requested_index = true;

    //we'll enforce that we get index values for the laser scan so that we
    //ensure that we use the correct timestamps
    channel_options |= channel_option::Index;

    projectLaser_(scan_in, cloud_out, range_cutoff, channel_options);

    //we'll assume no associated viewpoint by default
    bool has_viewpoint = false;
    uint32_t vp_x_offset = 0;

    //we need to find the offset of the intensity field in the point cloud
    //we also know that the index field is guaranteed to exist since we 
    //set the channel option above. To be really safe, it might be worth
    //putting in a check at some point, but I'm just going to put in an
    //assert for now
    uint32_t index_offset = 0;
    for(unsigned int i = 0; i < cloud_out.fields.size(); ++i)
    {
      if(cloud_out.fields[i].name == "index")
      {
        index_offset = cloud_out.fields[i].offset;
      }

      //we want to check if the cloud has a viewpoint associated with it
      //checking vp_x should be sufficient since vp_x, vp_y, and vp_z all
      //get put in together
      if(cloud_out.fields[i].name == "vp_x")
      {
        has_viewpoint = true;
        vp_x_offset = cloud_out.fields[i].offset;
      }
    }

    ROS_ASSERT(index_offset > 0);

    cloud_out.header.frame_id = target_frame;

    // Extract transforms for the beginning and end of the laser scan
    ros::Time start_time = scan_in.header.stamp;
    ros::Time end_time   = scan_in.header.stamp + ros::Duration ().fromSec (scan_in.ranges.size () * scan_in.time_increment);

    tf::StampedTransform start_transform, end_transform, cur_transform ;

    tf.lookupTransform (target_frame, scan_in.header.frame_id, start_time, start_transform);
    tf.lookupTransform (target_frame, scan_in.header.frame_id, end_time, end_transform);

    double ranges_norm = 1 / ((double) scan_in.ranges.size () - 1.0);

    //we want to loop through all the points in the cloud
    for(size_t i = 0; i < cloud_out.width; ++i)
    {
      // Apply the transform to the current point
      float *pstep = (float*)&cloud_out.data[i * cloud_out.point_step + 0];

      //find the index of the point
      uint32_t pt_index;
      memcpy(&pt_index, &cloud_out.data[i * cloud_out.point_step + index_offset], sizeof(uint32_t));
      
      // Assume constant motion during the laser-scan, and use slerp to compute intermediate transforms
      tfScalar ratio = pt_index * ranges_norm;

      //! \todo Make a function that performs both the slerp and linear interpolation needed to interpolate a Full Transform (Quaternion + Vector)
      // Interpolate translation
      tf::Vector3 v (0, 0, 0);
      v.setInterpolate3 (start_transform.getOrigin (), end_transform.getOrigin (), ratio);
      cur_transform.setOrigin (v);

      // Interpolate rotation
      tf::Quaternion q1, q2;
      start_transform.getBasis ().getRotation (q1);
      end_transform.getBasis ().getRotation (q2);

      // Compute the slerp-ed rotation
      cur_transform.setRotation (slerp (q1, q2 , ratio));

      tf::Vector3 point_in (pstep[0], pstep[1], pstep[2]);
      tf::Vector3 point_out = cur_transform * point_in;

      // Copy transformed point into cloud
      pstep[0] = point_out.x ();
      pstep[1] = point_out.y ();
      pstep[2] = point_out.z ();

      // Convert the viewpoint as well
      if(has_viewpoint)
      {
        float *vpstep = (float*)&cloud_out.data[i * cloud_out.point_step + vp_x_offset];
        point_in = tf::Vector3 (vpstep[0], vpstep[1], vpstep[2]);
        point_out = cur_transform * point_in;

        // Copy transformed point into cloud
        vpstep[0] = point_out.x ();
        vpstep[1] = point_out.y ();
        vpstep[2] = point_out.z ();
      }
    }

    //if the user didn't request the index field, then we need to copy the PointCloud and drop it
    if(!requested_index)
    {
      sensor_msgs::PointCloud2 cloud_without_index;

      //copy basic meta data
      cloud_without_index.header = cloud_out.header;
      cloud_without_index.width = cloud_out.width;
      cloud_without_index.height = cloud_out.height;
      cloud_without_index.is_bigendian = cloud_out.is_bigendian;
      cloud_without_index.is_dense = cloud_out.is_dense;

      //copy the fields
      cloud_without_index.fields.resize(cloud_out.fields.size());
      unsigned int field_count = 0;
      unsigned int offset_shift = 0;
      for(unsigned int i = 0; i < cloud_out.fields.size(); ++i)
      {
        if(cloud_out.fields[i].name != "index")
        {
          cloud_without_index.fields[field_count] = cloud_out.fields[i];
          cloud_without_index.fields[field_count].offset -= offset_shift;
          ++field_count;
        }
        else
        {
          //once we hit the index, we'll set the shift
          offset_shift = 4;
        }
      }

      //resize the fields
      cloud_without_index.fields.resize(field_count);

      //compute the size of the new data
      cloud_without_index.point_step = cloud_out.point_step - offset_shift;
      cloud_without_index.row_step   = cloud_without_index.point_step * cloud_without_index.width;
      cloud_without_index.data.resize (cloud_without_index.row_step   * cloud_without_index.height);

      uint32_t i = 0;
      uint32_t j = 0;
      //copy over the data from one cloud to the other
      while (i < cloud_out.data.size())
      {
        if((i % cloud_out.point_step) < index_offset || (i % cloud_out.point_step) >= (index_offset + 4))
        {
          cloud_without_index.data[j++] = cloud_out.data[i];
        }
        i++;
      }

      //make sure to actually set the output
      cloud_out = cloud_without_index;
    }
  }
Example #8
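The sensor_msgs::PointCloud overload of the same routine; it uses the per-point index channel to interpolate a transform for every beam.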
  void
    LaserProjection::transformLaserScanToPointCloud_ (const std::string &target_frame, sensor_msgs::PointCloud &cloud_out, const sensor_msgs::LaserScan &scan_in,
                                                     tf::Transformer& tf, double range_cutoff, int mask)
  {
    cloud_out.header = scan_in.header;

    //check if the user has requested the index field
    bool requested_index = false;
    if ((mask & channel_option::Index))
      requested_index = true;

    //we need to make sure that we include the index in our mask
    //in order to guarantee that we get our timestamps right
    mask |= channel_option::Index;

    projectLaser_ (scan_in, cloud_out, range_cutoff, false, mask);

    cloud_out.header.frame_id = target_frame;

    // Extract transforms for the beginning and end of the laser scan
    ros::Time start_time = scan_in.header.stamp ;
    ros::Time end_time   = scan_in.header.stamp + ros::Duration().fromSec(scan_in.ranges.size()*scan_in.time_increment) ;

    tf::StampedTransform start_transform ;
    tf::StampedTransform end_transform ;
    tf::StampedTransform cur_transform ;

    tf.lookupTransform(target_frame, scan_in.header.frame_id, start_time, start_transform) ;
    tf.lookupTransform(target_frame, scan_in.header.frame_id, end_time, end_transform) ;

    //we need to find the index of the index channel
    int index_channel_idx = -1;
    for(unsigned int i = 0; i < cloud_out.channels.size(); ++i)
    {
      if(cloud_out.channels[i].name == "index")
      {
        index_channel_idx = i;
        break;
      }
    }

    //check just in case
    ROS_ASSERT(index_channel_idx >= 0);

    for(unsigned int i = 0; i < cloud_out.points.size(); ++i)
    {
      //get the index for this point
      uint32_t pt_index = cloud_out.channels[index_channel_idx].values[i];

      // Instead, assume constant motion during the laser-scan, and use slerp to compute intermediate transforms
      tfScalar ratio = pt_index / ( (double) scan_in.ranges.size() - 1.0) ;

      //! \todo Make a function that performs both the slerp and linear interpolation needed to interpolate a Full Transform (Quaternion + Vector)

      //Interpolate translation
      tf::Vector3 v (0, 0, 0);
      v.setInterpolate3(start_transform.getOrigin(), end_transform.getOrigin(), ratio) ;
      cur_transform.setOrigin(v) ;

      //Interpolate rotation
      tf::Quaternion q1, q2 ;
      start_transform.getBasis().getRotation(q1) ;
      end_transform.getBasis().getRotation(q2) ;

      // Compute the slerp-ed rotation
      cur_transform.setRotation( slerp( q1, q2 , ratio) ) ;

      // Apply the transform to the current point
      tf::Vector3 pointIn(cloud_out.points[i].x, cloud_out.points[i].y, cloud_out.points[i].z) ;
      tf::Vector3 pointOut = cur_transform * pointIn ;

      // Copy transformed point into cloud
      cloud_out.points[i].x  = pointOut.x();
      cloud_out.points[i].y  = pointOut.y();
      cloud_out.points[i].z  = pointOut.z();
    }

    //if the user didn't request the index, we want to remove it from the channels
    if(!requested_index)
      cloud_out.channels.erase(cloud_out.channels.begin() + index_channel_idx);
  }
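Both overloads repeat the same per-point interpolation; the TODO above could be factored into a helper along these lines (a sketch using only tf types, not part of the laser_geometry API):

// Sketch: interpolate a full transform by lerping the translation and slerping the rotation.
static tf::Transform
interpolateTransform(const tf::Transform &start, const tf::Transform &end, tfScalar ratio)
{
  // Linear interpolation of the translation.
  tf::Vector3 origin(0, 0, 0);
  origin.setInterpolate3(start.getOrigin(), end.getOrigin(), ratio);

  // Slerp of the rotation.
  tf::Quaternion q1, q2;
  start.getBasis().getRotation(q1);
  end.getBasis().getRotation(q2);

  return tf::Transform(slerp(q1, q2, ratio), origin);
}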
Example #9
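CARMEN ultrasonic handler: interpolates a visual odometry pose for the message, updates the world-to-car tf link, and feeds each of the four sonar readings into the grid map as a 180-beam constant-range fan.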
static void
ultrasonic_sensor_message_handler(carmen_ultrasonic_sonar_sensor_message *message)
{

	tf::Transform world_to_car_pose;
	double yaw, pitch, roll;

	if (!grid_mapping_initialized)
		return;
	
	int i;

	carmen_visual_odometry_pose6d_message odometry_message;
	odometry_message = interpolator.InterpolateMessages(message);

	Xt.x = odometry_message.pose_6d.x;
	Xt.y = odometry_message.pose_6d.y;
	Xt.theta = odometry_message.pose_6d.yaw;

	carmen_update_cells_below_robot(&carmen_map, Xt);

	world_to_car_pose.setOrigin(tf::Vector3(odometry_message.pose_6d.x, odometry_message.pose_6d.y, odometry_message.pose_6d.z));
	world_to_car_pose.setRotation(tf::Quaternion(odometry_message.pose_6d.yaw, odometry_message.pose_6d.pitch, odometry_message.pose_6d.roll));
	tf::StampedTransform world_to_car_transform(world_to_car_pose, tf::Time(0), "/world", "/car");
	tf_transformer.setTransform(world_to_car_transform, "world_to_car_transform");

	//SENSOR R1 - FRONT
	tf::StampedTransform world_to_ultrasonic_sensor_r1;
	tf_transformer.lookupTransform("/world", "/ultrasonic_sensor_r1", tf::Time(0), world_to_ultrasonic_sensor_r1);
	Xt_r1.x = world_to_ultrasonic_sensor_r1.getOrigin().x();
	Xt_r1.y = world_to_ultrasonic_sensor_r1.getOrigin().y();
	tf::Matrix3x3(world_to_ultrasonic_sensor_r1.getRotation()).getEulerYPR(yaw, pitch, roll);
	Xt_r1.theta = yaw;

	double range[180];	// each sonar reading is expanded into a 180-beam fan of constant range

	for (i=0 ; i<180 ; i++)
		range[i] = (double) message->sensor[3];

	carmen_grid_mapping_update_grid_map(&carmen_map, Xt_r1, range, &laser_params);
	carmen_grid_mapping_publish_message(&carmen_map, message->timestamp);

	//SENSOR R2 - FRONT SIDE
	tf::StampedTransform world_to_ultrasonic_sensor_r2;
	tf_transformer.lookupTransform("/world", "/ultrasonic_sensor_r2", tf::Time(0), world_to_ultrasonic_sensor_r2);
	Xt_r2.x = world_to_ultrasonic_sensor_r2.getOrigin().x();
	Xt_r2.y = world_to_ultrasonic_sensor_r2.getOrigin().y();
	tf::Matrix3x3(world_to_ultrasonic_sensor_r2.getRotation()).getEulerYPR(yaw, pitch, roll);
	Xt_r2.theta = yaw;

	for (i=0 ; i<180 ; i++)
		range[i] = (double) message->sensor[2];

	carmen_grid_mapping_update_grid_map(&carmen_map, Xt_r2, range, &laser_params);
	carmen_grid_mapping_publish_message(&carmen_map, message->timestamp);

	//SENSOR L2 - REAR SIDE
	tf::StampedTransform world_to_ultrasonic_sensor_l2;
	tf_transformer.lookupTransform("/world", "/ultrasonic_sensor_l2", tf::Time(0), world_to_ultrasonic_sensor_l2);
	Xt_l2.x = world_to_ultrasonic_sensor_l2.getOrigin().x();
	Xt_l2.y = world_to_ultrasonic_sensor_l2.getOrigin().y();
	tf::Matrix3x3(world_to_ultrasonic_sensor_l2.getRotation()).getEulerYPR(yaw, pitch, roll);
	Xt_l2.theta = yaw;

	for (i=0 ; i<180 ; i++)
		range[i] = (double) message->sensor[1];

	carmen_grid_mapping_update_grid_map(&carmen_map, Xt_l2, range, &laser_params);
	carmen_grid_mapping_publish_message(&carmen_map, message->timestamp);

	//SENSOR L1 - REAR
	tf::StampedTransform world_to_ultrasonic_sensor_l1;
	tf_transformer.lookupTransform("/world", "/ultrasonic_sensor_l1", tf::Time(0), world_to_ultrasonic_sensor_l1);
	Xt_l1.x = world_to_ultrasonic_sensor_l1.getOrigin().x();
	Xt_l1.y = world_to_ultrasonic_sensor_l1.getOrigin().y();
	tf::Matrix3x3(world_to_ultrasonic_sensor_l1.getRotation()).getEulerYPR(yaw, pitch, roll);
	Xt_l1.theta = yaw;

	for (i=0 ; i<180 ; i++)
		range[i] = (double) message->sensor[0];

	carmen_grid_mapping_update_grid_map(&carmen_map, Xt_l1, range, &laser_params);
	carmen_grid_mapping_publish_message(&carmen_map, message->timestamp);


	/*static int count = 1;

	char mapname[256];
	sprintf(mapname, "map%d.jpg", count);

	printf("saving map...\n");
	carmen_graphics_write_map_as_jpeg(mapname, &carmen_map, 0);
	printf("saved map...\n");

	count++;*/

}