bool planning_environment::KinematicModelStateMonitor::allJointsUpdated(ros::Duration allowed_dur) const {
  current_joint_values_lock_.lock();
  bool ret = true;
  for(std::map<std::string, double>::const_iterator it = current_joint_state_map_.begin();
      it != current_joint_state_map_.end();
      it++) {
    if(last_joint_update_.find(it->first) == last_joint_update_.end()) {
      ROS_DEBUG_STREAM("Joint " << it->first << " not yet updated");
      ret = false;
      continue;
    }
    if(allowed_dur != ros::Duration()) {
      ros::Duration dur = ros::Time::now()-last_joint_update_.find(it->first)->second; 
      if(dur > allowed_dur) {
        ROS_DEBUG_STREAM("Joint " << it->first << " last updated " << dur.toSec() << " where allowed duration is " << allowed_dur.toSec());
        ret = false;
        continue;
      }
    }
  }
  current_joint_values_lock_.unlock();
  return ret;
}
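The monitor above takes and releases current_joint_values_lock_ by hand, so an early return or exception between the two calls would leave the mutex held. Below is a minimal sketch of the same freshness check written with a scoped guard; the class and members are illustrative stand-ins (a single map of last-update times and a std::mutex), not the original planning_environment interface.

#include <map>
#include <string>
#include <mutex>
#include <ros/ros.h>

// Illustrative stand-in: returns true only if every tracked joint has been
// updated within allowed_dur. The guard releases the mutex on every exit path.
class JointFreshnessChecker {
public:
  bool allJointsUpdated(ros::Duration allowed_dur) const {
    std::lock_guard<std::mutex> guard(mutex_);
    bool ret = true;
    for (std::map<std::string, ros::Time>::const_iterator it = last_joint_update_.begin();
         it != last_joint_update_.end(); ++it) {
      if (allowed_dur != ros::Duration() && ros::Time::now() - it->second > allowed_dur) {
        ROS_DEBUG_STREAM("Joint " << it->first << " is stale");
        ret = false;
      }
    }
    return ret;
  }

  void markUpdated(const std::string& joint) {
    std::lock_guard<std::mutex> guard(mutex_);
    last_joint_update_[joint] = ros::Time::now();
  }

private:
  mutable std::mutex mutex_;
  std::map<std::string, ros::Time> last_joint_update_;
};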
Code example #2
 void read(ros::Time time, ros::Duration period)
 {
   for(int j=0; j < n_joints_; ++j)
   {
     joint_position_prev_[j] = joint_position_[j];
     joint_position_[j] += angles::shortest_angular_distance(joint_position_[j],
                             sim_joints_[j]->GetAngle(0).Radian());
     joint_position_kdl_(j) = joint_position_[j];
     // derivate velocity as in the real hardware instead of reading it from simulation
     joint_velocity_[j] = filters::exponentialSmoothing((joint_position_[j] - joint_position_prev_[j])/period.toSec(), joint_velocity_[j], 0.2);
     joint_effort_[j] = sim_joints_[j]->GetForce((int)(0));
     joint_stiffness_[j] = joint_stiffness_command_[j];
   }
 }
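Several of these hardware read() functions estimate joint velocity by finite-differencing the position and then smoothing the result with filters::exponentialSmoothing(raw, previous, alpha). A small self-contained sketch of that pattern follows; the helper below assumes the conventional update alpha*raw + (1 - alpha)*previous and is a local stand-in, not the filters package function.

// Assumed form of the smoothing used above: alpha weights the new sample,
// (1 - alpha) weights the running estimate.
static double exponentialSmoothing(double raw, double previous, double alpha)
{
  return alpha * raw + (1.0 - alpha) * previous;
}

// Backward-difference velocity followed by smoothing, mirroring the read() above.
static double estimateVelocity(double position, double previous_position,
                               double previous_velocity, double dt, double alpha = 0.2)
{
  const double raw_velocity = (position - previous_position) / dt;
  return exponentialSmoothing(raw_velocity, previous_velocity, alpha);
}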
Code example #3
int main(int argc, char **argv)
{
    const double radius = 0.5; // 0.5
    const double pitch_step = 0.2;
    const int circle_points = 6; // 6
    const int circle_rings = 3; // 3
    const double plan_time = 5;
    const int plan_retry = 1;
    //const int pose_id_max = circle_points * 3;
    const ros::Duration wait_settle(1.0);
    const ros::Duration wait_camera(0.5);
    const ros::Duration wait_notreached(3.0);
    const ros::Duration wait_reset(30.0);

    const std::string planning_group = "bumblebee"; //robot, bumblebee
    const std::string end_effector = "tool_flange"; // bumblebee_cam1, tool_flange

    const std::string plannerId = "PRMkConfigDefault";
    const std::string frame_id = "base_link";

    ros::init (argc, argv, "simple_pose_mission");
    ros::AsyncSpinner spinner(1);
    spinner.start();
    ros::NodeHandle node_handle;
    ros::Publisher cameratate_pub = node_handle.advertise<group4_msgs::PointCloudPose>("/robot_rx60b/camerapose", 5);
    ros::Publisher markerPublisher = node_handle.advertise<visualization_msgs::MarkerArray>("/simple_pose_mission/poses", 10);

    tf::TransformListener listener;

    // Marker
    MarkerPose marker("object_center");
    addObjectCenterMarker(marker);

    // Robot and scene
    moveit::planning_interface::MoveGroup group(planning_group);
    group.setPlannerId(plannerId);
    group.setPoseReferenceFrame(frame_id);
    //group.setWorkspace(-1.5,-1.5,1.5,1.5,0,1);
    group.setGoalTolerance(0.01f);
    group.setPlanningTime(plan_time);

    group4_msgs::PointCloudPose msg;
    msg.header.frame_id = frame_id;

    tf::Pose object_center = marker.getPose("object_center");
    NumberedPoses poses = generatePoses(object_center, circle_points, circle_rings, pitch_step, radius);
    NumberedPoses::iterator poses_itt;
    bool poses_updated = true;


    while (ros::ok())
    {
        bool success;

        if (poses_updated || poses_itt == poses.end())
        {
            poses_itt = poses.begin();
            poses_updated = false;
        }

        NumberedPose numbered_pose = *poses_itt;

        visualizePoses(poses, frame_id,markerPublisher, numbered_pose.pose_id);


        ROS_INFO("Moving to pose %i of %i", numbered_pose.pose_id, numbered_pose.pose_id_max);



        tf::StampedTransform transform;
        try{
          listener.lookupTransform("/bumblebee_cam1", "/tool_flange",
                                   ros::Time(0), transform);
        }
        catch (tf::TransformException ex){
          ROS_ERROR("%s",ex.what());
        }

        tf::Pose p = numbered_pose.pose_tf * transform;
        geometry_msgs::Pose desired_pose = tfPoseToGeometryPose(p);
        group.setPoseTarget(desired_pose, end_effector);

        success = false;
        for (int itry = 0; itry < plan_retry; itry++)
        {

            group.setStartStateToCurrentState();
            wait_notreached.sleep();
            success = group.move();
            if (success)
                break;
            else
                ROS_INFO("Plan unsuccessfull.. Retrying! (%i)", itry);
        }

        // Note: the camera poses below are published regardless of whether the move succeeded
        if (true)
        {

            wait_settle.sleep();

            geometry_msgs::PoseStamped carmine_pose = group.getCurrentPose("camera_link");
            geometry_msgs::PoseStamped bumblebee_pose_left = group.getCurrentPose("bumblebee_cam1");
            geometry_msgs::PoseStamped bumblebee_pose_right = group.getCurrentPose("bumblebee_cam2");

            msg.header.stamp = ros::Time::now();
            msg.pose_id.data = numbered_pose.pose_id;
            msg.pose_id_max.data = numbered_pose.pose_id_max;
            msg.spin_center_pose = tfPoseToGeometryPose(object_center);
            msg.carmine_pose = carmine_pose.pose;
            msg.bumblebee_pose_left = bumblebee_pose_left.pose;
            msg.bumblebee_pose_right = bumblebee_pose_right.pose;

            cameratate_pub.publish(msg);

            ROS_INFO("Waiting %f seconds for camera.", wait_camera.toSec());
            wait_camera.sleep();
        }

        if (poses_itt == poses.end())
        {
            ROS_INFO("Last pose reached. Waiting %f seconds before resetting", wait_reset.toSec());
            poses_itt = poses.begin();
            wait_reset.sleep();
        }
        else
            poses_itt++;

    }
    group.stop();
    return 0;
}
Code example #4
void JointPositionController::update(const ros::Time& time, const ros::Duration& period)
{
  command_struct_ = *(command_.readFromRT());
  double command_position = command_struct_.position_;
  double command_velocity = command_struct_.velocity_;
  bool has_velocity_ =  command_struct_.has_velocity_;

  double error, vel_error;
  double commanded_effort;

  double current_position = joint_.getPosition();

  // Make sure joint is within limits if applicable
  enforceJointLimits(command_position);

  // Compute position error
  if (joint_urdf_->type == urdf::Joint::REVOLUTE)
  {
   angles::shortest_angular_distance_with_limits(
      current_position,
      command_position,
      joint_urdf_->limits->lower,
      joint_urdf_->limits->upper,
      error);
  }
  else if (joint_urdf_->type == urdf::Joint::CONTINUOUS)
  {
    error = angles::shortest_angular_distance(current_position, command_position);
  }
  else //prismatic
  {
    error = command_position - current_position;
  }

  // Decide which of the two PID computeCommand() methods to call
  if (has_velocity_)
  {
    // Compute velocity error if a non-zero velocity command was given
    vel_error = command_velocity - joint_.getVelocity();

    // Set the PID error and compute the PID command with nonuniform
    // time step size. This also allows the user to pass in a precomputed derivative error.
    commanded_effort = pid_controller_.computeCommand(error, vel_error, period);
  }
  else
  {
    // Set the PID error and compute the PID command with nonuniform
    // time step size.
    commanded_effort = pid_controller_.computeCommand(error, period);
  }

  joint_.setCommand(commanded_effort);

  // publish state
  if (loop_count_ % 10 == 0)
  {
    if(controller_state_publisher_ && controller_state_publisher_->trylock())
    {
      controller_state_publisher_->msg_.header.stamp = time;
      controller_state_publisher_->msg_.set_point = command_position;
      controller_state_publisher_->msg_.process_value = current_position;
      controller_state_publisher_->msg_.process_value_dot = joint_.getVelocity();
      controller_state_publisher_->msg_.error = error;
      controller_state_publisher_->msg_.time_step = period.toSec();
      controller_state_publisher_->msg_.command = commanded_effort;

      double dummy;
      bool antiwindup;
      getGains(controller_state_publisher_->msg_.p,
        controller_state_publisher_->msg_.i,
        controller_state_publisher_->msg_.d,
        controller_state_publisher_->msg_.i_clamp,
        dummy,
        antiwindup);
      controller_state_publisher_->msg_.antiwindup = static_cast<char>(antiwindup);
      controller_state_publisher_->unlockAndPublish();
    }
  }
  loop_count_++;
}
Code example #5
bool LWRHWreal::read(ros::Time time, ros::Duration period)
{
  // update the robot positions
  for (int j = 0; j < LBR_MNJ; j++)
  {
  	joint_position_prev_[j] = joint_position_[j];
    joint_position_[j] = device_->getMsrMsrJntPosition()[j];
    joint_effort_[j] = device_->getMsrJntTrq()[j];
    joint_velocity_[j] = filters::exponentialSmoothing((joint_position_[j]-joint_position_prev_[j])/period.toSec(), joint_velocity_[j], 0.2);
    joint_stiffness_[j] = joint_stiffness_command_[j];
  }
  
  //this->device_->interface->doDataExchange();

  return true;
}
Code example #6
	// read 'measurement' joint values
	void PanTiltHW::read(ros::Time time, ros::Duration period)
	{
		client_->Receive();
		
		for(int i=0; i<n_joints_; i++)
		{
			// store the previous position before overwriting it; otherwise the
			// finite-difference velocity below is always zero
			joint_position_prev_[i] = joint_position_[i];
			joint_position_[i] = client_->drives[i].state.position;
			joint_velocity_[i] = filters::exponentialSmoothing((joint_position_[i] - joint_position_prev_[i])/period.toSec(), joint_velocity_[i], 0.2);
		}
	}
Code example #7
File: amcl_node.cpp  Project: Renda110/AGVC
void AmclNode::reconfigureCB(AMCLConfig &config, uint32_t level)
{
    boost::recursive_mutex::scoped_lock cfl(configuration_mutex_);

    //we don't want to do anything on the first call
    //which corresponds to startup
    if(first_reconfigure_call_)
    {
        first_reconfigure_call_ = false;
        default_config_ = config;
        return;
    }

    if(config.restore_defaults) {
        config = default_config_;
        //avoid looping
        config.restore_defaults = false;
    }

    d_thresh_ = config.update_min_d;
    a_thresh_ = config.update_min_a;

    resample_interval_ = config.resample_interval;

    laser_min_range_ = config.laser_min_range;
    laser_max_range_ = config.laser_max_range;

    gui_publish_period = ros::Duration(1.0/config.gui_publish_rate);
    save_pose_period = ros::Duration(1.0/config.save_pose_rate);

    transform_tolerance_.fromSec(config.transform_tolerance);

    max_beams_ = config.laser_max_beams;
    alpha1_ = config.odom_alpha1;
    alpha2_ = config.odom_alpha2;
    alpha3_ = config.odom_alpha3;
    alpha4_ = config.odom_alpha4;
    alpha5_ = config.odom_alpha5;

    z_hit_ = config.laser_z_hit;
    z_short_ = config.laser_z_short;
    z_max_ = config.laser_z_max;
    z_rand_ = config.laser_z_rand;
    sigma_hit_ = config.laser_sigma_hit;
    lambda_short_ = config.laser_lambda_short;
    laser_likelihood_max_dist_ = config.laser_likelihood_max_dist;

    if(config.laser_model_type == "beam")
        laser_model_type_ = LASER_MODEL_BEAM;
    else if(config.laser_model_type == "likelihood_field")
        laser_model_type_ = LASER_MODEL_LIKELIHOOD_FIELD;

    if(config.odom_model_type == "diff")
        odom_model_type_ = ODOM_MODEL_DIFF;
    else if(config.odom_model_type == "omni")
        odom_model_type_ = ODOM_MODEL_OMNI;
    else if(config.odom_model_type == "diff-corrected")
        odom_model_type_ = ODOM_MODEL_DIFF_CORRECTED;
    else if(config.odom_model_type == "omni-corrected")
        odom_model_type_ = ODOM_MODEL_OMNI_CORRECTED;

    if(config.min_particles > config.max_particles)
    {
        ROS_WARN("You've set min_particles to be less than max particles, this isn't allowed so they'll be set to be equal.");
        config.max_particles = config.min_particles;
    }

    min_particles_ = config.min_particles;
    max_particles_ = config.max_particles;
    alpha_slow_ = config.recovery_alpha_slow;
    alpha_fast_ = config.recovery_alpha_fast;
    tf_broadcast_ = config.tf_broadcast;

    pf_ = pf_alloc(min_particles_, max_particles_,
                   alpha_slow_, alpha_fast_,
                   (pf_init_model_fn_t)AmclNode::uniformPoseGenerator,
                   (void *)map_);
    pf_err_ = config.kld_err;
    pf_z_ = config.kld_z;
    pf_->pop_err = pf_err_;
    pf_->pop_z = pf_z_;

    // Initialize the filter
    pf_vector_t pf_init_pose_mean = pf_vector_zero();
    pf_init_pose_mean.v[0] = last_published_pose.pose.pose.position.x;
    pf_init_pose_mean.v[1] = last_published_pose.pose.pose.position.y;
    pf_init_pose_mean.v[2] = tf::getYaw(last_published_pose.pose.pose.orientation);
    pf_matrix_t pf_init_pose_cov = pf_matrix_zero();
    pf_init_pose_cov.m[0][0] = last_published_pose.pose.covariance[6*0+0];
    pf_init_pose_cov.m[1][1] = last_published_pose.pose.covariance[6*1+1];
    pf_init_pose_cov.m[2][2] = last_published_pose.pose.covariance[6*5+5];
    pf_init(pf_, pf_init_pose_mean, pf_init_pose_cov);
    pf_init_ = false;

    // Instantiate the sensor objects
    // Odometry
    delete odom_;
    odom_ = new AMCLOdom();
    ROS_ASSERT(odom_);
    odom_->SetModel( odom_model_type_, alpha1_, alpha2_, alpha3_, alpha4_, alpha5_ );
    // Laser
    delete laser_;
    laser_ = new AMCLLaser(max_beams_, map_);
    ROS_ASSERT(laser_);
    if(laser_model_type_ == LASER_MODEL_BEAM)
        laser_->SetModelBeam(z_hit_, z_short_, z_max_, z_rand_,
                             sigma_hit_, lambda_short_, 0.0);
    else
    {
        ROS_INFO("Initializing likelihood field model; this can take some time on large maps...");
        laser_->SetModelLikelihoodField(z_hit_, z_rand_, sigma_hit_,
                                        laser_likelihood_max_dist_);
        ROS_INFO("Done initializing likelihood field model.");
    }

    odom_frame_id_ = config.odom_frame_id;
    base_frame_id_ = config.base_frame_id;
    global_frame_id_ = config.global_frame_id;

    delete laser_scan_filter_;
    laser_scan_filter_ =
        new tf::MessageFilter<sensor_msgs::LaserScan>(*laser_scan_sub_,
                *tf_,
                odom_frame_id_,
                100);
    laser_scan_filter_->registerCallback(boost::bind(&AmclNode::laserReceived,
                                         this, _1));

    initial_pose_sub_ = nh_.subscribe("initialpose", 2, &AmclNode::initialPoseReceived, this);
}
Code example #8
static void scan_callback(const sensor_msgs::PointCloud2::ConstPtr& input)
{
	if (map_loaded == 1 && init_pos_set == 1) {
		matching_start = std::chrono::system_clock::now();

		static tf::TransformBroadcaster br;
		tf::Transform transform;
		tf::Quaternion predict_q, ndt_q, current_q, control_q, localizer_q;

		pcl::PointXYZ p;
		pcl::PointCloud<pcl::PointXYZ> scan;

		current_scan_time = input->header.stamp;

		pcl::fromROSMsg(*input, scan);

		if(_localizer == "velodyne"){
			pcl::PointCloud<velodyne_pointcloud::PointXYZIR> tmp;
			pcl::fromROSMsg(*input, tmp);
			scan.points.clear();

			for (pcl::PointCloud<velodyne_pointcloud::PointXYZIR>::const_iterator item = tmp.begin(); item != tmp.end(); item++) {
				p.x = (double) item->x;
				p.y = (double) item->y;
				p.z = (double) item->z;
				if(item->ring >= min && item->ring <= max && item->ring % layer == 0 ){
					scan.points.push_back(p);
				}
			}
		}

		pcl::PointCloud<pcl::PointXYZ>::Ptr scan_ptr(new pcl::PointCloud<pcl::PointXYZ>(scan));
		pcl::PointCloud<pcl::PointXYZ>::Ptr filtered_scan_ptr(new pcl::PointCloud<pcl::PointXYZ>());

		Eigen::Matrix4f t(Eigen::Matrix4f::Identity()); // base_link
		Eigen::Matrix4f t2(Eigen::Matrix4f::Identity()); // localizer

		// Downsampling the velodyne scan using VoxelGrid filter
		pcl::VoxelGrid<pcl::PointXYZ> voxel_grid_filter;
		voxel_grid_filter.setLeafSize(voxel_leaf_size, voxel_leaf_size, voxel_leaf_size);
		voxel_grid_filter.setInputCloud(scan_ptr);
		voxel_grid_filter.filter(*filtered_scan_ptr);

		// Setting point cloud to be aligned.
		ndt.setInputSource(filtered_scan_ptr);

		// Guess the initial gross estimation of the transformation
		predict_pose.x = previous_pose.x + offset_x;
		predict_pose.y = previous_pose.y + offset_y;
		predict_pose.z = previous_pose.z + offset_z;
		predict_pose.roll = previous_pose.roll;
		predict_pose.pitch = previous_pose.pitch;
		predict_pose.yaw = previous_pose.yaw + offset_yaw;

		Eigen::Translation3f init_translation(predict_pose.x, predict_pose.y, predict_pose.z);
		Eigen::AngleAxisf init_rotation_x(predict_pose.roll, Eigen::Vector3f::UnitX());
		Eigen::AngleAxisf init_rotation_y(predict_pose.pitch, Eigen::Vector3f::UnitY());
		Eigen::AngleAxisf init_rotation_z(predict_pose.yaw, Eigen::Vector3f::UnitZ());
		Eigen::Matrix4f init_guess = (init_translation * init_rotation_z * init_rotation_y * init_rotation_x) * tf_btol;

		pcl::PointCloud<pcl::PointXYZ>::Ptr output_cloud(new pcl::PointCloud<pcl::PointXYZ>);
		ndt.align(*output_cloud, init_guess);

		t = ndt.getFinalTransformation(); // localizer
		t2 = t * tf_ltob; // base_link

		iteration = ndt.getFinalNumIteration();
		score = ndt.getFitnessScore();
		trans_probability = ndt.getTransformationProbability();

		tf::Matrix3x3 mat_l; // localizer
		mat_l.setValue(static_cast<double>(t(0, 0)), static_cast<double>(t(0, 1)), static_cast<double>(t(0, 2)),
				static_cast<double>(t(1, 0)), static_cast<double>(t(1, 1)), static_cast<double>(t(1, 2)),
				static_cast<double>(t(2, 0)), static_cast<double>(t(2, 1)), static_cast<double>(t(2, 2)));

		// Update ndt_pose
		localizer_pose.x = t(0, 3);
		localizer_pose.y = t(1, 3);
		localizer_pose.z = t(2, 3);
		mat_l.getRPY(localizer_pose.roll, localizer_pose.pitch, localizer_pose.yaw, 1);

		tf::Matrix3x3 mat_b; // base_link
		mat_b.setValue(static_cast<double>(t2(0, 0)), static_cast<double>(t2(0, 1)), static_cast<double>(t2(0, 2)),
				static_cast<double>(t2(1, 0)), static_cast<double>(t2(1, 1)), static_cast<double>(t2(1, 2)),
				static_cast<double>(t2(2, 0)), static_cast<double>(t2(2, 1)), static_cast<double>(t2(2, 2)));

		// Update ndt_pose
		ndt_pose.x = t2(0, 3);
		ndt_pose.y = t2(1, 3);
		ndt_pose.z = t2(2, 3);
		mat_b.getRPY(ndt_pose.roll, ndt_pose.pitch, ndt_pose.yaw, 1);

		// Compute the velocity
		scan_duration = current_scan_time - previous_scan_time;
		double secs = scan_duration.toSec();
		double distance = sqrt((ndt_pose.x - previous_pose.x) * (ndt_pose.x - previous_pose.x) +
				(ndt_pose.y - previous_pose.y) * (ndt_pose.y - previous_pose.y) +
				(ndt_pose.z - previous_pose.z) * (ndt_pose.z - previous_pose.z));

		predict_pose_error = sqrt((ndt_pose.x - predict_pose.x) * (ndt_pose.x - predict_pose.x) +
				(ndt_pose.y - predict_pose.y) * (ndt_pose.y - predict_pose.y) +
				(ndt_pose.z - predict_pose.z) * (ndt_pose.z - predict_pose.z));

		current_velocity = distance / secs;
		current_velocity_smooth = (current_velocity + previous_velocity + second_previous_velocity) / 3.0;
		if(current_velocity_smooth < 0.2){
			current_velocity_smooth = 0.0;
		}
		current_acceleration = (current_velocity - previous_velocity) / secs;

		estimated_vel_mps.data = current_velocity;
		estimated_vel_kmph.data = current_velocity * 3.6;

		estimated_vel_mps_pub.publish(estimated_vel_mps);
		estimated_vel_kmph_pub.publish(estimated_vel_kmph);

		if(predict_pose_error <= PREDICT_POSE_THRESHOLD){
			use_predict_pose = 0;
		}else{
			use_predict_pose = 1;
		}
		use_predict_pose = 0;

		if(use_predict_pose == 0){
			current_pose.x = ndt_pose.x;
			current_pose.y = ndt_pose.y;
			current_pose.z = ndt_pose.z;
			current_pose.roll = ndt_pose.roll;
			current_pose.pitch = ndt_pose.pitch;
			current_pose.yaw = ndt_pose.yaw;
		}else{
			current_pose.x = predict_pose.x;
			current_pose.y = predict_pose.y;
			current_pose.z = predict_pose.z;
			current_pose.roll = predict_pose.roll;
			current_pose.pitch = predict_pose.pitch;
			current_pose.yaw = predict_pose.yaw;
		}

		control_pose.roll = current_pose.roll;
		control_pose.pitch = current_pose.pitch;
		control_pose.yaw = current_pose.yaw - angle / 180.0 * M_PI;
		double theta = control_pose.yaw;
		control_pose.x = cos(theta) * (-control_shift_x) + sin(theta) * (-control_shift_y) + current_pose.x;
		control_pose.y = -sin(theta) * (-control_shift_x) + cos(theta) * (-control_shift_y) + current_pose.y;
		control_pose.z = current_pose.z - control_shift_z;

		// Set values for publishing pose
		predict_q.setRPY(predict_pose.roll, predict_pose.pitch, predict_pose.yaw);
		predict_pose_msg.header.frame_id = "/map";
		predict_pose_msg.header.stamp = current_scan_time;
		predict_pose_msg.pose.position.x = predict_pose.x;
		predict_pose_msg.pose.position.y = predict_pose.y;
		predict_pose_msg.pose.position.z = predict_pose.z;
		predict_pose_msg.pose.orientation.x = predict_q.x();
		predict_pose_msg.pose.orientation.y = predict_q.y();
		predict_pose_msg.pose.orientation.z = predict_q.z();
		predict_pose_msg.pose.orientation.w = predict_q.w();

		ndt_q.setRPY(ndt_pose.roll, ndt_pose.pitch, ndt_pose.yaw);
		ndt_pose_msg.header.frame_id = "/map";
		ndt_pose_msg.header.stamp = current_scan_time;
		ndt_pose_msg.pose.position.x = ndt_pose.x;
		ndt_pose_msg.pose.position.y = ndt_pose.y;
		ndt_pose_msg.pose.position.z = ndt_pose.z;
		ndt_pose_msg.pose.orientation.x = ndt_q.x();
		ndt_pose_msg.pose.orientation.y = ndt_q.y();
		ndt_pose_msg.pose.orientation.z = ndt_q.z();
		ndt_pose_msg.pose.orientation.w = ndt_q.w();

		current_q.setRPY(current_pose.roll, current_pose.pitch, current_pose.yaw);
		current_pose_msg.header.frame_id = "/map";
		current_pose_msg.header.stamp = current_scan_time;
		current_pose_msg.pose.position.x = current_pose.x;
		current_pose_msg.pose.position.y = current_pose.y;
		current_pose_msg.pose.position.z = current_pose.z;
		current_pose_msg.pose.orientation.x = current_q.x();
		current_pose_msg.pose.orientation.y = current_q.y();
		current_pose_msg.pose.orientation.z = current_q.z();
		current_pose_msg.pose.orientation.w = current_q.w();

		control_q.setRPY(control_pose.roll, control_pose.pitch, control_pose.yaw);
		control_pose_msg.header.frame_id = "/map";
		control_pose_msg.header.stamp = current_scan_time;
		control_pose_msg.pose.position.x = control_pose.x;
		control_pose_msg.pose.position.y = control_pose.y;
		control_pose_msg.pose.position.z = control_pose.z;
		control_pose_msg.pose.orientation.x = control_q.x();
		control_pose_msg.pose.orientation.y = control_q.y();
		control_pose_msg.pose.orientation.z = control_q.z();
		control_pose_msg.pose.orientation.w = control_q.w();

		localizer_q.setRPY(localizer_pose.roll, localizer_pose.pitch, localizer_pose.yaw);
		localizer_pose_msg.header.frame_id = "/map";
		localizer_pose_msg.header.stamp = current_scan_time;
		localizer_pose_msg.pose.position.x = localizer_pose.x;
		localizer_pose_msg.pose.position.y = localizer_pose.y;
		localizer_pose_msg.pose.position.z = localizer_pose.z;
		localizer_pose_msg.pose.orientation.x = localizer_q.x();
		localizer_pose_msg.pose.orientation.y = localizer_q.y();
		localizer_pose_msg.pose.orientation.z = localizer_q.z();
		localizer_pose_msg.pose.orientation.w = localizer_q.w();

		predict_pose_pub.publish(predict_pose_msg);
		ndt_pose_pub.publish(ndt_pose_msg);
		current_pose_pub.publish(current_pose_msg);
		control_pose_pub.publish(control_pose_msg);
		localizer_pose_pub.publish(localizer_pose_msg);

		// Send TF "/base_link" to "/map"
		transform.setOrigin(tf::Vector3(current_pose.x, current_pose.y, current_pose.z));
		transform.setRotation(current_q);
		br.sendTransform(tf::StampedTransform(transform, current_scan_time, "/map", "/base_link"));

		matching_end = std::chrono::system_clock::now();
		exe_time = std::chrono::duration_cast<std::chrono::microseconds>(matching_end-matching_start).count()/1000.0;
		time_ndt_matching.data = exe_time;
		time_ndt_matching_pub.publish(time_ndt_matching);

		// Set values for /estimate_twist
		angular_velocity = (current_pose.yaw - previous_pose.yaw) / secs;

		estimate_twist_msg.header.stamp = current_scan_time;
		estimate_twist_msg.twist.linear.x = current_velocity;
		estimate_twist_msg.twist.linear.y = 0.0;
		estimate_twist_msg.twist.linear.z = 0.0;
		estimate_twist_msg.twist.angular.x = 0.0;
		estimate_twist_msg.twist.angular.y = 0.0;
		estimate_twist_msg.twist.angular.z = angular_velocity;

		estimate_twist_pub.publish(estimate_twist_msg);

		geometry_msgs::Vector3Stamped estimate_vel_msg;
		estimate_vel_msg.header.stamp = current_scan_time;
		estimate_vel_msg.vector.x = current_velocity;
		estimated_vel_pub.publish(estimate_vel_msg);

		// Set values for /ndt_stat
		ndt_stat_msg.header.stamp = current_scan_time;
		ndt_stat_msg.exe_time = time_ndt_matching.data;
		ndt_stat_msg.iteration = iteration;
		ndt_stat_msg.score = score;
		ndt_stat_msg.velocity = current_velocity;
		ndt_stat_msg.acceleration = current_acceleration;
		ndt_stat_msg.use_predict_pose = 0;

		ndt_stat_pub.publish(ndt_stat_msg);

		/* Compute NDT_Reliability */
		ndt_reliability.data = Wa * (exe_time/100.0) * 100.0 + Wb * (iteration/10.0) * 100.0 + Wc * ((2.0-trans_probability)/2.0) * 100.0;
		ndt_reliability_pub.publish(ndt_reliability);

#ifdef OUTPUT
		// Output log.csv
		std::ofstream ofs_log("log.csv", std::ios::app);
		if (!ofs_log) {
			std::cerr << "Could not open 'log.csv'." << std::endl;
			exit(1);
		}
		ofs_log << input->header.seq << ","
				<< step_size << ","
				<< trans_eps << ","
				<< voxel_leaf_size << ","
				<< current_pose.x << ","
				<< current_pose.y << ","
				<< current_pose.z << ","
				<< current_pose.roll << ","
				<< current_pose.pitch << ","
				<< current_pose.yaw << ","
				<< predict_pose.x << ","
				<< predict_pose.y << ","
				<< predict_pose.z << ","
				<< predict_pose.roll << ","
				<< predict_pose.pitch << ","
				<< predict_pose.yaw << ","
				<< current_pose.x - predict_pose.x << ","
				<< current_pose.y - predict_pose.y << ","
				<< current_pose.z - predict_pose.z << ","
				<< current_pose.roll - predict_pose.roll << ","
				<< current_pose.pitch - predict_pose.pitch << ","
				<< current_pose.yaw - predict_pose.yaw << ","
				<< predict_pose_error << ","
				<< iteration << ","
				<< score << ","
				<< trans_probability << ","
				<< ndt_reliability.data << ","
				<< current_velocity << ","
				<< current_velocity_smooth << ","
				<< current_acceleration << ","
				<< angular_velocity << ","
				<< time_ndt_matching.data << ","
				<< std::endl;
#endif

		std::cout << "-----------------------------------------------------------------" << std::endl;
		std::cout << "Sequence: " << input->header.seq << std::endl;
		std::cout << "Timestamp: " << input->header.stamp << std::endl;
		std::cout << "Frame ID: " << input->header.frame_id << std::endl;
		std::cout << "Number of Scan Points: " << scan_ptr->size() << " points." << std::endl;
		std::cout << "Number of Filtered Scan Points: " << filtered_scan_ptr->size() << " points." << std::endl;
		std::cout << "Leaf Size: " << voxel_leaf_size << std::endl;
		std::cout << "NDT has converged: " << ndt.hasConverged() << std::endl;
		std::cout << "Fitness Score: " << ndt.getFitnessScore() << std::endl;
		std::cout << "Transformation Probability: " << ndt.getTransformationProbability() << std::endl;
		std::cout << "Execution Time: " << exe_time << " ms." << std::endl;
		std::cout << "Number of Iterations: " << ndt.getFinalNumIteration() << std::endl;
		std::cout << "NDT Reliability: " << ndt_reliability.data << std::endl;
		std::cout << "(x,y,z,roll,pitch,yaw): " << std::endl;
		std::cout << "(" << current_pose.x << ", " << current_pose.y << ", " << current_pose.z
				<< ", " << current_pose.roll << ", " << current_pose.pitch << ", " << current_pose.yaw << ")" << std::endl;
		std::cout << "Transformation Matrix: " << std::endl;
		std::cout << t << std::endl;
		std::cout << "-----------------------------------------------------------------" << std::endl;

		// Update previous_***
		offset_x = current_pose.x - previous_pose.x;
		offset_y = current_pose.y - previous_pose.y;
		offset_z = current_pose.z - previous_pose.z;
		offset_yaw = current_pose.yaw - previous_pose.yaw;

		previous_pose.x = current_pose.x;
		previous_pose.y = current_pose.y;
		previous_pose.z = current_pose.z;
		previous_pose.roll = current_pose.roll;
		previous_pose.pitch = current_pose.pitch;
		previous_pose.yaw = current_pose.yaw;

		previous_scan_time.sec = current_scan_time.sec;
		previous_scan_time.nsec = current_scan_time.nsec;

		second_previous_velocity = previous_velocity;
		previous_velocity = current_velocity;
		previous_acceleration = current_acceleration;
		previous_estimated_vel_kmph.data = estimated_vel_kmph.data;
	}
}
Code example #9
void OmniDriveController::update(const ros::Time& time, const ros::Duration& period)
{
	//get the wheel velocity from the handle
	double wheel1_vel = wheel1_joint.getVelocity();
	double wheel2_vel = wheel2_joint.getVelocity();
	double wheel3_vel = wheel3_joint.getVelocity();

	// Estimate linear and angular velocity using joint information
	odometry_.update(wheel1_vel, wheel2_vel, wheel3_vel, time);

	// Publish odometry message
	if(last_state_publish_time_ + publish_period_ < time)
	{
		last_state_publish_time_ += publish_period_;
		// Compute and store orientation info
		const geometry_msgs::Quaternion orientation(
				tf::createQuaternionMsgFromYaw(odometry_.getHeading()));

		// Populate odom message and publish
		if(odom_pub_->trylock())
		{
			odom_pub_->msg_.header.stamp = time;
			odom_pub_->msg_.pose.pose.position.x = odometry_.getX();
			odom_pub_->msg_.pose.pose.position.y = odometry_.getY();
			odom_pub_->msg_.pose.pose.orientation = orientation;
			odom_pub_->msg_.twist.twist.linear.x  = odometry_.getLinearX();
			odom_pub_->msg_.twist.twist.linear.y  = odometry_.getLinearY();
			odom_pub_->msg_.twist.twist.angular.z = odometry_.getAngular();
			odom_pub_->unlockAndPublish();
		}

		// Publish tf /odom frame
		if (enable_odom_tf_ && tf_odom_pub_->trylock())
		{
			geometry_msgs::TransformStamped& odom_frame = tf_odom_pub_->msg_.transforms[0];
			odom_frame.header.stamp = time;
			odom_frame.transform.translation.x = odometry_.getX();
			odom_frame.transform.translation.y = odometry_.getY();
			odom_frame.transform.rotation = orientation;
			tf_odom_pub_->unlockAndPublish();
		}
	}

	// MOVE ROBOT
	// Retrieve current velocity command and time step:
	Commands curr_cmd = *(command_.readFromRT());
	const double dt = (time - curr_cmd.stamp).toSec();

	// Brake if cmd_vel has timeout:
	if (dt > cmd_vel_timeout_)
	{
		curr_cmd.linear_x = 0.0;
		curr_cmd.linear_y = 0.0;
		curr_cmd.angular = 0.0;
	}

	// Limit velocities and accelerations:
	const double cmd_dt(period.toSec());
	limiter_linear_x_.limit(curr_cmd.linear_x, last_cmd_.linear_x, cmd_dt);
	limiter_linear_y_.limit(curr_cmd.linear_y, last_cmd_.linear_y, cmd_dt);
	limiter_angular_. limit(curr_cmd.angular,  last_cmd_.angular,  cmd_dt);
	last_cmd_ = curr_cmd;

	// Compute wheels velocities:
	const double vth = curr_cmd.angular, vx = - curr_cmd.linear_x, vy = curr_cmd.linear_y;
	const double psi = 2 * 3.14159265 / 3;
	const double L = wheel_center_;

	const double wheel1_cmd  = (L*vth - vy) / wheel_radius_;
	const double wheel2_cmd  = (L*vth + vy*cos(psi/2) - vx*sin(psi/2)) / wheel_radius_;
	const double wheel3_cmd  = (L*vth + vy*cos(psi/2) + vx*sin(psi/2)) / wheel_radius_;

	ROS_INFO("send velocity is %f %f %f", wheel1_cmd,wheel2_cmd,wheel3_cmd);

	wheel1_joint.setCommand(wheel1_cmd);
	wheel2_joint.setCommand(wheel2_cmd);
	wheel3_joint.setCommand(wheel3_cmd);
}
Code example #10
void JointPositionController::update(const ros::Time &time, const ros::Duration &period) {
    ROS_DEBUG_STREAM("updating");
    
    // compute velocities and accelerations by hand just to be sure
    for (int i = 0; i < n_joints_; i++) {
        double current_position = joints_[i].getPosition();
        current_velocities_[i] = (current_position - previous_positions_[i]) / period.toSec();
        current_accelerations_[i] = (current_velocities_[i] - previous_velocities_[i]) / period.toSec();
        previous_positions_[i] = current_position;
        previous_velocities_[i] = current_velocities_[i];
    }
    
    if (new_reference_) {
        // Read the latest commanded trajectory message
        commanded_trajectory_ = *trajectory_command_buffer_.readFromRT();
        new_reference_ = false;
        
        for (int i = 0; i < n_joints_; i ++) {
            if (std::abs(commanded_trajectory_.positions[i] - last_commanded_trajectory_.positions[i]) > command_update_tolerance_) {
                must_recompute_trajectory_ = true;
                reached_reference_ = false;
                last_commanded_trajectory_ = commanded_trajectory_;
            }
        }
    }

    // Initialize RML result
    int rml_result = 0;
    
    ROS_DEBUG_STREAM("checking for having to recompute");


    // Compute RML traj after the start time and if there are still points in the queue
    if (must_recompute_trajectory_) {
        // Compute the trajectory
        ROS_WARN_STREAM("RML Recomputing trajectory...");

        // Update RML input parameters
        for (int i = 0; i < n_joints_; i++) {
            
            rml_in_->CurrentPositionVector->VecData[i] = joints_[i].getPosition();
            rml_in_->CurrentVelocityVector->VecData[i] = current_velocities_[i];
            rml_in_->CurrentAccelerationVector->VecData[i] = current_accelerations_[i];

            rml_in_->TargetPositionVector->VecData[i] = commanded_trajectory_.positions[i];
            rml_in_->TargetVelocityVector->VecData[i] = commanded_trajectory_.velocities[i];

            rml_in_->SelectionVector->VecData[i] = true;
        }
        
        
        ROS_DEBUG_STREAM("Current position: " << std::endl << *(rml_in_->CurrentPositionVector));
        ROS_DEBUG_STREAM("Target position: " << std::endl << *(rml_in_->TargetPositionVector));

        // Store the traj start time
        traj_start_time_ = time;

        // Set desired execution time for this trajectory (definitely > 0)
        rml_in_->SetMinimumSynchronizationTime(minimum_synchronization_time_);

//         ROS_DEBUG_STREAM("RML IN: time: " << rml_in_->GetMinimumSynchronizationTime());

        // Specify behavior after reaching point
        rml_flags_.BehaviorAfterFinalStateOfMotionIsReached = recompute_trajectory_ ? RMLPositionFlags::RECOMPUTE_TRAJECTORY : RMLPositionFlags::KEEP_TARGET_VELOCITY;
        rml_flags_.SynchronizationBehavior = RMLPositionFlags::ONLY_TIME_SYNCHRONIZATION;
        rml_flags_.KeepCurrentVelocityInCaseOfFallbackStrategy = true;

        // Compute trajectory
        rml_result = rml_->RMLPosition(*rml_in_.get(),
                                       rml_out_.get(),
                                       rml_flags_);

        // Disable recompute flag
        must_recompute_trajectory_ = false;
    } 
    
    // Sample the already computed trajectory
    rml_result = rml_->RMLPositionAtAGivenSampleTime(
                        (time - traj_start_time_ + period).toSec(),
                        rml_out_.get());

    // Determine if any of the joint tolerances have been violated
    for (int i = 0; i < n_joints_; i++) {
        double tracking_error = std::abs(rml_out_->NewPositionVector->VecData[i] - joints_[i].getPosition());

        if (tracking_error > position_tolerances_[i]) {
            must_recompute_trajectory_ = true;
            ROS_WARN_STREAM("Tracking for joint " << i << " outside of tolerance! (" << tracking_error 
                << " > " << position_tolerances_[i] << ")");
        }
    }

    // Compute command
    for (int i = 0; i < n_joints_; i++) {
        commanded_positions_[i] = rml_out_->NewPositionVector->VecData[i];
    }

    // Only set a different position command if the RML call did not report an error
    switch (rml_result) {
    case ReflexxesAPI::RML_WORKING:
        // S'all good.
        break;

    case ReflexxesAPI::RML_FINAL_STATE_REACHED:
        ROS_DEBUG_STREAM("final state reached");
        must_recompute_trajectory_ = recompute_trajectory_;
        reached_reference_ = true;
        break;

    default:
        if (loop_count_ % decimation_ == 0) {
            ROS_ERROR("Reflexxes error code: %d. Setting position commands to measured position.", rml_result);
        }

        for (int i = 0; i < n_joints_; i++)
            commanded_positions_[i] = joints_[i].getPosition();
        
        break;
    };

    // Set the lower-level commands
    if (!reached_reference_) {
        ROS_DEBUG_STREAM("setting command");
        for (int i = 0; i < n_joints_; i++) {
            joints_[i].setCommand(commanded_positions_[i]);
        }
    }

    // Publish state
    if (loop_count_ % decimation_ == 0) {
        /*
         *      boost::scoped_ptr<realtime_tools::RealtimePublisher<controllers_msgs::JointControllerState> >
         *        &state_pub = controller_state_publisher_;
         *
         *      for(int i=0; i<n_joints_; i++) {
         *        if(state_pub && state_pub->trylock()) {
         *          state_pub->msg_.header.stamp = time;
         *          state_pub->msg_.set_point = pos_target;
         *          state_pub->msg_.process_value = pos_actual;
         *          state_pub->msg_.process_value_dot = vel_actual;
         *          state_pub->msg_.error = pos_error;
         *          state_pub->msg_.time_step = period.toSec();
         *          state_pub->msg_.command = commanded_effort;
         *
         *          double dummy;
         *          pids_[i]->getGains(
         *              state_pub->msg_.p,
         *              state_pub->msg_.i,
         *              state_pub->msg_.d,
         *              state_pub->msg_.i_clamp,
         *              dummy);
         *          state_pub->unlockAndPublish();
        }
        }
        */
    }

    // Increment the loop count
    loop_count_++;
}
Code example #11
    void YouBotUniversalController::updateJointVelocity(double setPoint, pr2_mechanism_model::JointState* joint_state_, control_toolbox::Pid* pid_controller_, const ros::Duration& dt, const int& jointIndex) {
        double error(0);
        assert(joint_state_->joint_);

        double command_ = setPoint;
        double velocity_ = joint_state_->velocity_;
        const double T = 1;
        filteredVelocity[jointIndex] = filteredVelocity[jointIndex] + (velocity_ - filteredVelocity[jointIndex]) * dt.toSec() / (dt.toSec() + T);
        error = filteredVelocity[jointIndex] - command_;
        ROS_DEBUG("Current velocity: %f, filtered velocity: %f, Error: %f\n", velocity_, filteredVelocity[jointIndex], error);

        double commanded_effort = pid_controller_ -> updatePid(error, dt);
        joint_state_->commanded_effort_ = commanded_effort;

    }
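The velocity filter above, filteredVelocity += (velocity - filteredVelocity) * dt / (dt + T), is a discrete first-order low-pass with time constant T (1 s here): the blend factor is alpha = dt / (dt + T), so for dt much smaller than T the filtered value follows the raw velocity slowly. A standalone sketch under those assumptions, with names chosen for illustration:

// Discrete first-order low-pass as used in updateJointVelocity() above.
// With T = 1.0 s and a 1 ms update period, alpha = dt / (dt + T) is roughly 0.001.
struct LowPassFilter
{
  double T;      // filter time constant [s]
  double state;  // filtered value

  double update(double raw, double dt)
  {
    const double alpha = dt / (dt + T);
    state += (raw - state) * alpha;
    return state;
  }
};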
Code example #12
void QuadrotorHardwareSim::readSim(ros::Time time, ros::Duration period)
{
  // call all available subscriber callbacks now
  callback_queue_.callAvailable();

  // read state from Gazebo
  const double acceleration_time_constant = 0.1;
  gz_pose_             =  link_->GetWorldPose();
  gz_acceleration_     = ((link_->GetWorldLinearVel() - gz_velocity_) + acceleration_time_constant * gz_acceleration_) / (period.toSec() + acceleration_time_constant);
  gz_velocity_         =  link_->GetWorldLinearVel();
  gz_angular_velocity_ =  link_->GetWorldAngularVel();

  if (!subscriber_state_) {
    header_.frame_id = "/world";
    header_.stamp = time;
    pose_.position.x = gz_pose_.pos.x;
    pose_.position.y = gz_pose_.pos.y;
    pose_.position.z = gz_pose_.pos.z;
    pose_.orientation.w = gz_pose_.rot.w;
    pose_.orientation.x = gz_pose_.rot.x;
    pose_.orientation.y = gz_pose_.rot.y;
    pose_.orientation.z = gz_pose_.rot.z;
    twist_.linear.x = gz_velocity_.x;
    twist_.linear.y = gz_velocity_.y;
    twist_.linear.z = gz_velocity_.z;
    twist_.angular.x = gz_angular_velocity_.x;
    twist_.angular.y = gz_angular_velocity_.y;
    twist_.angular.z = gz_angular_velocity_.z;
    acceleration_.x = gz_acceleration_.x;
    acceleration_.y = gz_acceleration_.y;
    acceleration_.z = gz_acceleration_.z;
  }

  if (!subscriber_imu_) {
    imu_.orientation.w = gz_pose_.rot.w;
    imu_.orientation.x = gz_pose_.rot.x;
    imu_.orientation.y = gz_pose_.rot.y;
    imu_.orientation.z = gz_pose_.rot.z;

    gazebo::math::Vector3 gz_angular_velocity_body = gz_pose_.rot.RotateVectorReverse(gz_angular_velocity_);
    imu_.angular_velocity.x = gz_angular_velocity_body.x;
    imu_.angular_velocity.y = gz_angular_velocity_body.y;
    imu_.angular_velocity.z = gz_angular_velocity_body.z;

    gazebo::math::Vector3 gz_linear_acceleration_body = gz_pose_.rot.RotateVectorReverse(gz_acceleration_ - physics_->GetGravity());
    imu_.linear_acceleration.x = gz_linear_acceleration_body.x;
    imu_.linear_acceleration.y = gz_linear_acceleration_body.y;
    imu_.linear_acceleration.z = gz_linear_acceleration_body.z;
  }
}
Code example #13
    void ITRCartesianImpedanceController::update(const ros::Time& time, const ros::Duration& period)
    {
        // get current values
        //std::cout << "Update current values" << std::endl;
        KDL::Rotation cur_R(cart_handles_.at(0).getPosition(),
                            cart_handles_.at(1).getPosition(),
                            cart_handles_.at(2).getPosition(),
                            cart_handles_.at(4).getPosition(),
                            cart_handles_.at(5).getPosition(),
                            cart_handles_.at(6).getPosition(),
                            cart_handles_.at(8).getPosition(),
                            cart_handles_.at(9).getPosition(),
                            cart_handles_.at(10).getPosition());
        KDL::Vector cur_p(cart_handles_.at(3).getPosition(),
                            cart_handles_.at(7).getPosition(),
                            cart_handles_.at(11).getPosition());
        KDL::Frame cur_T( cur_R, cur_p );
        x_cur_ = cur_T;

        std::vector<double> cur_T_FRI;


        // Cartesian speed limit for new positions:

        // create quaternions for current and desired orientation, normalize them to get correct angular distance later!
        x_cur_quat_ = Eigen::Matrix3d(x_cur_.M.data).transpose();
        x_cur_quat_.normalize();
        x_des_quat_ = Eigen::Matrix3d(x_des_.M.data).transpose();
        x_des_quat_.normalize();
        x_set_quat_ = x_des_quat_;

        // get linear desired velocity
        x_dot_.vel = (x_des_.p-x_set_.p) / period.toSec();

        // limit linear velocity to desired position
        double x_dot_vel_norm;
        x_dot_vel_norm = x_dot_.vel.Norm();
        if (x_dot_vel_norm > max_trans_speed_) {
            x_dot_.vel = x_dot_.vel / x_dot_vel_norm * max_trans_speed_;
        }       

        if (cmd_flag_) {

            // Interpolate position
            x_set_.p = x_prev_.p + (x_dot_.vel * period.toSec());

            // Interpolate orientation with SLERP
            double slerp_ratio = max_rot_speed_ / (x_des_quat_.angularDistance(x_prev_quat_)) * period.toSec();
            slerp_ratio = std::min(slerp_ratio, 1.0);
            x_set_quat_ = x_prev_quat_.slerp(slerp_ratio, x_des_quat_);

            // convert quaternion to rot matrix
            x_set_quat_.normalize(); // need to be normalized to get right rotation matrix
            x_set_.M = KDL::Rotation::Quaternion(x_set_quat_.x(), x_set_quat_.y(), x_set_quat_.z(), x_set_quat_.w());
        }

        fromKDLtoFRI(x_set_, cur_T_FRI);
        // forward commands to hwi
        for(int c = 0; c < 30; ++c)
        {
            if(c < 12)
                cart_handles_.at(c).setCommand(cur_T_FRI.at(c));
            if(c > 11 && c < 18)
                cart_handles_.at(c).setCommand(k_des_[c-12]);
            if(c > 17 && c < 24)
                cart_handles_.at(c).setCommand(d_des_[c-18]);
            if(c > 23 && c < 30)
                cart_handles_.at(c).setCommand(f_des_[c-24]);
        }

        x_prev_ = x_set_;
        x_prev_quat_ = x_set_quat_;
    }
Code example #14
void CassiopeiaHW::readEncoders(ros::Duration dt)
{
// read robots joint state
  // right card
  if (! dm6814_right.ReadEncoder6814(1, &encoder_1_val))
    ROS_ERROR("ERROR: ReadEncoder6814(1) FAILED");

  if (! dm6814_right.ReadEncoder6814(2, &encoder_2_val))
    ROS_ERROR("ERROR: ReadEncoder6814(2) FAILED");

  if (! dm6814_right.ReadEncoder6814(3, &encoder_3_val))
    ROS_ERROR("ERROR: ReadEncoder6814(3) FAILED");
  //left card
  if (! dm6814_left.ReadEncoder6814(1, &encoder_4_val))
    ROS_ERROR("ERROR: ReadEncoder6814(1) FAILED");

  if (! dm6814_left.ReadEncoder6814(2, &encoder_5_val))
    ROS_ERROR("ERROR: ReadEncoder6814(2) FAILED");

  if (! dm6814_left.ReadEncoder6814(3, &encoder_6_val))
    ROS_ERROR("ERROR: ReadEncoder6814(3) FAILED");



// Handling of encoder value overflow
  if      ((int)encoder_1_val - (int)encoder_1_old < -62000) encoder_1_ovf++;
  else if ((int)encoder_1_val - (int)encoder_1_old >  62000) encoder_1_ovf--;
  encoder_1_old = encoder_1_val;
  encoder_1 = (int)encoder_1_val + 65535*encoder_1_ovf;

  if      ((int)encoder_2_val - (int)encoder_2_old < -62000) encoder_2_ovf++;
  else if ((int)encoder_2_val - (int)encoder_2_old >  62000) encoder_2_ovf--;
  encoder_2_old = encoder_2_val;
  encoder_2 = (int)encoder_2_val + 65535*encoder_2_ovf;

  if      ((int)encoder_3_val - (int)encoder_3_old < -62000) encoder_3_ovf++;
  else if ((int)encoder_3_val - (int)encoder_3_old >  62000) encoder_3_ovf--;
  encoder_3_old = encoder_3_val;
  encoder_3 = (int)encoder_3_val + 65535*encoder_3_ovf;

  if      ((int)encoder_4_val - (int)encoder_4_old < -62000) encoder_4_ovf++;
  else if ((int)encoder_4_val - (int)encoder_4_old >  62000) encoder_4_ovf--;
  encoder_4_old = encoder_4_val;
  encoder_4 = (int)encoder_4_val + 65535*encoder_4_ovf;

  if      ((int)encoder_5_val - (int)encoder_5_old < -62000) encoder_5_ovf++;
  else if ((int)encoder_5_val - (int)encoder_5_old >  62000) encoder_5_ovf--;
  encoder_5_old = encoder_5_val;
  encoder_5 = (int)encoder_5_val + 65535*encoder_5_ovf;

  if      ((int)encoder_6_val - (int)encoder_6_old < -62000) encoder_6_ovf++;
  else if ((int)encoder_6_val - (int)encoder_6_old >  62000) encoder_6_ovf--;
  encoder_6_old = encoder_6_val;
  encoder_6 = (int)encoder_6_val + 65535*encoder_6_ovf;

  ROS_DEBUG("1: %d, 2: %d, 3: %d, 4: %d, 5: %d, 6: %d", encoder_1, encoder_2, encoder_3, encoder_4, encoder_5, encoder_6);

// Position Calculation
  pos[0]=  (double)encoder_1*2*M_PI/(4096*190) - offset_pos[0];
  pos[1]= -(double)encoder_2*2*M_PI/(4096*190) - offset_pos[1];
  pos[2]=  (double)encoder_3*2*M_PI/(4096*190) - offset_pos[2];//disconnected
  
  pos[3]=  (double)encoder_4*2*M_PI/(4096*190) - offset_pos[3];
  pos[4]=  (double)encoder_5*2*M_PI/(4096*190) - offset_pos[4];
  pos[5]=  (double)encoder_6*2*M_PI/ 4096      - offset_pos[5];

  ROS_DEBUG("encoder6: %d, pos (10^3): %d", encoder_6, (int)(pos[5]*1000));
  
  ROS_DEBUG("POS: 1: %f, 2: %f, 3: %f, 4: %f, 5: %f, 6: %f", pos[0], pos[1], pos[2], pos[3], pos[4], pos[5]);

// Speed Calculation
  for(int i=0; i<6; i++)
  {
    vel[i]=(pos[i] - prev_pos[i])/dt.toSec();
    prev_pos[i] = pos[i];
  }

}
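The six encoder blocks above repeat the same wrap detection with a ±62000-count threshold. A sketch of that logic as a reusable helper is shown below; the counts-per-wrap value is left as a parameter because the snippet accumulates 65535 counts per wrap, while a free-running 16-bit counter actually advances by 65536 per wrap.

#include <stdint.h>

// Unwraps a wrapping 16-bit encoder count, mirroring the per-encoder logic in readEncoders().
class EncoderUnwrapper
{
public:
  explicit EncoderUnwrapper(long counts_per_wrap = 65536)
    : counts_per_wrap_(counts_per_wrap), last_raw_(0), wraps_(0) {}

  long update(uint16_t raw)
  {
    const long diff = static_cast<long>(raw) - static_cast<long>(last_raw_);
    if (diff < -62000) wraps_++;       // counter wrapped while counting up
    else if (diff > 62000) wraps_--;   // counter wrapped while counting down
    last_raw_ = raw;
    return static_cast<long>(raw) + counts_per_wrap_ * wraps_;
  }

private:
  long counts_per_wrap_;
  uint16_t last_raw_;
  long wraps_;
};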
Code example #15
File: move_right_bt.cpp  Project: almc/nao_basic
	int executeCB(ros::Duration dt)
		{
			std::cout << "**GoToWaypoint -%- Executing Main Task, elapsed_time: "
			          << dt.toSec() << std::endl;
			std::cout << "**GoToWaypoint -%- execute_time: "
			          << execute_time_.toSec() << std::endl;
			execute_time_ += dt;

			std::cout << "**Counter value:" << counter << std::endl;
			if (counter > 1)
				std::cout << "********************************************************************************" << std::endl;


			if (!init_)
			{
				initialize();
				init_ = true;
			}
			if ( (ros::Time::now() - time_at_pos_).toSec() < 0.2 )
			{
				if( message_.type == "goto")
				{
						// Goal position of ball relative to ROBOT_FRAME
						float goal_x = 0.00;
						float goal_y = 0.00;
						float error_x = message_.x - goal_x;
						float error_y = message_.y - goal_y;
						if (fabs(error_x) < 0.12 && fabs(error_y) < 0.12)
						{
							std::cout << "Closeness count " << closeness_count << std::endl;
							closeness_count++;
							//If the NAO has been close for enough iterations, we consider the goal reached
							if (closeness_count > 0)
							{
								motion_proxy_ptr->stopMove();
								set_feedback(SUCCESS);
								// std::cout << "sleeeping for 2 second before destroying thread" << std::endl;
								// sleep(2.0);
								finalize();
								return 1;
							}
						}
						else
						{
							closeness_count = 0;
						}
						//Limit the "error" in order to limit the walk speed
						error_x = error_x >  0.6 ?  0.6 : error_x;
						error_x = error_x < -0.6 ? -0.6 : error_x;
						error_y = error_y >  0.6 ?  0.6 : error_y;
						error_y = error_y < -0.6 ? -0.6 : error_y;
						// float speed_x = error_x * 1.0/(2+5*closeness_count);
						// float speed_y = error_y * 1.0/(2+5*closeness_count);
						// float frequency = 0.1/(5*closeness_count+(1.0/(fabs(error_x)+fabs(error_y)))); //Frequency of foot steps
						// motion_proxy_ptr->setWalkTargetVelocity(speed_x, speed_y, 0.0, frequency);
						// ALMotionProxy::setWalkTargetVelocity(const float& x, const float& y, const float& theta, const float& frequency)
						AL::ALValue walk_config;
						//walk_config.arrayPush(AL::ALValue::array("MaxStepFrequency", frequency));
						//Lower value of step height gives smoother walking
						// std::cout << "y " << message_.y << std::endl;
						if (fabs(message_.y) < 0.10)
						{
							// walk_config.arrayPush(AL::ALValue::array("StepHeight", 0.01));
							// motion_proxy_ptr->post.moveTo(message_.x, 0.0, 0.0, walk_config);
							motion_proxy_ptr->post.moveTo(message_.x, 0.0, 0.0);
							sleep(2.0);
							//motion_proxy_ptr->post.stopMove();
						}
						else
						{
							// walk_config.arrayPush(AL::ALValue::array("StepHeight", 0.005));
							// motion_proxy_ptr->post.moveTo(0.0, 0.0, message_.y/fabs(message_.y)*0.1, walk_config);
							motion_proxy_ptr->post.moveTo(0.0, 0.0, message_.y/fabs(message_.y)*0.1);
							//sleep(3.0);
							//motion_proxy_ptr->post.stopMove();
						}
				}

			}
			set_feedback(RUNNING);
			return 0;
		}
Code example #16
	void OneTaskInverseDynamicsJL::update(const ros::Time& time, const ros::Duration& period)
	{
		// get joint positions
  		for(int i=0; i < joint_handles_.size(); i++) 
  		{
    		joint_msr_states_.q(i) = joint_handles_[i].getPosition();
    		joint_msr_states_.qdot(i) = joint_handles_[i].getVelocity();
    	}

    	// clearing msgs before publishing
    	msg_err_.data.clear();
    	msg_pose_.data.clear();
    	
    	if (cmd_flag_)
    	{
    		// resetting N and tau(t=0) for the highest priority task
    		N_trans_ = I_;	
    		SetToZero(tau_);

    		// computing Inertia, Coriolis and Gravity matrices
		    id_solver_->JntToMass(joint_msr_states_.q, M_);
		    id_solver_->JntToCoriolis(joint_msr_states_.q, joint_msr_states_.qdot, C_);
		    id_solver_->JntToGravity(joint_msr_states_.q, G_);
		    G_.data.setZero();

		    // computing the inverse of M_ now, since it will be used often
		    pseudo_inverse(M_.data,M_inv_,false); //M_inv_ = M_.data.inverse(); 


	    	// computing Jacobian J(q)
	    	jnt_to_jac_solver_->JntToJac(joint_msr_states_.q,J_);

	    	// computing the distance from the mid points of the joint ranges as objective function to be minimized
	    	phi_ = task_objective_function(joint_msr_states_.q);

	    	// using the first step to compute jacobian of the tasks
	    	if (first_step_)
	    	{
	    		J_last_ = J_;
	    		phi_last_ = phi_;
	    		first_step_ = 0;
	    		return;
	    	}

	    	// computing the derivative of Jacobian J_dot(q) through numerical differentiation
	    	J_dot_.data = (J_.data - J_last_.data)/period.toSec();

	    	// computing forward kinematics
	    	fk_pos_solver_->JntToCart(joint_msr_states_.q,x_);

	    	if (Equal(x_,x_des_,0.05))
	    	{
	    		ROS_INFO("On target");
	    		cmd_flag_ = 0;
	    		return;	    		
	    	}

	    	// pushing x to the pose msg
	    	for (int i = 0; i < 3; i++)
	    		msg_pose_.data.push_back(x_.p(i));

	    	// setting marker parameters
	    	set_marker(x_,msg_id_);

	    	// computing end-effector position/orientation error w.r.t. desired frame
	    	x_err_ = diff(x_,x_des_);

	    	x_dot_ = J_.data*joint_msr_states_.qdot.data;    	

	    	// setting error reference
	    	for(int i = 0; i < e_ref_.size(); i++)
		    {
	    		// e = x_des_dotdot + Kd*(x_des_dot - x_dot) + Kp*(x_des - x)
	    		e_ref_(i) =  -Kd_(i)*(x_dot_(i)) + Kp_(i)*x_err_(i);
    			msg_err_.data.push_back(e_ref_(i));
	    	}

    		// computing b = J*M^-1*(c+g) - J_dot*q_dot
    		b_ = J_.data*M_inv_*(C_.data + G_.data) - J_dot_.data*joint_msr_states_.qdot.data;

	    	// computing omega = J*M^-1*N^T*J
	    	omega_ = J_.data*M_inv_*N_trans_*J_.data.transpose();

	    	// computing lambda = omega^-1
	    	pseudo_inverse(omega_,lambda_);
	    	//lambda_ = omega_.inverse();

	    	// computing nullspace
	    	N_trans_ = N_trans_ - J_.data.transpose()*lambda_*J_.data*M_inv_;  	    		

	    	// finally, computing the torque tau
	    	tau_.data = J_.data.transpose()*lambda_*(e_ref_ + b_) + N_trans_*(Eigen::Matrix<double,7,1>::Identity(7,1)*(phi_ - phi_last_)/(period.toSec()));

	    	// saving J_ and phi of the last iteration
	    	J_last_ = J_;
	    	phi_last_ = phi_;
	
    	}

    	// set controls for joints
    	for (int i = 0; i < joint_handles_.size(); i++)
    	{
    		if(cmd_flag_)
    			joint_handles_[i].setCommand(tau_(i));
    		else
       			joint_handles_[i].setCommand(PIDs_[i].computeCommand(joint_des_states_.q(i) - joint_msr_states_.q(i),period));
    	}

    	// publishing markers for visualization in rviz
    	pub_marker_.publish(msg_marker_);
    	msg_id_++;

	    // publishing error 
	    pub_error_.publish(msg_err_);
	    // publishing pose 
	    pub_pose_.publish(msg_pose_);
	    ros::spinOnce();

	}
Code example #17
File: local2global.cpp  Project: JarryChou/Autoware
//static void velodyne_callback(const pcl::PointCloud<velodyne_pointcloud::PointXYZIR>::ConstPtr& input)
static void map_callback(const sensor_msgs::PointCloud2::ConstPtr& input)
{

  if (map_loaded == 0) {
    std::cout << "Loading map data... ";
    map.header.frame_id = "/pointcloud_map_frame";
    
    // Convert the data type(from sensor_msgs to pcl).
    pcl::fromROSMsg(*input, map);
    
    pcl::PointCloud<pcl::PointXYZI>::Ptr map_ptr(new pcl::PointCloud<pcl::PointXYZI>(map));
    // Setting point cloud to be aligned to.
    ndt.setInputTarget(map_ptr);
	
    // Setting NDT parameters to default values
    ndt.setMaximumIterations(iter);
    ndt.setResolution(ndt_res);
    ndt.setStepSize(step_size);
    ndt.setTransformationEpsilon(trans_eps);
    
    map_loaded = 1;
    std::cout << "Map Loaded." << std::endl;
  }



    if (map_loaded == 1 && init_pos_set == 1) {
        callback_start = ros::Time::now();

        static tf::TransformBroadcaster br;
        tf::Transform transform;
        tf::Quaternion q;

        tf::Quaternion q_control;

        // 1 scan
	/*
        pcl::PointCloud<pcl::PointXYZI> scan;
        pcl::PointXYZI p;
        scan.header = input->header;
        scan.header.frame_id = "velodyne_scan_frame";
	*/

        ros::Time scan_time;
        scan_time.sec = additional_map.header.stamp / 1000000.0;
        scan_time.nsec = (additional_map.header.stamp - scan_time.sec * 1000000.0) * 1000.0;

        /*
         std::cout << "scan.header.stamp: " << scan.header.stamp << std::endl;
         std::cout << "scan_time: " << scan_time << std::endl;
         std::cout << "scan_time.sec: " << scan_time.sec << std::endl;
         std::cout << "scan_time.nsec: " << scan_time.nsec << std::endl;
         */

        t1_start = ros::Time::now();
	/*
        for (pcl::PointCloud<velodyne_pointcloud::PointXYZIR>::const_iterator item = input->begin(); item != input->end(); item++) {
            p.x = (double) item->x;
            p.y = (double) item->y;
            p.z = (double) item->z;

            scan.points.push_back(p);
        }
	*/
	//	pcl::fromROSMsg(*input, scan);
        t1_end = ros::Time::now();
        d1 = t1_end - t1_start;

        Eigen::Matrix4f t(Eigen::Matrix4f::Identity());

	//        pcl::PointCloud<pcl::PointXYZI>::Ptr scan_ptr(new pcl::PointCloud<pcl::PointXYZI>(scan));
        pcl::PointCloud<pcl::PointXYZI>::Ptr filtered_additional_map_ptr(new pcl::PointCloud<pcl::PointXYZI>);

        // Downsampling the velodyne scan using VoxelGrid filter
        t2_start = ros::Time::now();
        pcl::VoxelGrid<pcl::PointXYZI> voxel_grid_filter;
        voxel_grid_filter.setLeafSize(voxel_leaf_size, voxel_leaf_size, voxel_leaf_size);
        voxel_grid_filter.setInputCloud(additional_map_ptr);
        voxel_grid_filter.filter(*filtered_additional_map_ptr);
        t2_end = ros::Time::now();
        d2 = t2_end - t2_start;

        // Setting point cloud to be aligned.
        ndt.setInputSource(filtered_additional_map_ptr);

        // Guess the initial gross estimation of the transformation
        t3_start = ros::Time::now();
        tf::Matrix3x3 init_rotation;

        guess_pos.x = previous_pos.x + offset_x;
        guess_pos.y = previous_pos.y + offset_y;
        guess_pos.z = previous_pos.z + offset_z;
        guess_pos.roll = previous_pos.roll;
        guess_pos.pitch = previous_pos.pitch;
        guess_pos.yaw = previous_pos.yaw + offset_yaw;

        Eigen::AngleAxisf init_rotation_x(guess_pos.roll, Eigen::Vector3f::UnitX());
        Eigen::AngleAxisf init_rotation_y(guess_pos.pitch, Eigen::Vector3f::UnitY());
        Eigen::AngleAxisf init_rotation_z(guess_pos.yaw, Eigen::Vector3f::UnitZ());

        Eigen::Translation3f init_translation(guess_pos.x, guess_pos.y, guess_pos.z);

        Eigen::Matrix4f init_guess = (init_translation * init_rotation_z * init_rotation_y * init_rotation_x).matrix();
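        // init_guess = Trans(x,y,z) * Rz(yaw) * Ry(pitch) * Rx(roll): a homogeneous transform that seeds
        // NDT near the previous result shifted by the last inter-frame offset (constant-motion assumption)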

        t3_end = ros::Time::now();
        d3 = t3_end - t3_start;

        t4_start = ros::Time::now();
        pcl::PointCloud<pcl::PointXYZI>::Ptr output_cloud(new pcl::PointCloud<pcl::PointXYZI>);
        ndt.align(*output_cloud, init_guess);

        t = ndt.getFinalTransformation();
	pcl::PointCloud<pcl::PointXYZI>::Ptr transformed_additional_map_ptr (new pcl::PointCloud<pcl::PointXYZI>());
	transformed_additional_map_ptr->header.frame_id = "/map";
	pcl::transformPointCloud(*additional_map_ptr, *transformed_additional_map_ptr, t);
	sensor_msgs::PointCloud2::Ptr msg_ptr(new sensor_msgs::PointCloud2);

	pcl::toROSMsg(*transformed_additional_map_ptr, *msg_ptr);
	msg_ptr->header.frame_id = "/map";
	ndt_map_pub.publish(*msg_ptr);

	// Writing Point Cloud data to PCD file
	pcl::io::savePCDFileASCII("global_map.pcd", *transformed_additional_map_ptr);
	std::cout << "Saved " << transformed_additional_map_ptr->points.size() << " data points to global_map.pcd." << std::endl;

	pcl::PointCloud<pcl::PointXYZRGB> output;
	output.width = transformed_additional_map_ptr->width;
	output.height = transformed_additional_map_ptr->height;
	output.points.resize(output.width * output.height);

	for(size_t i = 0; i < output.points.size(); i++){
	  output.points[i].x = transformed_additional_map_ptr->points[i].x;
	  output.points[i].y = transformed_additional_map_ptr->points[i].y;
	  output.points[i].z = transformed_additional_map_ptr->points[i].z;
	  // pack green (0x00FF00) the way PCL expects: the packed integer reinterpreted as a float
	  uint32_t rgb = static_cast<uint32_t>(255) << 8;
	  output.points[i].rgb = *reinterpret_cast<float*>(&rgb);
	}

	pcl::io::savePCDFileASCII("global_map_rgb.pcd", output);
	std::cout << "Saved " << output.points.size() << " data points to global_map_rgb.pcd." << std::endl;

        t4_end = ros::Time::now();
        d4 = t4_end - t4_start;

        t5_start = ros::Time::now();
        /*
         tf::Vector3 origin;
         origin.setValue(static_cast<double>(t(0,3)), static_cast<double>(t(1,3)), static_cast<double>(t(2,3)));
         */

        tf::Matrix3x3 tf3d;

        tf3d.setValue(static_cast<double>(t(0, 0)), static_cast<double>(t(0, 1)), static_cast<double>(t(0, 2)), static_cast<double>(t(1, 0)), static_cast<double>(t(1, 1)), static_cast<double>(t(1, 2)), static_cast<double>(t(2, 0)), static_cast<double>(t(2, 1)), static_cast<double>(t(2, 2)));

        // Update current_pos.
        current_pos.x = t(0, 3);
        current_pos.y = t(1, 3);
        current_pos.z = t(2, 3);
        tf3d.getRPY(current_pos.roll, current_pos.pitch, current_pos.yaw, 1);

	// control_pose
	current_pos_control.roll = current_pos.roll;
	current_pos_control.pitch = current_pos.pitch;
	current_pos_control.yaw = current_pos.yaw - angle / 180.0 * M_PI;
	double theta = current_pos_control.yaw;
	current_pos_control.x = cos(theta) * (-control_shift_x) + sin(theta) * (-control_shift_y) + current_pos.x;
	current_pos_control.y = -sin(theta) * (-control_shift_x) + cos(theta) * (-control_shift_y) + current_pos.y;
	current_pos_control.z = current_pos.z - control_shift_z;

        // transform "/velodyne" to "/map"
#if 0
        transform.setOrigin(tf::Vector3(current_pos.x, current_pos.y, current_pos.z));
        q.setRPY(current_pos.roll, current_pos.pitch, current_pos.yaw);
        transform.setRotation(q);
#else
	//
	// FIXME:
	// We corrected the angle of "/velodyne" so that pure_pursuit
	// can read this frame for the control.
	// However, this is not what we want because the scan of Velodyne
	// looks unmatched for the 3-D map on Rviz.
	// What we really want is to make another TF transforming "/velodyne"
	// to a new "/ndt_points" frame and modify pure_pursuit to
	// read this frame instead of "/velodyne".
	// Otherwise, can pure_pursuit just use "/ndt_frame"?
	//
        transform.setOrigin(tf::Vector3(current_pos_control.x, current_pos_control.y, current_pos_control.z));
        q.setRPY(current_pos_control.roll, current_pos_control.pitch, current_pos_control.yaw);
        transform.setRotation(q);
#endif

	q_control.setRPY(current_pos_control.roll, current_pos_control.pitch, current_pos_control.yaw);

        /*
         std::cout << "ros::Time::now(): " << ros::Time::now() << std::endl;
         std::cout << "ros::Time::now().sec: " << ros::Time::now().sec << std::endl;
         std::cout << "ros::Time::now().nsec: " << ros::Time::now().nsec << std::endl;
         */

	//        br.sendTransform(tf::StampedTransform(transform, scan_time, "map", "velodyne"));

        static tf::TransformBroadcaster pose_broadcaster;
        tf::Transform pose_transform;
        tf::Quaternion pose_q;

/*        pose_transform.setOrigin(tf::Vector3(0, 0, 0));
        pose_q.setRPY(0, 0, 0);
        pose_transform.setRotation(pose_q);
        pose_broadcaster.sendTransform(tf::StampedTransform(pose_transform, scan_time, "map", "ndt_frame"));
*/
        // publish the position
       // ndt_pose_msg.header.frame_id = "/ndt_frame";
        ndt_pose_msg.header.frame_id = "/map";
        ndt_pose_msg.header.stamp = scan_time;
        ndt_pose_msg.pose.position.x = current_pos.x;
        ndt_pose_msg.pose.position.y = current_pos.y;
        ndt_pose_msg.pose.position.z = current_pos.z;
        ndt_pose_msg.pose.orientation.x = q.x();
        ndt_pose_msg.pose.orientation.y = q.y();
        ndt_pose_msg.pose.orientation.z = q.z();
        ndt_pose_msg.pose.orientation.w = q.w();

        static tf::TransformBroadcaster pose_broadcaster_control;
        tf::Transform pose_transform_control;
        tf::Quaternion pose_q_control;

     /*   pose_transform_control.setOrigin(tf::Vector3(0, 0, 0));
        pose_q_control.setRPY(0, 0, 0);
        pose_transform_control.setRotation(pose_q_control);
        pose_broadcaster_control.sendTransform(tf::StampedTransform(pose_transform_control, scan_time, "map", "ndt_frame"));
*/
        // publish the position
     //   control_pose_msg.header.frame_id = "/ndt_frame";
        control_pose_msg.header.frame_id = "/map";
        control_pose_msg.header.stamp = scan_time;
        control_pose_msg.pose.position.x = current_pos_control.x;
        control_pose_msg.pose.position.y = current_pos_control.y;
        control_pose_msg.pose.position.z = current_pos_control.z;
        control_pose_msg.pose.orientation.x = q_control.x();
        control_pose_msg.pose.orientation.y = q_control.y();
        control_pose_msg.pose.orientation.z = q_control.z();
        control_pose_msg.pose.orientation.w = q_control.w();

        /*
         std::cout << "ros::Time::now(): " << ros::Time::now() << std::endl;
         std::cout << "ros::Time::now().sec: " << ros::Time::now().sec << std::endl;
         std::cout << "ros::Time::now().nsec: " << ros::Time::now().nsec << std::endl;
         */

        ndt_pose_pub.publish(ndt_pose_msg);
        control_pose_pub.publish(control_pose_msg);

        t5_end = ros::Time::now();
        d5 = t5_end - t5_start;

#ifdef OUTPUT
        // Writing position to position_log.txt
        std::ofstream ofs("position_log.txt", std::ios::app);
        if (!ofs) {
            std::cerr << "Could not open 'position_log.txt'." << std::endl;
            exit(1);
        }
        ofs << current_pos.x << " " << current_pos.y << " " << current_pos.z << " " << current_pos.roll << " " << current_pos.pitch << " " << current_pos.yaw << std::endl;
#endif

        // Calculate the offset (current_pos - previous_pos)
        offset_x = current_pos.x - previous_pos.x;
        offset_y = current_pos.y - previous_pos.y;
        offset_z = current_pos.z - previous_pos.z;
        offset_yaw = current_pos.yaw - previous_pos.yaw;

        // Update position and posture. current_pos -> previous_pos
        previous_pos.x = current_pos.x;
        previous_pos.y = current_pos.y;
        previous_pos.z = current_pos.z;
        previous_pos.roll = current_pos.roll;
        previous_pos.pitch = current_pos.pitch;
        previous_pos.yaw = current_pos.yaw;

        callback_end = ros::Time::now();
        d_callback = callback_end - callback_start;

        std::cout << "-----------------------------------------------------------------" << std::endl;
        std::cout << "Sequence number: " << input->header.seq << std::endl;
        std::cout << "Number of scan points: " << additional_map_ptr->size() << " points." << std::endl;
        std::cout << "Number of filtered scan points: " << filtered_additional_map_ptr->size() << " points." << std::endl;
        std::cout << "NDT has converged: " << ndt.hasConverged() << std::endl;
        std::cout << "Fitness score: " << ndt.getFitnessScore() << std::endl;
        std::cout << "Number of iteration: " << ndt.getFinalNumIteration() << std::endl;
        std::cout << "(x,y,z,roll,pitch,yaw):" << std::endl;
        std::cout << "(" << current_pos.x << ", " << current_pos.y << ", " << current_pos.z << ", " << current_pos.roll << ", " << current_pos.pitch << ", " << current_pos.yaw << ")" << std::endl;
        std::cout << "Transformation Matrix:" << std::endl;
        std::cout << t << std::endl;
#ifdef VIEW_TIME
        std::cout << "Duration of velodyne_callback: " << d_callback.toSec() << " secs." << std::endl;
        std::cout << "Adding scan points: " << d1.toSec() << " secs." << std::endl;
        std::cout << "VoxelGrid Filter: " << d2.toSec() << " secs." << std::endl;
        std::cout << "Guessing the initial gross estimation: " << d3.toSec() << " secs." << std::endl;
        std::cout << "NDT: " << d4.toSec() << " secs." << std::endl;
        std::cout << "tf: " << d5.toSec() << " secs." << std::endl;
#endif
        std::cout << "-----------------------------------------------------------------" << std::endl;
    }
}
コード例 #18
0
calibration_msgs::Interval IntervalCalc::computeLatestInterval(const SortedDeque<DeflatedConstPtr>& signal,
                                                               const std::vector<double>& tolerances,
                                                               ros::Duration max_spacing)
{
  if (max_spacing < ros::Duration(0,0))
  {
    ROS_WARN("max_spacing is negative (%.3f). Should be positive", max_spacing.toSec());
    max_spacing = -max_spacing;
  }

  if (signal.size() == 0)
  {
    ROS_WARN("Can't compute range of an empty signal");
    return calibration_msgs::Interval();
  }

  std::deque<DeflatedConstPtr>::const_reverse_iterator rev_it = signal.rbegin();

  assert(*rev_it);  // Make sure it's not a NULL pointer

  const unsigned int N = (*rev_it)->channels_.size();

  assert(tolerances.size() == N);
  vector<double> channel_max( (*rev_it)->channels_ );
  vector<double> channel_min( (*rev_it)->channels_ );
  vector<double> channel_range(N);

  calibration_msgs::Interval result;
  result.end   = (*signal.rbegin())->header.stamp;
  result.start = result.end;


  INTERVAL_DEBUG("Starting to walk along interval:");
  while( rev_it != signal.rend() )
  {
    if ( (*rev_it)->channels_.size() != N)
    {
      ROS_WARN("Num channels has changed. Cutting off interval prematurely ");
      return result;
    }

    ros::Duration cur_step = result.start - (*rev_it)->header.stamp;
    if ( cur_step > max_spacing)
    {
      INTERVAL_DEBUG("Difference between interval.start and it.stamp is [%.3fs]"
                     "Exceeds [%.3fs]", cur_step.toSec(), max_spacing.toSec());
      return result;
    }

    ostringstream max_debug;
    ostringstream min_debug;
    ostringstream range_debug;
    max_debug   << "  max:  ";
    min_debug   << "  min:  ";
    range_debug << "  range:";
    for (unsigned int i=0; i<N; i++)
    {
      channel_max[i]   = fmax( channel_max[i], (*rev_it)->channels_[i] );
      channel_min[i]   = fmin( channel_min[i], (*rev_it)->channels_[i] );
      channel_range[i] = channel_max[i] - channel_min[i];

      max_debug << "  " << channel_max[i];
      min_debug << "  " << channel_min[i];
      range_debug << "  " << channel_range[i];
    }

    INTERVAL_DEBUG("Current stats:\n%s\n%s\n%s",
                   max_debug.str().c_str(),
                   min_debug.str().c_str(),
                   range_debug.str().c_str());

    for (unsigned int i=0; i<N; i++)
    {
      if (channel_range[i] > tolerances[i])
      {
        INTERVAL_DEBUG("Channel %u range is %.3f.  Exceeds tolerance of %.3f", i, channel_range[i], tolerances[i]);
        return result;
      }
    }

    result.start = (*rev_it)->header.stamp;

    rev_it++;
  }
  return result;
}
コード例 #19
0
ファイル: iiwa_hw.cpp プロジェクト: muhrix/iiwa_stack
bool IIWA_HW::read(ros::Duration period)
{
    ros::Duration delta = ros::Time::now() - timer_;
    
    static bool was_connected = false;
    
    if (iiwa_ros_conn_.getRobotIsConnected()) {
        
        iiwa_ros_conn_.getJointPosition(joint_position_);
        iiwa_ros_conn_.getJointTorque(joint_torque_);
        
        device_->joint_position_prev = device_->joint_position;
        iiwaMsgsJointToVector(joint_position_.position, device_->joint_position);
        iiwaMsgsJointToVector(joint_torque_.torque, device_->joint_effort);
        
        // on first connection, initialize the position command to the measured position so the robot
        // holds its pose (an uninitialized all-zero command would drive it to the zero position)
        if (!was_connected) {
            for (int j = 0; j < IIWA_JOINTS; j++)
                device_->joint_position_command[j] = device_->joint_position[j];
            
            was_connected = true;
        }
        
        for (int j = 0; j < IIWA_JOINTS; j++)
            device_->joint_velocity[j] = filters::exponentialSmoothing((device_->joint_position[j]-device_->joint_position_prev[j])/period.toSec(), 
                                                                       device_->joint_velocity[j], 0.2);  
        
        return true;
    } else if (delta.toSec() >= 10) {
        ROS_INFO("No LBR IIWA is connected. Waiting for the robot to connect before reading ...");
        timer_ = ros::Time::now();
    }
    return false;
}
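
A minimal sketch of the velocity estimation used in read() above, assuming the smoothing helper computes alpha*raw + (1 - alpha)*previous (the control_toolbox-style filters::exponentialSmoothing); the function names below are illustrative, not part of the driver:

// Sketch: finite-difference joint velocity passed through a first-order low-pass filter.
// Assumes exponential smoothing semantics: y = alpha*x + (1 - alpha)*y_prev.
static inline double exponential_smoothing(double raw, double prev, double alpha)
{
  return alpha * raw + (1.0 - alpha) * prev;  // alpha in (0,1]; smaller alpha = heavier smoothing
}

static inline double estimate_joint_velocity(double q_now, double q_prev,
                                             double v_prev, double dt, double alpha = 0.2)
{
  const double v_raw = (q_now - q_prev) / dt;          // noisy finite-difference estimate
  return exponential_smoothing(v_raw, v_prev, alpha);  // blended with the previous estimate
}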
コード例 #20
0
	void BacksteppingController::update(const ros::Time& time, const ros::Duration& period)
	{
		// get joint positions
  		for(int i=0; i < joint_handles_.size(); i++) 
  		{
    		joint_msr_states_.q(i) = joint_handles_[i].getPosition();
    		joint_msr_states_.qdot(i) = joint_handles_[i].getVelocity();
    		//joint_des_states_.q(i) = joint_msr_states_.q(i);
    		//joint_des_states_.qdot(i) = joint_msr_states_.qdot(i);
    	}

    	/* TASK 0 (Main)*/

    	double freq_ = 3.0;
    	double amp_ = 0.1;

		// Desired pose: figure-eight trajectory (the circular alternative is left commented out)
		x_des_.p(0) = /*0.3*/0.25 + /*amp_*(1 - cos(freq_*period.toSec()*step_));*/amp_/2*sin(freq_*period.toSec()*step_);
    	x_des_.p(1) = /*0.3*/0 + /*amp_*sin(freq_*period.toSec()*step_);*/amp_/2*sin(2*freq_*period.toSec()*step_);
    	x_des_.p(2) = /*1.1*/1.75;
    	x_des_.M = KDL::Rotation(KDL::Rotation::RPY(0,0,0));//RPY(0,3.1415,0));

    	//velocity
    	x_des_dot_(0) = /*amp_*freq_*sin(freq_*period.toSec()*step_);*/amp_/2*freq_*cos(freq_*period.toSec()*step_);
    	x_des_dot_(1) = /*amp_*freq_*cos(freq_*period.toSec()*step_);*/amp_/2*2*freq_*cos(2*freq_*period.toSec()*step_);
    	x_des_dot_(2) = 0;
    	x_des_dot_(3) = 0;
    	x_des_dot_(4) = 0;
    	x_des_dot_(5) = 0;

    	//acc
    	x_des_dotdot_(0) = /*amp_*freq_*freq_*cos(freq_*period.toSec()*step_);*/-amp_/2*freq_*freq_*sin(freq_*period.toSec()*step_);
    	x_des_dotdot_(1) = /*-amp_*freq_*freq_*sin(freq_*period.toSec()*step_);*/-amp_/2*2*2*freq_*freq_*sin(2*freq_*period.toSec()*step_);
    	x_des_dotdot_(2) = 0;
    	x_des_dotdot_(3) = 0;
    	x_des_dotdot_(4) = 0;
    	x_des_dotdot_(5) = 0;

    	step_++;

    	// clearing msgs before publishing
    	msg_err_.data.clear();
    	msg_pose_.data.clear();
    	msg_traj_.data.clear();
    	
    	if (cmd_flag_)
    	{
    		// computing Inertia, Coriolis and Gravity matrices
		    id_solver_->JntToMass(joint_msr_states_.q, M_);
		    id_solver_->JntToCoriolis(joint_msr_states_.q, joint_msr_states_.qdot, C_);
		    id_solver_->JntToGravity(joint_msr_states_.q, G_);

	    	// computing Jacobian J(q)
	    	jnt_to_jac_solver_->JntToJac(joint_msr_states_.q,J_);

	    	// computing Jacobian pseudo-inversion
	    	pseudo_inverse(J_.data,J_pinv_);

	    	// using the first step to compute jacobian of the tasks
	    	if (first_step_)
	    	{
	    		J_last_ = J_;
	    		first_step_ = 0;
	    		return;
	    	}

	    	// computing the derivative of Jacobian J_dot(q) through numerical differentiation
	    	J_dot_.data = (J_.data - J_last_.data)/period.toSec();

	    	// computing forward kinematics
	    	fk_pos_solver_->JntToCart(joint_msr_states_.q,x_);

	    	// pushing x to the pose msg
	    	for (int i = 0; i < 3; i++)
	    	{
	    		msg_pose_.data.push_back(x_.p(i));
	    		msg_traj_.data.push_back(x_des_.p(i));
	    	}

	    	// setting marker parameters
	    	set_marker(x_,msg_id_);

	    	// computing end-effector position/orientation error w.r.t. desired frame
	    	x_err_ = diff(x_,x_des_);
	    	
	    	// computing task space velocity (of end-effector)
	    	x_dot_ = J_.data*joint_msr_states_.qdot.data;

	    	// computing reference variables in task space
	    	for (int i = 0; i < 6; i++)
	    	{
	    		x_ref_dot_(i) = x_des_dot_(i) + Kp_(i)*x_err_(i);
	    		x_ref_dotdot_(i) = x_des_dotdot_(i) + Kd_(i)*(x_des_dot_(i) - x_dot_(i)) + Kp_(i)*x_err_(i);
	    	}

	    	// computing reference variable in joint space
	    	joint_ref_.qdot.data = J_pinv_*x_ref_dot_;

	    	joint_ref_.qdotdot.data = J_pinv_*(x_ref_dotdot_ - J_dot_.data*joint_msr_states_.qdot.data);

	    	joint_des_states_.qdot.data = J_pinv_*x_des_dot_;

	    	joint_des_states_.q.data += period.toSec()*joint_des_states_.qdot.data;

	    	// finally, computing the torque tau
	    	tau_.data = M_.data*joint_ref_.qdotdot.data + C_.data.cwiseProduct(joint_ref_.qdot.data) + G_.data + /*K_d*/(joint_ref_.qdot.data - joint_msr_states_.qdot.data) + (joint_des_states_.q.data - joint_msr_states_.q.data); 

	    	// saving J_ of the last iteration
	    	J_last_ = J_;		
    	}

    	// set controls for joints
    	for (int i = 0; i < joint_handles_.size(); i++)
    	{
    		if(cmd_flag_)
    			joint_handles_[i].setCommand(tau_(i));
    		else
    			joint_handles_[i].setCommand(PIDs_[i].computeCommand(joint_des_states_.q(i) - joint_msr_states_.q(i),period));
    	}

    	// publishing markers for visualization in rviz
    	pub_marker_.publish(msg_marker_);
    	msg_id_++;

	    // publishing error for all tasks as an array of ntasks*6
	    pub_error_.publish(msg_err_);
	    // publishing actual and desired trajectory for each task (links) as an array of ntasks*3
	    pub_pose_.publish(msg_pose_);
	    pub_traj_.publish(msg_traj_);
	    ros::spinOnce();

	}
コード例 #21
0
ファイル: amcl_node.cpp プロジェクト: Renda110/AGVC
void
AmclNode::laserReceived(const sensor_msgs::LaserScanConstPtr& laser_scan)
{
    last_laser_received_ts_ = ros::Time::now();
    if( map_ == NULL ) {
        return;
    }
    boost::recursive_mutex::scoped_lock lr(configuration_mutex_);
    int laser_index = -1;

    // Do we have the base->base_laser Tx yet?
    if(frame_to_laser_.find(laser_scan->header.frame_id) == frame_to_laser_.end())
    {
        ROS_DEBUG("Setting up laser %d (frame_id=%s)\n", (int)frame_to_laser_.size(), laser_scan->header.frame_id.c_str());
        lasers_.push_back(new AMCLLaser(*laser_));
        lasers_update_.push_back(true);
        laser_index = frame_to_laser_.size();

        tf::Stamped<tf::Pose> ident (tf::Transform(tf::createIdentityQuaternion(),
                                     tf::Vector3(0,0,0)),
                                     ros::Time(), laser_scan->header.frame_id);
        tf::Stamped<tf::Pose> laser_pose;
        try
        {
            this->tf_->transformPose(base_frame_id_, ident, laser_pose);
        }
        catch(tf::TransformException& e)
        {
            ROS_ERROR("Couldn't transform from %s to %s, "
                      "even though the message notifier is in use",
                      laser_scan->header.frame_id.c_str(),
                      base_frame_id_.c_str());
            return;
        }

        pf_vector_t laser_pose_v;
        laser_pose_v.v[0] = laser_pose.getOrigin().x();
        laser_pose_v.v[1] = laser_pose.getOrigin().y();
        // laser mounting angle gets computed later -> set to 0 here!
        laser_pose_v.v[2] = 0;
        lasers_[laser_index]->SetLaserPose(laser_pose_v);
        ROS_DEBUG("Received laser's pose wrt robot: %.3f %.3f %.3f",
                  laser_pose_v.v[0],
                  laser_pose_v.v[1],
                  laser_pose_v.v[2]);

        frame_to_laser_[laser_scan->header.frame_id] = laser_index;
    } else {
        // we have the laser pose, retrieve laser index
        laser_index = frame_to_laser_[laser_scan->header.frame_id];
    }

    // Where was the robot when this scan was taken?
    tf::Stamped<tf::Pose> odom_pose;
    pf_vector_t pose;
    if(!getOdomPose(odom_pose, pose.v[0], pose.v[1], pose.v[2],
                    laser_scan->header.stamp, base_frame_id_))
    {
        ROS_ERROR("Couldn't determine robot's pose associated with laser scan");
        return;
    }


    pf_vector_t delta = pf_vector_zero();

    if(pf_init_)
    {
        // Compute change in pose
        //delta = pf_vector_coord_sub(pose, pf_odom_pose_);
        delta.v[0] = pose.v[0] - pf_odom_pose_.v[0];
        delta.v[1] = pose.v[1] - pf_odom_pose_.v[1];
        delta.v[2] = angle_diff(pose.v[2], pf_odom_pose_.v[2]);

        // See if we should update the filter
        bool update = fabs(delta.v[0]) > d_thresh_ ||
                      fabs(delta.v[1]) > d_thresh_ ||
                      fabs(delta.v[2]) > a_thresh_;
        update = update || m_force_update;
        m_force_update=false;

        // Set the laser update flags
        if(update)
            for(unsigned int i=0; i < lasers_update_.size(); i++)
                lasers_update_[i] = true;
    }

    bool force_publication = false;
    if(!pf_init_)
    {
        // Pose at last filter update
        pf_odom_pose_ = pose;

        // Filter is now initialized
        pf_init_ = true;

        // Should update sensor data
        for(unsigned int i=0; i < lasers_update_.size(); i++)
            lasers_update_[i] = true;

        force_publication = true;

        resample_count_ = 0;
    }
    // If the robot has moved, update the filter
    else if(pf_init_ && lasers_update_[laser_index])
    {
        //printf("pose\n");
        //pf_vector_fprintf(pose, stdout, "%.3f");

        AMCLOdomData odata;
        odata.pose = pose;
        // HACK
        // Modify the delta in the action data so the filter gets
        // updated correctly
        odata.delta = delta;

        // Use the action data to update the filter
        odom_->UpdateAction(pf_, (AMCLSensorData*)&odata);

        // Pose at last filter update
        //this->pf_odom_pose = pose;
    }

    bool resampled = false;
    // If the robot has moved, update the filter
    if(lasers_update_[laser_index])
    {
        AMCLLaserData ldata;
        ldata.sensor = lasers_[laser_index];
        ldata.range_count = laser_scan->ranges.size();

        // To account for lasers that are mounted upside-down, we determine the
        // min, max, and increment angles of the laser in the base frame.
        //
        // Construct min and max angles of laser, in the base_link frame.
        tf::Quaternion q;
        q.setRPY(0.0, 0.0, laser_scan->angle_min);
        tf::Stamped<tf::Quaternion> min_q(q, laser_scan->header.stamp,
                                          laser_scan->header.frame_id);
        q.setRPY(0.0, 0.0, laser_scan->angle_min + laser_scan->angle_increment);
        tf::Stamped<tf::Quaternion> inc_q(q, laser_scan->header.stamp,
                                          laser_scan->header.frame_id);
        try
        {
            tf_->transformQuaternion(base_frame_id_, min_q, min_q);
            tf_->transformQuaternion(base_frame_id_, inc_q, inc_q);
        }
        catch(tf::TransformException& e)
        {
            ROS_WARN("Unable to transform min/max laser angles into base frame: %s",
                     e.what());
            return;
        }

        double angle_min = tf::getYaw(min_q);
        double angle_increment = tf::getYaw(inc_q) - angle_min;

        // wrapping angle to [-pi .. pi]
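        // adding 5*M_PI keeps the fmod argument positive (and is an odd multiple of pi), so
        // fmod(..., 2*M_PI) - M_PI is congruent to the original angle and lies in [-pi, pi)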
        angle_increment = fmod(angle_increment + 5*M_PI, 2*M_PI) - M_PI;

        ROS_DEBUG("Laser %d angles in base frame: min: %.3f inc: %.3f", laser_index, angle_min, angle_increment);

        // Apply range min/max thresholds, if the user supplied them
        if(laser_max_range_ > 0.0)
            ldata.range_max = std::min(laser_scan->range_max, (float)laser_max_range_);
        else
            ldata.range_max = laser_scan->range_max;
        double range_min;
        if(laser_min_range_ > 0.0)
            range_min = std::max(laser_scan->range_min, (float)laser_min_range_);
        else
            range_min = laser_scan->range_min;
        // The AMCLLaserData destructor will free this memory
        ldata.ranges = new double[ldata.range_count][2];
        ROS_ASSERT(ldata.ranges);
        for(int i=0; i<ldata.range_count; i++)
        {
            // amcl doesn't (yet) have a concept of min range.  So we'll map short
            // readings to max range.
            if(laser_scan->ranges[i] <= range_min)
                ldata.ranges[i][0] = ldata.range_max;
            else
                ldata.ranges[i][0] = laser_scan->ranges[i];
            // Compute bearing
            ldata.ranges[i][1] = angle_min +
                                 (i * angle_increment);
        }

        lasers_[laser_index]->UpdateSensor(pf_, (AMCLSensorData*)&ldata);

        lasers_update_[laser_index] = false;

        pf_odom_pose_ = pose;

        // Resample the particles
        if(!(++resample_count_ % resample_interval_))
        {
            pf_update_resample(pf_);
            resampled = true;
        }

        pf_sample_set_t* set = pf_->sets + pf_->current_set;
        ROS_DEBUG("Num samples: %d\n", set->sample_count);

        // Publish the resulting cloud
        // TODO: set maximum rate for publishing
        if (!m_force_update) {
            geometry_msgs::PoseArray cloud_msg;
            cloud_msg.header.stamp = ros::Time::now();
            cloud_msg.header.frame_id = global_frame_id_;
            cloud_msg.poses.resize(set->sample_count);
            for(int i=0; i<set->sample_count; i++)
            {
                tf::poseTFToMsg(tf::Pose(tf::createQuaternionFromYaw(set->samples[i].pose.v[2]),
                                         tf::Vector3(set->samples[i].pose.v[0],
                                                     set->samples[i].pose.v[1], 0)),
                                cloud_msg.poses[i]);
            }
            particlecloud_pub_.publish(cloud_msg);
        }
    }

    if(resampled || force_publication)
    {
        // Read out the current hypotheses
        double max_weight = 0.0;
        int max_weight_hyp = -1;
        std::vector<amcl_hyp_t> hyps;
        hyps.resize(pf_->sets[pf_->current_set].cluster_count);
        for(int hyp_count = 0;
                hyp_count < pf_->sets[pf_->current_set].cluster_count; hyp_count++)
        {
            double weight;
            pf_vector_t pose_mean;
            pf_matrix_t pose_cov;
            if (!pf_get_cluster_stats(pf_, hyp_count, &weight, &pose_mean, &pose_cov))
            {
                ROS_ERROR("Couldn't get stats on cluster %d", hyp_count);
                break;
            }

            hyps[hyp_count].weight = weight;
            hyps[hyp_count].pf_pose_mean = pose_mean;
            hyps[hyp_count].pf_pose_cov = pose_cov;

            if(hyps[hyp_count].weight > max_weight)
            {
                max_weight = hyps[hyp_count].weight;
                max_weight_hyp = hyp_count;
            }
        }

        if(max_weight > 0.0)
        {
            ROS_DEBUG("Max weight pose: %.3f %.3f %.3f",
                      hyps[max_weight_hyp].pf_pose_mean.v[0],
                      hyps[max_weight_hyp].pf_pose_mean.v[1],
                      hyps[max_weight_hyp].pf_pose_mean.v[2]);

            /*
               puts("");
               pf_matrix_fprintf(hyps[max_weight_hyp].pf_pose_cov, stdout, "%6.3f");
               puts("");
             */

            geometry_msgs::PoseWithCovarianceStamped p;
            // Fill in the header
            p.header.frame_id = global_frame_id_;
            p.header.stamp = laser_scan->header.stamp;
            // Copy in the pose
            p.pose.pose.position.x = hyps[max_weight_hyp].pf_pose_mean.v[0];
            p.pose.pose.position.y = hyps[max_weight_hyp].pf_pose_mean.v[1];
            tf::quaternionTFToMsg(tf::createQuaternionFromYaw(hyps[max_weight_hyp].pf_pose_mean.v[2]),
                                  p.pose.pose.orientation);
            // Copy in the covariance, converting from 3-D to 6-D
            pf_sample_set_t* set = pf_->sets + pf_->current_set;
            for(int i=0; i<2; i++)
            {
                for(int j=0; j<2; j++)
                {
                    // Report the overall filter covariance, rather than the
                    // covariance for the highest-weight cluster
                    //p.covariance[6*i+j] = hyps[max_weight_hyp].pf_pose_cov.m[i][j];
                    p.pose.covariance[6*i+j] = set->cov.m[i][j];
                }
            }
            // Report the overall filter covariance, rather than the
            // covariance for the highest-weight cluster
            //p.covariance[6*5+5] = hyps[max_weight_hyp].pf_pose_cov.m[2][2];
            p.pose.covariance[6*5+5] = set->cov.m[2][2];

            /*
               printf("cov:\n");
               for(int i=0; i<6; i++)
               {
               for(int j=0; j<6; j++)
               printf("%6.3f ", p.covariance[6*i+j]);
               puts("");
               }
             */

            pose_pub_.publish(p);
            last_published_pose = p;

            ROS_DEBUG("New pose: %6.3f %6.3f %6.3f",
                      hyps[max_weight_hyp].pf_pose_mean.v[0],
                      hyps[max_weight_hyp].pf_pose_mean.v[1],
                      hyps[max_weight_hyp].pf_pose_mean.v[2]);

            // subtracting base to odom from map to base and send map to odom instead
            tf::Stamped<tf::Pose> odom_to_map;
            try
            {
                tf::Transform tmp_tf(tf::createQuaternionFromYaw(hyps[max_weight_hyp].pf_pose_mean.v[2]),
                                     tf::Vector3(hyps[max_weight_hyp].pf_pose_mean.v[0],
                                                 hyps[max_weight_hyp].pf_pose_mean.v[1],
                                                 0.0));
                tf::Stamped<tf::Pose> tmp_tf_stamped (tmp_tf.inverse(),
                                                      laser_scan->header.stamp,
                                                      base_frame_id_);
                this->tf_->transformPose(odom_frame_id_,
                                         tmp_tf_stamped,
                                         odom_to_map);
            }
            catch(tf::TransformException)
            {
                ROS_DEBUG("Failed to subtract base to odom transform");
                return;
            }

            latest_tf_ = tf::Transform(tf::Quaternion(odom_to_map.getRotation()),
                                       tf::Point(odom_to_map.getOrigin()));
            latest_tf_valid_ = true;

            if (tf_broadcast_ == true)
            {
                // We want to send a transform that is good up until a
                // tolerance time so that odom can be used
                ros::Time transform_expiration = (laser_scan->header.stamp +
                                                  transform_tolerance_);
                tf::StampedTransform tmp_tf_stamped(latest_tf_.inverse(),
                                                    transform_expiration,
                                                    global_frame_id_, odom_frame_id_);
                this->tfb_->sendTransform(tmp_tf_stamped);
                sent_first_transform_ = true;
            }
        }
        else
        {
            ROS_ERROR("No pose!");
        }
    }
    else if(latest_tf_valid_)
    {
        if (tf_broadcast_ == true)
        {
            // Nothing changed, so we'll just republish the last transform, to keep
            // everybody happy.
            ros::Time transform_expiration = (laser_scan->header.stamp +
                                              transform_tolerance_);
            tf::StampedTransform tmp_tf_stamped(latest_tf_.inverse(),
                                                transform_expiration,
                                                global_frame_id_, odom_frame_id_);
            this->tfb_->sendTransform(tmp_tf_stamped);
        }

        // Is it time to save our last pose to the param server
        ros::Time now = ros::Time::now();
        if((save_pose_period.toSec() > 0.0) &&
                (now - save_pose_last_time) >= save_pose_period)
        {
            // We need to apply the last transform to the latest odom pose to get
            // the latest map pose to store.  We'll take the covariance from
            // last_published_pose.
            tf::Pose map_pose = latest_tf_.inverse() * odom_pose;
            double yaw,pitch,roll;
            map_pose.getBasis().getEulerYPR(yaw, pitch, roll);

            private_nh_.setParam("initial_pose_x", map_pose.getOrigin().x());
            private_nh_.setParam("initial_pose_y", map_pose.getOrigin().y());
            private_nh_.setParam("initial_pose_a", yaw);
            private_nh_.setParam("initial_cov_xx",
                                 last_published_pose.pose.covariance[6*0+0]);
            private_nh_.setParam("initial_cov_yy",
                                 last_published_pose.pose.covariance[6*1+1]);
            private_nh_.setParam("initial_cov_aa",
                                 last_published_pose.pose.covariance[6*5+5]);
            save_pose_last_time = now;
        }
    }

}
コード例 #22
0
ファイル: command.cpp プロジェクト: paul2883/mavros
	bool wait_ack_for(CommandTransaction *tr) {
		std::unique_lock<std::mutex> lock(tr->cond_mutex);

		return tr->ack.wait_for(lock, std::chrono::nanoseconds(ACK_TIMEOUT_DT.toNSec()))
		       == std::cv_status::no_timeout;
	}
コード例 #23
0
    void OneTaskInverseKinematics::update(const ros::Time& time, const ros::Duration& period)
    {

        // get joint positions
        for(int i=0; i < joint_handles_.size(); i++)
        {
            joint_msr_states_.q(i) = joint_handles_[i].getPosition();
        }

        if (cmd_flag_)
        {
            // computing Jacobian
            jnt_to_jac_solver_->JntToJac(joint_msr_states_.q, J_);

            // computing J_pinv_
            pseudo_inverse(J_.data, J_pinv_);

            // computing forward kinematics
            fk_pos_solver_->JntToCart(joint_msr_states_.q, x_);

            // end-effector position error
            x_err_.vel = x_des_.p - x_.p;

            // getting quaternion from rotation matrix
            x_.M.GetQuaternion(quat_curr_.v(0),quat_curr_.v(1),quat_curr_.v(2),quat_curr_.a);
            x_des_.M.GetQuaternion(quat_des_.v(0),quat_des_.v(1),quat_des_.v(2),quat_des_.a);

            skew_symmetric(quat_des_.v, skew_);

            for (int i = 0; i < skew_.rows(); i++)
            {
                v_temp_(i) = 0.0;
                for (int k = 0; k < skew_.cols(); k++)
                    v_temp_(i) += skew_(i,k)*(quat_curr_.v(k));
            }

            // end-effector orientation error
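            // quaternion-based error: e_o = eta_curr*eps_des - eta_des*eps_curr - eps_des x eps_curr,
            // where eta is the scalar part and eps the vector part (v_temp_ holds eps_des x eps_curr)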
            x_err_.rot = quat_curr_.a*quat_des_.v - quat_des_.a*quat_curr_.v - v_temp_;

            // computing q_dot
            for (int i = 0; i < J_pinv_.rows(); i++)
            {
                joint_des_states_.qdot(i) = 0.0;
                for (int k = 0; k < J_pinv_.cols(); k++)
                    joint_des_states_.qdot(i) += J_pinv_(i,k)*x_err_(k); //removed scaling factor of .7

            }

            // integrating q_dot -> getting q (Euler method)
            for (int i = 0; i < joint_handles_.size(); i++){
                joint_des_states_.q(i) += period.toSec()*joint_des_states_.qdot(i);
            }

            // joint limits saturation
            for (int i =0;  i < joint_handles_.size(); i++)
            {
                if (joint_des_states_.q(i) < joint_limits_.min(i))
                    joint_des_states_.q(i) = joint_limits_.min(i);
                if (joint_des_states_.q(i) > joint_limits_.max(i))
                    joint_des_states_.q(i) = joint_limits_.max(i);
            }

            if (Equal(x_, x_des_, 0.005))
            {
                ROS_INFO("On target");
                cmd_flag_ = 0;
            }
        }

        // set controls for joints
        for (int i = 0; i < joint_handles_.size(); i++)
        {
            joint_handles_[i].setCommand(joint_des_states_.q(i));
        }
    }
	void MultiTaskPriorityInverseKinematics::update(const ros::Time& time, const ros::Duration& period)
	{
		// get joint positions
  		for(int i=0; i < joint_handles_.size(); i++) 
  		{
    		joint_msr_states_.q(i) = joint_handles_[i].getPosition();
    		joint_msr_states_.qdot(i) = joint_handles_[i].getVelocity();
    	}
    	
    	// clearing error msg before publishing
    	msg_err_.data.clear();

    	if (cmd_flag_)
    	{
    		// resetting P and qdot(t=0) for the highest priority task
    		P_ = I_;	
    		SetToZero(joint_des_states_.qdot);

    		for (int index = 0; index < ntasks_; index++)
    		{
		    	// computing Jacobian
		    	jnt_to_jac_solver_->JntToJac(joint_msr_states_.q,J_,links_index_[index]);

		    	// computing forward kinematics
		    	fk_pos_solver_->JntToCart(joint_msr_states_.q,x_,links_index_[index]);

		    	// setting marker parameters
		    	set_marker(x_,index,msg_id_);

		    	// computing end-effector position/orientation error w.r.t. desired frame
		    	x_err_ = diff(x_,x_des_[index]);

		    	for(int i = 0; i < e_dot_.size(); i++)
		    	{
		    		e_dot_(i) = x_err_(i);
	    			msg_err_.data.push_back(e_dot_(i));
		    	}

		    	// computing (J[i]*P[i-1])^pinv
		    	J_star_.data = J_.data*P_;
		    	pseudo_inverse(J_star_.data,J_pinv_);

		    	// computing q_dot (qdot(i) = qdot[i-1] + (J[i]*P[i-1])^pinv*(x_err[i] - J[i]*qdot[i-1]))
		    	joint_des_states_.qdot.data = joint_des_states_.qdot.data + J_pinv_*(e_dot_ - J_.data*joint_des_states_.qdot.data);

		    	// stop condition
		    	if (!on_target_flag_[index])
		    	{
			    	if (Equal(x_,x_des_[index],0.01))
			    	{
			    		ROS_INFO("Task %d on target",index);
			    		on_target_flag_[index] = true;
			    		if (index == (ntasks_ - 1))
			    			cmd_flag_ = 0;
			    	}
			    }

		    	// updating P_ (it mustn't make use of the damped pseudo inverse)
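		    	// recursion P_i = P_{i-1} - (J_i*P_{i-1})^+ * (J_i*P_{i-1}): each lower-priority task is
		    	// projected into the nullspace of all higher-priority tasks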
		    	pseudo_inverse(J_star_.data,J_pinv_,false);
		    	P_ = P_ - J_pinv_*J_star_.data;
		    }

		    // integrating q_dot -> getting q (Euler method)
		    for (int i = 0; i < joint_handles_.size(); i++)
		    	joint_des_states_.q(i) += period.toSec()*joint_des_states_.qdot(i);			
    	}

    	// set controls for joints
    	for (int i = 0; i < joint_handles_.size(); i++)
    	{
    		tau_cmd_(i) = PIDs_[i].computeCommand(joint_des_states_.q(i) - joint_msr_states_.q(i),joint_des_states_.qdot(i) - joint_msr_states_.qdot(i),period);
    		joint_handles_[i].setCommand(tau_cmd_(i));
    	}

    	// publishing markers for visualization in rviz
    	pub_marker_.publish(msg_marker_);
    	msg_id_++;

	    // publishing error for all tasks as an array of ntasks*6
	    pub_error_.publish(msg_err_);
	    ros::spinOnce();

	}
コード例 #25
0
void LWRHWreal::write(ros::Time time, ros::Duration period)
{
  static int warning = 0;

  for (int j = 0; j < LBR_MNJ; j++)
  {
    // fake velocity command computed as:
    // (desired position - current position) / period, to avoid speed limit error
    joint_velocity_command_[j] = (joint_position_command_[j]-joint_position_[j])/period.toSec();
  }

  // enforce limits
  ej_sat_interface_.enforceLimits(period);
  ej_limits_interface_.enforceLimits(period);
  vj_sat_interface_.enforceLimits(period);
  vj_limits_interface_.enforceLimits(period);
  pj_sat_interface_.enforceLimits(period);
  pj_limits_interface_.enforceLimits(period);

  // write to real robot
  float newJntPosition[LBR_MNJ];
  float newJntStiff[LBR_MNJ];
  float newJntDamp[LBR_MNJ];
  float newJntAddTorque[LBR_MNJ];

  if ( device_->isPowerOn() )
  { 
    // check control mode
    //if ( device_->getState() == FRI_STATE_CMD )
    //{
      // check control scheme
      if( device_->getCurrentControlScheme() == FRI_CTRL_JNT_IMP )
      {
        for (int i = 0; i < LBR_MNJ; i++)
        {
            newJntPosition[i] = joint_position_command_[i]; // zero for now
            newJntAddTorque[i] = joint_effort_command_[i]; // comes from the controllers
            newJntStiff[i] = joint_stiffness_command_[i]; // default values for now
            newJntDamp[i] = joint_damping_command_[i]; // default values for now
        }

        // joint impedance control is used because it is the only scheme that provides direct access to the joint torque
        // the KRC adds the dynamic terms, so that if zero additional torque is sent, it applies the torques
        // needed to maintain the robot at the commanded position with the given stiffness and damping;
        // any other behaviour has to be realized by computing the additional torque in a controller and sending it here
        device_->doJntImpedanceControl(newJntPosition, newJntStiff, newJntDamp, newJntAddTorque, true);
      } 
      else if( device_->getCurrentControlScheme() == FRI_CTRL_POSITION )
      {
        for (int i = 0; i < LBR_MNJ; i++)
        {
            newJntPosition[i] = joint_position_command_[i]; 
        }

        // pure position control: only the commanded joint positions are sent; the KRC runs its own position loop
        device_->doPositionControl(newJntPosition, true);
      } 
      else if( this->device_->getCurrentControlScheme() == FRI_CTRL_OTHER ) // Gravity compensation: just read status, but we have to keep FRI alive
      {
        device_->doDataExchange();
      }
    //}
  }

  // Stop request is issued from the other side
  /*
  if ( this->device_->interface->getFrmKRLInt(0) == -1)
  {
      ROS_INFO(" Stop request issued from the other side");
      this->stop();
  }*/

  // Quality change leads to output of statistics
  // for informational reasons
  //
  /*if ( this->device_->interface->getQuality() != this->device_->lastQuality )
  {
      ROS_INFO_STREAM("Quality change detected "<< this->device_->interface->getQuality()<< " \n");
      ROS_INFO_STREAM("" << this->device_->interface->getMsrBuf().intf);
      this->device_->lastQuality = this->device_->interface->getQuality();
  }*/

  // this is already done in the doJntImpedance Control setting to true the last flag
  // this->device_->interface->doDataExchange();

  return;
}
コード例 #26
0
sensor_msgs::PointCloud LaserJointProjector::project(const vector<sensor_msgs::JointState>& joint_state_vec, const ros::Duration& time_shift)
{
  sensor_msgs::PointCloud cloud;
  cloud.points.clear();

  for (unsigned int i=0; i<joint_state_vec.size(); i++)
  {
    map<string, double> joint_map;
    for (unsigned int j=0; j<joint_state_vec[i].name.size(); j++)
    {
      joint_map.insert(make_pair(joint_state_vec[i].name[j],
                                 joint_state_vec[i].position[j] +  joint_state_vec[i].velocity[j]*time_shift.toSec()));
    }
    geometry_msgs::Point32 pt = project(joint_map);
    cloud.points.push_back(pt);
  }

  return cloud;
}
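
A compact sketch of the same idea in isolation: each joint position is advanced by velocity * time_shift under a constant-velocity assumption; the helper name below is illustrative and not part of the calibration package:

// Illustrative helper: extrapolate a JointState by dt seconds assuming constant joint velocity.
#include <map>
#include <string>
#include <sensor_msgs/JointState.h>

static std::map<std::string, double> extrapolateJointState(const sensor_msgs::JointState& js, double dt)
{
  std::map<std::string, double> joint_map;
  for (size_t i = 0; i < js.name.size(); ++i)
    joint_map[js.name[i]] = js.position[i] + js.velocity[i] * dt;  // q(t+dt) ~= q(t) + qdot(t)*dt
  return joint_map;
}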
コード例 #27
0
ファイル: node.cpp プロジェクト: whispercoros/Sub8
  void timer_callback(const ros::TimerEvent &)
  {
    mil_msgs::MoveToResult actionresult;

    // Handle disabled, killed, or no odom before attempting to produce trajectory
    std::string err = "";
    if (disabled)
      err = "c3 disabled";
    else if (kill_listener.isRaised())
      err = "killed";
    else if (!c3trajectory)
      err = "no odom";

    if (!err.empty())
    {
      if (c3trajectory)
        c3trajectory.reset();  // On revive/enable, wait for odom before station keeping

      // Cancel all goals while killed/disabled/no odom
      if (actionserver.isNewGoalAvailable())
        actionserver.acceptNewGoal();
      if (actionserver.isActive())
      {
        actionresult.error = err;
        actionresult.success = false;
        actionserver.setAborted(actionresult);
      }
      return;
    }

    ros::Time now = ros::Time::now();

    auto old_waypoint = current_waypoint;

    if (actionserver.isNewGoalAvailable())
    {
      boost::shared_ptr<const mil_msgs::MoveToGoal> goal = actionserver.acceptNewGoal();
      current_waypoint =
          subjugator::C3Trajectory::Waypoint(Point_from_PoseTwist(goal->posetwist.pose, goal->posetwist.twist),
                                             goal->speed, !goal->uncoordinated, !goal->blind);
      current_waypoint_t = now;
      this->linear_tolerance = goal->linear_tolerance;
      this->angular_tolerance = goal->angular_tolerance;

      waypoint_validity_.pub_size_ogrid(Pose_from_Waypoint(current_waypoint), (int)OGRID_COLOR::GREEN);

      // Check if waypoint is valid
      std::pair<bool, WAYPOINT_ERROR_TYPE> checkWPResult = waypoint_validity_.is_waypoint_valid(
          Pose_from_Waypoint(current_waypoint), current_waypoint.do_waypoint_validation);
      actionresult.error = WAYPOINT_ERROR_TO_STRING.at(checkWPResult.second);
      actionresult.success = checkWPResult.first;
      if (checkWPResult.first == false)  // got a point that we should not move to
      {
        waypoint_validity_.pub_size_ogrid(Pose_from_Waypoint(current_waypoint), (int)OGRID_COLOR::RED);
        if (checkWPResult.second ==
            WAYPOINT_ERROR_TYPE::UNKNOWN)  // if unknown, check if there's a huge displacement with the new waypoint
        {
          auto a_point = Pose_from_Waypoint(current_waypoint);
          auto b_point = Pose_from_Waypoint(old_waypoint);
          // If moved more than .5m, then don't allow
          if (abs(a_point.position.x - b_point.position.x) > .5 || abs(a_point.position.y - b_point.position.y) > .5)
          {
            ROS_ERROR("can't move there! - need to rotate");
            current_waypoint = old_waypoint;
          }
        }
        // if point is occupied, reject move
        if (checkWPResult.second == WAYPOINT_ERROR_TYPE::OCCUPIED)
        {
          ROS_ERROR("can't move there! - waypoint is occupied");
          current_waypoint = old_waypoint;
        }
        // if point is above water, reject move
        if (checkWPResult.second == WAYPOINT_ERROR_TYPE::ABOVE_WATER)
        {
          ROS_ERROR("can't move there! - waypoint is above water");
          current_waypoint = old_waypoint;
        }
        if (checkWPResult.second == WAYPOINT_ERROR_TYPE::NO_OGRID)
        {
          ROS_ERROR("WaypointValidity - Did not recieve any ogrid");
        }
      }
    }
    if (actionserver.isPreemptRequested())
    {
      current_waypoint = c3trajectory->getCurrentPoint();
      current_waypoint.do_waypoint_validation = false;
      current_waypoint.r.qdot = subjugator::Vector6d::Zero();  // zero velocities
      current_waypoint_t = now;

      // don't try to make output c3 continuous when cancelled - instead stop as quickly as possible
      c3trajectory.reset(new subjugator::C3Trajectory(current_waypoint.r, limits));
      c3trajectory_t = now;
    }

    // Remember the previous trajectory
    auto old_trajectory = c3trajectory->getCurrentPoint();

    while (c3trajectory_t + traj_dt < now)
    {
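      // advance the trajectory in fixed traj_dt steps until it catches up with 'now';
      // fixed-step integration keeps the generated profile independent of timer jitter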
      c3trajectory->update(traj_dt.toSec(), current_waypoint, (c3trajectory_t - current_waypoint_t).toSec());
      c3trajectory_t += traj_dt;
    }

    // Check whether we will hit something while following the new trajectory
    geometry_msgs::Pose traj_point;  // Convert messages to correct type
    auto p = c3trajectory->getCurrentPoint();
    traj_point.position = vec2xyz<Point>(p.q.head(3));
    quaternionTFToMsg(tf::createQuaternionFromRPY(p.q[3], p.q[4], p.q[5]), traj_point.orientation);

    std::pair<bool, WAYPOINT_ERROR_TYPE> checkWPResult =
        waypoint_validity_.is_waypoint_valid(Pose_from_Waypoint(p), c3trajectory->do_waypoint_validation);

    if (checkWPResult.first == false && checkWPResult.second == WAYPOINT_ERROR_TYPE::OCCUPIED)
    {  // New trajectory will hit an occupied goal, so reject
      ROS_ERROR("can't move there! - bad trajectory");
      current_waypoint = old_trajectory;
      current_waypoint.do_waypoint_validation = false;
      current_waypoint.r.qdot = subjugator::Vector6d::Zero();  // zero velocities
      current_waypoint_t = now;

      c3trajectory.reset(new subjugator::C3Trajectory(current_waypoint.r, limits));
      c3trajectory_t = now;
      actionresult.success = false;
      actionresult.error = WAYPOINT_ERROR_TO_STRING.at(WAYPOINT_ERROR_TYPE::OCCUPIED_TRAJECTORY);
    }

    PoseTwistStamped msg;
    msg.header.stamp = c3trajectory_t;
    msg.header.frame_id = fixed_frame;
    msg.posetwist = PoseTwist_from_PointWithAcceleration(c3trajectory->getCurrentPoint());
    trajectory_pub.publish(msg);

    waypoint_validity_.pub_size_ogrid(Pose_from_Waypoint(c3trajectory->getCurrentPoint()), 200);

    PoseStamped msgVis;
    msgVis.header = msg.header;
    msgVis.pose = msg.posetwist.pose;
    trajectory_vis_pub.publish(msgVis);

    PoseStamped posemsg;
    posemsg.header.stamp = c3trajectory_t;
    posemsg.header.frame_id = fixed_frame;
    posemsg.pose = Pose_from_Waypoint(current_waypoint);
    waypoint_pose_pub.publish(posemsg);

    if (actionserver.isActive() &&
        c3trajectory->getCurrentPoint().is_approximately(current_waypoint.r, max(1e-3, linear_tolerance),
                                                         max(1e-3, angular_tolerance)) &&
        current_waypoint.r.qdot == subjugator::Vector6d::Zero())
    {
      actionresult.error = "";
      actionresult.success = true;
      actionserver.setSucceeded(actionresult);
    }
  }
	void DynamicSlidingModeControllerTaskSpace::update(const ros::Time& time, const ros::Duration& period)
	{
		// get joint positions
  		for(int i=0; i < joint_handles_.size(); i++) 
  		{
    		joint_msr_states_.q(i) = joint_handles_[i].getPosition();
    		joint_msr_states_.qdot(i) = joint_handles_[i].getVelocity();
    	} 

    	if (cmd_flag_) 
    	{
	    	// computing forward kinematics
		    fk_pos_solver_->JntToCart(joint_msr_states_.q,x_);

		    // computing Jacobian J(q)
		    jnt_to_jac_solver_->JntToJac(joint_msr_states_.q,J_);

		    // computing Jacobian pseudo-inversion
		    pseudo_inverse(J_.data,J_pinv_);

		    // computing end-effector position/orientation error w.r.t. desired frame
		    x_err_ = diff(x_,x_des_);
		    
		    /* Trying quaternions, it seems to work better 

		    // end-effector position/orientation error
	    	x_err_.vel = (x_des_.p - x_.p);

	    	// getting quaternion from rotation matrix
	    	x_.M.GetQuaternion(quat_curr_.v(0),quat_curr_.v(1),quat_curr_.v(2),quat_curr_.a);
	    	x_des_.M.GetQuaternion(quat_des_.v(0),quat_des_.v(1),quat_des_.v(2),quat_des_.a);

	    	skew_symmetric(quat_des_.v,skew_);

	    	for (int i = 0; i < skew_.rows(); i++)
	    	{
	    		v_temp_(i) = 0.0;
	    		for (int k = 0; k < skew_.cols(); k++)
	    			v_temp_(i) += skew_(i,k)*(quat_curr_.v(k));
	    	}

	    	x_err_.rot = (quat_curr_.a*quat_des_.v - quat_des_.a*quat_curr_.v) - v_temp_; 
			*/
	    	// clearing error msg before publishing
    		msg_err_.data.clear();

		    for(int i = 0; i < e_ref_.size(); i++)
			{
				e_ref_(i) = x_err_(i);
		    	msg_err_.data.push_back(e_ref_(i));
			}

			joint_des_states_.qdot.data = J_pinv_*e_ref_;

			joint_des_states_.q.data = joint_msr_states_.q.data + period.toSec()*joint_des_states_.qdot.data;

	    	// computing S
	    	S_.data = (joint_msr_states_.qdot.data - joint_des_states_.qdot.data) + alpha_.data.cwiseProduct(joint_msr_states_.q.data - joint_des_states_.q.data);

			//for (int i = 0; i < joint_handles_.size(); i++)
				//S_(i) = (joint_msr_states_.qdot(i) - joint_des_states_.qdot(i)) + alpha_(i)*tanh(lambda_(i)*(joint_msr_states_.q(i) - joint_des_states_.q(i)));
	    	
	    	// saving S0 on the first step
	    	if (step_ == 0)
	    		S0_ = S_;

	    	// computing Sd
	    	for (int i = 0; i < joint_handles_.size(); i++)
	    		Sd_(i) = S0_(i)*exp(-k_(i)*(step_*period.toSec()));

	    	Sq_.data = S_.data + Sd_.data;//- Sd_.data;

	    	// computing sigma_dot as sgn(Sq)
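	    	// branchless sign: -(x < 0) + (x > 0) evaluates to -1, 0 or +1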
	    	for (int i = 0; i < joint_handles_.size(); i++)
	    		sigma_dot_(i) = -(Sq_(i) < 0) + (Sq_(i) > 0); 

	    	// integrating sigma_dot
	    	sigma_.data += period.toSec()*sigma_dot_.data;

	    	//for (int i = 0; i < joint_handles_.size(); i++)
	    		//sigma_(i) += period.toSec()*pow(Sq_(i),0.5);

	    	// computing Sr
	    	Sr_.data = Sq_.data + gamma_.data.cwiseProduct(sigma_.data);

	    	// computing tau
	    	tau_.data = -Kd_.data.cwiseProduct(Sr_.data);

	    	step_++;

	    	if (Equal(x_,x_des_,0.05))
	    	{
	    		
	    		ROS_INFO("On target");
	    		cmd_flag_ = 0;
	    		return;
	    	}
	    } 

    	// set controls for joints
    	for (int i = 0; i < joint_handles_.size(); i++)
    	{	
    		if (!cmd_flag_)
    			tau_(i) = PIDs_[i].computeCommand(joint_des_states_.q(i) - joint_msr_states_.q(i),period);//Kd_(i)*(alpha_(i)*(joint_des_states_.q(i) - joint_msr_states_.q(i)) + (joint_des_states_.qdot(i) - joint_msr_states_.qdot(i))) ;

	    	joint_handles_[i].setCommand(tau_(i));
    	}

    	// publishing markers for visualization in rviz
    	pub_marker_.publish(msg_marker_);
    	msg_id_++;

	    // publishing error for all tasks as an array of ntasks*6
	    pub_error_.publish(msg_err_);
	    // publishing actual and desired trajectory for each task (links) as an array of ntasks*3
	    pub_pose_.publish(msg_pose_);
	    pub_traj_.publish(msg_traj_);
	    ros::spinOnce();

	}
コード例 #29
0
ファイル: local_io.cpp プロジェクト: melodysu83/raven2
/**
*  \brief Publishes the joint angles for the visualization
*
*  \param device0 the robot and its state
*
*  \ingroup ROS
*
*/
void publish_joints(struct robot_device* device0){

    static int count=0;
    static ros::Time t1;
    static ros::Time t2;
    static ros::Duration d;

    if (count == 0){
        t1 = t1.now();
    }
    count ++;
    t2 = t2.now();
    d = t2-t1;
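    // throttle: only publish if at least 30 ms have elapsed since the last publish (~33 Hz max)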

    if (d.toSec()<0.030)
        return;
    t1=t2;

    publish_marker(device0);

    sensor_msgs::JointState joint_state;
    //update joint_state
    joint_state.header.stamp = ros::Time::now();
    joint_state.name.resize(28);
    joint_state.position.resize(28);
//    joint_state.name.resize(14);
//    joint_state.position.resize(14);
    int left, right;
    if (device0->mech[0].type == GOLD_ARM)
    {
        left = 0;
        right = 1;
    }
    else
    {
        left = 1;
        right = 0;
    }
    //======================LEFT ARM===========================
    joint_state.name[0] ="shoulder_L";
    joint_state.position[0] = device0->mech[left].joint[0].jpos + offsets_l.shoulder_off;
    joint_state.name[1] ="elbow_L";
    joint_state.position[1] = device0->mech[left].joint[1].jpos + offsets_l.elbow_off;
    joint_state.name[2] ="insertion_L";
    joint_state.position[2] = device0->mech[left].joint[2].jpos + d4 + offsets_l.insertion_off;
    joint_state.name[3] ="tool_roll_L";
    joint_state.position[3] = device0->mech[left].joint[4].jpos - 45 * d2r + offsets_l.roll_off;
    joint_state.name[4] ="wrist_joint_L";
    joint_state.position[4] = device0->mech[left].joint[5].jpos + offsets_l.wrist_off;
    joint_state.name[5] ="grasper_joint_1_L";
    joint_state.position[5] = device0->mech[left].joint[6].jpos + offsets_l.grasp1_off;
    joint_state.name[6] ="grasper_joint_2_L";
    joint_state.position[6] = device0->mech[left].joint[7].jpos * -1 + offsets_l.grasp2_off;

    //======================RIGHT ARM===========================
    joint_state.name[7] ="shoulder_R";
    joint_state.position[7] = device0->mech[right].joint[0].jpos + offsets_r.shoulder_off;
    joint_state.name[8] ="elbow_R";
    joint_state.position[8] = device0->mech[right].joint[1].jpos + offsets_r.elbow_off;
    joint_state.name[9] ="insertion_R";
    joint_state.position[9] = device0->mech[right].joint[2].jpos + d4 + offsets_r.insertion_off;
    joint_state.name[10] ="tool_roll_R";
    joint_state.position[10] = device0->mech[right].joint[4].jpos + 45 * d2r + offsets_r.roll_off;
    joint_state.name[11] ="wrist_joint_R";
    joint_state.position[11] = device0->mech[right].joint[5].jpos * -1 + offsets_r.wrist_off;
    joint_state.name[12] ="grasper_joint_1_R";
    joint_state.position[12] = device0->mech[right].joint[6].jpos + offsets_r.grasp1_off;
    joint_state.name[13] ="grasper_joint_2_R";
    joint_state.position[13] = device0->mech[right].joint[7].jpos * -1 + offsets_r.grasp2_off;

    //======================LEFT ARM===========================

    joint_state.name[14] ="shoulder_L2";
    joint_state.position[14] = device0->mech[left].joint[0].jpos_d + offsets_l.shoulder_off;
    joint_state.name[15] ="elbow_L2";
    joint_state.position[15] = device0->mech[left].joint[1].jpos_d + offsets_l.elbow_off;
    joint_state.name[16] ="insertion_L2";
    joint_state.position[16] = device0->mech[left].joint[2].jpos_d + d4 + offsets_l.insertion_off;
    joint_state.name[17] ="tool_roll_L2";
    joint_state.position[17] = device0->mech[left].joint[4].jpos_d - 45 * d2r + offsets_l.roll_off;
    joint_state.name[18] ="wrist_joint_L2";
    joint_state.position[18] = device0->mech[left].joint[5].jpos_d + offsets_l.wrist_off;
    joint_state.name[19] ="grasper_joint_1_L2";
    joint_state.position[19] = device0->mech[left].joint[6].jpos_d + offsets_l.grasp1_off;
    joint_state.name[20] ="grasper_joint_2_L2";
    joint_state.position[20] = device0->mech[left].joint[7].jpos_d * -1 + offsets_l.grasp2_off;

    //======================RIGHT ARM===========================
    joint_state.name[21] ="shoulder_R2";
    joint_state.position[21] = device0->mech[right].joint[0].jpos_d + offsets_r.shoulder_off;
    joint_state.name[22] ="elbow_R2";
    joint_state.position[22] = device0->mech[right].joint[1].jpos_d + offsets_r.elbow_off;
    joint_state.name[23] ="insertion_R2";
    joint_state.position[23] = device0->mech[right].joint[2].jpos_d + d4 + offsets_r.insertion_off;
    joint_state.name[24] ="tool_roll_R2";
    joint_state.position[24] = device0->mech[right].joint[4].jpos_d + 45 * d2r + offsets_r.roll_off;
    joint_state.name[25] ="wrist_joint_R2";
    joint_state.position[25] = device0->mech[right].joint[5].jpos_d * -1 + offsets_r.wrist_off;
    joint_state.name[26] ="grasper_joint_1_R2";
    joint_state.position[26] = device0->mech[right].joint[6].jpos_d + offsets_r.grasp1_off;
    joint_state.name[27] ="grasper_joint_2_R2";
    joint_state.position[27] = device0->mech[right].joint[7].jpos_d * -1 + offsets_r.grasp2_off;

    //Publish the joint states
    joint_publisher.publish(joint_state);

}
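As a standalone illustration of the same pattern (throttle by wall-clock delta, fill a sensor_msgs::JointState, publish), here is a minimal sketch. The node name, topic, joint names, and positions are placeholders, not taken from the code above.

#include <ros/ros.h>
#include <sensor_msgs/JointState.h>

int main(int argc, char** argv)
{
  ros::init(argc, argv, "joint_state_relay");      // hypothetical node name
  ros::NodeHandle nh;
  ros::Publisher pub = nh.advertise<sensor_msgs::JointState>("joint_states", 1);

  ros::Time last_publish = ros::Time::now();
  ros::Rate loop(1000);                            // fast loop, throttled below

  while (ros::ok())
  {
    // publish at most once every 30 ms, as in publish_joints() above
    if ((ros::Time::now() - last_publish).toSec() >= 0.030)
    {
      sensor_msgs::JointState js;
      js.header.stamp = ros::Time::now();
      js.name.resize(2);
      js.position.resize(2);
      js.name[0] = "shoulder_L";  js.position[0] = 0.0;   // placeholder values
      js.name[1] = "elbow_L";     js.position[1] = 0.0;
      pub.publish(js);
      last_publish = ros::Time::now();
    }
    ros::spinOnce();
    loop.sleep();
  }
  return 0;
}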
Code example #30
File: icp_matching.cpp  Project: 794523332/Autoware
static void points_callback(const sensor_msgs::PointCloud2::ConstPtr& input)
{
  if (map_loaded == 1 && init_pos_set == 1)
  {
    matching_start = std::chrono::system_clock::now();

    static tf::TransformBroadcaster br;
    tf::Transform transform;
    tf::Quaternion predict_q, icp_q, current_q, localizer_q;

    pcl::PointXYZ p;
    pcl::PointCloud<pcl::PointXYZ> filtered_scan;

    current_scan_time = input->header.stamp;

    pcl::fromROSMsg(*input, filtered_scan);
    pcl::PointCloud<pcl::PointXYZ>::Ptr filtered_scan_ptr(new pcl::PointCloud<pcl::PointXYZ>(filtered_scan));
    int scan_points_num = filtered_scan_ptr->size();

    Eigen::Matrix4f t(Eigen::Matrix4f::Identity());   // base_link
    Eigen::Matrix4f t2(Eigen::Matrix4f::Identity());  // localizer

    std::chrono::time_point<std::chrono::system_clock> align_start, align_end, getFitnessScore_start, getFitnessScore_end;
    static double align_time, getFitnessScore_time = 0.0;

    // Setting point cloud to be aligned.
//    ndt.setInputSource(filtered_scan_ptr);
    icp.setInputSource(filtered_scan_ptr);

    // Guess the initial gross estimation of the transformation
    predict_pose.x = previous_pose.x + offset_x;
    predict_pose.y = previous_pose.y + offset_y;
    predict_pose.z = previous_pose.z + offset_z;
    predict_pose.roll = previous_pose.roll;
    predict_pose.pitch = previous_pose.pitch;
    predict_pose.yaw = previous_pose.yaw + offset_yaw;

    Eigen::Translation3f init_translation(predict_pose.x, predict_pose.y, predict_pose.z);
    Eigen::AngleAxisf init_rotation_x(predict_pose.roll, Eigen::Vector3f::UnitX());
    Eigen::AngleAxisf init_rotation_y(predict_pose.pitch, Eigen::Vector3f::UnitY());
    Eigen::AngleAxisf init_rotation_z(predict_pose.yaw, Eigen::Vector3f::UnitZ());
    Eigen::Matrix4f init_guess = (init_translation * init_rotation_z * init_rotation_y * init_rotation_x) * tf_btol;
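    // The guess composes the predicted base_link pose in /map with tf_btol
    // (the base_link -> localizer mount transform, the inverse of the tf_ltob
    // used below), so ICP is seeded with a map -> localizer transform.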

    pcl::PointCloud<pcl::PointXYZ>::Ptr output_cloud(new pcl::PointCloud<pcl::PointXYZ>);
//    ndt.align(*output_cloud, init_guess);

    icp.setMaximumIterations(maximum_iterations);
    icp.setTransformationEpsilon(transformation_epsilon);
    icp.setMaxCorrespondenceDistance(max_correspondence_distance);
    icp.setEuclideanFitnessEpsilon(euclidean_fitness_epsilon);
    icp.setRANSACOutlierRejectionThreshold(ransac_outlier_rejection_threshold);

    align_start = std::chrono::system_clock::now();
    icp.align(*output_cloud, init_guess);
    align_end = std::chrono::system_clock::now();
    align_time = std::chrono::duration_cast<std::chrono::microseconds>(align_end - align_start).count() / 1000.0;

//    t = ndt.getFinalTransformation();  // localizer
    t = icp.getFinalTransformation();  // localizer
    t2 = t * tf_ltob;                  // base_link

//    iteration = ndt.getFinalNumIteration();
//    score = ndt.getFitnessScore();

    getFitnessScore_start = std::chrono::system_clock::now();
    fitness_score = icp.getFitnessScore();
    getFitnessScore_end = std::chrono::system_clock::now();
    getFitnessScore_time = std::chrono::duration_cast<std::chrono::microseconds>(getFitnessScore_end - getFitnessScore_start).count() / 1000.0;

//    trans_probability = ndt.getTransformationProbability();

    tf::Matrix3x3 mat_l;  // localizer
    mat_l.setValue(static_cast<double>(t(0, 0)), static_cast<double>(t(0, 1)), static_cast<double>(t(0, 2)),
                   static_cast<double>(t(1, 0)), static_cast<double>(t(1, 1)), static_cast<double>(t(1, 2)),
                   static_cast<double>(t(2, 0)), static_cast<double>(t(2, 1)), static_cast<double>(t(2, 2)));

    // Update localizer_pose
    localizer_pose.x = t(0, 3);
    localizer_pose.y = t(1, 3);
    localizer_pose.z = t(2, 3);
    mat_l.getRPY(localizer_pose.roll, localizer_pose.pitch, localizer_pose.yaw, 1);

    tf::Matrix3x3 mat_b;  // base_link
    mat_b.setValue(static_cast<double>(t2(0, 0)), static_cast<double>(t2(0, 1)), static_cast<double>(t2(0, 2)),
                   static_cast<double>(t2(1, 0)), static_cast<double>(t2(1, 1)), static_cast<double>(t2(1, 2)),
                   static_cast<double>(t2(2, 0)), static_cast<double>(t2(2, 1)), static_cast<double>(t2(2, 2)));

    // Update ndt_pose
    icp_pose.x = t2(0, 3);
    icp_pose.y = t2(1, 3);
    icp_pose.z = t2(2, 3);
    mat_b.getRPY(icp_pose.roll, icp_pose.pitch, icp_pose.yaw, 1);

    // Calculate the difference between ndt_pose and predict_pose
    predict_pose_error = sqrt((icp_pose.x - predict_pose.x) * (icp_pose.x - predict_pose.x) +
                              (icp_pose.y - predict_pose.y) * (icp_pose.y - predict_pose.y) +
                              (icp_pose.z - predict_pose.z) * (icp_pose.z - predict_pose.z));

    if (predict_pose_error <= PREDICT_POSE_THRESHOLD)
    {
      use_predict_pose = 0;
    }
    else
    {
      use_predict_pose = 1;
    }
    use_predict_pose = 0;

    if (use_predict_pose == 0)
    {
      current_pose.x = icp_pose.x;
      current_pose.y = icp_pose.y;
      current_pose.z = icp_pose.z;
      current_pose.roll = icp_pose.roll;
      current_pose.pitch = icp_pose.pitch;
      current_pose.yaw = icp_pose.yaw;
    }
    else
    {
      current_pose.x = predict_pose.x;
      current_pose.y = predict_pose.y;
      current_pose.z = predict_pose.z;
      current_pose.roll = predict_pose.roll;
      current_pose.pitch = predict_pose.pitch;
      current_pose.yaw = predict_pose.yaw;
    }

    // Compute the velocity and acceleration
    scan_duration = current_scan_time - previous_scan_time;
    double secs = scan_duration.toSec();
    diff_x = current_pose.x - previous_pose.x;
    diff_y = current_pose.y - previous_pose.y;
    diff_z = current_pose.z - previous_pose.z;
    diff_yaw = current_pose.yaw - previous_pose.yaw;
    diff = sqrt(diff_x * diff_x + diff_y * diff_y + diff_z * diff_z);

    current_velocity = diff / secs;
    current_velocity_x = diff_x / secs;
    current_velocity_y = diff_y / secs;
    current_velocity_z = diff_z / secs;
    angular_velocity = diff_yaw / secs;

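    // Smooth the linear velocity with a three-sample average and clamp values
    // below 0.2 m/s to zero to suppress jitter when the vehicle is nearly stationary.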
    current_velocity_smooth = (current_velocity + previous_velocity + previous_previous_velocity) / 3.0;
    if (current_velocity_smooth < 0.2)
    {
      current_velocity_smooth = 0.0;
    }

    current_accel = (current_velocity - previous_velocity) / secs;
    current_accel_x = (current_velocity_x - previous_velocity_x) / secs;
    current_accel_y = (current_velocity_y - previous_velocity_y) / secs;
    current_accel_z = (current_velocity_z - previous_velocity_z) / secs;

    estimated_vel_mps.data = current_velocity;
    estimated_vel_kmph.data = current_velocity * 3.6;

    estimated_vel_mps_pub.publish(estimated_vel_mps);
    estimated_vel_kmph_pub.publish(estimated_vel_kmph);

    // Set values for publishing pose
    predict_q.setRPY(predict_pose.roll, predict_pose.pitch, predict_pose.yaw);
    predict_pose_msg.header.frame_id = "/map";
    predict_pose_msg.header.stamp = current_scan_time;
    predict_pose_msg.pose.position.x = predict_pose.x;
    predict_pose_msg.pose.position.y = predict_pose.y;
    predict_pose_msg.pose.position.z = predict_pose.z;
    predict_pose_msg.pose.orientation.x = predict_q.x();
    predict_pose_msg.pose.orientation.y = predict_q.y();
    predict_pose_msg.pose.orientation.z = predict_q.z();
    predict_pose_msg.pose.orientation.w = predict_q.w();

    icp_q.setRPY(icp_pose.roll, icp_pose.pitch, icp_pose.yaw);
    icp_pose_msg.header.frame_id = "/map";
    icp_pose_msg.header.stamp = current_scan_time;
    icp_pose_msg.pose.position.x = icp_pose.x;
    icp_pose_msg.pose.position.y = icp_pose.y;
    icp_pose_msg.pose.position.z = icp_pose.z;
    icp_pose_msg.pose.orientation.x = icp_q.x();
    icp_pose_msg.pose.orientation.y = icp_q.y();
    icp_pose_msg.pose.orientation.z = icp_q.z();
    icp_pose_msg.pose.orientation.w = icp_q.w();

    current_q.setRPY(current_pose.roll, current_pose.pitch, current_pose.yaw);
    current_pose_msg.header.frame_id = "/map";
    current_pose_msg.header.stamp = current_scan_time;
    current_pose_msg.pose.position.x = current_pose.x;
    current_pose_msg.pose.position.y = current_pose.y;
    current_pose_msg.pose.position.z = current_pose.z;
    current_pose_msg.pose.orientation.x = current_q.x();
    current_pose_msg.pose.orientation.y = current_q.y();
    current_pose_msg.pose.orientation.z = current_q.z();
    current_pose_msg.pose.orientation.w = current_q.w();

    localizer_q.setRPY(localizer_pose.roll, localizer_pose.pitch, localizer_pose.yaw);
    localizer_pose_msg.header.frame_id = "/map";
    localizer_pose_msg.header.stamp = current_scan_time;
    localizer_pose_msg.pose.position.x = localizer_pose.x;
    localizer_pose_msg.pose.position.y = localizer_pose.y;
    localizer_pose_msg.pose.position.z = localizer_pose.z;
    localizer_pose_msg.pose.orientation.x = localizer_q.x();
    localizer_pose_msg.pose.orientation.y = localizer_q.y();
    localizer_pose_msg.pose.orientation.z = localizer_q.z();
    localizer_pose_msg.pose.orientation.w = localizer_q.w();

    predict_pose_pub.publish(predict_pose_msg);
    icp_pose_pub.publish(icp_pose_msg);
    current_pose_pub.publish(current_pose_msg);
    localizer_pose_pub.publish(localizer_pose_msg);

    // Send TF "/base_link" to "/map"
    transform.setOrigin(tf::Vector3(current_pose.x, current_pose.y, current_pose.z));
    transform.setRotation(current_q);
    br.sendTransform(tf::StampedTransform(transform, current_scan_time, "/map", "/base_link"));

    matching_end = std::chrono::system_clock::now();
    exe_time = std::chrono::duration_cast<std::chrono::microseconds>(matching_end - matching_start).count() / 1000.0;
    time_icp_matching.data = exe_time;
    time_icp_matching_pub.publish(time_icp_matching);

    // Set values for /estimate_twist
    estimate_twist_msg.header.stamp = current_scan_time;
    estimate_twist_msg.twist.linear.x = current_velocity;
    estimate_twist_msg.twist.linear.y = 0.0;
    estimate_twist_msg.twist.linear.z = 0.0;
    estimate_twist_msg.twist.angular.x = 0.0;
    estimate_twist_msg.twist.angular.y = 0.0;
    estimate_twist_msg.twist.angular.z = angular_velocity;

    estimate_twist_pub.publish(estimate_twist_msg);

    geometry_msgs::Vector3Stamped estimate_vel_msg;
    estimate_vel_msg.header.stamp = current_scan_time;
    estimate_vel_msg.vector.x = current_velocity;
    estimated_vel_pub.publish(estimate_vel_msg);

    // Set values for /icp_stat
    icp_stat_msg.header.stamp = current_scan_time;
    icp_stat_msg.exe_time = time_icp_matching.data;
//    icp_stat_msg.iteration = iteration;
    icp_stat_msg.score = fitness_score;
    icp_stat_msg.velocity = current_velocity;
    icp_stat_msg.acceleration = current_accel;
    icp_stat_msg.use_predict_pose = 0;

    icp_stat_pub.publish(icp_stat_msg);

    // Write log
    if (!ofs)
    {
      std::cerr << "Could not open " << filename << "." << std::endl;
      exit(1);
    }
    ofs << input->header.seq << "," << scan_points_num << ","
            << current_pose.x << "," << current_pose.y << "," << current_pose.z << "," << current_pose.roll << ","
            << current_pose.pitch << "," << current_pose.yaw << "," << predict_pose.x << "," << predict_pose.y << ","
            << predict_pose.z << "," << predict_pose.roll << "," << predict_pose.pitch << "," << predict_pose.yaw << ","
            << current_pose.x - predict_pose.x << "," << current_pose.y - predict_pose.y << ","
            << current_pose.z - predict_pose.z << "," << current_pose.roll - predict_pose.roll << ","
            << current_pose.pitch - predict_pose.pitch << "," << current_pose.yaw - predict_pose.yaw << ","
            << predict_pose_error << "," <<  "," << fitness_score << ","
            << "," << current_velocity << "," << current_velocity_smooth << "," << current_accel
            << "," << angular_velocity << "," << exe_time << "," << align_time << "," << getFitnessScore_time << std::endl;

    std::cout << "-----------------------------------------------------------------" << std::endl;
    std::cout << "Sequence: " << input->header.seq << std::endl;
    std::cout << "Timestamp: " << input->header.stamp << std::endl;
    std::cout << "Frame ID: " << input->header.frame_id << std::endl;
    //		std::cout << "Number of Scan Points: " << scan_ptr->size() << " points." << std::endl;
    std::cout << "Number of Filtered Scan Points: " << scan_points_num << " points." << std::endl;
    std::cout << "ICP has converged: " << icp.hasConverged() << std::endl;
    std::cout << "Fitness Score: " << fitness_score << std::endl;
//    std::cout << "Transformation Probability: " << ndt.getTransformationProbability() << std::endl;
    std::cout << "Execution Time: " << exe_time << " ms." << std::endl;
//    std::cout << "Number of Iterations: " << ndt.getFinalNumIteration() << std::endl;
//    std::cout << "NDT Reliability: " << ndt_reliability.data << std::endl;
    std::cout << "(x,y,z,roll,pitch,yaw): " << std::endl;
    std::cout << "(" << current_pose.x << ", " << current_pose.y << ", " << current_pose.z << ", " << current_pose.roll
              << ", " << current_pose.pitch << ", " << current_pose.yaw << ")" << std::endl;
    std::cout << "Transformation Matrix: " << std::endl;
    std::cout << t << std::endl;
    std::cout << "-----------------------------------------------------------------" << std::endl;

    // Update offset
    if (_offset == "linear")
    {
      offset_x = diff_x;
      offset_y = diff_y;
      offset_z = diff_z;
      offset_yaw = diff_yaw;
    }
    else if (_offset == "quadratic")
    {
      offset_x = (current_velocity_x + current_accel_x * secs) * secs;
      offset_y = (current_velocity_y + current_accel_y * secs) * secs;
      offset_z = diff_z;
      offset_yaw = diff_yaw;
    }
    else if (_offset == "zero")
    {
      offset_x = 0.0;
      offset_y = 0.0;
      offset_z = 0.0;
      offset_yaw = 0.0;
    }

    // Update previous_***
    previous_pose.x = current_pose.x;
    previous_pose.y = current_pose.y;
    previous_pose.z = current_pose.z;
    previous_pose.roll = current_pose.roll;
    previous_pose.pitch = current_pose.pitch;
    previous_pose.yaw = current_pose.yaw;

    previous_scan_time.sec = current_scan_time.sec;
    previous_scan_time.nsec = current_scan_time.nsec;

    previous_previous_velocity = previous_velocity;
    previous_velocity = current_velocity;
    previous_velocity_x = current_velocity_x;
    previous_velocity_y = current_velocity_y;
    previous_velocity_z = current_velocity_z;
    previous_accel = current_accel;

    previous_estimated_vel_kmph.data = estimated_vel_kmph.data;
  }
}
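For reference, a minimal sketch of the PCL ICP call sequence this callback relies on. The PCD file names, parameter values, and identity initial guess are placeholders; the node above gets its clouds from topics and its parameters from the parameter server.

#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr map(new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PointCloud<pcl::PointXYZ>::Ptr scan(new pcl::PointCloud<pcl::PointXYZ>);
  // Placeholder file names; any two overlapping clouds will do.
  if (pcl::io::loadPCDFile("map.pcd", *map) < 0 ||
      pcl::io::loadPCDFile("scan.pcd", *scan) < 0)
    return 1;

  pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
  icp.setInputTarget(map);             // reference cloud (the map)
  icp.setInputSource(scan);            // cloud to be aligned (the filtered scan)
  icp.setMaximumIterations(100);       // placeholder values; the node reads
  icp.setTransformationEpsilon(0.01);  // these from ROS parameters instead
  icp.setMaxCorrespondenceDistance(1.0);

  Eigen::Matrix4f init_guess = Eigen::Matrix4f::Identity();  // e.g. predicted pose
  pcl::PointCloud<pcl::PointXYZ> aligned;
  icp.align(aligned, init_guess);

  std::cout << "converged: " << icp.hasConverged()
            << "  fitness: " << icp.getFitnessScore() << std::endl;
  std::cout << icp.getFinalTransformation() << std::endl;
  return 0;
}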