TEST_F(LoadPlanningModelsPr2, InitOK)
{
  ASSERT_TRUE(urdf_ok_);
  ASSERT_EQ(urdf_model_->getName(), "pr2_test");

  robot_model::RobotModelPtr kmodel(new robot_model::RobotModel(urdf_model_, srdf_model_));
  robot_state::RobotState ks(kmodel);
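  // exercise both setters; the transform checks below run against the default state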
  ks.setToRandomValues();
  ks.setToDefaultValues();

  robot_state::Transforms tf(kmodel->getModelFrame());

  Eigen::Affine3d t1;
  t1.setIdentity();
  t1.translation() = Eigen::Vector3d(10.0, 1.0, 0.0);
  tf.setTransform(t1, "some_frame_1");

  Eigen::Affine3d t2(Eigen::Translation3d(10.0, 1.0, 0.0) * Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitY()));
  tf.setTransform(t2, "some_frame_2");

  Eigen::Affine3d t3;
  t3.setIdentity();
  t3.translation() = Eigen::Vector3d(0.0, 1.0, -1.0);
  tf.setTransform(t3, "some_frame_3");

  EXPECT_TRUE(tf.isFixedFrame("some_frame_1"));
  EXPECT_FALSE(tf.isFixedFrame("base_footprint"));
  EXPECT_TRUE(tf.isFixedFrame(kmodel->getModelFrame()));

  Eigen::Affine3d x;
  x.setIdentity();
  tf.transformPose(ks, "some_frame_2", x, x);
  
  EXPECT_TRUE(t2.translation() == x.translation());
  EXPECT_TRUE(t2.rotation() == x.rotation());

  tf.transformPose(ks, kmodel->getModelFrame(), x, x);
  EXPECT_TRUE(t2.translation() == x.translation());
  EXPECT_TRUE(t2.rotation() == x.rotation());

  x.setIdentity();
  tf.transformPose(ks, "r_wrist_roll_link", x, x);

  EXPECT_NEAR(x.translation().x(), 0.585315, 1e-4);
  EXPECT_NEAR(x.translation().y(), -0.188, 1e-4);
  EXPECT_NEAR(x.translation().z(), 1.24001, 1e-4);
}
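
A side note on the exact comparisons above: t2.translation() == x.translation() happens to hold because the transform is stored and returned unchanged, but for transforms that go through any arithmetic a tolerance-based check is the safer pattern. A minimal sketch using only Eigen (not part of the original test):

#include <Eigen/Geometry>
#include <cassert>

int main()
{
  Eigen::Affine3d t2(Eigen::Translation3d(10.0, 1.0, 0.0) *
                     Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitY()));
  Eigen::Affine3d x = t2; // in practice: the result of a frame transformation

  // isApprox compares with a relative tolerance instead of bit-exact equality
  assert(x.translation().isApprox(t2.translation()));
  assert(x.rotation().isApprox(t2.rotation()));
  return 0;
}
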
Example #2
bool VisualOdometry::resetposSrvCallback(
        Save::Request& request,
        Save::Response& response)
{
    const std::string& new_tr = request.filename;
    double m00,m01,m02,m03,m10,m11,m12,m13,m20,m21,m22,m23,m30,m31,m32,m33;
    int res;

    // try a row-per-line 4x4 matrix first, then fall back to a single-line format
    res = sscanf(new_tr.c_str(), "%lg %lg %lg %lg\n%lg %lg %lg %lg\n%lg %lg %lg %lg\n%lg %lg %lg %lg",
                     &m00,&m01,&m02,&m03,&m10,&m11,&m12,&m13,&m20,&m21,&m22,&m23,&m30,&m31,&m32,&m33);
    if (res != 16)
    {
         res = sscanf(new_tr.c_str(), "%lg %lg %lg %lg %lg %lg %lg %lg %lg %lg %lg %lg %lg %lg %lg %lg",
                         &m00,&m01,&m02,&m03,&m10,&m11,&m12,&m13,&m20,&m21,&m22,&m23,&m30,&m31,&m32,&m33);
         if (res != 16)
         {
             return false;
         }
    }

    double yaw,pitch,roll;
    tf::Matrix3x3 rot(m00,m01,m02,m10,m11,m12,m20,m21,m22);
    tf::Matrix3x3 correction(0,0,1,-1,0,0,0,-1,0); // convert from camera pose to world
    rot = rot*correction.inverse();
    rot.getEulerYPR(yaw,pitch,roll);

    Eigen::Affine3d pose;
    pose = Eigen::Translation<double,3>(m03,m13,m23) *
           Eigen::AngleAxis<double>(yaw,Eigen::Vector3d::UnitZ()) *
           Eigen::AngleAxis<double>(pitch,Eigen::Vector3d::UnitY()) *
           Eigen::AngleAxis<double>(roll,Eigen::Vector3d::UnitX());
    tf::TransformEigenToTF(pose, f2b_);
    motion_estimation_.resetModel();
    return true;
}
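
The 'correction' matrix above is the usual rotation between a camera optical frame (z forward, x right, y down) and a robot/world frame (x forward, y left, z up). A hedged Eigen-only equivalent of that constant, for reference (the row-major entries match tf::Matrix3x3 correction(0,0,1, -1,0,0, 0,-1,0)):

#include <Eigen/Geometry>

// Maps optical-frame axes to body-frame axes:
//   optical z (forward) -> body  x
//   optical x (right)   -> body -y
//   optical y (down)    -> body -z
Eigen::Matrix3d opticalToBody()
{
  Eigen::Matrix3d m;
  m <<  0,  0,  1,
       -1,  0,  0,
        0, -1,  0;
  return m;
}
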
Example #3
void mesh_core::PlaneProjection::getFrame(Eigen::Affine3d& frame) const
{
  frame.setIdentity();
  frame.translation() = origin_;
  frame.linear().col(0) = x_axis_;
  frame.linear().col(1) = y_axis_;
  frame.linear().col(2) = normal_;
}
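
Assuming x_axis_, y_axis_ and normal_ are orthonormal, the returned frame maps plane coordinates to world coordinates, so its inverse projects world points into the plane. A short usage sketch (the 'projection' object and values are hypothetical):

Eigen::Affine3d frame;
projection.getFrame(frame);                      // plane -> world
Eigen::Vector3d p_world(1.0, 2.0, 3.0);
Eigen::Vector3d p_plane = frame.inverse() * p_world;
// p_plane.x()/p_plane.y() are in-plane coordinates;
// p_plane.z() is the signed distance from the plane along normal_.
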
Example #4
void Data4Viewer::drawRGBView()
{
	_pKinect->_pRGBCamera->setGLProjectionMatrix(0.1f, 100.f);

	glMatrixMode(GL_MODELVIEW);
	Eigen::Affine3d tmp;
	tmp.setIdentity();
	Matrix4d mv = btl::utility::setModelViewGLfromPrj(tmp); // mv transforms X_m to X_w, i.e. model to world coordinates
	glLoadMatrixd(mv.data());
	_pKinect->_pRGBCamera->renderCameraInLocal(_pKinect->_pCurrFrame->_gRGB, _pGL.get(), false, NULL, 0.2f, true); // render in model coordinates
}
Example #5
TEST(SpherePointContainment, ComplexOutside)
{
    shapes::Sphere shape(1.0);
    bodies::Body* sphere = new bodies::Sphere(&shape);
    sphere->setScale(0.95);
    Eigen::Affine3d pose;
    pose.setIdentity();
    pose.translation() = Eigen::Vector3d(1.0,1.0,1.0);
    sphere->setPose(pose);
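    // (0.5, 0, 0) lies 1.5 away from the center (1, 1, 1), beyond the scaled radius 0.95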
    bool contains = sphere->containsPoint(0.5,0.0,0.0);
    delete sphere;
    EXPECT_FALSE(contains);
}
Example #6
void Data4Viewer::drawGlobalView()
{
	_pKinect->_pRGBCamera->setGLProjectionMatrix(0.1f, 100.f);

	glMatrixMode(GL_MODELVIEW);
	Eigen::Affine3d tmp;
	tmp.setIdentity();
	Eigen::Matrix4d mMat;
	mMat.row(0) = tmp.matrix().row(0);
	mMat.row(1) = -tmp.matrix().row(1);
	mMat.row(2) = -tmp.matrix().row(2);
	mMat.row(3) = tmp.matrix().row(3);
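	// negating the y and z rows flips the camera right-down-forward axes into
	// OpenGL's right-up-backward view convention (a 180-degree rotation about x)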

	glLoadMatrixd(mMat.data());
	GpuMat rgb(_pKinect->_cvmRGB);
	_pKinect->_pRGBCamera->renderCameraInLocal(rgb, _pGL.get(), false, NULL, 0.2f, true); // render in model coordinates
}
Example #7
void RotatingRaySensor::prepareFinalPointcloud(){
  // Copies the current full pointcloud to pointcloud_full.
  poseMutex.lock();
  Eigen::Affine3d current_pose2 = current_pose;
  Eigen::Affine3d rot;
  rot.setIdentity();
  rot.rotate(config.transf_sensor_rot_to_sensor);
  poseMutex.unlock();
  pointcloud_full.clear();
  pointcloud_full.reserve(fromCloud->size());
  base::Vector3d vec_local;
  for(std::list<utils::Vector>::iterator it = fromCloud->begin(); it != fromCloud->end(); ++it) {
    // Transform each point back from world to the current node (see receiveData()).
    // In addition, 'transf_sensor_rot_to_sensor' is applied, which describes
    // the orientation of the sensor in the unturned sensor frame.
    vec_local = rot * current_pose2.inverse() * (*it);
    pointcloud_full.push_back(vec_local);
  }
  fromCloud->clear();
}
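
Since rot and current_pose2 are fixed for the whole loop, the composed transform can also be hoisted out of it; a small sketch of that variant (same names as above):

const Eigen::Affine3d world_to_local = rot * current_pose2.inverse();
for(std::list<utils::Vector>::iterator it = fromCloud->begin(); it != fromCloud->end(); ++it) {
  pointcloud_full.push_back(world_to_local * (*it));
}
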
Example #8
robot_state::Transforms::Transforms(const std::string &target_frame) : target_frame_(target_frame)
{
  Eigen::Affine3d t;
  t.setIdentity();
  transforms_[target_frame_] = t;
}
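
Because the constructor seeds the map with an identity transform for the target frame, the target frame is always a known fixed frame, which is what Example #1 asserts with tf.isFixedFrame(kmodel->getModelFrame()). A minimal sketch (frame name hypothetical):

robot_state::Transforms tf("base_link");
// tf.isFixedFrame("base_link") is true from construction onward
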
Example #9
void testSimple()
{
    ros::NodeHandle nh;
    planning_scene_monitor::PlanningSceneMonitor psm(ROBOT_DESCRIPTION);
    planning_scene::PlanningScenePtr scene = psm.getPlanningScene();

    ros::Publisher pub_state = nh.advertise<moveit_msgs::DisplayTrajectory>("display_motion_plan", 20);
    ros::Publisher pub_scene = nh.advertise<moveit_msgs::PlanningScene>("planning_scene", 1);
    sleep(1);

    std::vector<shapes::ShapeConstPtr> attached_shapes(1, shapes::ShapeConstPtr(new shapes::Box(0.2, 0.1, 0.1)));
    Eigen::Affine3d t;
    t.setIdentity();
    std::vector<Eigen::Affine3d> attached_poses(1, t);
    std::vector<std::string> touch;
    touch.push_back("r_wrist_roll_link");
    touch.push_back("r_forearm_link");
    touch.push_back("r_gripper_palm_link");
    touch.push_back("r_gripper_l_finger_link");
    touch.push_back("r_gripper_r_finger_link");

    scene->getCurrentState().getLinkState("r_wrist_roll_link")->attachBody("attached", attached_shapes,
                                                                           attached_poses, touch);

    collision_detection::CollisionRequest req;
    req.verbose = true;
    unsigned int N = 2;

    for (unsigned int i = 0 ; i < N ; ++i)
    {
        collision_detection::CollisionResult res;
        do
        {
            ROS_INFO("Trying new state...");
            res.collision = false;
            if (i + 1 == N)
                scene->getCurrentState().setToDefaultValues();
            else
                scene->getCurrentState().setToRandomValues();
            scene->checkSelfCollision(req, res);
        }
        while (res.collision);

        ROS_INFO("Displaying valid state...");
        moveit_msgs::PlanningScene psmsg;
        scene->getPlanningSceneMsg(psmsg);
        pub_scene.publish(psmsg);
        ros::Duration(0.5).sleep();

        /*
        moveit_msgs::DisplayTrajectory d;
        d.model_id = scene->getRobotModel()->getName();
        planning_models::robotStateToRobotStateMsg(scene->getCurrentState(), d.robot_state);
        pub_state.publish(d);
        for (int j = 0 ; j < 10 ; ++j)
        {
            // ros::spinOnce();
            ros::Duration(0.01).sleep();
        }
        */
    }

    sleep(1);

    planning_scene::PlanningScenePtr colliding = planning_scene::PlanningScene::clone(scene);
    // build a planning scene containing 100 spheres that do not collide with the robot
    random_numbers::RandomNumberGenerator rng;
    req.verbose = false;
    for (int i = 0 ; i < 100000 ; ++i)
    {
        t.translation() = Eigen::Vector3d(rng.uniformReal(-1, 1), rng.uniformReal(-1, 1), rng.uniformReal(0, 2));
        scene->getWorldNonConst()->clearObjects();
        scene->getWorldNonConst()->addToObject("sphere1", shapes::ShapeConstPtr(new shapes::Sphere(0.05)), t);
        collision_detection::CollisionResult res;
        scene->checkCollision(req, res);
        if (!res.collision)
        {
            int x = colliding->getWorld()->getObjectIds().size();
            colliding->getWorldNonConst()->addToObject("spheres" + boost::lexical_cast<std::string>(x),
                                                       shapes::ShapeConstPtr(new shapes::Sphere(0.05)), t);
            std::cout << x << "\n";
            if (x == 100)
                break;
        }
    }

    moveit_msgs::PlanningScene psmsg;
    colliding->getPlanningSceneMsg(psmsg);
    pub_scene.publish(psmsg);

    ros::WallTime start = ros::WallTime::now();
    unsigned int M = 1000;
    for (unsigned int i = 0 ; i < M ; ++i)
    {
        collision_detection::CollisionResult res;
        colliding->checkCollision(req, res);
        if (res.collision)
            ROS_ERROR("PROBLEM");
    }
    ROS_INFO("%lf full-collision checks per second", (double)M / (ros::WallTime::now() - start).toSec());


    /*
    req.verbose = false;
    ros::WallTime start = ros::WallTime::now();
    //    unsigned int N = 50000;
N = 50000;
    for (unsigned int i = 0 ; i < N ; ++i)
    {
    collision_detection::CollisionResult res;
    scene->getCurrentState().setToRandomValues();
    scene->checkSelfCollision(req, res);
    }
    ROS_INFO("%lf self-collision checks per second", (double)N / (ros::WallTime::now() - start).toSec());
    */
}
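
The timing pattern above (wall clock over M identical checks) is useful on its own. A self-contained sketch of the same pattern with std::chrono in place of ros::WallTime (the check() callable is a placeholder):

#include <chrono>

// Runs 'check' a fixed number of times and returns checks per second.
template <typename F>
double checksPerSecond(F&& check, unsigned int iterations)
{
    const auto start = std::chrono::steady_clock::now();
    for (unsigned int i = 0; i < iterations; ++i)
        check();
    const std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
    return iterations / elapsed.count();
}
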
Example #10
bool DenseTracker::match(RgbdImagePyramid& reference, RgbdImagePyramid& current, Eigen::Affine3d& transformation)
{
  reference.compute(cfg.getNumLevels());
  current.compute(cfg.getNumLevels());

  bool success = true;

  if(!cfg.UseInitialEstimate)
  {
    transformation.setIdentity();
  }

  // our first increment is the given guess
  Sophus::SE3 inc(transformation.rotation(), transformation.translation());

  Revertable<Sophus::SE3> old(last_xi_);
  Revertable<Sophus::SE3> initial(inc);
  Revertable<AffineTransform> estimate(AffineTransform::Identity());

  bool accept = true;

  static stopwatch_collection sw_level(5, "l", 100);
  static stopwatch_collection sw_it(5, "it@l", 500);

  for(itctx_.Level = cfg.FirstLevel; itctx_.Level >= cfg.LastLevel; --itctx_.Level)
  {
    // reset the error after every pyramid level; errors from different levels are not comparable
    itctx_.Iteration = 0;
    itctx_.Error = 1;

    RgbdImage& ref = reference.level(itctx_.Level);
    RgbdImage& cur = current.level(itctx_.Level);
    const IntrinsicMatrix& intrinsics = intrinsics_[itctx_.Level];

    NormalEquationsLeastSquares ls;
    Vector6 x;

    Vector6 last_x;
    sw_level[itctx_.Level].start();

    do
    {
      sw_it[itctx_.Level].start();
      estimate.update() = inc.matrix().cast<NumType>() * estimate().matrix();

      if(cfg.UseTemporalSmoothing())
      {
        old.update() = inc.inverse() * old();
      }

      if(cfg.UseEstimateSmoothing())
      {
        initial.update() = inc.inverse() * initial();
      }

      computeLeastSquaresEquationsInverseCompositional(ref, cur, intrinsics, estimate(), ls);

      itctx_.LastError = itctx_.Error;
      itctx_.Error = ls.error + 0.5 * cfg.Lambda * old().log().squaredNorm() + 0.5 * cfg.Mu * initial().log().squaredNorm();

      // accept the last increment?
      accept = itctx_.ErrorDiff() > 0.0;

      //ROS_DEBUG_STREAM_COND(!accept, itctx_);

      // if we get a worse result, we revert the increment and try our luck on the next pyramid level
      if(!accept)
      {
        old.revert();
        initial.revert();
        estimate.revert();
        inc = Sophus::SE3::exp(Vector6::Zero().cast<double>());

        break;
      }

      // calculate new increment
      Matrix6x6 A_diagonal = Matrix6x6::Identity();

      if(cfg.UseTemporalSmoothing())
      {
        ls.A += cfg.Lambda * A_diagonal;
        ls.b += cfg.Lambda * old().log().cast<NumType>();
      }

      if(cfg.UseEstimateSmoothing())
      {
        ls.A += cfg.Mu * A_diagonal;
        ls.b += cfg.Mu * initial().log().cast<NumType>();
      }

      // first estimate rotation on lowest level
      //if(itctx_.IsFirstLevel())
      //{
      //  Eigen::Vector3f rot = ls.A.bottomRightCorner(3, 3).ldlt().solve(ls.b.tail(3));
      //  x.setZero();
      //  x.tail<3>() = rot;
      //}
      //else
      //{
      //  ls.solve(x);
      //}

      ls.solve(x);
      if(itctx_.IsLastLevel())
      {
        // TODO: should only be used if we also accept the solution
        last_a_ = ls.A;
      }

      //ROS_DEBUG_STREAM_COND(accept, itctx_ << ", Increment: " << x.format(YamlArrayFmt));
      inc = Sophus::SE3::exp(x.cast<double>());

      itctx_.Iteration++;
      itctx_.NumConstraints = ls.num_constraints;

      sw_it[itctx_.Level].stopAndPrint();
    }
    while(accept && itctx_.ErrorDiff() > cfg.Precision && !itctx_.IterationsExceeded());

    sw_level[itctx_.Level].stopAndPrint();
  }

  // if last increment wasn't incorporated because of iterations exceeded, incorporate it
  if(itctx_.IterationsExceeded())
  {
    estimate.update() = inc.matrix().cast<NumType>() * estimate().matrix();
  }

  if(success)
  {
    last_xi_ = Sophus::SE3(estimate().rotation().cast<double>(), estimate().translation().cast<double>());
  }

  // the final pose is the inverse of the accumulated incremental estimate
  transformation = estimate().inverse().cast<double>();

  return success;
}
Example #11
File: Smurf.cpp  Project: Rauldg/smurf
void smurf::Robot::loadFromSmurf(const std::string& path)
{
    configmaps::ConfigMap map;

    // parse joints from model
    boost::filesystem::path filepath(path);
    model = smurf_parser::parseFile(&map, filepath.parent_path().generic_string(), filepath.filename().generic_string(), true);
    
    // first we need to create all Frames
    for (configmaps::ConfigVector::iterator it = map["frames"].begin(); it != map["frames"].end(); ++it) 
    {
        configmaps::ConfigMap &fr(it->children);

        Frame *frame = new Frame(fr["name"]);
        availableFrames.push_back(frame);
    }
    
    for(std::pair<std::string, boost::shared_ptr<urdf::Link>> link: model->links_)
    {
        Frame *frame = new Frame(link.first);
        for(boost::shared_ptr<urdf::Visual> visual : link.second->visual_array)
        {
            frame->addVisual(*visual);
        }
        availableFrames.push_back(frame);
    }

    for(std::pair<std::string, boost::shared_ptr<urdf::Joint> > jointIt: model->joints_)
    {
        boost::shared_ptr<urdf::Joint> joint = jointIt.second;
        //std::cout << "Adding joint " << joint->name << std::endl;
        
        Frame *source = getFrameByName(joint->parent_link_name);
        Frame *target = getFrameByName(joint->child_link_name);

        // TODO: this might not be set in some cases; perhaps force a check
        configmaps::ConfigMap annotations;
        for(configmaps::ConfigItem &cv : map["joints"])
        {
            if(static_cast<std::string>(cv.children["name"]) == joint->name)
            {
                annotations = cv.children;
            }
        }
        switch(joint->type)
        {
            case urdf::Joint::FIXED:
            {
                const urdf::Pose &tr(joint->parent_to_joint_origin_transform);     
                StaticTransformation *transform = new StaticTransformation(source, target,
                                                                           Eigen::Quaterniond(tr.rotation.w, tr.rotation.x, tr.rotation.y, tr.rotation.z),
                                                                           Eigen::Vector3d(tr.position.x, tr.position.y, tr.position.z));              
                staticTransforms.push_back(transform);
            }
            break;
            case urdf::Joint::FLOATING:
            {
                DynamicTransformation *transform = new DynamicTransformation(source, target, checkGet(annotations, "provider"), checkGet(annotations, "port"));
                dynamicTransforms.push_back(transform);
                Eigen::Vector3d axis(joint->axis.x, joint->axis.y, joint->axis.z);
                Eigen::Affine3d sourceToAxis(Eigen::Affine3d::Identity());
                sourceToAxis.translation() = axis;
                base::JointLimitRange limits;
                const urdf::Pose parentToOrigin(joint->parent_to_joint_origin_transform);
                Eigen::Quaterniond rot(parentToOrigin.rotation.w, parentToOrigin.rotation.x, parentToOrigin.rotation.y, parentToOrigin.rotation.z);
                Eigen::Vector3d trans(parentToOrigin.position.x, parentToOrigin.position.y, parentToOrigin.position.z);
                Eigen::Affine3d parentToOriginAff;
                parentToOriginAff.setIdentity();
                parentToOriginAff.rotate(rot);
                parentToOriginAff.translation() = trans;
                Joint *smurfJoint = new Joint (source, target, checkGet(annotations, "provider"), checkGet(annotations, "port"), checkGet(annotations, "driver"), limits, sourceToAxis, parentToOriginAff, joint); 
                joints.push_back(smurfJoint);
            }
            break;
            case urdf::Joint::REVOLUTE:
            case urdf::Joint::CONTINUOUS:
            case urdf::Joint::PRISMATIC:
            {
                base::JointState minState;
                minState.position = joint->limits->lower;
                minState.effort = 0;
                minState.speed = 0;
                
                base::JointState maxState;
                maxState.position = joint->limits->upper;
                maxState.effort = joint->limits->effort;
                maxState.speed = joint->limits->velocity;
                
                base::JointLimitRange limits;
                limits.min = minState;
                limits.max = maxState;
                
                Eigen::Vector3d axis(joint->axis.x, joint->axis.y, joint->axis.z);
                Eigen::Affine3d sourceToAxis(Eigen::Affine3d::Identity());
                sourceToAxis.translation() = axis;
                
                DynamicTransformation *transform = NULL;
                Joint *smurfJoint;
                // push the corresponding smurf::Joint
                const urdf::Pose parentToOrigin(joint->parent_to_joint_origin_transform);
                Eigen::Quaterniond rot(parentToOrigin.rotation.w, parentToOrigin.rotation.x, parentToOrigin.rotation.y, parentToOrigin.rotation.z);
                Eigen::Vector3d trans(parentToOrigin.position.x, parentToOrigin.position.y, parentToOrigin.position.z);
                Eigen::Affine3d parentToOriginAff;
                parentToOriginAff.setIdentity();
                parentToOriginAff.rotate(rot);
                parentToOriginAff.translation() = trans;
                if(joint->type == urdf::Joint::REVOLUTE || joint->type == urdf::Joint::CONTINUOUS)
                {
                    transform = new RotationalJoint(source, target, checkGet(annotations, "provider"), checkGet(annotations, "port"), checkGet(annotations, "driver"), limits, sourceToAxis, axis, parentToOriginAff, joint);
                    smurfJoint = (Joint *)transform;
                }
                else
                {
                    transform = new TranslationalJoint(source, target, checkGet(annotations, "provider"), checkGet(annotations, "port"), checkGet(annotations, "driver"), limits, sourceToAxis, axis, parentToOriginAff, joint);
                    smurfJoint = (Joint *)transform;
                }
                dynamicTransforms.push_back(transform);
                joints.push_back(smurfJoint);
            }
            break;
            default:
                throw std::runtime_error("Smurf: Error, got unhandled Joint type");
        }
    }

    
    // parse sensors from map
    for (configmaps::ConfigVector::iterator it = map["sensors"].begin(); it != map["sensors"].end(); ++it) 
    {
        configmaps::ConfigMap sensorMap = it->children;
        smurf::Sensor *sensor = new Sensor(sensorMap["name"], sensorMap["type"], sensorMap["taskInstanceName"], getFrameByName(sensorMap["link"]));
        sensors.push_back(sensor);
    }
}
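
The urdf::Pose to Eigen::Affine3d conversion appears twice above (in the FLOATING and the REVOLUTE/CONTINUOUS/PRISMATIC branches); a small helper would remove the duplication. A sketch of such a helper, assuming only urdf and Eigen (not part of the original file):

static Eigen::Affine3d toEigen(const urdf::Pose& pose)
{
    Eigen::Affine3d result(Eigen::Affine3d::Identity());
    result.rotate(Eigen::Quaterniond(pose.rotation.w, pose.rotation.x,
                                     pose.rotation.y, pose.rotation.z));
    result.translation() = Eigen::Vector3d(pose.position.x, pose.position.y,
                                           pose.position.z);
    return result;
}
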