void SetBindPose()
{
    assert( g_pFBXScene != nullptr );

    g_BindPoses.clear();
    INT iPoseCount = g_pFBXScene->GetPoseCount();
    for( INT i = 0; i < iPoseCount; ++i )
    {
        auto pPose = g_pFBXScene->GetPose( i );
        INT iNodeCount = pPose->GetCount();
        ExportLog::LogMsg( 4, "Found %spose: \"%s\" with %d nodes", pPose->IsBindPose() ? "bind " : "", pPose->GetName(), iNodeCount );
        for( INT j = 0; j < iNodeCount; ++j )
        {
            auto pPoseNode = pPose->GetNode( j );
            ExportLog::LogMsg( 5, "Pose node %d: %s", j, pPoseNode->GetName() );
        }
        if( pPose->IsBindPose() )
        {
            g_BindPoses.push_back( pPose );
        }
    }
    if( g_BindPoses.empty() )
    {
        if( g_pScene->Settings().bExportAnimations )
        {
            ExportLog::LogWarning( "No valid bind pose found; will export scene using the default pose." );
        }
        return;
    }

    size_t dwPoseCount = g_BindPoses.size();
    for( size_t i = 0; i < dwPoseCount; ++i )
    {
        FbxPose* pPose = g_BindPoses[i];
        INT iNodeCount = pPose->GetCount();
        for( INT j = 0; j < iNodeCount; ++j )
        {
            auto pNode = pPose->GetNode( j );
            auto matNode = pPose->GetMatrix( j );

            PoseMap::iterator iter = g_BindPoseMap.find( pNode );
            if( iter != g_BindPoseMap.end() )
            {
                FbxMatrix matExisting = iter->second;
                if( matExisting != matNode )
                {
                    ExportLog::LogWarning( "Node \"%s\" found in more than one bind pose, with conflicting transforms.", pNode->GetName() );
                }
            }

            g_BindPoseMap[pNode] = matNode;
        }
    }

    ExportLog::LogMsg( 3, "Created bind pose map with %Iu nodes.", g_BindPoseMap.size() );
}
// Example #2
void GraphLearner::sendTreeTransforms(KinematicGraphType graph) {
	// Nothing to broadcast for an empty kinematic graph.
	if (graph.empty())
		return;

	// Timestamp of the most recent marker observation.
	const double latestTimestamp = stampedMarker.rbegin()->first;

	PoseMap observation = getObservation(latestTimestamp);
	PoseMap prediction = getPrediction(intersectionOfStamps().back(), graph,
			observation);

	// A single static broadcaster serves all invocations of this method.
	static tf::TransformBroadcaster br;
	const PoseStamped &latestPose =
			stampedMarker[latestTimestamp].begin()->second->pose;

	// Publish every predicted marker pose as /pred/<id> relative to /camera.
	for (const auto &entry : prediction) {
		const auto childFrame = boost::str(boost::format("/pred/%1%")
				% entry.first);
		br.sendTransform(tf::StampedTransform(poseToTransform(entry.second),
				latestPose.header.stamp, "/camera", childFrame));
	}
}
// Example #3
void KinematicGraph::evaluateSingleStamp(double timestamp,
				double &avgPositionError,
				double &avgOrientationError,
				double &loglikelihood,KinematicParams &params,KinematicData &data) {
	// Compares the observed marker poses at 'timestamp' against the poses
	// predicted by this kinematic graph. Outputs (via the reference params):
	// RMS position error, RMS orientation error, and the total data
	// log-likelihood under independent 2-D Gaussian error models.

	if (size()==0) return;
	// compare observation versus prediction
	PoseMap observationMarkers = data.getObservation(timestamp);
	PoseMap predictedMarkers = getPrediction(timestamp,observationMarkers,observationMarkers,params,data);

	// store predicted poses (for later inspection/visualization)
	for(PoseMap::iterator i=predictedMarkers.begin();i!=predictedMarkers.end();i++) {
		PoseStampedIdMsg p = *data.stampedMarker[timestamp][i->first];
		p.pose.pose = i->second;
		PoseStampedIdMsgConstPtr pptr = boost::make_shared<PoseStampedIdMsg>(p);
		data.stampedMarkerProjected[timestamp][i->first] = pptr;
	}

	// accumulate data loglikelihood and squared errors
	double sum_loglh = 0;
	double sum_pos2_err = 0;
	double sum_orient2_err = 0;

	double sigma2_position = SQR(params.sigmax_position);
	double sigma2_orientation = SQR(params.sigmax_orientation);

	for(PoseMap::iterator it = observationMarkers.begin();
			it != observationMarkers.end(); it++ ) {
		tf::Transform p1 = poseToTransform(observationMarkers[it->first]);
		tf::Transform p2 = poseToTransform(predictedMarkers[it->first]);

		// squared position / orientation error between observation and prediction
		tf::Transform diff = p1.inverseTimes(p2);
		double err_position2 = diff.getOrigin().length2();
		if(std::isnan(err_position2)) err_position2 = 0;
		double err_orientation2 = SQR(diff.getRotation().getAngle());
		if(std::isnan(err_orientation2)) err_orientation2 = 0;

		sum_pos2_err += err_position2;
		sum_orient2_err += err_orientation2;

		// 2-dim multivariate normal distribution.
		// Renamed from 'loglikelihood' to stop shadowing the output
		// parameter of the same name.
		double marker_loglh =
				- log(2*M_PI * params.sigma_position*params.sigma_orientation)
				- 0.5*(
						( err_position2 / (sigma2_position) ) +
						( err_orientation2 / (sigma2_orientation) ) );
		sum_loglh += marker_loglh;
	}

	// Guard against division by zero when no markers were observed at this
	// timestamp (previously this produced NaN averages).
	size_t n = observationMarkers.size();
	if (n == 0) {
		avgPositionError = 0;
		avgOrientationError = 0;
		loglikelihood = 0;
		return;
	}
	avgPositionError = sqrt(sum_pos2_err/n);
	avgOrientationError = sqrt(sum_orient2_err/n);
	loglikelihood = sum_loglh;
}
// Example #4
void parseSDFLink(RigidBodySystem& sys, string model_name, XMLElement* node,
                  PoseMap& pose_map) {
  // Parses an SDF <link> element: records the link's model-frame pose (if a
  // <pose> child is present) into pose_map, then instantiates any <sensor>
  // children. Currently only "ray" sensors are handled; each one becomes a
  // RigidBodyFrame plus a RigidBodyDepthSensor attached to the link.
  const char* attr = node->Attribute("name");
  if (!attr) throw runtime_error("ERROR: link tag is missing name attribute");
  string link_name(attr);

  auto body = sys.getRigidBodyTree()->findLink(link_name, model_name);
  // Fail loudly instead of dereferencing a null body below.
  if (!body)
    throw runtime_error("ERROR: could not find link " + link_name +
                        " in model " + model_name);

  Isometry3d transform_link_to_model = Isometry3d::Identity();
  XMLElement* pose = node->FirstChildElement("pose");
  if (pose) {
    poseValueToTransform(pose, pose_map, transform_link_to_model);
    pose_map.insert(
        std::pair<string, Isometry3d>(body->linkname, transform_link_to_model));
  }

  for (XMLElement* elnode = node->FirstChildElement("sensor"); elnode;
       elnode = elnode->NextSiblingElement("sensor")) {
    attr = elnode->Attribute("name");
    if (!attr)
      throw runtime_error("ERROR: sensor tag is missing name attribute");
    string sensor_name(attr);

    attr = elnode->Attribute("type");
    if (!attr)
      throw runtime_error("ERROR: sensor tag is missing type attribute");
    string type(attr);

    // The sensor pose defaults to the link pose; an optional <pose> child
    // refines it. Renamed from 'pose' to avoid shadowing the link's pose.
    Isometry3d transform_sensor_to_model = transform_link_to_model;
    XMLElement* sensor_pose = elnode->FirstChildElement("pose");
    if (sensor_pose) {
      poseValueToTransform(sensor_pose, pose_map, transform_sensor_to_model,
                           transform_link_to_model);
    }

    if (type.compare("ray") == 0) {
      // The frame expresses the sensor pose relative to its parent link.
      auto frame = allocate_shared<RigidBodyFrame>(
          Eigen::aligned_allocator<RigidBodyFrame>(), sensor_name, body,
          transform_link_to_model.inverse() * transform_sensor_to_model);
      sys.getRigidBodyTree()->addFrame(frame);
      sys.addSensor(allocate_shared<RigidBodyDepthSensor>(
          Eigen::aligned_allocator<RigidBodyDepthSensor>(), sys, sensor_name,
          frame, elnode));
    }
  }
}
// Example #5
PoseMap hogman_solver(KinematicGraphType &graph, PoseIndex &poseIndex,
		PoseMap &observed, PoseMap &predictedEmpty, double sigma_position, double sigma_orientation,
		double timestamp) {
	// Builds a HOG-Man/CholOptimizer3D pose graph from predicted and observed
	// marker poses (observed vertex ids are offset by n), optimizes it, and
	// returns the corrected predicted marker poses.
	bool visualize = false;
	bool verbose = false;
	bool guess = false;	// was '= 0'; use a proper bool literal
	int iterations = 10;

	GraphOptimizer3D* optimizer = new CholOptimizer3D();

	optimizer->verbose() = verbose;
	optimizer->visualizeToStdout() = visualize;
	optimizer->guessOnEdges() = guess;

	optimizer->clear();

	// reference: the first observed marker anchors the graph
	tf::Transform root = poseToTransform(observed.begin()->second);

	// id offset separating observed vertices from predicted vertices
	int n=1000;

	// add predicted markers
	for (PoseMap::iterator it = predictedEmpty.begin(); it != predictedEmpty.end(); it++) {
		tf::Transform m = poseToTransform(it->second);
		add_vertex(optimizer, it->first, root.inverseTimes(m));
	}

	// add observed markers
	for (PoseMap::iterator it = observed.begin(); it != observed.end(); it++) {
		tf::Transform m = poseToTransform(it->second);
		add_vertex(optimizer, it->first + n, root.inverseTimes(m));
	}

	// add model edges between predicted markers
	for (KinematicGraphType::iterator it = graph.begin(); it != graph.end(); it++) {
		int idx = poseIndex[it->first.first][it->first.second][timestamp];
		Pose& pose_pred = it->second->model.track.pose_projected[idx];

		ROS_ASSERT(!(isnan(pose_pred.position.x) || isnan(pose_pred.position.y) || isnan(pose_pred.position.z) ||
				isnan(pose_pred.orientation.x) || isnan(pose_pred.orientation.y) || isnan(pose_pred.orientation.z)|| isnan(pose_pred.orientation.w)));
		add_edge(optimizer, it->first.first, it->first.second, poseToTransform(
				pose_pred), sigma_position, sigma_orientation);
	}

	// add strong edges between observed markers (actually, the observed markers should be kept fixed)
	// Start from the second marker explicitly: the old in-body skip
	// ("if (it == observed.begin()) it++;") advanced the iterator past end()
	// and dereferenced it when only one marker was observed.
	if (!observed.empty()) {
		PoseMap::iterator it = observed.begin();
		for (++it; it != observed.end(); ++it) {
			// relative transformation between marker 0 and marker it
			tf::Transform m = poseToTransform(observed.begin()->second).inverseTimes(
					poseToTransform(it->second));
			add_edge(optimizer, observed.begin()->first + n,
					it->first + n, m, sigma_position / 100,
					sigma_orientation / 100);
		}
	}

	// add weak edges between predicted markers and observed markers (keep them in place)
	for (PoseMap::iterator it = observed.begin(); it != observed.end(); it++) {
		// identity: each predicted marker should stay near its observation
		tf::Transform m = tf::Transform::getIdentity();
		add_edge(optimizer, it->first, it->first + n, m,
				sigma_position * 100, sigma_orientation * 100);
	}

	optimizer->initialize(0);
	optimizer->optimize(iterations, false);

	// read back the optimized predicted vertices
	PoseMap predictedMarkers;
	GraphOptimizer3D::VertexIDMap &vertices = optimizer->vertices();
	for (PoseMap::iterator it = predictedEmpty.begin(); it
			!= predictedEmpty.end(); it++) {
		const PoseGraph3D::Vertex* v =
				reinterpret_cast<const PoseGraph3D::Vertex*>(vertices[it->first]);
		if (v == 0) {
			cerr << "VERTEX not found!!" << endl;
			continue;	// previously fell through and dereferenced the null pointer
		}
		Transformation3 t = v->transformation;
		Vector6 p = t.toVector();
		tf::Transform d = tf::Transform(RPY_to_MAT(p[3], p[4], p[5]), tf::Vector3(
				p[0], p[1], p[2]));
		predictedMarkers[it->first] = transformToPose(root * d);
	}

	// re-express the result relative to the optimized pose of the first
	// observed marker (the anchor may itself have moved during optimization)
	const PoseGraph3D::Vertex* v = reinterpret_cast<const PoseGraph3D::Vertex*>(vertices[observed.begin()->first + n]);
	if (v == 0) {
		cerr << "VERTEX not found!!" << endl;	// skip correction rather than crash
	} else {
		Transformation3 t = v->transformation;
		Vector6 p = t.toVector();
		tf::Transform d = tf::Transform(RPY_to_MAT(p[3], p[4], p[5]), tf::Vector3(p[0],
				p[1], p[2]));
		for (PoseMap::iterator it = predictedMarkers.begin(); it
				!= predictedMarkers.end(); it++) {
			// now correct
			it->second = transformToPose(root * d.inverseTimes(root.inverseTimes(
					poseToTransform(it->second))));
		}
	}

	delete optimizer;

	return predictedMarkers;
}