Example No. 1
void boxFilter(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud, Pose pose){		
	//Transform the cloud
	//convert the transform from our fiducial markers to Eigen types
    Eigen::Matrix<float, 3, 3> R;
    Eigen::Vector3f T;
    cv::cv2eigen(pose.getT(), T);
    cv::cv2eigen(pose.getR(), R);
    //get the inverse transform to bring the point cloud into the
    //same coordinate frame
    Eigen::Affine3f transform;
    transform = Eigen::AngleAxisf(R.transpose());
    transform *= Eigen::Translation3f(-T);
    //transform the cloud in place
    pcl::transformPointCloud(*cloud, *cloud, transform);
	
	//Define the box
	float box = 200.00;
	pcl::PassThrough<pcl::PointXYZRGB> pass_z, pass_x, pass_y;
	//Filters in x
	pass_x.setFilterFieldName("x");
	pass_x.setFilterLimits(-box, box);
	pass_x.setInputCloud(cloud);
	pass_x.filter(*cloud);
	//Filters in y
	pass_y.setFilterFieldName("y");
	pass_y.setFilterLimits(-box, box);
	pass_y.setInputCloud(cloud);
	pass_y.filter(*cloud);
	//Filters in z
	pass_z.setFilterFieldName("z");
	pass_z.setFilterLimits(0,box);
	pass_z.setInputCloud(cloud);
	pass_z.filter(*cloud);	
}
Example No. 2
	/** Apply the motion values blended across the tracks
	 *
	 * @param pose The Pose to which the blended motion is applied
	 * @author SAM (T&GG, Org.)<*****@*****.**>
	 * @date 2004/10/05 0:18:09
	 * Copyright (C) 2001,2002,2003,2004 SAM (T&GG, Org.). All rights reserved.
	 */
	void MotionMixer::apply(Pose &pose) const
	{
		for(Pose::StanceIndex::iterator i = pose.begin(); i != pose.end(); ++i) {
			float weight_sum = 0;
			bool ok = false;
			for(MotionTrackVector::const_iterator j = tracks_.begin(); j != tracks_.end(); ++j) {
				if(j->setWork(i->second, i->first.c_str())) {
					weight_sum += j->weight();
					ok = true;
				}
			}
			if(ok) {
				bool first = true;
				for(MotionTrackVector::const_iterator j = tracks_.begin(); j != tracks_.end(); ++j) {
					if(j->work().type != CoordUnion::TYPE_NONE) {
						if(first) {
							i->second = j->work().stance()*(j->weight()/weight_sum);
							first = false;
						}
						else i->second += j->work().stance()*(j->weight()/weight_sum);
					}
				}
			}
		}
	}
    void Map::correctPositions(PointMatcherService<float> & pointMatcherService) {
        std::vector<Pose> correctedPoses;
        correctedPoses.push_back(Pose::origin());

        for(int i = 1; i < anchorPoints.size(); ++i) {
            auto it = anchorPoints.begin() + i;

            Pose previousOdometryEstimate = (it - 1)->getPosition();
            Pose currentOdometryEstimate = it->getPosition();
            Transform initialGuess = currentOdometryEstimate.transFromPose(previousOdometryEstimate);

            Transform icpResult = pointMatcherService.icp(*it, *(it - 1), initialGuess);

            Transform originToPreviousPose = correctedPoses[i-1].transFromPose(Pose::origin());
            Transform tFromOriginToCurrent = icpResult * originToPreviousPose;

            Pose poseOfCurrent = Pose::origin();
            poseOfCurrent.transform(tFromOriginToCurrent);
            correctedPoses.push_back(poseOfCurrent);
        }

        for(int i = 0; i < anchorPoints.size(); ++i) {
            anchorPoints[i].setPosition(correctedPoses[i]);
        }

    }
Example No. 4
void testApp::updateModel() {
	updatePoseFromGui();
	
	Pose pose = bindPose;
	for(Pose::iterator i = pose.begin(); i != pose.end(); i++) {
		string name = i->first;
		
		float x = gui.exists(name + ".x") ? gui.get(name + ".x").getValue() : 0;
		float y = gui.exists(name + ".y") ? gui.get(name + ".y").getValue() : 0;
		float z = gui.exists(name + ".z") ? gui.get(name + ".z").getValue() : 0;
		
		aiMatrix4x4& bone = i->second;
		
		aiMatrix4x4 cur;
		ofMatrix4x4 mat;
		ofQuaternion quat(x, ofVec3f(1, 0, 0),
											y, ofVec3f(0, 1, 0),
											z, ofVec3f(0, 0, 1));
		quat.get(mat);
		cur = toAi(mat);
		
		bone *= cur;
	}
	model.setPose(pose);
}
// @param[in] isStereo		Whether scene is in stereo (widens near/far planes to fit both eyes)
void Lens::frustum(Frustumd& f, const Pose& p, double aspect) const {//, bool isStereo) const {

	Vec3d ur, uu, uf;
	p.directionVectors(ur, uu, uf);
	const Vec3d& pos = p.pos();

	double nh = heightAtDepth(near());
	double fh = heightAtDepth(far());

	double nw = nh * aspect;
	double fw = fh * aspect;

//	// This effectively creates a union between the near/far planes of the
//	// left and right eyes. The offsets are computed by using the law
//	// of similar triangles.
//	if(isStereo){
//		nw += fabs(0.5*eyeSep()*(focalLength()-near())/focalLength());
//		fw += fabs(0.5*eyeSep()*(focalLength()- far())/focalLength());
//	}

	Vec3d nc = pos + uf * near();	// center point of near plane
	Vec3d fc = pos + uf * far();	// center point of far plane

	f.ntl = nc + uu * nh - ur * nw;
	f.ntr = nc + uu * nh + ur * nw;
	f.nbl = nc - uu * nh - ur * nw;
	f.nbr = nc - uu * nh + ur * nw;

	f.ftl = fc + uu * fh - ur * fw;
	f.ftr = fc + uu * fh + ur * fw;
	f.fbl = fc - uu * fh - ur * fw;
	f.fbr = fc - uu * fh + ur * fw;

	f.computePlanes();
}
MiscData ControllerWrapper::genControls_(Pose s, int &vl, int &vr, double time, bool useTime) {
    assert(ctrlType == TRACKCTRL);
    Pose x = s;
    for(deque<pair<int,int> >::iterator it = uq.begin(); it != uq.end(); it++) {
        x.updateNoDelay(it->first, it->second, timeLC);
    }
    if (isFirstCall) {
        isFirstCall = false;
        gettimeofday(&startTime, NULL);
    }
    double elapsedS;
    if (!useTime) {
        struct timeval nowTime;
        gettimeofday(&nowTime, NULL);
        elapsedS = (nowTime.tv_sec-startTime.tv_sec)+(nowTime.tv_usec-startTime.tv_usec)/1000000.0;        
    } else {
        elapsedS = time;
    }
    // offset elapsedS by the packet delay (k packets of timeLCMs ms each)
    elapsedS += k*timeLCMs/1000.;
    MiscData m = tracker.genControls(x, vl, vr, prevVl, prevVr, elapsedS);
    prevVl = vl; prevVr = vr;
    uq.push_back(make_pair<int,int>((int)vl, (int)vr));
    uq.pop_front();
    return m;
}
Pose ControllerWrapper::getPredictedPose(Pose s) {
    Pose x = s;
    for(deque<pair<int,int> >::iterator it = uq.begin(); it != uq.end(); it++) {
        x.updateNoDelay(it->first, it->second, timeLC);
    }
    return x;
}
Example No. 8
void Joint::rotate(){
	//not sure if this is how it works...
	//local_pose.orientation = local_pose.orientation + rotate;
	//PoseEuler temp = PoseEuler(vertex3(), vertex3(0.0, 0.0, angle));
//	local_transformation = local_transformation * ( temp.getRotation()* angularVelocity);
	
	Pose currentPose = path->update();

	float x, y;
	x = currentPose.position.getx();
	y = currentPose.position.gety();
	angle = -PI/2.0 + (float) (std::atan2(y, x) - std::atan2(1.0, 0.0));


	//REALLY MEANS: angle = angularVelocity * dt + angle0;
	//angle += angularVelocity;
	//std::cout << "x:" << x << ", y:" << y << ", theta:" << angle << std::endl;
	Pose temp = Pose(vertex3(), quaternion(angle, 0.0, 0.0));
	//Pose temp = Pose(vertex3(), quaternion(0.0, angle, 0.0));
	local_transformation = temp.getRotation();

	//local_transformation = ( temp.getRotation() * angularVelocity);

	//if(!isNull()){
	//	for(unsigned int i=0; i<children.size(); i++){
	//		this->children.at(i)->child->rotate(rotate);
	//	}
	//}
}
	void capture(Graphics& gl, const Lens& cam, const Pose& pose, Drawable& draw) {
		validate();
		
		glPushAttrib(GL_ALL_ATTRIB_BITS);
		
		Vec3d pos = pose.pos();
		Vec3d ux, uy, uz; 
		pose.unitVectors(ux, uy, uz);
		mProjection = Matrix4d::perspective(90, 1, cam.near(), cam.far());
		
		glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, mFboId);
		for (int face=0; face<6; face++) {
			glDrawBuffer(GL_COLOR_ATTACHMENT0_EXT+face);
			
			gl.viewport(0, 0, resolution(), resolution());
			gl.clearColor(clearColor());
			gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
			
			switch (face) {
				case 0:
					// GL_TEXTURE_CUBE_MAP_POSITIVE_X   
					mModelView = Matrix4d::lookAt(uz, uy, -ux, pos);
					break;
				case 1:
					// GL_TEXTURE_CUBE_MAP_NEGATIVE_X   
					mModelView = Matrix4d::lookAt(-uz, uy, ux, pos);
					break;
				case 2:
					// GL_TEXTURE_CUBE_MAP_POSITIVE_Y   
					mModelView = Matrix4d::lookAt(ux, -uz, uy, pos);
					break;
				case 3:
					// GL_TEXTURE_CUBE_MAP_NEGATIVE_Y   
					mModelView = Matrix4d::lookAt(ux, uz, -uy, pos);
					break;
				case 4:
					// GL_TEXTURE_CUBE_MAP_POSITIVE_Z   
					mModelView = Matrix4d::lookAt(ux, uy, uz, pos);
					break;
				default:
					// GL_TEXTURE_CUBE_MAP_NEGATIVE_Z   
					mModelView = Matrix4d::lookAt(-ux, uy, -uz, pos);
					break;
			}
			
			// apply camera transform:
			gl.pushMatrix(gl.PROJECTION);
			gl.loadMatrix(mProjection);
			gl.pushMatrix(gl.MODELVIEW);
			gl.loadMatrix(mModelView);
			
			draw.onDraw(gl);
			
			gl.popMatrix(gl.PROJECTION);
			gl.popMatrix(gl.MODELVIEW);
		}
		glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
		
		glPopAttrib();
	}
Example No. 10
void Joint::translate(const vertex3& movement){
	//how much to move, or where to move to?
	//simply add how much it moves to its pose

	Pose temp = Pose(movement, vertex3());
	local_transformation = local_transformation * temp.translate_object();
	//local_pose.position = local_pose.position + movement;
}
Example No. 11
qreal PoseMapping::value(Pose &pose, quint32 from) const
{
	const quint32 to=map(from);
	if(to>=pose.size()) {
		return 0.0;
	}
	return pose.value(to);
}
Example No. 12
void estimateError(const Pose& P_ref, const Pose& P_test, double *dRa, double *dta)
{
    Vector3d t_test = P_test.translation().normalized();
    Vector3d t_ref = P_ref.translation().normalized();

    *dRa = (P_test * P_ref.inverse()).getAngle(); // delta rotation angle
    *dta = fabs(t_test.dot(t_ref)); // cosine of the angle between translation directions
}
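A short follow-on sketch (not part of the original example): *dta above holds the cosine of the angle between the normalized translation directions, so a caller that wants that error expressed as an angle could convert and clamp it with a hypothetical helper like this.

#include <algorithm>
#include <cmath>

// Hypothetical helper, assuming dta was filled in by estimateError() above:
// converts the stored cosine to an angle in degrees, clamping against
// floating-point values slightly outside [-1, 1].
double translationAngleDeg(double dta)
{
    const double c = std::min(1.0, std::max(-1.0, dta));
    return std::acos(c) * (180.0 / std::acos(-1.0));
}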
bool NullLocalization::run(const RobotState     & robotState,
                           const GameState      & gameState,
                           const VisionFeatures & visionFeatures,
                           Pose & pose) {
  pose.setPosition(0, 0);
  pose.setAngle(0);

  return false;
}
u_int32_t RoadSection::evaluation(const Pose& p)
{
    // a kind of elliptic distance function... TODO: improve this function!
    _position_type distance = Point(p.getX(), p.getY() *2).abs();

    if (distance > 0.3)
        return 0;

    return 0.4 - distance;
}
Example No. 15
    bool Pose::operator==(const Pose& right) const {
        // invalid poses return false for comparison, even against identical invalid poses, like NaN
        if (!valid || !right.valid) {
            return false;
        }

        // FIXME add margin of error?  Or add an additional withinEpsilon function?
        return translation == right.getTranslation() && rotation == right.getRotation() &&
            velocity == right.getVelocity() && angularVelocity == right.getAngularVelocity();
    }
Example No. 16
void videocallback(IplImage *image)
{
    static IplImage *rgba;
    bool flip_image = (image->origin?true:false);
    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }

    if (init) {
        init = false;
        cout<<"Loading calibration: "<<calibrationFilename.str();
        if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height)) {
            cout<<" [Ok]"<<endl;
        } else {
            cam.SetRes(image->width, image->height);
            cout<<" [Fail]"<<endl;
        }
        double p[16];
        cam.GetOpenglProjectionMatrix(p,image->width,image->height);
        GlutViewer::SetGlProjectionMatrix(p);
        for (int i=0; i<32; i++) {
            d[i].SetScale(marker_size);
        }
        rgba = CvTestbed::Instance().CreateImageWithProto("RGBA", image, 0, 4);
    }
    static MarkerDetector<MarkerData> marker_detector;
    marker_detector.SetMarkerSize(marker_size); // for marker ids larger than 255, set the content resolution accordingly
    //static MarkerDetector<MarkerArtoolkit> marker_detector;
    //marker_detector.SetMarkerSize(2.8, 3, 1.5);

    // Here we try to use RGBA just to make sure that also it works...
    //cvCvtColor(image, rgba, CV_RGB2RGBA);
    marker_detector.Detect(image, &cam, true, true);
    GlutViewer::DrawableClear();
    for (size_t i=0; i<marker_detector.markers->size(); i++) {
        if (i >= 32) break;
        
        Pose p = (*(marker_detector.markers))[i].pose;
        p.GetMatrixGL(d[i].gl_mat);

        int id = (*(marker_detector.markers))[i].GetId();
        double r = 1.0 - double(id+1)/32.0;
        double g = 1.0 - double(id*3%32+1)/32.0;
        double b = 1.0 - double(id*7%32+1)/32.0;
        d[i].SetColor(r, g, b);

        GlutViewer::DrawableAdd(&(d[i]));
    }

    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }
}
Example No. 17
Pose Pose::inverse() {
    Pose ret;
    ret.setRotation(_R.inverse());
    ret.setAngle(normalize_angle(ret._R.angle()));
//#ifdef _MSC_VER
//  ret._t=ret._R*(Vector2d(_t*-1.));
//#else
    ret.setTranslation(ret._R*(_t*-1.));
//#endif
    return ret;
}
Example No. 18
    Transform Pose::transFromPose(const Pose &otherPose) const {
        Eigen::Vector3f translationVector =
                mVector - otherPose.getVector();

        Eigen::Quaternionf rotationQuaternion =
                mRotation * otherPose.getRotation().conjugate();

        Transform returnValue(translationVector);

        return returnValue;
    }
Example No. 19
void Line::paint(Pose *p){
  if(visible){
    XPoint xpt[2];
    if(p){
      Pose r = Pose(*p,pose);
      r.compute_res_vertex(xpt, vertex, 2);
    }
    else pose.compute_res_vertex(xpt, vertex, 2);

    drawLine(xpt[0], xpt[1], color, (int) thickness);    
  } 
}
Example No. 20
Task::status Rotate::run(void * arg, int steps){

	GameStatePtr currGameState( new GameState );
	double lastSimTime=0;
	double currSimTime=0;
	currSimTime=video.updateGameState(currGameState);
	//current robot position
	Pose currRobotPose = (*currGameState).getRobotPos( robot->getRobotID() );
	if(this->targetRobotId != Robot::unknown)
		targetPosition = (*currGameState).getRobotPos(this->targetRobotId).getPosition();

	while( !this->stopTask && (steps--)!=0 ){

		if( lastSimTime < ( currSimTime=video.updateGameState(currGameState) ) ){
			lastSimTime = currSimTime;

			currRobotPose=currGameState->getRobotPos( robot->getRobotID() );
			bool haveBall = false;
			double angleToTarget = 0;
			double threshold = 0.017;

			if(this->predicates & Task::pass){
				bool canPass=this->evaluationModule.checkAngleToPass(targetPosition, currRobotPose, angleToTarget);
				threshold = 0.017 * currRobotPose.getPosition().distance(this->targetPosition);
				LOG_INFO(this->log,"set threshold to "<<threshold);

				if( canPass ){
					this->robot->stop();
					LOG_INFO(log,"Rotation to pass OK robot position "<<currRobotPose<<" target "<<targetPosition<<" angle to target "<<angleToTarget);
					return Task::ok;
				}
			}
			else{
				angleToTarget = calculateAngleToTarget( currRobotPose, Pose(targetPosition.x,targetPosition.y,0) );
				double threshold = 0.017; // 1 degree
				if( fabs( angleToTarget ) < threshold ){
					this->robot->stop();
					LOG_INFO(log,"Rotation OK robot position "<<currRobotPose<<" target "<<targetPosition<<" angle to target "<<angleToTarget);
					return Task::ok;
				}
			}
			double w = robot->calculateAngularVel( currRobotPose, targetPosition, currGameState->getSimTime(), haveBall );
			LOG_INFO(log,"move robot from"<<currRobotPose<<" to "<<targetPosition<<" setVel w "<<w<<" angle to target "<<angleToTarget);
			robot->setRelativeSpeed( Vector2D(0,0), w );
		}
		usleep(100);
	}

	if(this->stopTask)
		return Task::ok;

	return Task::not_completed;
}
Example No. 21
void PoseEditor::loadProject() {
    QString filename = QFileDialog::getOpenFileName(this, tr("Load File"), QDir::currentPath(), tr("rcss3dPoseProject : ppj (*.ppj);;AllFiles(*.*)"));
    if(filename.isEmpty()) {
        qDebug() << "PoseEditor:loadProject : filename is empty";
        return;
    }
    if(!filename.endsWith(".ppj")) {
        qDebug() << "PoseEditor:loadProject : file type isn't \"ppj\"";
        return;
    }

    std::ifstream ifs(filename.toStdString().c_str());
    std::string str;

    while(std::getline(ifs, str)) { // for all line in file
        std::vector<std::string> splitLine;
        boost::algorithm::split(splitLine, str, boost::is_any_of("\t"));
        /*
                for(std::vector<std::string>::iterator it = splitLine.begin(); it != splitLine.end(); it++){
                    std::cout << *it << ":";
                }
                std::cout << std::endl;
        */
        std::vector<std::string>::iterator it = splitLine.begin();
        if(*it == "pose") {
            it++;
            int i = boost::lexical_cast<int>(*it);
            it++;
            if(*it == "name") {
                it++;
                std::string nameStr = *it;
                QString nameQStr(nameStr.c_str());
                posesList->item(i)->setText(nameQStr);
            } else if(*it == "gain") {
                it++;
                double gain = boost::lexical_cast<double>(*it);
                Pose p = posesList->item(i)->data(Qt::UserRole).value<Pose>();
                p.setGain(gain);
                posesList->item(i)->setData(Qt::UserRole, QVariant::fromValue(p));
            } else if(*it == "joint") {
                it++;
                int jointNum = boost::lexical_cast<int>(*it);
                it++;
                double target = boost::lexical_cast<double>(*it);
                Pose p = posesList->item(i)->data(Qt::UserRole).value<Pose>();
                p.setTarget(jointNum, target);
                posesList->item(i)->setData(Qt::UserRole, QVariant::fromValue(p));
//                posesList->item(i)->data(Qt::UserRole).value<Pose>().setTarget(jointNum, target); // does not work
            }
        }
    }
    changePoseListRow(posesList->currentRow());
}
bool PoseOptimizationObjectiveFunction::getLocalHessian(numopt_common::SparseMatrix& hessian,
                                                        const numopt_common::Parameterization& params, bool newParams)
{
  // Numerical approach.
//  numopt_common::SparseMatrix numericalHessian(params.getLocalSize(), params.getLocalSize());
//  NonlinearObjectiveFunction::estimateLocalHessian(numericalHessian, params, 1.0e-6);
//  std::cout << "Numerical:\n" << numericalHessian << std::endl;

  // Analytical approach.
  Eigen::MatrixXd analyticalHessian(params.getLocalSize(), params.getLocalSize());
  analyticalHessian.setZero();
  const auto& poseParameterization = dynamic_cast<const PoseParameterization&>(params);
  const Pose pose = poseParameterization.getPose();
  const Eigen::Vector3d& p = pose.getPosition().vector();
  const Eigen::Matrix3d p_skew = kindr::getSkewMatrixFromVector(p);
  const RotationQuaternion Phi(pose.getRotation());

  // Default leg position.
  for (const auto& footPosition : stance_) {
    const Eigen::Vector3d& f_i = footPosition.second.vector();
    const Eigen::Matrix3d f_i_skew = kindr::getSkewMatrixFromVector(f_i);
    const Eigen::Vector3d Phi_d_i = pose.getRotation().rotate(nominalStanceInBaseFrame_.at(footPosition.first)).vector();
    const Eigen::Matrix3d Phi_d_i_skew = kindr::getSkewMatrixFromVector(Phi_d_i);
    analyticalHessian.topLeftCorner(3, 3) += Eigen::Matrix3d::Identity();
    analyticalHessian.topRightCorner(3, 3) += -Phi_d_i_skew;
    analyticalHessian.bottomLeftCorner(3, 3) += Phi_d_i_skew;
    analyticalHessian.bottomRightCorner(3, 3) += 0.5 * (p_skew * Phi_d_i_skew + Phi_d_i_skew * p_skew
                                                       -f_i_skew * Phi_d_i_skew - Phi_d_i_skew * f_i_skew);
  }

  // Center of mass.
  const Eigen::Vector3d p_bar(p.x(), p.y(), 0.0); // Projection.
  Eigen::Vector3d Phi_r_com = pose.getRotation().rotate(centerOfMassInBaseFrame_).vector();
  Phi_r_com.z() = 0.0;
  const Eigen::Matrix3d Phi_r_com_skew = kindr::getSkewMatrixFromVector(Phi_r_com);
  const grid_map::Position supportPolygonCentroid(supportRegion_.getCentroid());
  const Eigen::Vector3d r_centroid(supportPolygonCentroid.x(), supportPolygonCentroid.y(), 0.0);
  const Eigen::Matrix3d r_centroid_skew = kindr::getSkewMatrixFromVector(r_centroid);
  analyticalHessian.topLeftCorner(3, 3) += comWeight_ * Eigen::Vector3d(1.0, 1.0, 0.0).asDiagonal();
  analyticalHessian.topRightCorner(3, 3) += -comWeight_ * Phi_r_com_skew;
  analyticalHessian.bottomLeftCorner(3, 3) += comWeight_ * Phi_r_com_skew;
  analyticalHessian.bottomRightCorner(3, 3) += 0.5 * comWeight_ * (p_skew * Phi_r_com_skew + Phi_r_com_skew * p_skew
                                                                  -r_centroid_skew * Phi_r_com_skew - Phi_r_com_skew * r_centroid_skew);

  // Overall factor of 2.0 (not a weight).
  analyticalHessian = 2.0 * analyticalHessian;
//  std::cout << "Analytical:\n" << analyticalHessian << std::endl << std::endl;

  // Return solution.
//  hessian = numericalHessian;
  hessian = analyticalHessian.sparseView(1e-10);
  return true;
}
void videocallback(IplImage *image)
{
    bool flip_image = (image->origin?true:false);
    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }

    if (init) {
        init = false;
        cout << "Loading calibration: " << calibrationFilename.str();
        if (fernEstimator.setCalibration(calibrationFilename.str(), image->width, image->height)) {
            cout << " [Ok]" << endl;
        } else {
            fernEstimator.setResolution(image->width, image->height);
            cout << " [Fail]" << endl;
        }
        double p[16];
        fernEstimator.camera().GetOpenglProjectionMatrix(p, image->width, image->height);
        GlutViewer::SetGlProjectionMatrix(p);
        d.SetScale(10);
        gray = cv::Mat(image);
    }

    if (image->nChannels == 3) {
        cv::Mat img = cvarrToMat(image);
        cv::cvtColor(img, gray, CV_RGB2GRAY);
    }
    else {
        gray = image;
    }

    vector<CvPoint2D64f> ipts;
    vector<CvPoint3D64f> mpts;

    fernDetector.findFeatures(gray, true);
    fernDetector.imagePoints(ipts);
    fernDetector.modelPoints(mpts, true);
    double test = fernDetector.inlierRatio();
    if (test > 0.15 && mpts.size() > 4) {
        fernEstimator.calculateFromPointCorrespondences(mpts, ipts);
    }

    GlutViewer::DrawableClear();
    Pose pose = fernEstimator.pose();
    pose.GetMatrixGL(d.gl_mat);
    GlutViewer::DrawableAdd(&d);

    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }
}
int main(int argc, char const *argv[])
{
	// What's the focal length / format of the depth images?

	Mat sizeable = imread("xyz_sanitycheck/c1.png", CV_LOAD_IMAGE_GRAYSCALE);
	Tracking tracker(525.0, sizeable.cols, sizeable.rows);
	namedWindow( "Display window", WINDOW_NORMAL );// Create a window for display.

	Matx31d _t(1.1406, 0.6213, 1.4205);
	Matx33d _R( 0.6869071621348888,   0.02406923471782091, -0.7263464893202772,
			   -0.7266645567199284,   0.03763380653472703, -0.6859608725085831,
				0.010824630008607339, 0.9990016859574387, 	0.04334119102391609);

	//Matx33d _R(1, 0, 0, 0, 1, 0, 0, 0, 1);

	Pose initial(_t, _R);
	int frame_count = 19;
	tic();
	for (int frame = 1; frame < frame_count; frame++)
	{
		// read rgb
		char color_files[30]; // use string type
		sprintf(color_files, "xyz_sanitycheck/c%d.png", frame);
		Mat current_grayscale = imread(color_files, CV_LOAD_IMAGE_GRAYSCALE);
		current_grayscale.convertTo(current_grayscale, CV_64FC1);

		// read next rgb
		sprintf(color_files, "xyz_sanitycheck/c%d.png", frame + 1);
		Mat new_grayscale = imread(color_files, CV_LOAD_IMAGE_GRAYSCALE);
		new_grayscale.convertTo(new_grayscale, CV_64FC1);

		// read [inverse] depth
		sprintf(color_files, "xyz_sanitycheck/d%d.png", frame);
		Mat inverse_depth_and_variance = imread(color_files, CV_LOAD_IMAGE_GRAYSCALE | CV_LOAD_IMAGE_ANYDEPTH);
		inverse_depth_and_variance.convertTo(inverse_depth_and_variance, CV_64FC1);
		inverse_depth_and_variance = 5000.0 / inverse_depth_and_variance; // so 1.0 / 0 returns 0 in the mat? *shudder*

		Pose step = tracker.track(current_grayscale, inverse_depth_and_variance, new_grayscale);
		initial = step.compose_with(initial);
		initial.print();
		printf("%s%f\n", "got here - ", toc()/1000.0);
	}
	

	// initialize

	tic();

	

	return 0;
}	
Status KeyframeVisualOdometry::EstimatePose(ViewIndex view_index) {
  View::Ptr view = View::GetView(view_index);
  CHECK_NOTNULL(view.get());

  // Use PnP RANSAC to find the pose of this camera using the 2D<-->3D matches.
  PnPRansacProblem pnp_problem;
  pnp_problem.SetIntrinsics(intrinsics_);

  std::vector<Observation::Ptr> matched_observations;
  view->MatchedObservations(&matched_observations);
  printf("(4) at the top, had %lu matches\n", matched_observations.size());

  // Make sure the observations are only of triangulated landmarks.
  std::vector<Observation::Ptr> valid_observations;
  for (const auto& observation : matched_observations) {
    CHECK_NOTNULL(observation.get());
    if (observation->GetLandmark()->IsEstimated())
      valid_observations.push_back(observation);
  }
  pnp_problem.SetData(valid_observations);
  printf("valid observations: %lu\n", valid_observations.size());

  Ransac<Observation::Ptr, PnPRansacModel> pnp_solver;
  pnp_solver.SetOptions(options_.pnp_ransac_options);
  pnp_solver.Run(pnp_problem);

  // If RANSAC fails, set that the next frame should be a keyframe and return.
  if (!pnp_problem.SolutionFound()) {
    initialize_new_keyframe_ = true;
    return Status::Cancelled(
        "Failed to compute new camera pose with PnP RANSAC.");
  }

  // Get the camera pose from RANSAC.
  const CameraExtrinsics& computed_extrinsics =
      pnp_problem.Model().camera_.Extrinsics();
  view->MutableCamera().SetExtrinsics(computed_extrinsics);

  // If the computed relative translation from the last keyframe is large
  // enough, it's time to initialize a new keyframe on the next iteration.
  View::Ptr keyframe = View::GetView(current_keyframe_);
  CHECK_NOTNULL(keyframe.get());
  const Pose T1 = keyframe->Camera().Extrinsics().WorldToCamera();
  const Pose T2 = computed_extrinsics.WorldToCamera();
  const Pose delta = T1.Delta(T2);
  if (delta.Translation().norm() > options_.min_keyframe_translation ||
      delta.AxisAngle().norm() > options_.min_keyframe_rotation) {
    initialize_new_keyframe_ = true;
  }

  return Status::Ok();
}
Example No. 26
inline Pose Pose::operator*(const Pose& other) const
{
	// Convention: Pose * PoseOther
	//              x' = (Pose * PoseOther).map(x)
	//              x' = Pose.map(PoseOther.map(x))
	//              x' = R * (Rother * x + Tother) + T
	//              x' = R * Rother * x + (R * Tother + T)

	Quaterniond orientation = (m_orientation * other.orientation());
	Vector3d position = m_orientation * other.position() + m_position;

	return Pose(orientation, position);
}
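A minimal usage sketch of the convention documented in the comments above. It is not taken from the original listing: it assumes Eigen's Quaterniond/Vector3d (as used by this example), the Pose(orientation, position) constructor shown here, and a Pose::map(x) = R * x + T member as described in the comment.

#include <Eigen/Geometry>
using namespace Eigen;

// Sketch only: checks that (a * b).map(x) matches a.map(b.map(x)),
// which is exactly the convention stated in the comment above.
void compositionConventionCheck()
{
	Pose a(Quaterniond(AngleAxisd(0.5, Vector3d::UnitZ())), Vector3d(1.0, 0.0, 0.0));
	Pose b(Quaterniond::Identity(), Vector3d(0.0, 2.0, 0.0));

	Vector3d x(0.0, 0.0, 3.0);
	Vector3d composed = (a * b).map(x);   // compose first, then map once
	Vector3d nested   = a.map(b.map(x));  // map through each pose in turn
	// composed and nested should agree up to floating-point error
}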
Example No. 27
int main(int argc,char **argv)
{
  Target<Pose> target(TargetUid(1,1));
  Pose pose;

  for (int i = 0; i < 10; ++i)
  {
    pose = target.getUnseen();
    cout << static_cast<Position>(pose) << '\t' << pose.theta() << endl;
  }

  return 0;
}
Example No. 28
void InitIncrementalPose::initPose(const Matrix33& R, const Vector31& T, Pose& pose) const {
	pose.sett(RealPoint3D<double>(T.GetX(), T.GetY(), T.GetZ()));

	Matrix<double>& pR = pose.R();
	pR[0][0] = R.Get00(); pR[1][0] = R.Get01(); pR[2][0] = R.Get02();
	pR[0][1] = R.Get10(); pR[1][1] = R.Get11(); pR[2][1] = R.Get12();
	pR[0][2] = R.Get20(); pR[1][2] = R.Get21(); pR[2][2] = R.Get22();

	pose.calcEulerAngles();

	pose.setorientationSynchronWithAngles(true);
	pose.setderivationsSynchronWithAngles(false);
}
Example No. 29
void Model::getPose(Pose& pose)
{
	ASSERT(pose.getCount() == getBoneCount());
	Vec3* pos = pose.getPositions();
	Quat* rot = pose.getRotations();
	Matrix mtx;
	for (int i = 0, c = getBoneCount(); i < c; ++i)
	{
		mtx = m_bones[i].inv_bind_matrix;
		mtx.fastInverse();
		mtx.getTranslation(pos[i]);
		mtx.getRotation(rot[i]);
	}
}
Example No. 30
int main (int argc,char* argv[]){
	if (argc != 2 && argc != 3){
		printf("usage:\n %s /path/to/recoding/filename.oni\n",argv[0]);
		return 0;
	}
	Xn_sensor sensor(WIDTH,HEIGHT);
	sensor.play(argv[1],false);
	cvNamedWindow( "Model Extractor Viewer", 1 );
	IplImage* rgb_image = cvCreateImageHeader(cvSize(WIDTH,HEIGHT), 8, 3);
	IplImage* test = cvCreateImageHeader(cvSize(WIDTH,HEIGHT), 8, 3);
	IplImage* gray = cvCreateImage(cvSize(WIDTH,HEIGHT), 8, 1);
	Mat img;
	pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGB>);
	pcl::PointCloud<pcl::PointXYZRGB>::Ptr model (new pcl::PointCloud<pcl::PointXYZRGB>);
	
	//pcl::visualization::CloudViewer viewer("Model Extractor Viewer");

	
	//Read Fiducial from file
	Fiducial fiducial("fiducial.yml");
	Pose pose;
	  while(/*!viewer.wasStopped() && */!sensor.endPlaying()){
		//Get the frame 
		sensor.update();
		sensor.getPCL(cloud);
		cvSetData(rgb_image,sensor.rgb,rgb_image->widthStep);
		//Estimate Camera Pose from fiducial
		cvCvtColor(rgb_image,gray,CV_BGR2GRAY);
		if (fiducial.find(gray,true)){
			pose.estimate(gray,fiducial);
			//fiducial.draw(rgb_image);
		}
		if (pose.isFound()){
			printf("Rotation");
			printMat<double>(pose.getR());
			printf("Translation");
			printMat<double>(pose.getT());
			//Segment volume around the fiducial
			boxFilter(cloud,pose);
			//Create 3D model
			buildModel(cloud,model);
		}
		//viewer.showCloud (model);
		
	}
	pcl::io::savePCDFileBinary ("model.pcd", *model);
	sensor.shutdown();
	return 0;
}