Example #1
bool MotionEstimator::solveRelativeRT(const vector<pair<Vector3d, Vector3d>> &corres, Matrix3d &Rotation, Vector3d &Translation)
{
    if (corres.size() >= 15)
    {
        vector<cv::Point2f> ll, rr;
        for (int i = 0; i < int(corres.size()); i++)
        {
            ll.push_back(cv::Point2f(corres[i].first(0), corres[i].first(1)));
            rr.push_back(cv::Point2f(corres[i].second(0), corres[i].second(1)));
        }
        cv::Mat mask;
        // With normalized (K = identity) image coordinates the fundamental matrix equals the
        // essential matrix, so findFundamentalMat is used here to estimate E directly; the
        // RANSAC threshold 0.3 / 460 is roughly 0.3 px at a nominal focal length of 460.
        cv::Mat E = cv::findFundamentalMat(ll, rr, cv::FM_RANSAC, 0.3 / 460, 0.99, mask);
        cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << 1, 0, 0, 0, 1, 0, 0, 0, 1);
        cv::Mat rot, trans;
        int inlier_cnt = cv::recoverPose(E, ll, rr, cameraMatrix, rot, trans, mask);
        //cout << "inlier_cnt " << inlier_cnt << endl;

        Eigen::Matrix3d R;
        Eigen::Vector3d T;
        for (int i = 0; i < 3; i++)
        {   
            T(i) = trans.at<double>(i, 0);
            for (int j = 0; j < 3; j++)
                R(i, j) = rot.at<double>(i, j);
        }

        // recoverPose returns (R, T) as the change of basis from the first ("ll") frame to the
        // second ("rr") frame; invert it to get the pose of the second frame in the first.
        Rotation = R.transpose();
        Translation = -R.transpose() * T;
        if(inlier_cnt > 12)
            return true;
        else
            return false;
    }
    return false;
}
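A minimal sketch of the cv::Mat-to-Eigen copy at the end of solveRelativeRT: cv::cv2eigen from <opencv2/core/eigen.hpp> replaces the manual element loop, assuming the same 3x3 / 3x1 CV_64F layout that cv::recoverPose produces.

#include <Eigen/Dense>
#include <opencv2/core/eigen.hpp>

void cvPoseToEigen(const cv::Mat& rot, const cv::Mat& trans,
                   Eigen::Matrix3d& R, Eigen::Vector3d& T)
{
    cv::cv2eigen(rot, R);    // 3x3 CV_64F -> Eigen::Matrix3d
    cv::cv2eigen(trans, T);  // 3x1 CV_64F -> Eigen::Vector3d
}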
Example #2
double GreenStrain_LIMSolver3D::computeFunction(const Eigen::Matrix<double,Eigen::Dynamic,1>& x)
{
  // green strain energy
  double shape = 0;
  Eigen::Matrix3d I = Eigen::Matrix3d::Identity();
  for(int t=0;t<mesh->Tetrahedra->rows();t++)
  {
    Eigen::Vector3d A(x[TetrahedronVertexIdx.coeff(0,t)],x[TetrahedronVertexIdx.coeff(1,t)],x[TetrahedronVertexIdx.coeff(2,t)]);
    Eigen::Vector3d B(x[TetrahedronVertexIdx.coeff(3,t)],x[TetrahedronVertexIdx.coeff(4,t)],x[TetrahedronVertexIdx.coeff(5,t)]);
    Eigen::Vector3d C(x[TetrahedronVertexIdx.coeff(6,t)],x[TetrahedronVertexIdx.coeff(7,t)],x[TetrahedronVertexIdx.coeff(8,t)]);
    Eigen::Vector3d D(x[TetrahedronVertexIdx.coeff(9,t)],x[TetrahedronVertexIdx.coeff(10,t)],x[TetrahedronVertexIdx.coeff(11,t)]);

    Eigen::Matrix<double,3,4> V;
    V.col(0) = A;
    V.col(1) = B;
    V.col(2) = C;
    V.col(3) = D;

    Eigen::Matrix3d F = V*Ms.block<4,3>(0,3*t);
    Eigen::Matrix3d E = (F.transpose()*F - I);
    shape += E.squaredNorm()*Divider;
  }

  return shape;
}
Example #3
bool CGEOM::generate_scene_trans
( const SceneGeneratorOptions& sc_opts,
  Eigen::MatrixXd& mP3D,
  Eigen::MatrixXd& mMeasT,
  Eigen::MatrixXd& mMeasN,
  Eigen::Matrix3d& mR,
  Eigen::Vector3d& mt
) {
    if( !generate_scene( sc_opts,
                         mP3D, mMeasT, mMeasN
                       ) ) {
        return false;
    }

    // Create random transform
    mt = mP3D.rowwise().mean();
    const double drotx = rand_range_d( -45., 45. )*3.14159/180.;
    const double droty = rand_range_d( -45., 45. )*3.14159/180.;
    const double drotz = rand_range_d( -45., 45. )*3.14159/180.;
#if 1
    mR =
        ( CEIGEN::skew_rot<Eigen::Matrix3d,double>( drotx, 0., 0. ) +
          CEIGEN::skew_rot<Eigen::Matrix3d,double>( 0., droty , 0.) +
          CEIGEN::skew_rot<Eigen::Matrix3d,double>( 0., 0., drotz ) ).exp();
#else
    mR = Eigen::Matrix3d::Identity();
#endif

    mP3D.colwise() -= mt;
    mP3D = mR.transpose() * mP3D;

    return true;
}
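A minimal sketch of an equivalent way to build mR, assuming the three skew_rot terms above sum to the skew-symmetric matrix of w = (drotx, droty, drotz): the matrix exponential of a skew matrix is the axis-angle (Rodrigues) rotation with axis w/|w| and angle |w|, which Eigen::AngleAxisd constructs directly without the unsupported MatrixFunctions module.

#include <Eigen/Dense>
#include <Eigen/Geometry>

Eigen::Matrix3d rotationFromSkewExp(double drotx, double droty, double drotz)
{
    const Eigen::Vector3d w(drotx, droty, drotz);
    const double angle = w.norm();
    if (angle < 1e-12)
        return Eigen::Matrix3d::Identity();        // exp(0) = I
    return Eigen::AngleAxisd(angle, w / angle).toRotationMatrix();
}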
Example #4
  // Assume t = double[3], q = double[4]
  void EstimateTfSVD(double* t, double* q)
  {
    // Assemble the correlation matrix H = target * reference'
    Eigen::Matrix3d H = (cloud_tgt_demean * cloud_ref_demean.transpose ()).topLeftCorner<3, 3>();

    // Compute the Singular Value Decomposition
    Eigen::JacobiSVD<Eigen::Matrix3d> svd (H, Eigen::ComputeFullU | Eigen::ComputeFullV);
    Eigen::Matrix3d u = svd.matrixU ();
    Eigen::Matrix3d v = svd.matrixV ();

    // Compute R = V * U'
    if (u.determinant () * v.determinant () < 0)
      {
	for (int i = 0; i < 3; ++i)
	  v (i, 2) *= -1;
      }

    //    std::cout<< "centroid_src: "<<centroid_src(0) <<" "<< centroid_src(1) <<" "<< centroid_src(2) << " "<< centroid_src(3)<<std::endl;
    //    std::cout<< "centroid_tgt: "<<centroid_tgt(0) <<" "<< centroid_tgt(1) <<" "<< centroid_tgt(2) << " "<< centroid_tgt(3)<<std::endl;
    
    Eigen::Matrix3d R = v * u.transpose ();

    const Eigen::Vector3d Rc (R * centroid_tgt.head<3> ());
    Eigen::Vector3d T = centroid_ref.head<3> () - Rc;

    // Make sure these memory locations are valid
    assert(t != NULL && q!=NULL);
    Eigen::Quaterniond Q(R);
    t[0] = T(0);  t[1] = T(1);  t[2] = T(2);
    q[0] = Q.w(); q[1] = Q.x(); q[2] = Q.y(); q[3] = Q.z();
  }
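A self-contained sketch of the same SVD (Kabsch) alignment on two raw 3xN point sets, for comparison with EstimateTfSVD; the names src/tgt are illustrative, and the result satisfies tgt ≈ R * src + t for corresponding columns.

#include <Eigen/Dense>

void kabschAlign(const Eigen::Matrix3Xd& src, const Eigen::Matrix3Xd& tgt,
                 Eigen::Matrix3d& R, Eigen::Vector3d& t)
{
    const Eigen::Vector3d c_src = src.rowwise().mean();
    const Eigen::Vector3d c_tgt = tgt.rowwise().mean();

    // Correlation of the demeaned sets: H = sum_i (src_i - c_src) * (tgt_i - c_tgt)^T
    const Eigen::Matrix3d H =
        (src.colwise() - c_src) * (tgt.colwise() - c_tgt).transpose();

    Eigen::JacobiSVD<Eigen::Matrix3d> svd(H, Eigen::ComputeFullU | Eigen::ComputeFullV);
    Eigen::Matrix3d V = svd.matrixV();
    if (svd.matrixU().determinant() * V.determinant() < 0)
        V.col(2) *= -1;                            // avoid a reflection

    R = V * svd.matrixU().transpose();
    t = c_tgt - R * c_src;
}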
Example #5
//==============================================================================
// Note: Taken from Springer Handbook, chapter 2.2.11
void Inertia::computeSpatialTensor()
{
  Eigen::Matrix3d C = math::makeSkewSymmetric(mCenterOfMass);

  // Top left
  mSpatialTensor.block<3,3>(0,0) = getMoment() + mMass*C*C.transpose();

  // Bottom left
  mSpatialTensor.block<3,3>(3,0) = mMass*C.transpose();

  // Top right
  mSpatialTensor.block<3,3>(0,3) = mMass*C;

  // Bottom right
  mSpatialTensor.block<3,3>(3,3) = mMass*Eigen::Matrix3d::Identity();
}
Example #6
Eigen::Matrix3d LinearAlgebra::transposeMatrixM(const Eigen::Matrix3d& M) {
	Eigen::Matrix3d result;

	// TODO: return the transpose of matrix M
	result = M.transpose();

	return result;
}
Example #7
//==============================================================================
Eigen::Vector3d BallJoint::getPositionDifferencesStatic(
    const Eigen::Vector3d& _q2, const Eigen::Vector3d& _q1) const
{
  const Eigen::Matrix3d R1 = convertToRotation(_q1);
  const Eigen::Matrix3d R2 = convertToRotation(_q2);

  return convertToPositions(R1.transpose() * R2);
}
Example #8
void fundamental2essential( Eigen::Matrix3d &Fundamental, Eigen::Matrix3d &kalibration,
		        Eigen::Matrix3d &Essential)
{
    Essential = kalibration.transpose()*(Fundamental*kalibration);
    // note: normalising by Essential(2,2) assumes that entry is not (close to) zero
    Essential = Essential/Essential(2,2);
    
    return;
}
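A short round-trip sketch relating the two conversions in this and the next example (single intrinsic matrix K, as in fundamental2essential): E = K^T * F * K and F = K^-T * E * K^-1, so mapping back and forth recovers F up to the overall scale introduced by the Essential(2,2) normalisation above.

#include <Eigen/Dense>

void roundTripFE(const Eigen::Matrix3d& F, const Eigen::Matrix3d& K)
{
    const Eigen::Matrix3d E    = K.transpose() * F * K;        // fundamental -> essential
    const Eigen::Matrix3d Kinv = K.inverse();
    const Eigen::Matrix3d F2   = Kinv.transpose() * E * Kinv;  // essential -> fundamental (== F)
    (void)F2;
}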
Example #9
const Eigen::Matrix3d CMiniVisionToolbox::getFundamental( const Eigen::Isometry3d& p_matTransformationFrom, const Eigen::Isometry3d& p_matTransformationTo, const Eigen::Matrix3d& p_matIntrinsic )
{
    //ds get essential matrix
    const Eigen::Matrix3d matEssential( CMiniVisionToolbox::getEssential( p_matTransformationFrom, p_matTransformationTo ) );

    //ds compute fundamental matrix: http://en.wikipedia.org/wiki/Fundamental_matrix_%28computer_vision%29
    const Eigen::Matrix3d matIntrinsicInverse( p_matIntrinsic.inverse( ) );
    return matIntrinsicInverse.transpose( )*matEssential*matIntrinsicInverse;
}
Example #10
void pose_estimation_3d3d (
    const vector<Point3f>& pts1,
    const vector<Point3f>& pts2,
    Mat& R, Mat& t
)
{
    Point3f p1, p2;     // center of mass
    int N = pts1.size();
    for ( int i=0; i<N; i++ )
    {
        p1 += pts1[i];
        p2 += pts2[i];
    }
    p1 = Point3f( Vec3f(p1) /  N);
    p2 = Point3f( Vec3f(p2) / N);
    vector<Point3f>     q1 ( N ), q2 ( N ); // remove the center
    for ( int i=0; i<N; i++ )
    {
        q1[i] = pts1[i] - p1;
        q2[i] = pts2[i] - p2;
    }

    // compute q1*q2^T
    Eigen::Matrix3d W = Eigen::Matrix3d::Zero();
    for ( int i=0; i<N; i++ )
    {
        W += Eigen::Vector3d ( q1[i].x, q1[i].y, q1[i].z ) * Eigen::Vector3d ( q2[i].x, q2[i].y, q2[i].z ).transpose();
    }
    cout<<"W="<<W<<endl;

    // SVD on W
    Eigen::JacobiSVD<Eigen::Matrix3d> svd ( W, Eigen::ComputeFullU|Eigen::ComputeFullV );
    Eigen::Matrix3d U = svd.matrixU();
    Eigen::Matrix3d V = svd.matrixV();
    
    if (U.determinant() * V.determinant() < 0)
	{
        for (int x = 0; x < 3; ++x)
        {
            U(x, 2) *= -1;
        }
	}
    
    cout<<"U="<<U<<endl;
    cout<<"V="<<V<<endl;

    Eigen::Matrix3d R_ = U* ( V.transpose() );
    Eigen::Vector3d t_ = Eigen::Vector3d ( p1.x, p1.y, p1.z ) - R_ * Eigen::Vector3d ( p2.x, p2.y, p2.z );

    // convert to cv::Mat
    R = ( Mat_<double> ( 3,3 ) <<
          R_ ( 0,0 ), R_ ( 0,1 ), R_ ( 0,2 ),
          R_ ( 1,0 ), R_ ( 1,1 ), R_ ( 1,2 ),
          R_ ( 2,0 ), R_ ( 2,1 ), R_ ( 2,2 )
        );
    t = ( Mat_<double> ( 3,1 ) << t_ ( 0,0 ), t_ ( 1,0 ), t_ ( 2,0 ) );
}
Example #11
void BoundingBox::computeOrientedBox(std::vector<Vertex>& vertices)
{
    type = "Oriented";
    orientedPoints.clear();
    
    // compute mean
    Eigen::Vector3d center;
    center.setZero();
    for (VertexCIter v = vertices.begin(); v != vertices.end(); v++) {
        center += v->position;
    }
    center /= (double)vertices.size();
    
    // adjust for mean and compute covariance
    Eigen::Matrix3d covariance;
    covariance.setZero();
    for (VertexIter v = vertices.begin(); v != vertices.end(); v++) {
        Eigen::Vector3d pAdg = v->position - center;
        covariance += pAdg * pAdg.transpose();
    }
    covariance /= (double)vertices.size();

    // compute eigenvectors for the covariance matrix
    Eigen::EigenSolver<Eigen::Matrix3d> solver(covariance);
    Eigen::Matrix3d eigenVectors = solver.eigenvectors().real();

    // project min and max points on each principal axis
    double min1 = INFINITY, max1 = -INFINITY;
    double min2 = INFINITY, max2 = -INFINITY;
    double min3 = INFINITY, max3 = -INFINITY;
    double d = 0.0;
    // eigenvectors are stored as columns; transpose in place so that row(i) below is the
    // i-th principal axis (a bare transpose() call does not modify the matrix)
    eigenVectors.transposeInPlace();
    for (VertexIter v = vertices.begin(); v != vertices.end(); v++) {
        d = eigenVectors.row(0).dot(v->position);
        if (min1 > d) min1 = d;
        if (max1 < d) max1 = d;
        
        d = eigenVectors.row(1).dot(v->position);
        if (min2 > d) min2 = d;
        if (max2 < d) max2 = d;
        
        d = eigenVectors.row(2).dot(v->position);
        if (min3 > d) min3 = d;
        if (max3 < d) max3 = d;
    }
    
    // add points to vector
    orientedPoints.push_back(eigenVectors.row(0) * min1);
    orientedPoints.push_back(eigenVectors.row(0) * max1);
    orientedPoints.push_back(eigenVectors.row(1) * min2);
    orientedPoints.push_back(eigenVectors.row(1) * max2);
    orientedPoints.push_back(eigenVectors.row(2) * min3);
    orientedPoints.push_back(eigenVectors.row(2) * max3);
}
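Since the covariance matrix is symmetric, a minimal alternative sketch for the eigen-decomposition step above uses SelfAdjointEigenSolver: its eigenvalues are real and sorted ascending and its eigenvectors come out orthonormal, so no .real() extraction is needed and the principal axes are simply the columns.

#include <Eigen/Dense>

Eigen::Matrix3d principalAxes(const Eigen::Matrix3d& covariance)
{
    Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> solver(covariance);
    Eigen::Matrix3d axes = solver.eigenvectors();  // columns are unit eigenvectors
    axes.transposeInPlace();                       // rows are the principal axes
    return axes;
}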
Example #12
ELASStereo::ELASStereo(calibu::CameraRig& rig, const unsigned int width,
                       const unsigned int height)
  : m_dDisparity(width, height), m_dDepth(width, height) {
  if (rig.cameras.size() != 2) {
    std::cerr << "Two camera models are required to run this program!"
              << std::endl;
    exit(1);
  }

  m_width = width;
  m_height = height;

  Eigen::Matrix3f CamModel0 = rig.cameras[0].camera.K().cast<float>();
  Eigen::Matrix3f CamModel1 = rig.cameras[1].camera.K().cast<float>();

  roo::ImageIntrinsics camMod[] = {
    {CamModel0(0, 0), CamModel0(1, 1), CamModel0(0, 2), CamModel0(1, 2)},
    {CamModel1(0, 0), CamModel1(1, 1), CamModel1(0, 2), CamModel1(1, 2)}};

  m_Kl = camMod[0][0].Matrix();

  // print selected camera model
  std::cout << "Camera Model used: " << std::endl << camMod[0][0].Matrix()
      << std::endl;

  Eigen::Matrix3d RDFvision;
  RDFvision << 1, 0, 0, 0, 1, 0, 0, 0, 1;
  Eigen::Matrix3d RDFrobot;
  RDFrobot << 0, 1, 0, 0, 0, 1, 1, 0, 0;
  Eigen::Matrix4d T_vis_ro = Eigen::Matrix4d::Identity();
  T_vis_ro.block<3, 3>(0, 0) = RDFvision.transpose() * RDFrobot;
  Eigen::Matrix4d T_ro_vis = Eigen::Matrix4d::Identity();
  T_ro_vis.block<3, 3>(0, 0) = RDFrobot.transpose() * RDFvision;

  const Sophus::SE3d T_rl =
      T_rlFromCamModelRDF(rig.cameras[0], rig.cameras[1], RDFvision);
  m_baseline = T_rl.translation().norm();

  std::cout << "Baseline is: " << m_baseline << std::endl;
}
Example #13
void BoundingBox::computeOrientedBox(std::vector<Eigen::Vector3d>& positions)
{
    // compute mean
    Eigen::Vector3d cm;
    cm.setZero();
    for (size_t i = 0; i < positions.size(); i++) {
        cm += positions[i];
    }
    cm /= (double)positions.size();
    
    // adjust for mean and compute covariance matrix
    Eigen::Matrix3d covariance;
    covariance.setZero();
    for (size_t i = 0; i < positions.size(); i++) {
        Eigen::Vector3d pAdg = positions[i] - cm;
        covariance += pAdg * pAdg.transpose();
    }
    covariance /= (double)positions.size();
    
    // compute eigenvectors for covariance matrix
    Eigen::EigenSolver<Eigen::Matrix3d> solver(covariance);
    Eigen::Matrix3d eigenVectors = solver.eigenvectors().real();
    
    // set axes: eigenvectors are stored as columns, so transpose in place before reading
    // them back as rows (a bare transpose() call does not modify the matrix)
    eigenVectors.transposeInPlace();
    xAxis = eigenVectors.row(0);
    yAxis = eigenVectors.row(1);
    zAxis = eigenVectors.row(2);
    
    // project min and max points on each principal axis
    double min1 = INF, max1 = -INF;
    double min2 = INF, max2 = -INF;
    double min3 = INF, max3 = -INF;
    double d = 0.0;
    for (size_t i = 0; i < positions.size(); i++) {
        d = xAxis.dot(positions[i]);
        if (min1 > d) min1 = d;
        if (max1 < d) max1 = d;
        
        d = yAxis.dot(positions[i]);
        if (min2 > d) min2 = d;
        if (max2 < d) max2 = d;
        
        d = zAxis.dot(positions[i]);
        if (min3 > d) min3 = d;
        if (max3 < d) max3 = d;
    }
    
    // set center and halflengths
    center = (xAxis*(min1 + max1) + yAxis*(min2 + max2) + zAxis*(min3 + max3)) /2;
    halfLx = (max1 - min1)/2; halfLy = (max2 - min2)/2; halfLz = (max3 - min3)/2;
}
Example #14
void poseJacobian(const Eigen::Matrix3d& rotation, Eigen::Matrix<double, 6, 6>& jac, const double rot_angle_threshold)
{
	Eigen::Matrix3d i3 = Eigen::Matrix3d::Identity();

	// convert rotation matrix into axis-angle representation
	double rot_angle;
	Eigen::Vector3d rot_axis;
	getAngleAxis(rotation.transpose(), rot_angle, rot_axis);

	// create the rotation jacobian
	Eigen::Matrix3d L_theta_u;

	double sinc_part;
	sinc_part = sva::sinc(rot_angle)/std::pow(sva::sinc(rot_angle/2.), 2);

	Eigen::Matrix3d axis_antisym;
	getSkewSym(rot_axis, axis_antisym);

	L_theta_u = i3 - rot_angle*0.5*axis_antisym + (1-(sinc_part))*axis_antisym*axis_antisym;
	jac << L_theta_u, Eigen::Matrix3d::Zero(),
				 Eigen::Matrix3d::Zero(), rotation.transpose();
}
Example #15
IMPALGEBRA_BEGIN_NAMESPACE

Eigen::Matrix3d get_covariance(const Gaussian3D &g) {
  Transformation3D trans = g.get_reference_frame().get_transformation_to();
  Vector3D center = trans.get_translation();
  Vector4D iq = trans.get_rotation().get_quaternion();
  Eigen::Quaterniond q(iq[0], iq[1], iq[2], iq[3]);
  Eigen::Matrix3d rot = q.toRotationMatrix();
  Vector3D variances = g.get_variances();
  Eigen::Matrix3d rad = Eigen::Vector3d(variances[0], variances[1],
                                                variances[2]).asDiagonal();
  Eigen::Matrix3d covar = rot * (rad * rot.transpose());
  return covar;
}
Example #16
Eigen::Matrix3d Bd970::getPositionUncertainty(void)
{
    Eigen::Matrix3d position_uncertainty;

    Eigen::Vector3d eigenvalues; // eigen values are the square of the std deviation
    eigenvalues << m_current_nmea.data_gst.semi_major_axis_sigma_error *
                m_current_nmea.data_gst.semi_major_axis_sigma_error,
                m_current_nmea.data_gst.semi_minor_axis_sigma_error *
                m_current_nmea.data_gst.semi_minor_axis_sigma_error,
                m_current_nmea.data_gst.height_sigma_error *
                m_current_nmea.data_gst.height_sigma_error;

    Eigen::Matrix3d U = this->getOrientation().matrix();
    position_uncertainty = U * eigenvalues.array().matrix().asDiagonal() * U.transpose();

    return position_uncertainty;
}
Example #17
void SFMViewer::update(std::vector<cv::Point3d> pcld,
		std::vector<cv::Vec3b> pcldrgb,
		std::vector<cv::Point3d> pcld_alternate,
		std::vector<cv::Vec3b> pcldrgb_alternate,
		std::vector<cv::Matx34d> cameras) {
	m_pcld = pcld;
	m_pcldrgb = pcldrgb;
	m_cameras = cameras;

	//get the scale of the result cloud using PCA
	{
		cv::Mat_<double> cldm(pcld.size(), 3);
		for (unsigned int i = 0; i < pcld.size(); i++) {
			cldm.row(i)(0) = pcld[i].x;
			cldm.row(i)(1) = pcld[i].y;
			cldm.row(i)(2) = pcld[i].z;
		}
		cv::Mat_<double> mean; //cv::reduce(cldm,mean,0,CV_REDUCE_AVG);
		cv::PCA pca(cldm, mean, CV_PCA_DATA_AS_ROW);
		scale_cameras_down = 1.0 / (3.0 * sqrt(pca.eigenvalues.at<double> (0)));
//		std::cout << "emean " << mean << std::endl;
//		m_global_transform = Eigen::Translation<double,3>(-Eigen::Map<Eigen::Vector3d>(mean[0]));
	}

	//compute transformation to place cameras in world
	m_cameras_transforms.resize(m_cameras.size());
	Eigen::Vector3d c_sum(0,0,0);
	for (int i = 0; i < m_cameras.size(); ++i) {
		Eigen::Matrix<double, 3, 4> P = Eigen::Map<Eigen::Matrix<double, 3, 4,
				Eigen::RowMajor> >(m_cameras[i].val);
		Eigen::Matrix3d R = P.block(0, 0, 3, 3);
		Eigen::Vector3d t = P.block(0, 3, 3, 1);
		Eigen::Vector3d c = -R.transpose() * t;
		c_sum += c;
		m_cameras_transforms[i] =
				Eigen::Translation<double, 3>(c) *
				Eigen::Quaterniond(R) *
				Eigen::UniformScaling<double>(scale_cameras_down)
				;
	}

	m_global_transform = Eigen::Translation<double,3>(-c_sum / (double)(m_cameras.size()));
//	m_global_transform = m_cameras_transforms[0].inverse();

	updateGL();
}
Example #18
Eigen::Matrix3d Reconstruction3D::computeEpipoleMat(const Eigen::Matrix3d& F)
{
	// The left epipole e' is the null vector of F^T (F^T * e' = 0), i.e. the last column
	// of V in the SVD of F^T.
	Eigen::JacobiSVD<Eigen::MatrixXd> svd(F.transpose(), Eigen::ComputeFullV);
	Eigen::Vector3d e = svd.matrixV().col(svd.matrixV().cols() - 1);

	// The original code reshaped a 9-element kernel here, which is out of range for a
	// 3x3 F; the returned "epipole matrix" is assumed to be the skew-symmetric [e']_x,
	// e.g. for building the second camera P' = [ [e']_x * F | e' ].
	Eigen::Matrix3d epipoleMat;
	epipoleMat <<   0.0, -e(2),  e(1),
	               e(2),   0.0, -e(0),
	              -e(1),  e(0),   0.0;

	return epipoleMat;
}
Example #19
void Win3D::drag(int _x, int _y) {
  double deltaX = _x - mMouseX;
  double deltaY = _y - mMouseY;

  mMouseX = _x;
  mMouseY = _y;

  if (mRotate) {
    if (deltaX != 0 || deltaY != 0)
      mTrackBall.updateBall(_x, mWinHeight - _y);
  }
  if (mTranslate) {
    Eigen::Matrix3d rot = mTrackBall.getRotationMatrix();
    mTrans += rot.transpose()*Eigen::Vector3d(deltaX, -deltaY, 0.0);
  }
  if (mZooming) {
    mZoom += deltaY*0.01;
  }
  glutPostRedisplay();
}
Example #20
	void Simulation :: dampVelocities( void )
	{
		// [ Mueller - 2007 Sec: 3.5 ]
		for( unsigned mIdx = 0; mIdx < m_meshes.size(); ++mIdx ){

			Vector x_cm;
			Vector v_cm;
			double sumMass = 0.;
		    for( VertexIter v = m_meshes[mIdx]->vertices.begin(); v != m_meshes[mIdx]->vertices.end(); ++v ){
		    	sumMass += v->mass;
		    	x_cm += v->position * v->mass;
		    	v_cm += v->velocity * v->mass;
		    }
			x_cm /= sumMass;
			v_cm /= sumMass;

			Vector r;
			Vector L;
			Eigen::Matrix3d I = Eigen::Matrix3d::Zero();
		    for( VertexIter v = m_meshes[mIdx]->vertices.begin(); v != m_meshes[mIdx]->vertices.end(); ++v ){
		    	r = v->position - x_cm;
		    	L += cross( r, v->mass * v->velocity );
		    	// skew-symmetric matrix of r (the (2,0) entry must be r[1], not r[2]);
		    	// the overall sign does not matter since only singleI * singleI^T is used
		    	Eigen::Matrix3d singleI = Eigen::Matrix3d::Zero();
		    	singleI << 0.,  r[2], -r[1],
		    			-r[2],    0.,  r[0],
		    			 r[1], -r[0],    0.;
		    	I += singleI * singleI.transpose() * v->mass;
			}
			Eigen::Vector3d L_tmp ( L[0], L[1], L[2] );
			Eigen::Vector3d omegaTemp = I.inverse() * L_tmp;
			Vector ang_vel_omega( omegaTemp[0], omegaTemp[1], omegaTemp[2] );

		    for( VertexIter v = m_meshes[mIdx]->vertices.begin(); v != m_meshes[mIdx]->vertices.end(); ++v ){
		    	r = v->position - x_cm;
		    	Vector delta_v = v_cm + cross( ang_vel_omega, r ) - v->velocity;
		    	v->velocity = v->velocity + ( m_meshes[mIdx]->dampingStiffness() * delta_v );
			}

		}
	}
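A minimal sketch of the per-vertex term accumulated above (Eigen-only, names illustrative): with the skew-symmetric matrix r_tilde of r, m * r_tilde * r_tilde^T equals the familiar point-mass inertia m * (|r|^2 * I - r * r^T).

#include <Eigen/Dense>

Eigen::Matrix3d pointMassInertia(const Eigen::Vector3d& r, double m)
{
    Eigen::Matrix3d r_tilde;
    r_tilde <<     0., -r.z(),  r.y(),
               r.z(),      0., -r.x(),
              -r.y(),  r.x(),      0.;
    return m * r_tilde * r_tilde.transpose();
}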
Example #21
// Implementation from the T. Lee et al. paper
// Control of complex maneuvers for a quadrotor UAV using geometric methods on SE(3)
void LeePositionController::ComputeDesiredAngularAcc(const Eigen::Vector3d& acceleration,
                                                     Eigen::Vector3d* angular_acceleration) const {
  assert(angular_acceleration);

  Eigen::Matrix3d R = odometry_.orientation.toRotationMatrix();

  // Get the desired rotation matrix.
  Eigen::Vector3d b1_des;
  double yaw = command_trajectory_.getYaw();
  b1_des << cos(yaw), sin(yaw), 0;

  Eigen::Vector3d b3_des;
  b3_des = -acceleration / acceleration.norm();

  Eigen::Vector3d b2_des;
  b2_des = b3_des.cross(b1_des);
  b2_des.normalize();

  Eigen::Matrix3d R_des;
  R_des.col(0) = b2_des.cross(b3_des);
  R_des.col(1) = b2_des;
  R_des.col(2) = b3_des;

  // Angle error according to lee et al.
  Eigen::Matrix3d angle_error_matrix = 0.5 * (R_des.transpose() * R - R.transpose() * R_des);
  Eigen::Vector3d angle_error;
  vectorFromSkewMatrix(angle_error_matrix, &angle_error);

  // TODO(burrimi) include angular rate references at some point.
  Eigen::Vector3d angular_rate_des(Eigen::Vector3d::Zero());
  angular_rate_des[2] = command_trajectory_.getYawRate();

  Eigen::Vector3d angular_rate_error = odometry_.angular_velocity - R_des.transpose() * R * angular_rate_des;

  *angular_acceleration = -1 * angle_error.cwiseProduct(normalized_attitude_gain_)
                           - angular_rate_error.cwiseProduct(normalized_angular_rate_gain_)
                           + odometry_.angular_velocity.cross(odometry_.angular_velocity); // we don't need the inertia matrix here
}
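vectorFromSkewMatrix is a project helper; a minimal sketch of the standard vee operator it presumably implements (extracting v from a skew-symmetric S = [v]_x) is:

#include <Eigen/Dense>

inline void veeFromSkew(const Eigen::Matrix3d& S, Eigen::Vector3d* v)
{
    (*v) << S(2, 1), S(0, 2), S(1, 0);
}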
Example #22
File: gicp.hpp Project: 87west/pcl
template <typename PointSource, typename PointTarget> inline void
pcl::GeneralizedIterativeClosestPoint<PointSource, PointTarget>::computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess)
{
  pcl::IterativeClosestPoint<PointSource, PointTarget>::initComputeReciprocal ();
  using namespace std;
  // Difference between consecutive transforms
  double delta = 0;
  // Get the size of the target
  const size_t N = indices_->size ();
  // Set the mahalanobis matrices to identity
  mahalanobis_.resize (N, Eigen::Matrix3d::Identity ());
  // Compute target cloud covariance matrices
  if (target_covariances_.empty ())
    computeCovariances<PointTarget> (target_, tree_, target_covariances_);
  // Compute input cloud covariance matrices
  if (input_covariances_.empty ())
    computeCovariances<PointSource> (input_, tree_reciprocal_, input_covariances_);

  base_transformation_ = guess;
  nr_iterations_ = 0;
  converged_ = false;
  double dist_threshold = corr_dist_threshold_ * corr_dist_threshold_;
  std::vector<int> nn_indices (1);
  std::vector<float> nn_dists (1);

  while(!converged_)
  {
    size_t cnt = 0;
    std::vector<int> source_indices (indices_->size ());
    std::vector<int> target_indices (indices_->size ());

    // guess corresponds to base_t and transformation_ to t
    Eigen::Matrix4d transform_R = Eigen::Matrix4d::Zero ();
    for(size_t i = 0; i < 4; i++)
      for(size_t j = 0; j < 4; j++)
        for(size_t k = 0; k < 4; k++)
          transform_R(i,j)+= double(transformation_(i,k)) * double(guess(k,j));

    Eigen::Matrix3d R = transform_R.topLeftCorner<3,3> ();

    for (size_t i = 0; i < N; i++)
    {
      PointSource query = output[i];
      query.getVector4fMap () = guess * query.getVector4fMap ();
      query.getVector4fMap () = transformation_ * query.getVector4fMap ();

      if (!searchForNeighbors (query, nn_indices, nn_dists))
      {
        PCL_ERROR ("[pcl::%s::computeTransformation] Unable to find a nearest neighbor in the target dataset for point %d in the source!\n", getClassName ().c_str (), (*indices_)[i]);
        return;
      }
      
      // Check if the distance to the nearest neighbor is smaller than the user imposed threshold
      if (nn_dists[0] < dist_threshold)
      {
        Eigen::Matrix3d &C1 = input_covariances_[i];
        Eigen::Matrix3d &C2 = target_covariances_[nn_indices[0]];
        Eigen::Matrix3d &M = mahalanobis_[i];
        // M = R*C1
        M = R * C1;
        // temp = M*R' + C2 = R*C1*R' + C2
        Eigen::Matrix3d temp = M * R.transpose();        
        temp+= C2;
        // M = temp^-1
        M = temp.inverse ();
        source_indices[cnt] = static_cast<int> (i);
        target_indices[cnt] = nn_indices[0];
        cnt++;
      }
    }
    // Resize to the actual number of valid correspondences
    source_indices.resize(cnt); target_indices.resize(cnt);
    /* optimize transformation using the current assignment and Mahalanobis metrics*/
    previous_transformation_ = transformation_;
    //optimization right here
    try
    {
      rigid_transformation_estimation_(output, source_indices, *target_, target_indices, transformation_);
      /* compute the delta from this iteration */
      delta = 0.;
      for(int k = 0; k < 4; k++) {
        for(int l = 0; l < 4; l++) {
          double ratio = 1;
          if(k < 3 && l < 3) // rotation part of the transform
            ratio = 1./rotation_epsilon_;
          else
            ratio = 1./transformation_epsilon_;
          double c_delta = ratio*fabs(previous_transformation_(k,l) - transformation_(k,l));
          if(c_delta > delta)
            delta = c_delta;
        }
      }
    } 
    catch (PCLException &e)
    {
      PCL_DEBUG ("[pcl::%s::computeTransformation] Optimization issue %s\n", getClassName ().c_str (), e.what ());
      break;
    }
    nr_iterations_++;
    // Check for convergence
    if (nr_iterations_ >= max_iterations_ || delta < 1)
    {
      converged_ = true;
      previous_transformation_ = transformation_;
      PCL_DEBUG ("[pcl::%s::computeTransformation] Convergence reached. Number of iterations: %d out of %d. Transformation difference: %f\n",
                 getClassName ().c_str (), nr_iterations_, max_iterations_, (transformation_ - previous_transformation_).array ().abs ().sum ());
    } 
    else
      PCL_DEBUG ("[pcl::%s::computeTransformation] Convergence failed\n", getClassName ().c_str ());
  }
  //for some reason the static equivalent methode raises an error
  // final_transformation_.block<3,3> (0,0) = (transformation_.block<3,3> (0,0)) * (guess.block<3,3> (0,0));
  // final_transformation_.block <3, 1> (0, 3) = transformation_.block <3, 1> (0, 3) + guess.rightCols<1>.block <3, 1> (0, 3);
  final_transformation_.topLeftCorner (3,3) = previous_transformation_.topLeftCorner (3,3) * guess.topLeftCorner (3,3);
  final_transformation_(0,3) = previous_transformation_(0,3) + guess(0,3);
  final_transformation_(1,3) = previous_transformation_(1,3) + guess(1,3);
  final_transformation_(2,3) = previous_transformation_(2,3) + guess(2,3);
}
Example #23
File: ray.hpp Project: sfegan/calin
 void derotate(const Eigen::Matrix3d& rot) {
   pos_ = rot.transpose() * pos_; dir_ = rot.transpose() * dir_; }
Example #24
int main( int argc, char* argv[] )
{
    // Initialise window
    pangolin::View& container = SetupPangoGLWithCuda(1024, 768);
    size_t cu_mem_start, cu_mem_end, cu_mem_total;
    cudaMemGetInfo( &cu_mem_start, &cu_mem_total );
    glClearColor(1,1,1,0);

    // Open video device
    hal::Camera video = OpenRpgCamera(argc,argv);

    // Capture first image
    pb::ImageArray images;

    // N cameras, each w*h in dimension, greyscale
    const size_t N = video.NumChannels();
    if( N != 2 ) {
        std::cerr << "Two images are required to run this program!" << std::endl;
        exit(1);
    }
    const size_t nw = video.Width();
    const size_t nh = video.Height();

    // Capture first image
    video.Capture(images);

    // Downsample this image to process less pixels
    const int max_levels = 6;
    const int level = roo::GetLevelFromMaxPixels( nw, nh, 640*480 );
//    const int level = 4;
    assert(level <= max_levels);

    // Find centered image crop which aligns to 16 pixels at given level
    const NppiRect roi = roo::GetCenteredAlignedRegion(nw,nh,16 << level,16 << level);

    // Load Camera intrinsics from file
    GetPot clArgs( argc, argv );
    const std::string filename = clArgs.follow("","-cmod");
    if( filename.empty() ) {
        std::cerr << "Camera models file is required!" << std::endl;
        exit(1);
    }
    const calibu::CameraRig rig = calibu::ReadXmlRig(filename);

    if( rig.cameras.size() != 2 ) {
        std::cerr << "Two camera models are required to run this program!" << std::endl;
        exit(1);
    }

    Eigen::Matrix3f CamModel0 = rig.cameras[0].camera.K().cast<float>();
    Eigen::Matrix3f CamModel1 = rig.cameras[1].camera.K().cast<float>();

    roo::ImageIntrinsics camMod[] = {
        {CamModel0(0,0),CamModel0(1,1),CamModel0(0,2),CamModel0(1,2)},
        {CamModel1(0,0),CamModel1(1,1),CamModel1(0,2),CamModel1(1,2)}
    };

    for(int i=0; i<2; ++i ) {
        // Adjust to match camera image dimensions
        const double scale = nw / rig.cameras[i].camera.Width();
        roo::ImageIntrinsics camModel = camMod[i].Scale( scale );

        // Adjust to match cropped aligned image
        camModel = camModel.CropToROI( roi );

        camMod[i] = camModel;
    }

    const unsigned int w = roi.width;
    const unsigned int h = roi.height;
    const unsigned int lw = w >> level;
    const unsigned int lh = h >> level;

    const Eigen::Matrix3d& K0 = camMod[0].Matrix();
    const Eigen::Matrix3d& Kl = camMod[0][level].Matrix();

    std::cout << "K Matrix: " << std::endl << K0 << std::endl;
    std::cout << "K Matrix - Level: " << std::endl << Kl << std::endl;

    std::cout << "Video stream dimensions: " << nw << "x" << nh << std::endl;
    std::cout << "Chosen Level: " << level << std::endl;
    std::cout << "Processing dimensions: " << lw << "x" << lh << std::endl;
    std::cout << "Offset: " << roi.x << "x" << roi.y << std::endl;

    // print selected camera model
    std::cout << "Camera Model used: " << std::endl << camMod[0][level].Matrix() << std::endl;

    Eigen::Matrix3d RDFvision;
    RDFvision << 1, 0, 0,  0, 1, 0,  0, 0, 1;
    Eigen::Matrix3d RDFrobot;
    RDFrobot  << 0, 1, 0,  0, 0, 1,  1, 0, 0;
    Eigen::Matrix4d T_vis_ro = Eigen::Matrix4d::Identity();
    T_vis_ro.block<3,3>(0,0) = RDFvision.transpose() * RDFrobot;
    Eigen::Matrix4d T_ro_vis = Eigen::Matrix4d::Identity();
    T_ro_vis.block<3,3>(0,0) = RDFrobot.transpose() * RDFvision;

    const Sophus::SE3d T_rl_orig = T_rlFromCamModelRDF(rig.cameras[0], rig.cameras[1], RDFvision);

    // TODO(jmf): For now, assume cameras are rectified. Later allow unrectified cameras.
    /*
    double k1 = 0;
    double k2 = 0;

    if(cam[0].Type() == MVL_CAMERA_WARPED)
    {
        k1 = cam[0].GetModel()->warped.kappa1;
        k2 = cam[0].GetModel()->warped.kappa2;
    }
    */

    const bool rectify = false;
    if(!rectify) {
        std::cout << "Using pre-rectified images" << std::endl;
    }

    // Check we received at least two images
    if(images.Size() < 2) {
        std::cerr << "Failed to capture first stereo pair from camera" << std::endl;
        return -1;
    }

    // Define Camera Render Object (for view / scene browsing)
    pangolin::OpenGlRenderState s_cam(
        pangolin::ProjectionMatrixRDF_TopLeft(w,h,K0(0,0),K0(1,1),K0(0,2),K0(1,2),0.1,10000),
        pangolin::IdentityMatrix(pangolin::GlModelViewStack)
    );

    pangolin::GlBufferCudaPtr vbo(pangolin::GlArrayBuffer, lw*lh,GL_FLOAT, 4, cudaGraphicsMapFlagsWriteDiscard, GL_STREAM_DRAW );
    pangolin::GlBufferCudaPtr cbo(pangolin::GlArrayBuffer, lw*lh,GL_UNSIGNED_BYTE, 4, cudaGraphicsMapFlagsWriteDiscard, GL_STREAM_DRAW );
    pangolin::GlBuffer ibo = pangolin::MakeTriangleStripIboForVbo(lw,lh);

    // Allocate Camera Images on device for processing
    roo::Image<unsigned char, roo::TargetHost, roo::DontManage> hCamImg[] = {{0,nw,nh},{0,nw,nh}};
    roo::Image<float2, roo::TargetDevice, roo::Manage> dLookup[] = {{w,h},{w,h}};

    roo::Image<unsigned char, roo::TargetDevice, roo::Manage> upload(w,h);
    roo::Pyramid<unsigned char, max_levels, roo::TargetDevice, roo::Manage> img_pyr[] = {{w,h},{w,h}};

    roo::Image<float, roo::TargetDevice, roo::Manage> img[] = {{lw,lh},{lw,lh}};
    roo::Volume<float, roo::TargetDevice, roo::Manage> vol[] = {{lw,lh,MAXD},{lw,lh,MAXD}};
    roo::Image<float, roo::TargetDevice, roo::Manage>  disp[] = {{lw,lh},{lw,lh}};
    roo::Image<float, roo::TargetDevice, roo::Manage> meanI(lw,lh);
    roo::Image<float, roo::TargetDevice, roo::Manage> varI(lw,lh);
    roo::Image<float, roo::TargetDevice, roo::Manage> temp[] = {{lw,lh},{lw,lh},{lw,lh},{lw,lh},{lw,lh}};

    roo::Image<float,roo::TargetDevice, roo::Manage>& imgd = disp[0];
    roo::Image<float,roo::TargetDevice, roo::Manage> depthmap(lw,lh);
    roo::Image<float,roo::TargetDevice, roo::Manage> imga(lw,lh);
    roo::Image<float2,roo::TargetDevice, roo::Manage> imgq(lw,lh);
    roo::Image<float,roo::TargetDevice, roo::Manage> imgw(lw,lh);

    roo::Image<float4, roo::TargetDevice, roo::Manage>  d3d(lw,lh);
    roo::Image<unsigned char, roo::TargetDevice,roo::Manage> Scratch(lw*sizeof(roo::LeastSquaresSystem<float,6>),lh);

    typedef ulong4 census_t;
    roo::Image<census_t, roo::TargetDevice, roo::Manage> census[] = {{lw,lh},{lw,lh}};

    // Stereo transformation (post-rectification)
    Sophus::SE3d T_rl = T_rl_orig;

    const double baseline = T_rl.translation().norm();
    std::cout << "Baseline: " << baseline << std::endl;

    cudaMemGetInfo( &cu_mem_end, &cu_mem_total );
    std::cout << "CuTotal: " << cu_mem_total/(1024*1024) << ", Available: " << cu_mem_end/(1024*1024) << ", Used: " << (cu_mem_start-cu_mem_end)/(1024*1024) << std::endl;

    pangolin::Var<bool> step("ui.step", false, false);
    pangolin::Var<bool> run("ui.run", false, true);
    pangolin::Var<bool> lockToCam("ui.Lock to cam", false, true);
    pangolin::Var<int> show_slice("ui.show slice",MAXD/2, 0, MAXD-1);

    pangolin::Var<int> maxdisp("ui.maxdisp",MAXD, 0, MAXD);
    pangolin::Var<bool> subpix("ui.subpix", true, true);

    pangolin::Var<bool> use_census("ui.use census", true, true);
    pangolin::Var<int> avg_rad("ui.avg_rad",0, 0, 100);

    pangolin::Var<bool> do_dtam("ui.do dtam", false, true);
    pangolin::Var<bool> dtam_reset("ui.reset", false, false);

    pangolin::Var<float> g_alpha("ui.g alpha", 14, 0,4);
    pangolin::Var<float> g_beta("ui.g beta", 2.5, 0,2);


    pangolin::Var<float> theta("ui.theta", 100, 0,100);
    pangolin::Var<float> lambda("ui.lambda", 20, 0,20);
    pangolin::Var<float> sigma_q("ui.sigma q", 0.7, 0, 1);
    pangolin::Var<float> sigma_d("ui.sigma d", 0.7, 0, 1);
    pangolin::Var<float> huber_alpha("ui.huber alpha", 0.002, 0, 0.01);
    pangolin::Var<float> beta("ui.beta", 0.00001, 0, 0.01);

    pangolin::Var<float> alpha("ui.alpha", 0.9, 0,1);
    pangolin::Var<float> r1("ui.r1", 100, 0,0.01);
    pangolin::Var<float> r2("ui.r2", 100, 0,0.01);

    pangolin::Var<bool> filter("ui.filter", false, true);
    pangolin::Var<float> eps("ui.eps",0.01*0.01, 0, 0.01);
    pangolin::Var<int> rad("ui.radius",9, 1, 20);

    pangolin::Var<bool> leftrightcheck("ui.left-right check", false, true);
    pangolin::Var<float> maxdispdiff("ui.maxdispdiff",1, 0, 5);

    pangolin::Var<int> domedits("ui.median its",1, 1, 10);
    pangolin::Var<bool> domed9x9("ui.median 9x9", false, true);
    pangolin::Var<bool> domed7x7("ui.median 7x7", false, true);
    pangolin::Var<bool> domed5x5("ui.median 5x5", false, true);
    pangolin::Var<int> medi("ui.medi",12, 0, 24);

    pangolin::Var<float> filtgradthresh("ui.filt grad thresh", 0, 0, 20);

    pangolin::Var<bool> save_depthmaps("ui.save_depthmaps", false, true);

    int jump_frames = 0;

    pangolin::RegisterKeyPressCallback(' ', [&run](){run = !run;} );
    pangolin::RegisterKeyPressCallback('l', [&lockToCam](){lockToCam = !lockToCam;} );
    pangolin::RegisterKeyPressCallback(pangolin::PANGO_SPECIAL + GLUT_KEY_RIGHT, [&step](){step=true;} );
    pangolin::RegisterKeyPressCallback(']', [&jump_frames](){jump_frames=100;} );
    pangolin::RegisterKeyPressCallback('}', [&jump_frames](){jump_frames=1000;} );

    pangolin::Handler2dImageSelect handler2d(lw,lh,level);
//    ActivateDrawPyramid<unsigned char,max_levels> adleft(img_pyr[0],GL_LUMINANCE8, false, true);
//    ActivateDrawPyramid<unsigned char,max_levels> adright(img_pyr[1],GL_LUMINANCE8, false, true);
    pangolin::ActivateDrawImage<float> adleft(img[0],GL_LUMINANCE32F_ARB, false, true);
    pangolin::ActivateDrawImage<float> adright(img[1],GL_LUMINANCE32F_ARB, false, true);
    pangolin::ActivateDrawImage<float> adisp(disp[0],GL_LUMINANCE32F_ARB, false, true);
    pangolin::ActivateDrawImage<float> adw(imgw,GL_LUMINANCE32F_ARB, false, true);
//    ActivateDrawImage<float> adCrossSection(dCrossSection,GL_RGBA_FLOAT32_APPLE, false, true);
    pangolin::ActivateDrawImage<float> adVol(vol[0].ImageXY(show_slice),GL_LUMINANCE32F_ARB, false, true);

    SceneGraph::GLSceneGraph graph;
    SceneGraph::GLVbo glvbo(&vbo,&ibo,&cbo);
    graph.AddChild(&glvbo);

    SetupContainer(container, 6, (float)w/h);
    container[0].SetDrawFunction(boost::ref(adleft)).SetHandler(&handler2d);
    container[1].SetDrawFunction(boost::ref(adright)).SetHandler(&handler2d);
    container[2].SetDrawFunction(boost::ref(adisp)).SetHandler(&handler2d);
    container[3].SetDrawFunction(boost::ref(adVol)).SetHandler(&handler2d);
    container[4].SetDrawFunction(SceneGraph::ActivateDrawFunctor(graph, s_cam))
                .SetHandler( new pangolin::Handler3D(s_cam, pangolin::AxisNone) );
    container[5].SetDrawFunction(boost::ref(adw)).SetHandler(&handler2d);

    for(unsigned long frame=0; !pangolin::ShouldQuit();)
    {
        bool go = frame==0 || jump_frames > 0 || run || Pushed(step);

        for(; jump_frames > 0; jump_frames--) {
            video.Capture(images);
        }

        if(go) {
            if(frame>0) {
                if( video.Capture(images) == false) {
                    exit(1);
                }
            }

            frame++;

            /////////////////////////////////////////////////////////////
            // Upload images to device (Warp / Decimate if necessery)
            for(int i=0; i<2; ++i ) {
                hCamImg[i].ptr = images[i].data();

                if(rectify) {
                    upload.CopyFrom(hCamImg[i].SubImage(roi));
                    Warp(img_pyr[i][0], upload, dLookup[i]);
                }else{
                    img_pyr[i][0].CopyFrom(hCamImg[i].SubImage(roi));
                }

                roo::BoxReduce<unsigned char, max_levels, unsigned int>(img_pyr[i]);
            }
        }

        go |= avg_rad.GuiChanged() | use_census.GuiChanged();
        if( go ) {
            for(int i=0; i<2; ++i ) {
                roo::ElementwiseScaleBias<float,unsigned char,float>(img[i], img_pyr[i][level],1.0f/255.0f);
                if(avg_rad > 0 ) {
                    roo::BoxFilter<float,float,float>(temp[0],img[i],Scratch,avg_rad);
                    roo::ElementwiseAdd<float,float,float,float>(img[i], img[i], temp[0], 1, -1, 0.5);
                }
                if(use_census) {
                    Census(census[i], img[i]);
                }
            }
        }

        if( go | g_alpha.GuiChanged() || g_beta.GuiChanged() ) {
            ExponentialEdgeWeight(imgw, img[0], g_alpha, g_beta);
        }

        go |= filter.GuiChanged() | leftrightcheck.GuiChanged() | rad.GuiChanged() | eps.GuiChanged() | alpha.GuiChanged() | r1.GuiChanged() | r2.GuiChanged();
        if(go) {
            if(use_census) {
                roo::CensusStereoVolume<float, census_t>(vol[0], census[0], census[1], maxdisp, -1);
                if(leftrightcheck) roo::CensusStereoVolume<float, census_t>(vol[1], census[1], census[0], maxdisp, +1);
            }else{
                CostVolumeFromStereoTruncatedAbsAndGrad(vol[0], img[0], img[1], -1, alpha, r1, r2);
                if(leftrightcheck) CostVolumeFromStereoTruncatedAbsAndGrad(vol[1], img[1], img[0], +1, alpha, r1, r2);
            }

            if(filter) {
                // Filter Cost volume
                for(int v=0; v<(leftrightcheck?2:1); ++v)
                {
                    roo::Image<float, roo::TargetDevice, roo::Manage>& I = img[v];
                    roo::ComputeMeanVarience<float,float,float>(varI, temp[0], meanI, I, Scratch, rad);

                    for(int d=0; d<maxdisp; ++d)
                    {
                        roo::Image<float> P = vol[v].ImageXY(d);
                        roo::ComputeCovariance(temp[0],temp[2],temp[1],P,meanI,I,Scratch,rad);
                        GuidedFilter(P,temp[0],varI,temp[1],meanI,I,Scratch,temp[2],temp[3],temp[4],rad,eps);
                    }
                }
            }
        }

        static int n = 0;
//        static float theta = 0;
//        go |= Pushed(dtam_reset);
//        if(go )
        if(Pushed(dtam_reset))
        {
            n = 0;
            theta.Reset();

            // Initialise primal and auxillary variables
            CostVolMinimumSubpix(imgd,vol[0], maxdisp,-1);
            imga.CopyFrom(imgd);

            // Initialise dual variable
            imgq.Memset(0);
        }

        const double min_theta = 1E-0;
        if(do_dtam && theta > min_theta)
        {
            for(int i=0; i<5; ++i ) {
                // Auxillary exhaustive search
                CostVolMinimumSquarePenaltySubpix(imga, vol[0], imgd, maxdisp, -1, lambda, (theta) );

                // Dual Ascent
                roo::WeightedHuberGradU_DualAscentP(imgq, imgd, imgw, sigma_q, huber_alpha);

                // Primal Descent
                roo::WeightedL2_u_minus_g_PrimalDescent(imgd, imgq, imga, imgw, sigma_d, 1.0f / (theta) );

                theta= theta * (1-beta*n);
                ++n;
            }
            if( theta <= min_theta && save_depthmaps ) {
                cv::Mat dmap = cv::Mat( lh, lw, CV_32FC1 );
                // convert disparity to depth
                roo::Disp2Depth(imgd, depthmap, Kl(0,0), baseline );
                depthmap.MemcpyToHost( dmap.data );

                // save depth image
                char            Index[10];
                sprintf( Index, "%05d", frame );
                std::string DepthPrefix = "SDepth-";
                std::string DepthFile;
                DepthFile = DepthPrefix + Index + ".pdm";
                std::cout << "Depth File: " << DepthFile << std::endl;
                std::ofstream pDFile( DepthFile.c_str(), std::ios::out | std::ios::binary );
                pDFile << "P7" << std::endl;
                pDFile << dmap.cols << " " << dmap.rows << std::endl;
                unsigned int Size = dmap.elemSize1() * dmap.rows * dmap.cols;
                pDFile << 4294967295 << std::endl;
                pDFile.write( (const char*)dmap.data, Size );
                pDFile.close();

                // save grey image
                std::string GreyPrefix = "Left-";
                std::string GreyFile;
                GreyFile = GreyPrefix + Index + ".pgm";
                std::cout << "Grey File: " << GreyFile << std::endl;
                cv::Mat gimg = cv::Mat( lh, lw, CV_8UC1 );
                img_pyr[0][level].MemcpyToHost( gimg.data );
                cv::imwrite( GreyFile, gimg );

                 // reset
                step = true;
                dtam_reset = true;
            }
        }

        go |= pangolin::GuiVarHasChanged();
//        if(go) {
//            if(subpix) {
//                CostVolMinimumSubpix(disp[0],vol[0], maxdisp,-1);
//                if(leftrightcheck) CostVolMinimumSubpix(disp[1],vol[1], maxdisp,+1);
//            }else{
//                CostVolMinimum<float,float>(disp[0],vol[0], maxdisp);
//                if(leftrightcheck) CostVolMinimum<float,float>(disp[1],vol[1], maxdisp);
//            }

//        }

        if(go) {
            for(int di=0; di<(leftrightcheck?2:1); ++di) {
                for(int i=0; i < domedits; ++i ) {
                    if(domed9x9) MedianFilterRejectNegative9x9(disp[di],disp[di], medi);
                    if(domed7x7) MedianFilterRejectNegative7x7(disp[di],disp[di], medi);
                    if(domed5x5) MedianFilterRejectNegative5x5(disp[di],disp[di], medi);
                }
            }

            if(leftrightcheck ) {
                LeftRightCheck(disp[1], disp[0], +1, maxdispdiff);
                LeftRightCheck(disp[0], disp[1], -1, maxdispdiff);
            }

            if(filtgradthresh > 0) {
                FilterDispGrad(disp[0], disp[0], filtgradthresh);
            }
        }

//        if(go)
        {
            // Generate point cloud from disparity image
            DisparityImageToVbo(d3d, disp[0], baseline, Kl(0,0), Kl(1,1), Kl(0,2), Kl(1,2) );

//            if(container[3].IsShown())
            {
                // Copy point cloud into VBO
                {
                    pangolin::CudaScopedMappedPtr var(vbo);
                    roo::Image<float4> dVbo((float4*)*var,lw,lh);
                    dVbo.CopyFrom(d3d);
                }

                // Generate CBO
                {
                    pangolin::CudaScopedMappedPtr var(cbo);
                    roo::Image<uchar4> dCbo((uchar4*)*var,lw,lh);
                    roo::ConvertImage<uchar4,unsigned char>(dCbo, img_pyr[0][level]);
                }
            }

            // Update texture views
            adisp.SetImageScale(1.0f/maxdisp);
//            adleft.SetLevel(show_level);
//            adright.SetLevel(show_level);
            adVol.SetImage(vol[0].ImageXY(show_slice));
        }

        /////////////////////////////////////////////////////////////
        // Draw

        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glColor3f(1,1,1);

        pangolin::FinishFrame();
    }
}
Example #25
void ShapeMatching<PFP>::shapeMatch()
{
    // p_{i}
    std::vector<Eigen::Vector3d> m_p;

    m_p.reserve(m_position.nbElements());

    //1.
    Eigen::Vector3d xcm = massCenter();

    for(unsigned int i = m_position.begin() ; i < m_position.end() ; m_position.next(i))
    {
        Eigen::Vector3d tmp ;
        for (unsigned int j = 0 ; j < 3 ; ++j)
            tmp(j) = m_position[i][j] ;

        Eigen::Vector3d pi = tmp - xcm ;

        m_p.push_back(pi) ; //p_{i} = x_{i} - x_{cm}

    }

    //2.
    Eigen::Matrix3d apq = Eigen::Matrix3d::Zero();

    for(unsigned int i=0 ; i < m_p.size() ; ++i)
    {
        apq(0,0) += m_p[i][0] * m_q[i][0];
        apq(0,1) += m_p[i][0] * m_q[i][1];
        apq(0,2) += m_p[i][0] * m_q[i][2];

        apq(1,0) += m_p[i][1] * m_q[i][0];
        apq(1,1) += m_p[i][1] * m_q[i][1];
        apq(1,2) += m_p[i][1] * m_q[i][2];

        apq(2,0) += m_p[i][2] * m_q[i][0];
        apq(2,1) += m_p[i][2] * m_q[i][1];
        apq(2,2) += m_p[i][2] * m_q[i][2];
    }

    Eigen::Matrix3d S = apq.transpose() * apq ; //symmetric matrix

    //3. Jacobi Diagonalisation
    Eigen::EigenSolver<Eigen::Matrix3d> es(S);

    //V * D * V^(-1)
    Eigen::Matrix3d D =  es.pseudoEigenvalueMatrix();
    Eigen::Matrix3d U =  es.pseudoEigenvectors() ;

    for(int j = 0; j < 3; j++)
    {
        if(D(j,j) <= 0)
        {
            D(j,j) = 0.05f;
        }
        D(j,j) = 1.0f/sqrt(D(j,j));
    }

    // S now holds (Apq^T * Apq)^(-1/2)
    S = U * D * U.transpose();

    // Rotation part of the polar decomposition: R = Apq * (Apq^T * Apq)^(-1/2)
    Eigen::Matrix3d R = apq * S;

    //4.
    for(unsigned int i = m_goal.begin() ; i < m_goal.end() ; m_goal.next(i))
    {
       Eigen::Vector3d tmp = R * m_q[i] + xcm; // g_{i} = R * q_i + x_{cm}

       VEC3 g;
       for (unsigned int j = 0 ; j < 3 ; ++j)
            g[j] = tmp(j);

       m_goal[i] = g;
    }
}
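The rotation extracted above via the eigen-decomposition of Apq^T * Apq is the polar decomposition of Apq; a minimal sketch of the equivalent SVD route, which also avoids the 0.05 eigenvalue clamping, assuming only that Apq is the moment matrix accumulated in shapeMatch:

#include <Eigen/Dense>

Eigen::Matrix3d extractRotation(const Eigen::Matrix3d& Apq)
{
    Eigen::JacobiSVD<Eigen::Matrix3d> svd(Apq, Eigen::ComputeFullU | Eigen::ComputeFullV);
    Eigen::Matrix3d U = svd.matrixU();
    if (U.determinant() * svd.matrixV().determinant() < 0)
        U.col(2) *= -1;                    // keep a proper rotation (det = +1)
    return U * svd.matrixV().transpose();
}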
Example #26
MatrixXd Tetrahedra<ConcreteShape>::buildStiffnessMatrix(const Ref<const VectorXd>& vp2) {
  
  Eigen::Matrix3d invJ;
  double detJ;
  std::tie(invJ,detJ) = ConcreteShape::inverseJacobian(mVtxCrd);

  // mGradientPhi_dr_t = mGradientPhi_dr.transpose();
  // mGradientPhi_ds_t = mGradientPhi_ds.transpose();
  // mGradientPhi_dt_t = mGradientPhi_dt.transpose();
  
  // save for later
  mInvJac = invJ;
  mInvJacT_x_invJac = invJ.transpose() * invJ;
  mInvJacT = invJ.transpose();
  mDetJac = detJ;
  //Jinv= rx, sx, tx,
  //      ry, sy, ty,
  //      rz, sz, tz;
  auto drdx = invJ(0,0);
  auto dsdx = invJ(0,1);
  auto dtdx = invJ(0,2);

  auto drdy = invJ(1,0);
  auto dsdy = invJ(1,1);
  auto dtdy = invJ(1,2);

  auto drdz = invJ(2,0);
  auto dsdz = invJ(2,1);
  auto dtdz = invJ(2,2);
  
  // build material on all nodes
  MatrixXd elementStiffnessMatrix(mNumIntPnt,mNumIntPnt);

  mGradientPhi_dx.resize(mNumIntPnt,mNumIntPnt);
  mGradientPhi_dy.resize(mNumIntPnt,mNumIntPnt);
  mGradientPhi_dz.resize(mNumIntPnt,mNumIntPnt);

  mWiDPhi_x.resize(mNumIntPnt,mNumIntPnt);
  mWiDPhi_y.resize(mNumIntPnt,mNumIntPnt);
  mWiDPhi_z.resize(mNumIntPnt,mNumIntPnt);

  // Stiffness Matrix
  // Dx = rx*Dr + sx*Ds + tx*Dt;
  // Dy = ry*Dr + sy*Ds + ty*Dt;
  // Dz = rz*Dr + sz*Ds + tz*Dt;
  // Kxx = detJ*Dx'*Me*Dx
  // Kyy = detJ*Dy'*Me*Dy
  // Kzz = detJ*Dz'*Me*Dz
  // K = Kxx+Kyy+Kzz

  // alternate method
  mGradientPhi_dx =
    (drdx*mGradientPhi_dr +
    dsdx*mGradientPhi_ds +
     dtdx*mGradientPhi_dt).transpose();
  mGradientPhi_dy =
    (drdy*mGradientPhi_dr +
    dsdy*mGradientPhi_ds +
     dtdy*mGradientPhi_dt).transpose();
  mGradientPhi_dz =
    (drdz*mGradientPhi_dr +
    dsdz*mGradientPhi_ds +
     dtdz*mGradientPhi_dt).transpose();
  
  // Me*Dx, Me*Dy, Me*Dz
  RealVec wi_vp2 = mIntegrationWeights * vp2;
  mWiDPhi_x = (wi_vp2).asDiagonal() * mGradientPhi_dx;
  mWiDPhi_y = (wi_vp2).asDiagonal() * mGradientPhi_dy;
  mWiDPhi_z = (wi_vp2).asDiagonal() * mGradientPhi_dz;
  
  // for(int i=0;i<mNumIntPnt;i++) {
  //   mWiDPhi_x.row(i) = mGradientPhi_dx.row(i)*mIntegrationWeights[i];
  //   mWiDPhi_y.row(i) = mGradientPhi_dy.row(i)*mIntegrationWeights[i];
  //   mWiDPhi_z.row(i) = mGradientPhi_dz.row(i)*mIntegrationWeights[i];
  // }
    
  elementStiffnessMatrix = (detJ*mGradientPhi_dx.transpose()*mWiDPhi_x) + (detJ*mGradientPhi_dy.transpose()*mWiDPhi_y) + (detJ*mGradientPhi_dz.transpose()*mWiDPhi_z);
  
  // build transpose as well
  // mGradientPhi_dx_t = mGradientPhi_dx.transpose();
  // mGradientPhi_dy_t = mGradientPhi_dy.transpose();
  // mGradientPhi_dz_t = mGradientPhi_dz.transpose();
  
  return elementStiffnessMatrix;
}
Example #27
//DEPRECATED???
double
NDTMatcherFeatureD2D::scoreNDT(std::vector<NDTCell*> &sourceNDT, NDTMap &targetNDT,
        Eigen::Transform<double,3,Eigen::Affine,Eigen::ColMajor>& T)
{
    NUMBER_OF_ACTIVE_CELLS = 0;
    double score_here = 0;
    double det = 0;
    bool exists = false;
    NDTCell *cell;
    Eigen::Matrix3d covCombined, icov;
    Eigen::Vector3d meanFixed;
    Eigen::Vector3d meanMoving;
    Eigen::Matrix3d R = T.rotation();
    std::vector<std::pair<unsigned int, double> > scores;
    for(unsigned int j=0; j<_corr.size(); j++)
    {
        unsigned int i = _corr[j].second;
        if (_corr[j].second >= (int)sourceNDT.size())
        {
            std::cout << "second correspondance : " << _corr[j].second << ", " << sourceNDT.size() << std::endl;
        }
        if (sourceNDT[i] == NULL)
        {
            std::cout << "sourceNDT[i] == NULL!" << std::endl;
        }
        meanMoving = T*sourceNDT[i]->getMean();

        cell = targetNDT.getCellIdx(_corr[j].first);
        {

            if(cell == NULL)
            {
                std::cout << "cell== NULL!!!" << std::endl;
            }
            else
            {
                if(cell->hasGaussian_)
                {
                    meanFixed = cell->getMean();
                    covCombined = cell->getCov() + R.transpose()*sourceNDT[i]->getCov()*R;
                    covCombined.computeInverseAndDetWithCheck(icov,det,exists);
                    if(!exists) continue;
                    double l = (meanMoving-meanFixed).dot(icov*(meanMoving-meanFixed));
                    if(l*0 != 0) continue;  // NaN / inf guard
                    if(l > 120) continue;

                    double sh = -lfd1*(exp(-lfd2*l/2));

                    if(fabsf(sh) > 1e-10)
                    {
                        NUMBER_OF_ACTIVE_CELLS++;
                    }
                    scores.push_back(std::pair<unsigned int, double>(j, sh));
                    score_here += sh;
                    //score_here += l;
                }
            }
        }
    }

    if (_trimFactor == 1.)
    {
        return score_here;
    }
    else
    {
        // Determine the score value
        if (scores.empty()) // TODO, this happens(!), why??!??
            return score_here;

        score_here = 0.;
        unsigned int index = static_cast<unsigned int>(_trimFactor * (scores.size() - 1));
        //	std::nth_element (scores.begin(), scores.begin()+index, scores.end(), sort_scores()); //boost::bind(&std::pair<unsigned int, double>::second, _1) < boost::bind(&std::pair<unsigned int, double>::second, _2));
        std::nth_element (scores.begin(), scores.begin()+index, scores.end(), boost::bind(&std::pair<unsigned int, double>::second, _1) < boost::bind(&std::pair<unsigned int, double>::second, _2));
        std::fill(_goodCorr.begin(), _goodCorr.end(), false);
        //	std::cout << "_goodCorr.size() : " << _goodCorr.size() << " scores.size() : " << scores.size() << " index : " << index << std::endl;
        for (unsigned int i = 0; i < _goodCorr.size(); i++)
        {
            if (i <= index)
            {
                score_here += scores[i].second;
                _goodCorr[scores[i].first] = true;
            }
        }
        return score_here;
    }
}
Example #28
int P3P::computePoses(const Eigen::Matrix3d & feature_vectors, const Eigen::Matrix3d & world_points,
                      Eigen::Matrix<Eigen::Matrix<double, 3, 4>, 4, 1> & solutions)
{
  // Extraction of world points
  Eigen::Vector3d P1 = world_points.col(0);
  Eigen::Vector3d P2 = world_points.col(1);
  Eigen::Vector3d P3 = world_points.col(2);

  // Verification that world points are not colinear
  Eigen::Vector3d temp1 = P2 - P1;
  Eigen::Vector3d temp2 = P3 - P1;

  if (temp1.cross(temp2).norm() == 0)
  {
    return -1;
  }

  // Extraction of feature vectors
  Eigen::Vector3d f1 = feature_vectors.col(0);
  Eigen::Vector3d f2 = feature_vectors.col(1);
  Eigen::Vector3d f3 = feature_vectors.col(2);

  // Creation of intermediate camera frame
  Eigen::Vector3d e1 = f1;
  Eigen::Vector3d e3 = f1.cross(f2);
  e3 = e3 / e3.norm();
  Eigen::Vector3d e2 = e3.cross(e1);

  Eigen::Matrix3d T;
  T.row(0) = e1.transpose();
  T.row(1) = e2.transpose();
  T.row(2) = e3.transpose();

  f3 = T * f3;

  // Reinforce that f3(2,0) > 0 for having theta in [0;pi]
  if (f3(2, 0) > 0)
  {
    f1 = feature_vectors.col(1);
    f2 = feature_vectors.col(0);
    f3 = feature_vectors.col(2);

    e1 = f1;
    e3 = f1.cross(f2);
    e3 = e3 / e3.norm();
    e2 = e3.cross(e1);

    T.row(0) = e1.transpose();
    T.row(1) = e2.transpose();
    T.row(2) = e3.transpose();

    f3 = T * f3;

    P1 = world_points.col(1);
    P2 = world_points.col(0);
    P3 = world_points.col(2);
  }

  // Creation of intermediate world frame
  Eigen::Vector3d n1 = P2 - P1;
  n1 = n1 / n1.norm();
  Eigen::Vector3d n3 = n1.cross(P3 - P1);
  n3 = n3 / n3.norm();
  Eigen::Vector3d n2 = n3.cross(n1);

  Eigen::Matrix3d N;
  N.row(0) = n1.transpose();
  N.row(1) = n2.transpose();
  N.row(2) = n3.transpose();

  // Extraction of known parameters
  P3 = N * (P3 - P1);

  double d_12 = (P2 - P1).norm();
  double f_1 = f3(0, 0) / f3(2, 0);
  double f_2 = f3(1, 0) / f3(2, 0);
  double p_1 = P3(0, 0);
  double p_2 = P3(1, 0);

  double cos_beta = f1.dot(f2);
  double b = 1 / (1 - pow(cos_beta, 2)) - 1;

  if (cos_beta < 0)
  {
    b = -sqrt(b);
  }
  else
  {
    b = sqrt(b);
  }

  // Definition of temporary variables for avoiding multiple computation
  double f_1_pw2 = pow(f_1, 2);
  double f_2_pw2 = pow(f_2, 2);
  double p_1_pw2 = pow(p_1, 2);
  double p_1_pw3 = p_1_pw2 * p_1;
  double p_1_pw4 = p_1_pw3 * p_1;
  double p_2_pw2 = pow(p_2, 2);
  double p_2_pw3 = p_2_pw2 * p_2;
  double p_2_pw4 = p_2_pw3 * p_2;
  double d_12_pw2 = pow(d_12, 2);
  double b_pw2 = pow(b, 2);

  // Computation of factors of 4th degree polynomial
  Eigen::Matrix<double, 5, 1> factors;

  factors(0) = -f_2_pw2 * p_2_pw4 - p_2_pw4 * f_1_pw2 - p_2_pw4;

  factors(1) = 2 * p_2_pw3 * d_12 * b + 2 * f_2_pw2 * p_2_pw3 * d_12 * b - 2 * f_2 * p_2_pw3 * f_1 * d_12;

  factors(2) = -f_2_pw2 * p_2_pw2 * p_1_pw2 - f_2_pw2 * p_2_pw2 * d_12_pw2 * b_pw2 - f_2_pw2 * p_2_pw2 * d_12_pw2
      + f_2_pw2 * p_2_pw4 + p_2_pw4 * f_1_pw2 + 2 * p_1 * p_2_pw2 * d_12 + 2 * f_1 * f_2 * p_1 * p_2_pw2 * d_12 * b
      - p_2_pw2 * p_1_pw2 * f_1_pw2 + 2 * p_1 * p_2_pw2 * f_2_pw2 * d_12 - p_2_pw2 * d_12_pw2 * b_pw2
      - 2 * p_1_pw2 * p_2_pw2;

  factors(3) = 2 * p_1_pw2 * p_2 * d_12 * b + 2 * f_2 * p_2_pw3 * f_1 * d_12 - 2 * f_2_pw2 * p_2_pw3 * d_12 * b
      - 2 * p_1 * p_2 * d_12_pw2 * b;

  factors(4) = -2 * f_2 * p_2_pw2 * f_1 * p_1 * d_12 * b + f_2_pw2 * p_2_pw2 * d_12_pw2 + 2 * p_1_pw3 * d_12
      - p_1_pw2 * d_12_pw2 + f_2_pw2 * p_2_pw2 * p_1_pw2 - p_1_pw4 - 2 * f_2_pw2 * p_2_pw2 * p_1 * d_12
      + p_2_pw2 * f_1_pw2 * p_1_pw2 + f_2_pw2 * p_2_pw2 * d_12_pw2 * b_pw2;

  // Computation of roots
  Eigen::Matrix<double, 4, 1> realRoots;

  P3P::solveQuartic(factors, realRoots);

  // Backsubstitution of each solution
  for (int i = 0; i < 4; ++i)
  {
    double cot_alpha = (-f_1 * p_1 / f_2 - realRoots(i) * p_2 + d_12 * b)
        / (-f_1 * realRoots(i) * p_2 / f_2 + p_1 - d_12);

    double cos_theta = realRoots(i);
    double sin_theta = sqrt(1 - pow((double)realRoots(i), 2));
    double sin_alpha = sqrt(1 / (pow(cot_alpha, 2) + 1));
    double cos_alpha = sqrt(1 - pow(sin_alpha, 2));

    if (cot_alpha < 0)
    {
      cos_alpha = -cos_alpha;
    }

    Eigen::Vector3d C;
    C(0) = d_12 * cos_alpha * (sin_alpha * b + cos_alpha);
    C(1) = cos_theta * d_12 * sin_alpha * (sin_alpha * b + cos_alpha);
    C(2) = sin_theta * d_12 * sin_alpha * (sin_alpha * b + cos_alpha);

    C = P1 + N.transpose() * C;

    Eigen::Matrix3d R;
    R(0, 0) = -cos_alpha;
    R(0, 1) = -sin_alpha * cos_theta;
    R(0, 2) = -sin_alpha * sin_theta;
    R(1, 0) = sin_alpha;
    R(1, 1) = -cos_alpha * cos_theta;
    R(1, 2) = -cos_alpha * sin_theta;
    R(2, 0) = 0;
    R(2, 1) = -sin_theta;
    R(2, 2) = cos_theta;

    R = N.transpose() * R.transpose() * T;

    Eigen::Matrix<double, 3, 4> solution;
    solution.block<3, 3>(0, 0) = R;
    solution.col(3) = C;

    solutions(i) = solution;
  }

  return 0;
}
Example #29
int main(int /*argc*/, char** /*argv*/)
{
  try
  {
    imp::ImageCv32fC1::Ptr cv_im0;
    imp::cvBridgeLoad(cv_im0,
                      "/home/mwerlberger/data/epipolar_stereo_test/00000.png",
                      imp::PixelOrder::gray);

    imp::ImageCv32fC1::Ptr cv_im1;
    imp::cvBridgeLoad(cv_im1,
                      "/home/mwerlberger/data/epipolar_stereo_test/00001.png",
                      imp::PixelOrder::gray);

    // rectify images for testing

    imp::cu::ImageGpu32fC1::Ptr im0 = std::make_shared<imp::cu::ImageGpu32fC1>(*cv_im0);
    imp::cu::ImageGpu32fC1::Ptr im1 = std::make_shared<imp::cu::ImageGpu32fC1>(*cv_im1);


    Eigen::Quaterniond q_world_im0(0.14062777, 0.98558398, 0.02351040, -0.09107859);
    Eigen::Quaterniond q_world_im1(0.14118687, 0.98569744, 0.01930722, -0.08996696);
    Eigen::Vector3d t_world_im0(-0.12617580, 0.50447008, 0.15342121);
    Eigen::Vector3d t_world_im1(-0.11031053, 0.50314023, 0.15158643);

    imp::cu::PinholeCamera cu_cam(414.09, 413.799, 355.567, 246.337);

    // im0: fixed image; im1: moving image
    imp::cu::Matrix3f F_fix_mov;
    imp::cu::Matrix3f F_mov_fix;
    Eigen::Matrix3d F_fm, F_mf;
    { // compute fundamental matrix
      Eigen::Matrix3d R_world_mov = q_world_im1.matrix();
      Eigen::Matrix3d R_world_fix = q_world_im0.matrix();
      Eigen::Matrix3d R_fix_mov = R_world_fix.inverse()*R_world_mov;

      // in ref coordinates
      Eigen::Vector3d t_fix_mov = R_world_fix.inverse()*(-t_world_im0 + t_world_im1);

      Eigen::Matrix3d tx_fix_mov;
      tx_fix_mov << 0, -t_fix_mov[2], t_fix_mov[1],
          t_fix_mov[2], 0, -t_fix_mov[0],
          -t_fix_mov[1], t_fix_mov[0], 0;
      Eigen::Matrix3d E_fix_mov = tx_fix_mov * R_fix_mov;
      Eigen::Matrix3d K;
      K << cu_cam.fx(), 0, cu_cam.cx(),
          0, cu_cam.fy(), cu_cam.cy(),
          0, 0, 1;

      Eigen::Matrix3d Kinv = K.inverse();
      F_fm = Kinv.transpose() * E_fix_mov * Kinv;
      F_mf = F_fm.transpose();
    } // end .. compute fundamental matrix
    // convert the Eigen-thingy to something that we can use in CUDA
    for(size_t row=0; row<F_fix_mov.rows(); ++row)
    {
      for(size_t col=0; col<F_fix_mov.cols(); ++col)
      {
        F_fix_mov(row,col) = (float)F_fm(row,col);
        F_mov_fix(row,col) = (float)F_mf(row,col);
      }
    }

    // compute SE3 transformation
    imp::cu::SE3<float> T_world_fix(
          static_cast<float>(q_world_im0.w()), static_cast<float>(q_world_im0.x()),
          static_cast<float>(q_world_im0.y()), static_cast<float>(q_world_im0.z()),
          static_cast<float>(t_world_im0.x()), static_cast<float>(t_world_im0.y()),
          static_cast<float>(t_world_im0.z()));
    imp::cu::SE3<float> T_world_mov(
          static_cast<float>(q_world_im1.w()), static_cast<float>(q_world_im1.x()),
          static_cast<float>(q_world_im1.y()), static_cast<float>(q_world_im1.z()),
          static_cast<float>(t_world_im1.x()), static_cast<float>(t_world_im1.y()),
          static_cast<float>(t_world_im1.z()));
    imp::cu::SE3<float> T_mov_fix = T_world_mov.inv() * T_world_fix;
    // end .. compute SE3 transformation

    std::cout << "T_mov_fix:\n" << T_mov_fix << std::endl;


    std::unique_ptr<imp::cu::VariationalEpipolarStereo> stereo(
          new imp::cu::VariationalEpipolarStereo());

    stereo->parameters()->verbose = 1;
    stereo->parameters()->solver = imp::cu::StereoPDSolver::EpipolarPrecondHuberL1;
    stereo->parameters()->ctf.scale_factor = 0.8f;
    stereo->parameters()->ctf.iters = 30;
    stereo->parameters()->ctf.warps  = 5;
    stereo->parameters()->ctf.apply_median_filter = true;
    stereo->parameters()->lambda = 20;

    stereo->addImage(im0);
    stereo->addImage(im1);

    imp::cu::ImageGpu32fC1::Ptr cu_mu = std::make_shared<imp::cu::ImageGpu32fC1>(im0->size());
    imp::cu::ImageGpu32fC1::Ptr cu_sigma2 = std::make_shared<imp::cu::ImageGpu32fC1>(im0->size());
    cu_mu->setValue(-5.f);
    cu_sigma2->setValue(0.0f);

    stereo->setFundamentalMatrix(F_mov_fix);
    stereo->setIntrinsics({cu_cam, cu_cam});
    stereo->setExtrinsics(T_mov_fix);
    stereo->setDepthProposal(cu_mu, cu_sigma2);
    
    stereo->solve();

    std::shared_ptr<imp::cu::ImageGpu32fC1> d_disp = stereo->getDisparities();


    {
      imp::Pixel32fC1 min_val,max_val;
      imp::cu::minMax(*d_disp, min_val, max_val);
      std::cout << "disp: min: " << min_val.x << " max: " << max_val.x << std::endl;
    }

    imp::cu::cvBridgeShow("im0", *im0);
    imp::cu::cvBridgeShow("im1", *im1);
//    *d_disp *= -1;
    {
      imp::Pixel32fC1 min_val,max_val;
      imp::cu::minMax(*d_disp, min_val, max_val);
      std::cout << "disp: min: " << min_val.x << " max: " << max_val.x << std::endl;
    }

    imp::cu::cvBridgeShow("disparities", *d_disp, -18.0f, 11.0f);
    imp::cu::cvBridgeShow("disparities minmax", *d_disp, true);
    cv::waitKey();
  }
  catch (std::exception& e)
  {
    std::cout << "[exception] " << e.what() << std::endl;
    assert(false);
  }

  return EXIT_SUCCESS;

}
Example #30
      Eigen::Matrix3d get()
      {
	Eigen::Matrix3d interim = worker.get();
	Eigen::Matrix3d retval = interim.transpose();
	return retval;
      }