Example #1
Foam::triad::triad(const quaternion& q)
{
    tensor Rt(q.R().T());
    x() = Rt.x();
    y() = Rt.y();
    z() = Rt.z();
}
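A short note on Example #1: `q.R()` is the quaternion's rotation matrix, and (assuming the usual OpenFOAM convention that `tensor::x()`, `y()`, `z()` return rows) the triad's axes become the rows of \(R(q)^{\top}\), i.e. the columns of \(R(q)\), the rotated unit basis vectors expressed in the global frame:

\[ x = R(q)\,\hat e_x, \qquad y = R(q)\,\hat e_y, \qquad z = R(q)\,\hat e_z. \]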
Example #2
vctFrame4x4<double>
robSAHThumb::ForwardKinematics( const vctFixedSizeVector<double,4>& q, 
				robSAHThumb::Phalanx phalanx ) const {

  switch( phalanx ){
  case robSAHThumb::BASE:
    return Rtw0;

  case robSAHThumb::METACARPUS:
    {

      vctMatrixRotation3<double> Rb0;
      vctFixedSizeVector<double,3> tb0( -3.0/1000.0, 27.1/1000.0, 0.0 );
      vctFrame4x4<double> Rtb0(Rb0,tb0);

      vctMatrixRotation3<double> Rb1( 0.00000, -1,  0.00000,
				      0.57358,  0, -0.81915,
				      0.81915,  0,  0.57358,
				      VCT_NORMALIZE );
      vctFixedSizeVector<double,3> tb1( -9.0/1000.0, 114.0/1000.0, 97.0/1000.0);
      vctFrame4x4<double> Rtb1( Rb1, tb1 );

      vctFrame4x4<double> Rt0b( Rtb0 );
      Rt0b.InverseSelf();

      vctFrame4x4<double> Rt01 = Rt0b * Rtb1;

      
      vctMatrixRotation3<double> R( cos( -q[0] ), -sin( -q[0] ), 0.0,
				    sin( -q[0] ),  cos( -q[0] ), 0.0,
				    0.0,           0.0,          1.0,
				    VCT_NORMALIZE );
      vctFixedSizeVector<double,3> t(0.0);
      vctFrame4x4<double> Rt( R, t );
      
      return ForwardKinematics( q, robSAHThumb::BASE ) * Rtb0 * Rt * Rt01;

    }

  case robSAHThumb::MCP:
    {
      return ( ForwardKinematics( q, robSAHThumb::METACARPUS ) *
	       links[0].ForwardKinematics( q[1] ) );
    }

  case robSAHThumb::PROXIMAL:
    return ( ForwardKinematics( q, robSAHThumb::MCP ) *
	     links[1].ForwardKinematics( q[2] ) );

  case robSAHThumb::INTERMEDIATE:
    return ( ForwardKinematics( q, robSAHThumb::PROXIMAL ) * 
	     links[2].ForwardKinematics( q[3] ) );

  case robSAHThumb::DISTAL:
    return ( ForwardKinematics( q, robSAHThumb::INTERMEDIATE ) * 
	     links[3].ForwardKinematics( q[3] ) );
  }

  // All enum values are handled above; this fallback avoids returning without a value.
  return Rtw0;
}
::CORBA::Boolean ForwardKinematics::getRelativeCurrentPosition(const char* linknameFrom, const char* linknameTo, const OpenHRP::ForwardKinematicsService::position target, OpenHRP::ForwardKinematicsService::position result)
{
    Guard guard(m_bodyMutex);
    hrp::Link *from = m_actBody->link(linknameFrom);
    hrp::Link *to = m_actBody->link(linknameTo);
    if (!from || !to) return false;
    hrp::Vector3 targetPrel(target[0], target[1], target[2]);
    hrp::Vector3 targetPabs(to->p+to->attitude()*targetPrel);
    hrp::Matrix33 Rt(from->attitude().transpose());
    hrp::Vector3 p(Rt*(targetPabs - from->p));
    result[ 0]=p(0);result[ 1]=p(1);result[ 2]=p(2);
    return true;
}
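For reference, the transform implemented above expresses a point given in the `to` link frame in the `from` link frame:

\[ p_{\mathrm{abs}} = p_{to} + R_{to}\,p_{\mathrm{target}}, \qquad p_{\mathrm{result}} = R_{from}^{\top}\bigl(p_{\mathrm{abs}} - p_{from}\bigr), \]

which is exactly the `targetPabs` and `Rt * (targetPabs - from->p)` computation, with `attitude()` playing the role of the link rotation matrices.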
Example #4
//------------------------------------------------------------------------------
void Triangulation::stereo_triangulation(const double xL[2], const double xR[2],
										 const cv::Mat& R, const cv::Mat& T, const double fc_left[2], const double cc_left[2],
										 const double kc_left[5], const double fc_right[2], const double cc_right[2],
										 const double kc_right[5], double XW[3], const bool undistortPoint)
{
	// Normalize the image projection according to the intrinsic parameters of the left
	// and right cameras.
	double xt_[2], xtt_[2];
	normalize_pixel(xL, fc_left, cc_left, kc_left, xt_, undistortPoint);
	normalize_pixel(xR, fc_right, cc_right, kc_right, xtt_, undistortPoint);
	
	// Extend the normalized projections in homogeneous coordinates
	cv::Mat xt(3, 1, CV_64FC1);
	cv::Mat xtt(3, 1, CV_64FC1);
	xt.at<double>(0,0) = xt_[0];  xt.at<double>(1,0) = xt_[1];  xt.at<double>(2,0) = 1.0;
	xtt.at<double>(0,0) = xtt_[0];  xtt.at<double>(1,0) = xtt_[1];  xtt.at<double>(2,0) = 1.0;
	
	// Triangulation of the rays in 3D space:
	cv::Mat u = R * xt;
	double n_xt2 = xt.dot(xt);
	double n_xtt2 = xtt.dot(xtt);
	
	double dot_uxtt = u.dot(xtt);
	double DD = n_xt2 * n_xtt2 - dot_uxtt*dot_uxtt;
	
	double dot_uT = u.dot(T);
	double dot_xttT = xtt.dot(T);
	double dot_xttu = u.dot(xtt);
	
	double NN1 = dot_xttu*dot_xttT - n_xtt2 * dot_uT;
	double NN2 = n_xt2*dot_xttT - dot_uT*dot_xttu;
	
	double Zt = NN1/DD;
	double Ztt = NN2/DD;
	
	cv::Mat X1 = xt * Zt;
	
	// Transpose of the rotation matrix
	cv::Mat Rt(3, 3, CV_64FC1);
	cv::transpose(R, Rt);
	
	cv::Mat X2 = Rt * (xtt*Ztt - T);
	
	cv::Mat Xworld = 0.5 * (X1 + X2);
	XW[0] = Xworld.at<double>(0,0);
	XW[1] = Xworld.at<double>(1,0);
	XW[2] = Xworld.at<double>(2,0);
}
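A sketch of where `Zt` and `Ztt` come from: with \(u = R\,x_t\) (the left ray expressed in the right camera frame), the two depths are chosen to minimize the gap between the rays,

\[ \min_{Z_t,\,Z_{tt}}\ \bigl\lVert Z_t\,u + T - Z_{tt}\,x_{tt} \bigr\rVert^{2}. \]

Setting the derivatives to zero (and using \(u^{\top}u = x_t^{\top}x_t\) since \(R\) is a rotation) gives the 2×2 system

\[ \begin{pmatrix} x_t^{\top}x_t & -u^{\top}x_{tt} \\ u^{\top}x_{tt} & -x_{tt}^{\top}x_{tt} \end{pmatrix} \begin{pmatrix} Z_t \\ Z_{tt} \end{pmatrix} = \begin{pmatrix} -u^{\top}T \\ -x_{tt}^{\top}T \end{pmatrix}, \]

whose closed-form solution is the `NN1/DD`, `NN2/DD` pair above. `X1` and `X2` are then the closest points on the two rays, both expressed in the left camera frame, and `XW` is their midpoint.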
Example #5
void triangulate_init( cv::Mat R, cv::Mat t,const std::vector< cv::Point2f > points_1,
        const std::vector< cv::Point2f > points_2,cv::Mat& points4D,std::vector< cv::Point2f > mask3D[]){

    cv::Mat projMatr(3,4,CV_32F);

    cv::Mat Rt(3,4,CV_32F);  // Rt = [R | t]
    R.convertTo(R,CV_32F);
    t.convertTo(t,CV_32F);
    hconcat(R, t, Rt);  // Rt concatenate
    projMatr = camIntrinsic * Rt;

    points4D=cv::Mat_<float>(4,points_1.size());
    cv::triangulatePoints(projMatr0, projMatr, points_1, points_2, points4D);
    cv::Point2f x(-1,-1);
    for(int i = 0;i<points4D.cols;i++){
        mask3D[0].push_back(points_1[i]);
        mask3D[1].push_back(points_2[i]);
        for (int j=2;j<m;j++)
            mask3D[j].push_back(x);
    }

}
Example #6
tmp<volScalarField> gammaSST<BasicTurbulenceModel>::Fonset(const volScalarField& S) const
{
    return tmp<volScalarField>
    (
        new volScalarField
        (
            IOobject
            (
                "Fonset",
                this->runTime_.timeName(),
                this->mesh_,
                IOobject::NO_READ,
                IOobject::NO_WRITE
            ),
            max
            (
                min(Fonset1(S), 2.0) - max(1.0 - pow3(Rt()/3.5), 0.0),
                0.0
            )
        )
    );
}
Example #7
void frameSolver2d::computeStiffnessMatrix(int iBeam, fullMatrix<double> &K)
{
  const gmshBeam2d &b = _beams[iBeam];
  const double BS = b._e * b._i / (b._l * b._l * b._l);
  const double TS = b._e * b._a / b._l;
  const MVertex *v1 = b._element->getVertex(0);
  const MVertex *v2 = b._element->getVertex(1);
  const double alpha = atan2(v2->y() - v1->y(), v2->x() - v1->x());
  const double C = cos(alpha);
  const double S = sin(alpha);

  printf("beam %d %g %g %g\n", iBeam, alpha, C, S);

  fullMatrix<double> R(6, 6);
  R(0, 0) = R(1, 1) = R(3, 3) = R(4, 4) = C;
  R(0, 1) = R(3, 4) = S;
  R(1, 0) = R(4, 3) = -S;
  R(2, 2) = R(5, 5) = 1.0;

  fullMatrix<double> k(6, 6);

  // tensile stiffness
  k(0, 0) = k(3, 3) = TS;
  k(0, 3) = k(3, 0) = -TS;
  // bending stiffness
  k(1, 1) = k(4, 4) = 12 * BS;
  k(2, 2) = k(5, 5) = 4. * BS * b._l * b._l;
  k(1, 2) = k(2, 1) = k(1, 5) = k(5, 1) = 6 * BS * b._l;
  k(4, 2) = k(2, 4) = k(4, 5) = k(5, 4) = -6 * BS * b._l;
  k(4, 1) = k(1, 4) = -12 * BS;
  k(5, 2) = k(2, 5) = -2 * BS * b._l * b._l;

  fullMatrix<double> Rt(R), temp(6, 6);
  Rt.transposeInPlace();
  Rt.mult(k, temp);
  temp.mult(R, K);
}
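The last three lines transform the element stiffness matrix from the beam's local frame to the global frame by the usual congruence transform,

\[ K = R^{\top}\,k\,R, \]

with \(R\) the 6×6 rotation assembled from the direction cosines \(C = \cos\alpha\), \(S = \sin\alpha\) (one 2D rotation per node plus an unchanged rotational DOF).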
Example #8
File: spe.hpp  Project: cwidmer/shogun
EmbeddingResult spe_embedding(RandomAccessIterator begin, RandomAccessIterator end,
		PairwiseCallback callback, const Neighbors& neighbors,
		unsigned int target_dimension, bool global_strategy,
		DefaultScalarType tolerance, int nupdates)
{
	timed_context context("SPE embedding computation");
	unsigned int k = 0;
	if (!global_strategy)
		k = neighbors[0].size();

	// The number of data points
	int N = end-begin;
	while (nupdates > N/2)
		nupdates = N/2;

	// Look for the maximum distance
	DefaultScalarType max = 0.0;
	for (RandomAccessIterator i_iter=begin; i_iter!=end; ++i_iter)
	{
		for (RandomAccessIterator j_iter=i_iter+1; j_iter!=end; ++j_iter)
		{
			max = std::max(max, callback(*i_iter,*j_iter));
		}
	}

	// Distances normalizer used in global strategy
	DefaultScalarType alpha = 0.0;
	if (global_strategy)
		alpha = 1.0 / max * std::sqrt(2.0);

	// Random embedding initialization, Y is short for embedding_feature_matrix
	std::srand(std::time(0));
	DenseMatrix Y = (DenseMatrix::Random(target_dimension,N)
		       + DenseMatrix::Ones(target_dimension,N)) / 2;
	// Auxiliary difference embedding feature matrix
	DenseMatrix Yd(target_dimension,nupdates);

	// SPE's main loop
	
	typedef std::vector<int> Indices;
	typedef std::vector<int>::iterator IndexIterator;

	// Maximum number of iterations
	int max_iter = 2000 + round(0.04 * N*N);
	if (!global_strategy)
		max_iter *= 3;
	// Learning parameter
	DefaultScalarType lambda = 1.0;
	// Vector of indices used for shuffling
	Indices indices(N);
	for (int i=0; i<N; ++i)
		indices[i] = i;
	// Vector with distances in the original space of the points to update
	DenseVector Rt(nupdates);
	DenseVector scale(nupdates);
	DenseVector D(nupdates);
	// Pointers to the indices of the elements to update
	IndexIterator ind1;
	IndexIterator ind2;
	// Helper used in local strategy
	Indices ind1Neighbors;
	if (!global_strategy)
		ind1Neighbors.resize(k*nupdates);

	for (int i=0; i<max_iter; ++i)
	{
		// Shuffle to select the vectors to update in this iteration
		std::random_shuffle(indices.begin(),indices.end());

		ind1 = indices.begin();
		ind2 = indices.begin()+nupdates;

		// With local strategy, the second set of indices is selected among
		// neighbors of the first set
		if (!global_strategy)
		{
			// Neighbors of interest
			for(int j=0; j<nupdates; ++j)
			{
				const LocalNeighbors& current_neighbors =
					neighbors[*ind1++];

				for(unsigned int kk=0; kk<k; ++kk)
					ind1Neighbors[kk + j*k] = current_neighbors[kk];
			}
			// Restore ind1
			ind1 = indices.begin();

			// Generate pseudo-random indices and select final indices
			for(int j=0; j<nupdates; ++j)
			{
				unsigned int r = round( std::rand()*1.0/RAND_MAX*(k-1) ) + k*j;
				indices[nupdates+j] = ind1Neighbors[r];
			}
		}


		// Compute distances between the selected points in the embedded space
		for(int j=0; j<nupdates; ++j)
		{
			//FIXME it seems that here Euclidean distance is forced
			D[j] = (Y.col(*ind1) - Y.col(*ind2)).norm();
			++ind1, ++ind2;
		}

		// Get the corresponding distances in the original space
		if (global_strategy)
			Rt.fill(alpha);
		else // local_strategy
			Rt.fill(1);

		ind1 = indices.begin();
		ind2 = indices.begin()+nupdates;
		for (int j=0; j<nupdates; ++j)
			Rt[j] *= callback(*(begin + *ind1++), *(begin + *ind2++));

		// Compute some terms for update

		// Scale factor
		D.array() += tolerance;
		scale = (Rt-D).cwiseQuotient(D);

		ind1 = indices.begin();
		ind2 = indices.begin()+nupdates;
		// Difference matrix
		for (int j=0; j<nupdates; ++j)
		{
			Yd.col(j).noalias() = Y.col(*ind1) - Y.col(*ind2);

			++ind1, ++ind2;
		}

		ind1 = indices.begin();
		ind2 = indices.begin()+nupdates;
		// Update the location of the vectors in the embedded space
		for (int j=0; j<nupdates; ++j)
		{
			Y.col(*ind1) += lambda / 2 * scale[j] * Yd.col(j);
			Y.col(*ind2) -= lambda / 2 * scale[j] * Yd.col(j);

			++ind1, ++ind2;
		}

		// Update the learning parameter
		lambda = lambda - ( lambda / max_iter );
	}

	return EmbeddingResult(Y.transpose(),DenseVector());
};
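The inner update in the loop above is the stochastic proximity embedding rule: for each sampled pair \((i,j)\) with embedded distance \(d_{ij}\) and target distance \(r_{ij}\) (scaled by \(\alpha\) in the global strategy),

\[ y_i \leftarrow y_i + \frac{\lambda}{2}\,\frac{r_{ij} - d_{ij}}{d_{ij} + \epsilon}\,(y_i - y_j), \qquad y_j \leftarrow y_j - \frac{\lambda}{2}\,\frac{r_{ij} - d_{ij}}{d_{ij} + \epsilon}\,(y_i - y_j), \]

where \(\epsilon\) is `tolerance` and the learning rate \(\lambda\) decays linearly to zero over `max_iter` iterations.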
int main(){

  cmnLogger::SetMask( CMN_LOG_ALLOW_ALL );
  cmnLogger::SetMaskFunction( CMN_LOG_ALLOW_ALL );
  cmnLogger::SetMaskDefaultLog( CMN_LOG_ALLOW_ALL );

  osg::ref_ptr< osaOSGWorld > world = new osaOSGWorld;

  // Create a camera
  int x = 0, y = 0;
  int width = 320, height = 240;
  double Znear = 0.1, Zfar = 10.0;
  osg::ref_ptr< osaOSGCamera > camera;
  camera = new osaOSGStereo( world,
			     x, y, width, height,
			     55.0, ((double)width)/((double)height),
			     Znear, Zfar,
			     0.10,
			     false );
  camera->Initialize( std::string( "Stereo-" ) );

  // Create the objects
  cmnPath path;
  path.AddRelativeToCisstShare("/models");
  path.AddRelativeToCisstShare("/models/hubble");

  vctFrame4x4<double> Rt(  vctMatrixRotation3<double>(),
			   vctFixedSizeVector<double,3>( 0.0, 0.0, 0.5 ) );

  osg::ref_ptr< osaOSGBody > hubble;
  hubble = new osaOSGBody( path.Find("hst.3ds"), world, Rt );

  osg::ref_ptr< osaOSGBody > background;
  background = new osaOSGBody( path.Find("background.3ds"), world,
			       vctFrame4x4<double>() );

  std::cout << "ESC to quit" << std::endl;

  // animate and render
  double theta=1.0;
  while( !camera->done() ){

    // rotate hubble
    vctFixedSizeVector<double,3> u( 0.0, 0.0, 1.0 );
    vctAxisAngleRotation3<double> Rwh( u, theta );
    vctFrame4x4<double> Rtwh(  Rwh,
			       vctFixedSizeVector<double,3>( 0.0, 0.0, 0.5 ) );
    hubble->SetTransform( Rtwh );

    // zoom out the camera
    vctFrame4x4<double> Rtwc( vctMatrixRotation3<double>(),
			      vctFixedSizeVector<double,3>(0.0, 0.0, theta ) );
    camera->SetTransform( Rtwc );

    camera->frame();

    theta += 0.001;

  }

  return 0;

}
void Calib::computeProjectionMatrix()
{
    std::cout << "1111111111***********************************************************************************\n";
//    for(int i = 0;i < imagePoints.size();i++)
//    {
//        std::cout<<" imagePoints[i]= "<< imagePoints[i]<<std::endl;
//        std::cout<<" objectPoints[i]= "<< objectPoints[i]<<std::endl;
//    }

    if (objectPoints.size() != imagePoints.size())
        return;

    int n = objectPoints.size();

    Eigen::Vector2d avg2 = Eigen::Vector2d::Zero();
    Eigen::Vector3d avg3 = Eigen::Vector3d::Zero();

    for (unsigned int i = 0; i < n; i++)
    {
        avg2 += imagePoints[i];
        avg3 += objectPoints[i];
        std::cout << i << " " << objectPoints[i](0) << " " << objectPoints[i](1) << " " << objectPoints[i](2) << std::endl;
    }
    avg2 = avg2 / n;
    avg3 = avg3 / n;

    std::cout << "avg2 = " << avg2 << std::endl;
    std::cout << "avg3 = " << avg3 << std::endl;

    /* *******************************************************************************
  *  Data normalization
  *  Translates and normalizes the 2D image points (mean distance sqrt(2) from the origin)
  *  and the 3D object points (mean distance sqrt(3)) so that their centroids are at the origin.
  */
    float meandist2 = 0;
    float meandist3 = 0;

    imagePoints_t.resize(n);
    objectPoints_t.resize(n);

    for (unsigned int i = 0; i < n; i++)
    {
        imagePoints_t[i] = imagePoints[i] - avg2;
        objectPoints_t[i] = objectPoints[i] - avg3;
//        std::cout << "1 imagePoints_t[i] = " << imagePoints_t[i] << std::endl;
//        std::cout << "1 objectPoints_t[i] = " << objectPoints_t[i] << std::endl;

        meandist2 += sqrt(imagePoints_t[i](0) * imagePoints_t[i](0) + imagePoints_t[i](1) * imagePoints_t[i](1));
        meandist3 += sqrt(objectPoints_t[i](0) * objectPoints_t[i](0) + objectPoints_t[i](1) * objectPoints_t[i](1)
                          + objectPoints_t[i](2) * objectPoints_t[i](2));
    }
    meandist2 /= n;
    meandist3 /= n;

    std::cout << "meandist2 = " << meandist2 << std::endl;
    std::cout << "meandist3 = " << meandist3 << std::endl;

    float scale2 = sqrt(2) / meandist2;
    float scale3 = sqrt(3) / meandist3;


    std::cout << "2222222222222***********************************************************************************\n";
    for (unsigned int i = 0; i < n; i++)
    {
        imagePoints_t[i] = scale2 * imagePoints_t[i];
        objectPoints_t[i] = scale3 * objectPoints_t[i];

//        std::cout << "imagePoints_t[i] = " << imagePoints_t[i] << std::endl;
//        std::cout << "objectPoints_t[i] = " << objectPoints_t[i] << std::endl;

    }

    //    std::cout<<avg3<<std::endl;
    /* *******************************************************************************
  * Similarity transformation T1 to normalize the image points,
  * and a second similarity transformation T2 to normalize the space points.
  * Page 181 in Multiple_View_Geometry_in_Computer_Vision__2nd_Edition
  */
    Eigen::Matrix3d T1;
    T1 << scale2, 0, -scale2 * avg2(0), 0, scale2, -scale2 * avg2(1), 0, 0, 1;

    Eigen::MatrixXd T2(4, 4);
    T2 << scale3, 0, 0, -scale3 * avg3(0), 0, scale3, 0, -scale3 * avg3(1), 0, 0, scale3, -scale3 * avg3(2), 0, 0, 0, 1;


    // use Eigen
    Eigen::MatrixXd A(2 * n, 12);
    A.setZero(2 * n, 12);

    for (int i = 0; i < n; i++)
    {

        A(2 * i, 0) = objectPoints_t[i](0);
        A(2 * i, 1) = objectPoints_t[i](1);
        A(2 * i, 2) = objectPoints_t[i](2);
        A(2 * i, 3) = 1;
        A(2 * i, 4) = 0;
        A(2 * i, 5) = 0;
        A(2 * i, 6) = 0;
        A(2 * i, 7) = 0;
        A(2 * i, 8) = -imagePoints_t[i](0) * objectPoints_t[i](0);
        A(2 * i, 9) = -imagePoints_t[i](0) * objectPoints_t[i](1);
        A(2 * i, 10) = -imagePoints_t[i](0) * objectPoints_t[i](2);
        A(2 * i, 11) = -imagePoints_t[i](0) * 1;

        A(2 * i + 1, 0) = 0;
        A(2 * i + 1, 1) = 0;
        A(2 * i + 1, 2) = 0;
        A(2 * i + 1, 3) = 0;
        A(2 * i + 1, 4) = 1 * objectPoints_t[i](0);
        A(2 * i + 1, 5) = 1 * objectPoints_t[i](1);
        A(2 * i + 1, 6) = 1 * objectPoints_t[i](2);
        A(2 * i + 1, 7) = 1;
        A(2 * i + 1, 8) = -imagePoints_t[i](1) * objectPoints_t[i](0);
        A(2 * i + 1, 9) = -imagePoints_t[i](1) * objectPoints_t[i](1);
        A(2 * i + 1, 10) = -imagePoints_t[i](1) * objectPoints_t[i](2);
        A(2 * i + 1, 11) = -imagePoints_t[i](1) * 1;

    }

    Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeFullU | Eigen::ComputeFullV);
    const Eigen::VectorXd & v1 = svd.matrixV().col(11);

    this->Pt << v1(0), v1(1), v1(2), v1(3), v1(4), v1(5), v1(6), v1(7), v1(8), v1(9), v1(10), v1(11);
//    std::cout<<"A= \n"<< A<<std::endl;
//    std::cout<<Pt<<std::endl;
    P = T1.inverse() * Pt * T2;


    //Decompose the projection matrix
    cv::Mat Pr(3, 4, cv::DataType<float>::type);
    cv::Mat Mt(3, 3, cv::DataType<float>::type);
    cv::Mat Rt(3, 3, cv::DataType<float>::type);
    cv::Mat Tt(4, 1, cv::DataType<float>::type);

    for (unsigned i = 0; i < 3; i++)
    {
        for (unsigned int j = 0; j < 4; j++)
        {
            Pr.at<float> (i, j) = P(i, j);
        }
    }
    cv::decomposeProjectionMatrix(Pr, Mt, Rt, Tt);

    // scale: Mt(2,2) should be 1, so rescale the projection matrix and decompose it again
    float k = (1 / (Mt.at<float> (2, 2)));

    /* ****************************************************************************************
      * Update the projection matrix
      * Decompose again to get the new intrinsic matrix and rotation matrix
      */
    this->P = k * P;

    cv::Mat Pro(3, 4, cv::DataType<float>::type);
    cv::Mat Mc(3, 3, cv::DataType<float>::type); // intrinsic parameter matrix
    cv::Mat Rc(3, 3, cv::DataType<float>::type); // rotation matrix
    cv::Mat Tc(4, 1, cv::DataType<float>::type); // translation vector

    for (unsigned i = 0; i < 3; i++)
    {
        for (unsigned int j = 0; j < 4; j++)
        {
            Pro.at<float> (i, j) = P(i, j);
        }
    }
    cv::decomposeProjectionMatrix(Pro, Mc, Rc, Tc);

    // Change from OpenCV variables to Eigen
    for (unsigned i = 0; i < 3; i++)
    {
        for (unsigned int j = 0; j < 3; j++)
        {
            M(i, j) = Mc.at<float> (i, j) ;
        }
    }


    /* ****************************************************************************************
      * Compute the rotation matrix R and translation vector T
      */
    P_temp = M.inverse() * P;
    this->R = this->P_temp.block(0,0,3,3);
    this->T = this->P_temp.block(0,3,3,1);


    std::cout << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n";
//    std::cout << "T1 =\n " << T2 << std::endl;
//    std::cout << "T2 =\n " << T1 << std::endl;
//    std::cout << "A =\n " << A << std::endl;
//    std::cout << "svd.matrixV() =\n " << svd.matrixV() << std::endl;
//    std::cout << "Pt =\n " << Pt << std::endl;
    std::cout << "P =\n " << P << std::endl;
    std::cout << "M =\n " << M << std::endl;
    std::cout << "R =\n " << R << std::endl;
    std::cout << "Rc =\n " << Rc << std::endl;
    std::cout << "T =\n " << T << std::endl;
    std::cout << "Mt(2,2) = " << Mt.at<float> (2, 2) << std::endl;

}
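This is the normalized DLT for camera resectioning: after centring and scaling (the similarity transforms \(T_1\), \(T_2\)), each 2D–3D correspondence \(x_i \simeq P X_i\) contributes the two rows of \(A\) built in the loop, the vectorized projection \(\hat P\) is taken as the right singular vector of \(A\) with the smallest singular value (`svd.matrixV().col(11)`), and the normalization is undone with

\[ P = T_1^{-1}\,\hat P\,T_2 . \]

The matrix is then rescaled so that the decomposed calibration matrix has \(M(2,2) = 1\) and decomposed once more into the intrinsic matrix \(M\), rotation \(R\) and translation \(T\) via \(P = M\,[R\,|\,T]\).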
Example #11
void add_Points( cv::Mat R[],cv::Mat t[],const std::vector< cv::Point2f > points_1[],
        const std::vector< cv::Point2f > points_2[],cv::Mat& points3D,const int add,std::vector< cv::Point2f > mask3D[],cv::Mat& img_matches){

    // obtain mask on new matches by two-view constraint
    cv::Mat mask;
    findEssentialMat(points_1[add-1], points_2[add-1], camIntrinsic, cv::RANSAC, 0.999, 0.25, mask);
    std::vector< cv::Point2f > pointsx,pointsComparex;
    for(int i=0;i<points_1[add-1].size();i++){
        if (mask.at<uchar>(i,0) != 0){
            pointsx.push_back(points_1[add-1][i]);
            pointsComparex.push_back(points_2[add-1][i]);
        }
    }
    std::cout<<"mask result: "<<points_1[add-1].size()<<"	"<<pointsx.size()<<std::endl;

    
    // draw red circles on new points, blue circles on points masked out
    for (int j=0; j<points_1[add-1].size();j++){
        if (mask.at<uchar>(j,0) != 0)
            circle(img_matches, points_1[add-1][j], 10, cv::Scalar(0,0,255), 3);
        else
            circle(img_matches, points_1[add-1][j], 10, cv::Scalar(255,0,0), 3);
    }
    cv::namedWindow("Matches", CV_WINDOW_NORMAL);
    cv::imshow("Matches", img_matches);
    cv::waitKey();
       
    // form projection matrix of the 2nd last camera
    cv::Mat projMatr1(3,4,CV_32F);
    cv::Mat Rt1(3,4,CV_32F);  // Rt = [R | t]
    R[add-1].convertTo(R[add-1],CV_32F);
    t[add-1].convertTo(t[add-1],CV_32F);
    hconcat(R[add-1], t[add-1], Rt1);  // Rt concatenate
    projMatr1 = camIntrinsic * Rt1;
    // form projection matrix of the last camera
    cv::Mat projMatr2(3,4,CV_32F);
    cv::Mat Rt(3,4,CV_32F);  // Rt = [R | t]
    R[add].convertTo(R[add],CV_32F);
    t[add].convertTo(t[add],CV_32F);
    hconcat(R[add], t[add], Rt);  // Rt concatenate
    projMatr2 = camIntrinsic * Rt;

    // triangulation on the masked matches
    cv::Mat points4Dtemp=cv::Mat_<float>(4,points_1[add-1].size());
    cv::triangulatePoints(projMatr1, projMatr2, pointsx, pointsComparex, points4Dtemp);
    
    // fill in new parts of mask3D with 2D points
    cv::Point2f x(-1,-1);
    for(int i = 0;i<points4Dtemp.cols;i++){
        for (int j=0;j<m;j++)
        {
            if (j == (add-1))
                mask3D[j].push_back(pointsx[i]);
            else if (j == add)
                mask3D[j].push_back(pointsComparex[i]);
            else
                mask3D[j].push_back(x);
        }
    }

    cv::Mat points3Dtemp(3,pointsx.size(),CV_32F);
    for (int i=0; i<pointsx.size(); i++)
    {
        float x = points4Dtemp.at<float>(3,i);
        points3Dtemp.at<float>(0,i) = points4Dtemp.at<float>(0,i) / x;
        points3Dtemp.at<float>(1,i) = points4Dtemp.at<float>(1,i) / x;
        points3Dtemp.at<float>(2,i) = points4Dtemp.at<float>(2,i) / x;
    }
    hconcat(points3D,points3Dtemp,points3D);
    n = points3D.cols;
}
Example #12
void Assembler::tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
  VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  EmitBranch(at, TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}
Example #13
void PnP(const std::vector< cv::DMatch > good_matches[],const int add,const std::vector<cv::KeyPoint> keypoints[],
        cv::Mat R[], cv::Mat t[],std::vector< cv::Point2f > points_1[], std::vector< cv::Point2f > points_2[],std::vector< cv::Point2f > mask3D[],cv::Mat& img_matches){

    std::vector<int> indicator(5,-1);
    std::vector<cv::Point2f> points1,points2,points3;

    // image 1 & 2
    // write in indicator for last 2 images
    for(int i = 0; i < good_matches[add-2].size();i++){
        if(good_matches[add-2][i].trainIdx>=indicator.size()){
            indicator.resize(good_matches[add-2][i].trainIdx+1,-1);
            indicator[good_matches[add-2][i].trainIdx] = i;
        }
        else{
            indicator[good_matches[add-2][i].trainIdx] = i;
        }
    }

    // images 2 & 3
    std::cout<< "number of matches between "<<add+1<<" pic and last pic: "<<points_1[add-1].size()<<std::endl;   // how many matches initially

    // assign points1, points2, points3; find common matches and update mask3d and points_2
    for(int i = 0; i < good_matches[add-1].size();i++){

        if(good_matches[add-1][i].queryIdx<indicator.size()){
            int ind = good_matches[add-1][i].queryIdx;

            if(indicator[ind]!=-1){

                cv::Point2f temppoint = keypoints[add-2][good_matches[add-2][indicator[ind]].queryIdx].pt ;
                points1.push_back(temppoint);

                temppoint =keypoints[add-1][good_matches[add-2][indicator[ind]].trainIdx].pt;
                std::vector<cv::Point2f>::iterator p = std::find(points_1[add-1].begin(), points_1[add-1].end(), temppoint);
                std::vector<cv::Point2f>::iterator _p = std::find(mask3D[add-1].begin(), mask3D[add-1].end(), temppoint);
                points2.push_back(temppoint);

                temppoint = keypoints[add][good_matches[add-1][i].trainIdx].pt;
                std::vector<cv::Point2f>::iterator t = std::find(points_2[add-1].begin(), points_2[add-1].end(), temppoint);
                points3.push_back(temppoint);

                // update mask3d info 3rd cam
                if(_p!=mask3D[add-1].end()){
                    int nPosition = distance (mask3D[add-1].begin(), _p);
                    mask3D[add][nPosition] = temppoint;
                }

                // delete common matches across 3 pics from points_1 and points_2
                if(p!=points_1[add-1].end()){
                    int nPosition = distance (points_1[add-1].begin(), p);
                    points_1[add-1].erase(p);
                    points_2[add-1].erase(points_2[add-1].begin()+nPosition);
                }
                //if(t!=points_2[add-1].end()){
                //	points_2[add-1].erase(t);
                //}
            }
        }
    }


/*
    for (int j=0; j<points_1[add-1].size();j++){

        circle(img_matches, points_1[add-1][j], 10, cv::Scalar(0,0,255), 3);
    }

    cv::namedWindow("Matches", CV_WINDOW_NORMAL);
    cv::imshow("Matches", img_matches);
    cv::waitKey();
*/



    std::cout<<"number of common matches found across 3 pics: "<<points1.size()<<std::endl;
    std::cout<<"number of matches between "<<add+1<<" pic and last pic after deduction: "<<points_1[add-1].size()<<"	"<<points_2[add-1].size()<<std::endl;

    // reconstruct 3d points of common matches from last pics for pnp

    cv::Mat projMatr1(3,4,CV_32F);
    if(add==2){
        projMatr1=projMatr0;
    }
    else{
        cv::Mat Rt1(3,4,CV_32F);  // Rt = [R | t]
        R[add-2].convertTo(R[add-2],CV_32F);
        t[add-2].convertTo(t[add-2],CV_32F);
        hconcat(R[add-2], t[add-2], Rt1);  // Rt concatenate

        projMatr1 = camIntrinsic * Rt1;
    }
    cv::Mat projMatr2(3,4,CV_32F);
    cv::Mat Rt(3,4,CV_32F);  // Rt = [R | t]
    R[add-1].convertTo(R[add-1],CV_32F);
    t[add-1].convertTo(t[add-1],CV_32F);
    hconcat(R[add-1], t[add-1], Rt);  // Rt concatenate
    projMatr2 = camIntrinsic * Rt;
    //	        std::cout<<Rt<<std::endl;

    cv::Mat points4Dtemp=cv::Mat_<float>(4,points1.size());

    /*
       cv::Mat mask1,mask2;
       cv::findEssentialMat(points1, points2, camIntrinsic, cv::RANSAC, 0.999, 1.0, mask1);
       std::vector< cv::Point2f > points1x, points2x, points3x;
       for(int i=0;i<points1.size();i++){
       if (mask1.at<uchar>(i,0) != 0){
       points1x.push_back(points1[i]);
       points2x.push_back(points2[i]);
       points3x.push_back(points3[i]);
       }
       }

       cv::findEssentialMat(points2x, points3x, camIntrinsic, cv::RANSAC, 0.999, 1.0, mask2);
       std::vector< cv::Point2f > points1xx, points2xx, points3xx;
       for(int i=0;i<points1x.size();i++){
       if (mask2.at<uchar>(i,0) != 0){
       points1xx.push_back(points1x[i]);
       points2xx.push_back(points2x[i]);
       points3xx.push_back(points3x[i]);
       }
       }

       std::cout<<"number of common matches after ransac: "<<points1xx.size()<<std::endl;
       */


    cv::triangulatePoints(projMatr1, projMatr2, points1, points2, points4Dtemp);
    std::vector<cv::Point3f> points3Dtemp;
    for (int i=0; i < points1.size(); i++)
    {
        float x = points4Dtemp.at<float>(3,i);
        cv::Point3f p;
        p.x = points4Dtemp.at<float>(0,i) / x;
        p.y = points4Dtemp.at<float>(1,i) / x;
        p.z = points4Dtemp.at<float>(2,i) / x;
        points3Dtemp.push_back(p);
    }

    //std::cout<<points3Dtemp.rows<<"		"<<std::endl;
    //std::cout<<"6666666666666666666666666666666"<<std::endl;

    // PnP to get R and t of current pic

    cv::Mat Rv;
    cv::Mat inlier;
    solvePnPRansac(points3Dtemp,points3,camIntrinsic,dist,Rv,t[add],false,3,5,0.99,inlier,CV_ITERATIVE);
    int n_inl = countNonZero(inlier);
    std::cout<<"number of inliers in PnP: "<<n_inl<<std::endl;

    Rodrigues(Rv,R[add]);

    //std::cout<<"5555555555555555555555555555555"<<std::endl;
    //	        std::cout<<"R after pnp: "<<R[add]<<std::endl;
    //	        std::cout<<"t after pnp: "<<t[add-1]<<std::endl;

}
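`solvePnPRansac` returns the pose rotation as a Rodrigues (axis–angle) vector `Rv`; `Rodrigues(Rv, R[add])` expands it into the 3×3 matrix via

\[ R = I + \sin\theta\,[\hat k]_{\times} + (1 - \cos\theta)\,[\hat k]_{\times}^{2}, \qquad \theta = \lVert r \rVert, \quad \hat k = r/\theta, \]

where \([\hat k]_{\times}\) is the skew-symmetric cross-product matrix of the unit axis.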
Example #14
void Assembler::cbnz(const Register& rt, int imm19) {
  EmitBranch(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}
Example #15
tmp<volScalarField> gammaSST<BasicTurbulenceModel>::Fturb() const
{
    return exp(-pow4(Rt()/2));
}
Example #16
int main(int argc, char* argv[])
{
	std::string ymlfile;
		//Parse Input 1
	if(argc>1)
		ymlfile.assign(argv[1]);
	else
		ymlfile.assign("wiiCalib.yml");

	cv::Mat intrinsic;
	cv::Mat distcoeff;

	{
	cv::FileStorage fs;
	try{
		fs.open(ymlfile, cv::FileStorage::READ);	
		fs["camera_matrix"] >> intrinsic;
		fs["distortion_coefficients"] >> distcoeff;
	}
	catch(cv::Exception& e){
		std::cout << e.msg << std::endl;
		return 1;
	}
	}

	upmc::wiiPoseSolver wii;
	wii.load(ymlfile);

    //Simulate Rotation and Translation
    
    //ROTATION
  cv::Mat Rt(3,4,CV_64F,cv::Scalar(0));

  Rt( cv::Range::all(), cv::Range(0,3) ) 
    = rotox(deg2rad(10))*rotoy(deg2rad(0))*rotoz(deg2rad(10));

    //TRANSLATION
    double T[]={0.25, -0.25, 1.75};
    
    cv::Mat tvecTest(3,1, CV_64F, T);
    
  Rt.at<double>(0,3) = tvecTest.at<double>(0);
  Rt.at<double>(1,3) = tvecTest.at<double>(1);
  Rt.at<double>(2,3) = tvecTest.at<double>(2);
    
    cv::Mat rvecTest;
    cv::Rodrigues(Rt, rvecTest);
    
  
    //DEFINE MODEL POINTS IN 3D
  std::vector<cv::Point3f> model_pts;
  model_pts.push_back(cv::Point3f(-0.25f, -.25f, 0.0f));
  model_pts.push_back(cv::Point3f(0.25f, -.25f, 0.0f));
  model_pts.push_back(cv::Point3f(.25f, .25f ,0.0f));
  model_pts.push_back(cv::Point3f(-.25, .25, 0.0f));
  cv::Mat M(model_pts);

    //CAMERA PROJECTION
    cv::Mat imagePoints;
    cv::projectPoints( M, rvecTest, tvecTest, intrinsic,  
                  distcoeff, imagePoints);
    
  //cv::Mat m = P*M_resh;
  std::cout << "Image points"<< std::endl << imagePoints << std::endl << "rows: " << imagePoints.rows << "\t" << "cols: " << imagePoints.cols << "\t"<<", channels "<< imagePoints.channels() << ", npoints= " << imagePoints.checkVector(3,CV_64F) <<" type: "<< imagePoints.type()<< std::endl;


  cv::Mat rvec, tvec;
      
//    cv::solvePnPRansac(M, imagePoints, intrinsic, distcoeff, rvec, tvec);
//    cv::Mat R_est;
//    cv::Rodrigues(rvec, R_est);
//    
//    cv::Mat difR = Rt( cv::Range::all(), cv::Range(0,3) ) - R_est;
//    
//    std::cout << "RANSAC: difR"<<  std::endl << difR << std::endl;
//    
//    float norm_difference =  cv::norm(difR);
//    std::cout << "t: " << tvec << std::endl;
//    std::cout << "difference: " << norm_difference << std::endl<< std::endl;

  
  cv::Mat wiiR;
  wii.solve(M, imagePoints, wiiR, tvec);

    cv::Mat difR = Rt( cv::Range::all(), cv::Range(0,3) ) - wiiR;

  std::cout << "PnP: difR"<<  std::endl << difR << std::endl;

    float norm_difference =  cv::norm(difR);  
    std::cout << "t: " << tvec << std::endl;
  std::cout << "difference: " << norm_difference << std::endl;

  std::cout << "Press Return to quit" << std::endl;
  getchar();
  
}
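The test above compares the recovered rotation to the ground truth with the Frobenius norm of the difference matrix. A minimal alternative sketch (hypothetical helper, not part of the example) reports the geodesic angle between the two rotations instead, assuming both inputs are 3×3 CV_64F proper rotation matrices:

#include <opencv2/core.hpp>
#include <algorithm>
#include <cmath>

// Angular distance in radians between two 3x3 rotation matrices.
static double rotationAngle(const cv::Mat& R1, const cv::Mat& R2)
{
  cv::Mat Rrel = R1.t() * R2;                   // relative rotation R1^T * R2
  double c = (cv::trace(Rrel)[0] - 1.0) * 0.5;  // cos(theta) from the trace
  c = std::max(-1.0, std::min(1.0, c));         // clamp against round-off
  return std::acos(c);
}

It could be called as rotationAngle(Rt(cv::Range::all(), cv::Range(0,3)), wiiR), provided wiiR comes back in double precision.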
Example #17
File: widget.cpp  Project: Kupchinsky/MV
void Widget::on_pushButtonLagra_clicked()
{
    ui->widget->clearGraphs();

    //
    Graphf* gf = new Graphf();
    gf->setStartX(ui->lineEditLagraStart->text().toDouble());
    gf->setFinishX(ui->lineEditLagraFinish->text().toDouble());
    gf->setPointsCount((gf->finishX - gf->startX) * 10 + 1);
    gf->setStepY((gf->finishX - gf->startX) / (gf->points_count - 1));
    gf->calcf();

    QCPGraph *graph = ui->widget->addGraph();
    graph->setData(gf->fX, gf->fY);
    graph->setPen(QPen(Qt::red, 1));
    graph->setName("f(x)");
    //

    //
    Lagra* l = new Lagra(ui->lineEditLagraPointsCount->text().toInt() + 1);
    l->calcF(gf);

    graph = ui->widget->addGraph();
    graph->setData(gf->fX, l->result);
    graph->setPen(QPen(Qt::blue, 1));
    graph->setName("L(x)");
    //

    //
    QVector<double> Rt(gf->points_count);
    for (int i = 0; i < Rt.size(); i++)
        Rt[i] = qFabs(gf->fY[i] - l->result[i]);

    qDebug() << Rt;
    graph = ui->widget->addGraph();
    graph->setData(gf->fX, Rt);
    graph->setPen(QPen(Qt::black, 1));
    graph->setName("R(x)");
    //

    //
    LagraRp* lrp = NULL;

    int unit_points = l->lX.size();
    if (unit_points - 1 == 5)
    {
        lrp = new LagraRp();
        lrp->calcRp(gf, l);

        graph = ui->widget->addGraph();
        graph->setData(gf->fX, lrp->result);
        graph->setPen(QPen(Qt::green, 1));
        graph->setName("Теоретическая погрешность");
    }
    //

    ui->widget->xAxis->setRange(gf->startX, gf->finishX);
    ui->widget->yAxis->setRange(0, 10);
    ui->widget->replot(); /* redraw the plot */

    if (lrp != NULL)
        delete lrp;

    delete l;
    delete gf;
}
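The "R(x)" curve above is the observed interpolation error \(\lvert f(x_k) - L(x_k)\rvert\) at the plotted sample points; the extra "Theoretical error" curve drawn when five interpolation intervals are used presumably corresponds to the classical Lagrange remainder bound,

\[ f(x) - L_n(x) = \frac{f^{(n+1)}(\xi)}{(n+1)!}\,\prod_{i=0}^{n}(x - x_i), \qquad \xi \in (a, b), \]

though the exact expression evaluated by `LagraRp::calcRp` is not shown here.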
Example #18
void Assembler::hint(Instruction* at, SystemHint code) {
  Emit(at, HINT | ImmHint(code) | Rt(xzr));
}
Example #19
BufferOffset Assembler::hint(SystemHint code) {
  return Emit(HINT | ImmHint(code) | Rt(xzr));
}
Example #20
void Assembler::ldr(Instruction* at, const CPURegister& rt, int imm19) {
  LoadLiteralOp op = LoadLiteralOpFor(rt);
  Emit(at, op | ImmLLiteral(imm19) | Rt(rt));
}
Example #21
void Robot::dqp_torque(const ColumnVector & q, const ColumnVector & qp,
                     const ColumnVector & dqp,
                     ColumnVector & ltorque, ColumnVector & dtorque)
{
   int i;
   ColumnVector z0(3);
   Matrix Rt, temp;
   Matrix Q(3,3);
   ColumnVector *w, *wp, *vp, *a, *f, *n, *F, *N, *p;
   ColumnVector *dw, *dwp, *dvp, *da, *df, *dn, *dF, *dN, *dp;
   if(q.Ncols() != 1 || q.Nrows() != dof) error("q has wrong dimension");
   if(qp.Ncols() != 1 || qp.Nrows() != dof) error("qp has wrong dimension");
   ltorque = ColumnVector(dof);
   dtorque = ColumnVector(dof);
   set_q(q);
   w = new ColumnVector[dof+1];
   wp = new ColumnVector[dof+1];
   vp = new ColumnVector[dof+1];
   a = new ColumnVector[dof+1];
   f = new ColumnVector[dof+1];
   n = new ColumnVector[dof+1];
   F = new ColumnVector[dof+1];
   N = new ColumnVector[dof+1];
   p = new ColumnVector[dof+1];
   dw = new ColumnVector[dof+1];
   dwp = new ColumnVector[dof+1];
   dvp = new ColumnVector[dof+1];
   da = new ColumnVector[dof+1];
   df = new ColumnVector[dof+1];
   dn = new ColumnVector[dof+1];
   dF = new ColumnVector[dof+1];
   dN = new ColumnVector[dof+1];
   dp = new ColumnVector[dof+1];
   w[0] = ColumnVector(3);
   wp[0] = ColumnVector(3);
   vp[0] = gravity;
   dw[0] = ColumnVector(3);
   dwp[0] = ColumnVector(3);
   dvp[0] = ColumnVector(3);
   z0 = 0.0;
   Q = 0.0;
   Q(1,2) = -1.0;
   Q(2,1) = 1.0;
   z0(3) = 1.0;
   w[0] = 0.0;
   wp[0] = 0.0;
   dw[0] = 0.0;
   dwp[0] = 0.0;
   dvp[0] = 0.0;
   for(i = 1; i <= dof; i++) {
      Rt = links[i].R.t();
      p[i] = ColumnVector(3);
      p[i](1) = links[i].get_a();
      p[i](2) = links[i].get_d() * Rt(2,3);
      p[i](3) = links[i].get_d() * Rt(3,3);
      if(links[i].get_joint_type() != 0) {
         dp[i] = ColumnVector(3);
         dp[i](1) = 0.0;
         dp[i](2) = Rt(2,3);
         dp[i](3) = Rt(3,3);
      }
      if(links[i].get_joint_type() == 0) {
         w[i] = Rt*(w[i-1] + z0*qp(i));
         dw[i] = Rt*(dw[i-1] + z0*dqp(i));
         wp[i] = Rt*(wp[i-1] + vec_x_prod(w[i-1],z0*qp(i)));
         dwp[i] = Rt*(dwp[i-1]
               + vec_x_prod(dw[i-1],z0*qp(i))
               + vec_x_prod(w[i-1],z0*dqp(i))
               );
         vp[i] = vec_x_prod(wp[i],p[i])
               + vec_x_prod(w[i],vec_x_prod(w[i],p[i]))
               + Rt*(vp[i-1]);
         dvp[i] = vec_x_prod(dwp[i],p[i])
               + vec_x_prod(dw[i],vec_x_prod(w[i],p[i]))
               + vec_x_prod(w[i],vec_x_prod(dw[i],p[i]))
               + Rt*dvp[i-1];
      } else {
         w[i] = Rt*w[i-1];
         dw[i] = Rt*dw[i-1];
         wp[i] = Rt*wp[i-1];
         dwp[i] = Rt*dwp[i-1];
         vp[i] = Rt*(vp[i-1]
               + vec_x_prod(w[i],z0*qp(i))) * 2.0
               + vec_x_prod(wp[i],p[i])
               + vec_x_prod(w[i],vec_x_prod(w[i],p[i]));
         dvp[i] = Rt*(dvp[i-1]
               + (vec_x_prod(dw[i],z0*qp(i)) * 2.0
               + vec_x_prod(w[i],z0*dqp(i))))
               + vec_x_prod(dwp[i],p[i])
               + vec_x_prod(dw[i],vec_x_prod(w[i],p[i]))
               + vec_x_prod(w[i],vec_x_prod(dw[i],p[i]));
      }
      a[i] = vec_x_prod(wp[i],links[i].r)
            + vec_x_prod(w[i],vec_x_prod(w[i],links[i].r))
            + vp[i];
      da[i] = vec_x_prod(dwp[i],links[i].r)
            + vec_x_prod(dw[i],vec_x_prod(w[i],links[i].r))
            + vec_x_prod(w[i],vec_x_prod(dw[i],links[i].r))
            + dvp[i];
   }

   for(i = dof; i >= 1; i--) {
      F[i] = a[i] * links[i].m;
      N[i] = links[i].I*wp[i] + vec_x_prod(w[i],links[i].I*w[i]);
      dF[i] = da[i] * links[i].m;
      dN[i] = links[i].I*dwp[i] + vec_x_prod(dw[i],links[i].I*w[i])
            + vec_x_prod(w[i],links[i].I*dw[i]);
      if(i == dof) {
         f[i] = F[i];
         n[i] = vec_x_prod(p[i],f[i])
               + vec_x_prod(links[i].r,F[i]) + N[i];
         df[i] = dF[i];
         dn[i] = vec_x_prod(p[i],df[i])
               + vec_x_prod(links[i].r,dF[i]) + dN[i];
      } else {
         f[i] = links[i+1].R*f[i+1] + F[i];
         n[i] = links[i+1].R*n[i+1] + vec_x_prod(p[i],f[i])
               + vec_x_prod(links[i].r,F[i]) + N[i];
         df[i] = links[i+1].R*df[i+1] + dF[i];
         dn[i] = links[i+1].R*dn[i+1] + vec_x_prod(p[i],df[i])
               + vec_x_prod(links[i].r,dF[i]) + dN[i];
      }
      if(links[i].get_joint_type() == 0) {
         temp = ((z0.t()*links[i].R)*n[i]);
         ltorque(i) = temp(1,1);
         temp = ((z0.t()*links[i].R)*dn[i]);
         dtorque(i) = temp(1,1);
      } else {
         temp = ((z0.t()*links[i].R)*f[i]);
         ltorque(i) = temp(1,1);
         temp = ((z0.t()*links[i].R)*df[i]);
         dtorque(i) = temp(1,1);
      }
   }

   delete []dp;
   delete []dN;
   delete []dF;
   delete []dn;
   delete []df;
   delete []da;
   delete []dvp;
   delete []dwp;
   delete []dw;
   delete []p;
   delete []N;
   delete []F;
   delete []n;
   delete []f;
   delete []a;
   delete []vp;
   delete []wp;
   delete []w;
}
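The two loops above are the recursive Newton–Euler algorithm differentiated with respect to the joint rates (no joint accelerations appear in this sensitivity variant). For the revolute branch the underlying forward propagation is

\[ \omega_i = R_i^{\top}\bigl(\omega_{i-1} + z_0\,\dot q_i\bigr), \qquad \dot\omega_i = R_i^{\top}\bigl(\dot\omega_{i-1} + \omega_{i-1}\times z_0\,\dot q_i\bigr), \]
\[ \dot v_i = \dot\omega_i\times p_i + \omega_i\times(\omega_i\times p_i) + R_i^{\top}\dot v_{i-1}, \]

followed by the backward force/torque recursion

\[ F_i = m_i a_i, \qquad N_i = I_i\dot\omega_i + \omega_i\times I_i\omega_i, \qquad f_i = R_{i+1}f_{i+1} + F_i, \]
\[ n_i = R_{i+1}n_{i+1} + p_i\times f_i + r_i\times F_i + N_i, \qquad \tau_i = \bigl(z_0^{\top}R_i\bigr)\,n_i, \]

which is what the `w`, `wp`, `vp`, `f`, `n` arrays implement; the `d*` arrays carry the corresponding derivatives term by term.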
//\fn void ExtrinsicParam::changePanTilt(double pan, double tilt);
///\brief This function computes the new rotation matrix and the new translation vector of the extrinsic parameters when the camera has changed its position.
///\param pan Value of the new camera pan angle.
///\param tilt Value of the new camera tilt.
void ExtrinsicParam::changePanTilt(double pan, double tilt)
{
  // Compute the rotation matrices with the new values of pan and tilt
  Eigen::Matrix3d Rx, Ry;
  Rx.setZero();
  Ry.setZero();
  Rx(0,0) = 1;
  Rx(1,1) = cos((-(tilt-this->tilt))*PI/180.);
  Rx(1,2) = -sin((-(tilt-this->tilt))*PI/180.);
  Rx(2,1) = sin((-(tilt-this->tilt))*PI/180.);
  Rx(2,2) = cos((-(tilt-this->tilt))*PI/180.);
  Ry(0,0) = cos((-(pan-this->pan))*PI/180.);
  Ry(0,2) = sin((-(pan-this->pan))*PI/180.);
  Ry(1,1) = 1;
  Ry(2,0) = -sin((-(pan-this->pan))*PI/180.);
  Ry(2,2) = cos((-(pan-this->pan))*PI/180.);

  Eigen::Vector3d Tx, Ty;
  Tx.setZero();
  Ty.setZero();
  Tx(0) = 2*3.3*sin(0.5*(this->pan-pan)*PI/180.)*cos(0.5*(this->pan-pan)*PI/180.);
  Tx(2) = -2*3.3*sin(0.5*(this->pan-pan)*PI/180.)*cos((90.-0.5*(this->pan-pan))*PI/180.);
  Ty(1) = 2*3.3*sin(0.5*(this->tilt-tilt)*PI/180.)*cos(0.5*(this->tilt-tilt)*PI/180.);
  Ty(2) = -2*3.3*sin(0.5*(this->tilt-tilt)*PI/180.)*cos((90.-0.5*(this->tilt-tilt))*PI/180.);

  // Compute the new values of the extrinsic parameters
  Eigen::Matrix4d Rx1, Ry1, Rt;
  Rt << initial_rotation, initial_translation,
    0,0,0,1;
  Rx1 << Rx, Tx,
    0,0,0,1;
  Ry1 << Ry, Ty,
    0,0,0,1;
  Rt.noalias() = Rx1*Ry1*Rt;
  rotation(0,0) = Rt(0,0);rotation(0,1) = Rt(0,1);rotation(0,2) = Rt(0,2);
  rotation(1,0) = Rt(1,0);rotation(1,1) = Rt(1,1);rotation(1,2) = Rt(1,2);
  rotation(2,0) = Rt(2,0);rotation(2,1) = Rt(2,1);rotation(2,2) = Rt(2,2);
  translation(0) = Rt(0,3);
  translation(1) = Rt(1,3);
  translation(2) = Rt(2,3);
}
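Writing each 4×4 here in block form \(T = \begin{pmatrix} R & t \\ 0 & 1 \end{pmatrix}\), the update is simply the composition \(T_{\mathrm{new}} = T_x\,T_y\,T_{\mathrm{old}}\), so the extracted blocks are

\[ R_{\mathrm{new}} = R_x R_y R_{\mathrm{old}}, \qquad t_{\mathrm{new}} = R_x R_y\,t_{\mathrm{old}} + R_x t_y + t_x, \]

which is what the element-by-element copies into `rotation` and `translation` read back out.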
int main(){

  mtsTaskManager* taskManager = mtsTaskManager::GetInstance();

  cmnLogger::SetMask( CMN_LOG_ALLOW_ALL );
  cmnLogger::SetMaskFunction( CMN_LOG_ALLOW_ALL );
  cmnLogger::SetMaskDefaultLog( CMN_LOG_ALLOW_ALL );

  osg::ref_ptr< osaOSGWorld > world = new osaOSGWorld;

  // Create a camera
  int x = 0, y = 0;
  int width = 320, height = 240;
  double Znear = 0.1, Zfar = 10.0;
  mtsOSGMono* camera;
  camera = new mtsOSGMono( "camera",
			   world,
			   x, y,
			   width, height,
			   55.0, ((double)width)/((double)height),
			   Znear, Zfar,
			   false );
  taskManager->AddComponent( camera );

  // create the camera motion
  CameraMotion cmotion;
  taskManager->AddComponent( &cmotion );

  // create the hubble motion
  HubbleMotion hmotion;
  taskManager->AddComponent( &hmotion );

  cmnPath path;
  path.AddRelativeToCisstShare("/models");
  path.AddRelativeToCisstShare("/models/hubble");

  vctFrame4x4<double> Rt(  vctMatrixRotation3<double>(),
			   vctFixedSizeVector<double,3>( 0.0, 0.0, 0.5 ) );
  mtsOSGBody* hubble;
  hubble = new mtsOSGBody( "hubble", path.Find("hst.3ds"), world, Rt, 0.8 );
  taskManager->AddComponent( hubble );

  osg::ref_ptr< osaOSGBody > background;
  background = new osaOSGBody( path.Find("background.3ds"), world,
			       vctFrame4x4<double>() );

  taskManager->Connect( camera->GetName(), "Input",
			cmotion.GetName(), "Output" );

  taskManager->Connect( hubble->GetName(), "Input",
			hmotion.GetName(), "Output" );

  taskManager->CreateAll();
  taskManager->WaitForStateAll( mtsComponentState::READY );

  taskManager->StartAll();
  taskManager->WaitForStateAll( mtsComponentState::ACTIVE );

  cmnGetChar();
  std::cout << "ENTER to quit" << std::endl;
  cmnGetChar();

  taskManager->KillAll();
  taskManager->Cleanup();

  return 0;

}
	void main(void *arg)
	{
		const color WHITE(1.0f);
		//-----------------------------------------
		const color Cs(i_color());
		color diffuse_color(1.0f, 1.0f, 1.0f);
		scalar diffuse_weight = i_diffuse();
		color specular_color(i_specularColor());
		scalar specular_weight= 0.2f;//cosinePower
		scalar roughness = 0.0f;
		int specular_mode = 1;//[0,3]
		scalar glossiness = 1.0f;
		color reflection_color(i_reflectedColor());
		scalar reflection_weight = 0.5f;
		color  refraction_color(1.0f, 1.0f, 1.0f);
		scalar refraction_weight= 0.0f;//zero will lead to a dark image
		scalar refraction_glossiness = 0.5f;
		scalar refraction_thickness= 2.0f;//surfaceThickness
		color  translucency_color(i_transparency());
		scalar translucency_weight = i_translucence();
		scalar anisotropy = 1.0f;
		scalar rotation = 0.0f;
		scalar ior = 1.5f;//refractiveIndex
		bool fresnel_by_ior = eiTRUE;
		scalar fresnel_0_degree_refl = 0.2f;
		scalar fresnel_90_degree_refl = 1.0f;
		scalar fresnel_curve= 5.0f;
		bool is_metal = eiTRUE;
		int diffuse_samples = 8;
		int reflection_samples= 4;
		int refraction_samples= 4;
		scalar cutoff_threshold = LIQ_SCALAR_EPSILON;
		eiTag bump_shader = eiNULL_TAG;
		scalar bump_factor= 0.3f;

  		if( liq_UserDefinedNormal() == 0 )
  		{
  			i_normalCamera() = N;
  		}
		//-----------------------------------------
		vector In = normalize( I );
		normal Nn = normalize( i_normalCamera() );
		normal Nf = ShadingNormal(Nn);
		vector V = -In;
		//-----------------------------------------



		// specular is the percentage of specular lighting
		// limit weights in range [0, 1]
		specular_weight = clamp(specular_weight, 0.0f, 1.0f);
		refraction_weight = clamp(refraction_weight, 0.0f, 1.0f);
		translucency_weight = clamp(translucency_weight, 0.0f, 1.0f);
		// automatically compute Kd, Ks, Kt in an energy conserving way
		diffuse_weight = clamp(diffuse_weight, 0.0f, 1.0f);
		reflection_weight = clamp(reflection_weight, 0.0f, 1.0f);
		// the energy balance between reflection and refraction is
		// dominated by the Fresnel law
		//color Kt(refraction_color * (spec * refr * (1.0f - trans)) * (is_metal?Cs:WHITE));
		
		// this is a monolithic shader which also serves as shadow shader
		if (ray_type == EI_RAY_SHADOW)
		{
			main_shadow(arg, refraction_color * (specular_weight * refraction_weight * (1.0f - translucency_weight)), 
				refraction_thickness, cutoff_threshold);
			return;
		}

		// for surface shader, we call bump shader
		eiTag shader = bump_shader;
		if (shader != eiNULL_TAG)
		{
			call_bump_shader(shader, bump_factor);
		}

		//color Kc(refraction_color * (spec * refr * trans) * (is_metal?Cs:WHITE));
		// non-reflected energy is absorbed
		//color Ks(specular_color * spec * (1.0f - refl) * (is_metal?Cs:WHITE));
		//color Kr(reflection_color * (spec * refl) * (is_metal?Cs:WHITE));
		// surface color will impact specular for metal material
		//const color Cs(surface_color);

//		const color Kd( Cs *(1.0f - spec) * diff );
//		const int spec_mode = clamp(specular_mode, 0, 3);

		computeSurface(
			i_color(),//outColor(),//out->Ci,//
			i_transparency(),//out->Oi,//
			i_matteOpacityMode(),
			i_matteOpacity(),
			o_outColor(),//out->Ci,//
			o_outTransparency()//out->Oi//
		);
		out->Ci = o_outColor();
		out->Oi = o_outTransparency();

		// apply rotation
		scalar deg = rotation;
		if (deg != 0.0f)
		{
			dPdu = rotate_vector(dPdu, N, radians(deg));
			dPdv = cross(dPdu, N);
			u_axis = normalize(dPdu);
			v_axis = normalize(dPdv);
		}

		// set the glossiness scale based on the chosen BSDF
		scalar glossiness_scale = 370.37f;
		if (specular_mode == 1)
		{
			glossiness_scale = 125.0f;
		}
		else if (specular_mode == 3)
		{
			// scale to make the same glossiness parameter 
			// results in similar lobe for different BSDFs
			glossiness_scale = 22.88f;
		}

//		scalar aniso = anisotropy;
		int refl_samples = reflection_samples;
		int refr_samples = refraction_samples;

		// prepare outgoing direction in local frame
		const vector wo(normalize(to_local(V)));
		// construct BSDFs
		OrenNayar Rd(roughness);
		scalar shiny_u = glossiness;
		if (shiny_u < eiSCALAR_EPS)
		{
			shiny_u = eiSCALAR_EPS;
			refl_samples = 1;
		}
		shiny_u = max(0.0f, glossiness_scale / shiny_u);
		scalar shiny_v = max(0.0f, shiny_u * anisotropy);
		
		scalar IOR = ior;
		eiBool fresn_by_ior = fresnel_by_ior;
		scalar fresn_0_degree_refl = fresnel_0_degree_refl;
		scalar fresn_90_degree_refl = fresnel_90_degree_refl;
		scalar fresn_curve = fresnel_curve;

		union {
			eiByte by_ior[sizeof(FresnelByIOR)];
			eiByte schlick[sizeof(FresnelSchlick)];
		} F_storage;

		union {
			eiByte by_ior[sizeof(FresnelByIOR)];
			eiByte schlick[sizeof(FresnelSchlick)];
		} invF_storage;

		Fresnel *F = NULL;
		Fresnel *invF = NULL;
		if (fresn_by_ior)
		{
			F = new (F_storage.by_ior) FresnelByIOR(IOR);
			invF = new (invF_storage.by_ior) InvFresnelByIOR(IOR);
		}
		else
		{
			F = new (F_storage.schlick) FresnelSchlick(
				fresn_0_degree_refl, 
				fresn_90_degree_refl, 
				fresn_curve);
			invF = new (invF_storage.schlick) InvFresnelSchlick(
				fresn_0_degree_refl, 
				fresn_90_degree_refl, 
				fresn_curve);
		}

		union {
			eiByte ward[sizeof(Ward)];
			eiByte phong[sizeof(StretchedPhong)];
			eiByte blinn[sizeof(Blinn)];
			eiByte cooktorrance[sizeof(CookTorrance)];
		} Rs_storage;

		BSDF *Rs = NULL;
		switch (specular_mode)
		{
		case 0:
 			Rs = new (Rs_storage.ward) Ward(F, shiny_u, shiny_v);
			break;
		case 1:
			Rs = new (Rs_storage.phong) StretchedPhong(F, shiny_u);
			break;
		case 2:
			Rs = new (Rs_storage.blinn) Blinn(F, shiny_u);
			break;
		case 3:
			Rs = new (Rs_storage.cooktorrance) CookTorrance(F, 1.0f / shiny_u);
			break;
		}

		SpecularReflection Rr(F);

		scalar refr_shiny_u = refraction_glossiness;
		if (refr_shiny_u < eiSCALAR_EPS)
		{
			refr_shiny_u = eiSCALAR_EPS;
			refr_samples = 1;
		}
		refr_shiny_u = max(0.0f, glossiness_scale / refr_shiny_u);
		scalar refr_shiny_v = max(0.0f, shiny_u * anisotropy);

		union {
			eiByte ward[sizeof(Ward)];
			eiByte phong[sizeof(StretchedPhong)];
			eiByte blinn[sizeof(Blinn)];
			eiByte cooktorrance[sizeof(CookTorrance)];
		} Rts_storage;

		BSDF *Rts = NULL;
		switch (specular_mode)
		{
		case 0:
			Rts = new (Rts_storage.ward) Ward(invF, refr_shiny_u, refr_shiny_v);
			break;
		case 1:
			Rts = new (Rts_storage.phong) StretchedPhong(invF, refr_shiny_u);
			break;
		case 2:
			Rts = new (Rts_storage.blinn) Blinn(invF, refr_shiny_u);
			break;
		case 3:
			Rts = new (Rts_storage.cooktorrance) CookTorrance(invF, 1.0f / refr_shiny_u);
			break;
		}

		scalar refr_thickness = refraction_thickness;

		// internal scale for refraction thickness, make it smaller
		BRDFtoBTDF Rt(Rts, IOR, refr_thickness * 0.1f, this);
		
		color Cdiffuse(0.0f);
		color Cspecular(0.0f);

		// don't integrate direct lighting if the ray hits the back face
 		if (dot_nd < 0.0f)
 		{
 			// integrate direct lighting from the front side
 			//out->Ci += integrate_direct_lighting(/*Kd*/diffuse(), Rd, wo);
 			//out->Ci *= i_diffuse() * getDiffuse(Nf, eiFALSE, eiFALSE);
			Cdiffuse += i_diffuse() * getDiffuse(Nf, eiFALSE, eiFALSE);

 			//out->Ci += integrate_direct_lighting(Ks, *Rs, wo);
 			//out->Ci += i_specularColor() * getPhong (Nf, V, i_cosinePower(), eiFALSE, eiFALSE);
			Cspecular += i_specularColor() * getPhong (Nf, V, i_cosinePower(), eiFALSE, eiFALSE);

		}

		// integrate for translucency from the back side
		if (!almost_black( refraction_color * (specular_weight * refraction_weight * translucency_weight)*(is_metal?Cs:WHITE) ) && //almost_black(Kc)
			(refr_thickness > 0.0f || dot_nd > 0.0f))
		{
			vector old_dPdu(dPdu);
			vector old_dPdv(dPdv);
			vector old_u_axis(u_axis);
			vector old_v_axis(v_axis);
			normal old_N(N);
			vector new_wo(wo);

			if (dot_nd < 0.0f)
			{
				dPdu = old_dPdv;
				dPdv = old_dPdu;
				u_axis = old_v_axis;
				v_axis = old_u_axis;
				N = -N;
				new_wo = vector(wo.y, wo.x, -wo.z);
			}
			
			// integrate direct lighting from the back side
			//out->Ci += Kc * integrate_direct_lighting(/*Kd*/i_diffuse(), Rd, new_wo);
			//out->Ci += i_diffuse() * getDiffuse(Nf, eiFALSE, eiFALSE);
			Cdiffuse += i_diffuse() * getDiffuse(Nf, eiFALSE, eiFALSE);

			//out->Ci += Kc * integrate_direct_lighting(Ks, *Rs, new_wo);
			//out->Ci += i_specularColor() * getPhong (Nf, V, i_cosinePower(), eiFALSE, eiFALSE);
			Cspecular += i_specularColor() * getPhong (Nf, V, i_cosinePower(), eiFALSE, eiFALSE);

			N = old_N;
			u_axis = old_u_axis;
			v_axis = old_v_axis;
			dPdu = old_dPdu;
			dPdv = old_dPdv;
		}


		scalar cutoff_thresh = cutoff_threshold;
				
		color CReflectSpecular(0.0f);
 		// integrate indirect specular lighting
		if (!almost_black( specular_color * (specular_weight * (1.0f - reflection_weight))*(is_metal?Cs:WHITE) ) && dot_nd < 0.0f)//almost_black(Ks)
 		{
 			IntegrateOptions opt;
 			opt.ray_type = EI_RAY_REFLECT_GLOSSY;
 			opt.min_samples = opt.max_samples = refl_samples;
 			opt.cutoff_threshold = cutoff_thresh;

			CReflectSpecular = integrate(wo, *Rs, opt);
  		}

		color CSpecularReflection(0.0f);
		// integrate perfect specular reflection
		if (!almost_black(reflection_color * (specular_weight * reflection_weight) * (is_metal?Cs:WHITE)) && dot_nd < 0.0f)//almost_black(Kr)
		{
			IntegrateOptions opt;
			opt.ray_type = EI_RAY_REFLECT_GLOSSY;
			opt.min_samples = opt.max_samples = 1; // force one sample for reflection
			opt.cutoff_threshold = cutoff_thresh;
			// the direct lighting of this BRDF is not accounted for,
			// so we trace lights here to compensate
			opt.trace_lights = eiTRUE;
			
			CSpecularReflection = integrate(wo, Rr, opt);
		}

		color CReflectDiffuse(0.0f);
		// integrate indirect diffuse lighting (color bleeding)
		if (!almost_black( Cs *(1.0f - specular_weight) * diffuse_weight ) && dot_nd < 0.0f)//almost_black(Kd)
		{
			IntegrateOptions opt;
			opt.ray_type = EI_RAY_REFLECT_DIFFUSE;
			opt.min_samples = opt.max_samples = diffuse_samples;
			opt.cutoff_threshold = cutoff_thresh;

			CReflectDiffuse = integrate(wo, Rd, opt);
		}

		color CRefraction(0.0f);
		// integrate refraction
		if ( !almost_black(refraction_color * specular_weight * refraction_weight * (1.0f-translucency_weight)*(is_metal?Cs:WHITE)) ) //almost_black(Kt)
		{
			IntegrateOptions opt;
			opt.ray_type = EI_RAY_REFRACT_GLOSSY;
			if (IOR == 1.0f)
			{
				opt.ray_type = EI_RAY_TRANSPARENT;
			}
			opt.min_samples = opt.max_samples = refr_samples;
			opt.cutoff_threshold = cutoff_thresh;
			// account for refractive caustics
			opt.trace_lights = eiTRUE;

			CRefraction = integrate(wo, Rt, opt);
		}

		out->Ci *= (Cdiffuse 

					+ CReflectDiffuse *
					  Cs *(1.0f - specular_weight) * diffuse_weight//Kd
					);

		out->Ci += (Cspecular 

					+ CReflectSpecular* 
					  specular_color   * specular_weight * (1.0f - reflection_weight)*(is_metal?Cs:WHITE)//Ks
					
					+ CSpecularReflection* 
					  reflection_color * specular_weight * reflection_weight * (is_metal?Cs:WHITE)//Kr
					
					+ CRefraction*
					  refraction_color * specular_weight * refraction_weight * (1.0f-translucency_weight) * (is_metal?Cs:WHITE)//Kt			
					);

#ifdef USE_AOV_aov_ambient
		aov_ambient() += ( i_ambientColor() 
							*(CReflectDiffuse *
							  Cs *(1.0f - specular_weight) * diffuse_weight//Kd
							 )
						 ) * (1.0f - o_outTransparency());
#endif
#ifdef USE_AOV_aov_diffuse
		aov_diffuse() += ( Cdiffuse * i_color() ) * (1.0f - o_outTransparency());
#endif
#ifdef USE_AOV_aov_specular
		aov_specular() += (Cspecular
							+ CReflectSpecular* 
							specular_color   * specular_weight * (1.0f - reflection_weight)*(is_metal?Cs:WHITE)//Ks

							+ CSpecularReflection* 
							reflection_color * specular_weight * reflection_weight * (is_metal?Cs:WHITE)//Kr

							+ CRefraction*
							refraction_color * specular_weight * refraction_weight * (1.0f-translucency_weight) * (is_metal?Cs:WHITE)//Kt			
							);
#endif


		if ( ! less_than( &i_transparency(), LIQ_SCALAR_EPSILON ) )
		{//transparent
			out->Ci = out->Ci * ( 1.0f - i_transparency() ) + trace_transparent() * i_transparency();
		}//else{ opacity }

		Rs->~BSDF();
		Rts->~BSDF();
	}
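One background note on the Fresnel setup above: when `fresnel_by_ior` is false, the shader switches to a Schlick-style fresnel driven by the 0°/90° reflectivities and a curve exponent. The usual generalized Schlick form (assumed here; the exact parameterization of `FresnelSchlick` is not shown) is

\[ F(\theta) = F_0 + \bigl(F_{90} - F_0\bigr)\,(1 - \cos\theta)^{c}, \]

with \(F_0\) = `fresnel_0_degree_refl`, \(F_{90}\) = `fresnel_90_degree_refl`, and \(c\) = `fresnel_curve`.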
int main(int argc, char *argv[])
{
    //for random number generator
    srand((unsigned)time(NULL));

    //preprocessing
    if(argc != 5){
        std::cout<<"No enough arugments\n";
        exit(0);
    }
    int k, n_threads;
    double lambda, wall_timer;
    std::string data_dir, meta_dir, train_dir, test_dir;
    std::string line;
    int row, col, train_size, test_size;

    std::cout<<"----------------------------------------------"<<std::endl;
    std::cout<<"exec filename: "<<argv[0]<<std::endl;
    wall_timer = omp_get_wtime();
    k = atoi(argv[1]);
    lambda = atof(argv[2]);
    n_threads = atoi(argv[3]);
    data_dir = argv[4];

    //set number of threads
    omp_set_num_threads(n_threads); 
   
    std::vector<std::string> files(3,"");
    files.reserve(3);
    getdir(data_dir, files);

    meta_dir = data_dir+files[0];
    train_dir = data_dir+files[1];
    test_dir = data_dir+files[2];
 

    //std::cout<<"Using [rank, lambda, n_threads, directory]->>: "<<"[ "<<k<<", "<<lambda<<", "<<n_threads<<", "<<data_dir<<"* ]\n";
    //std::cout<<meta_dir<<" "<<train_dir<<" "<<test_dir<<" "<<"\n";

    //read meta file
    std::ifstream meta_file(meta_dir.c_str());
        //get rows and clos
    std::getline(meta_file, line);
    std::stringstream meta_ss(line);
    meta_ss >> row >> col;
        //get size of train
    std::getline(meta_file, line);
    std::stringstream train_ss(line);
    train_ss >> train_size;
        //get size of test
    std::getline(meta_file, line);
    std::stringstream test_ss(line);
    test_ss >> test_size;
    //std::cout<<"row: "<<row<<" col: "<<col<<" train size: "<<train_size<<" test size: "<<test_size<<"\n";


    //read from training file, and construct matrix
    int *Ix = new int[train_size];
    int *Jx = new int[train_size];
    double *xx = new double[train_size];

    int *Iy = new int[train_size];
    int *Jy = new int[train_size];
    double *yy = new double[train_size];

    int *cc = new int[col](); //number of non zeros in each column
    int *rc = new int[row]();


    std::ifstream train_file(train_dir.c_str());
    std::vector<T> train_list;
    train_list.reserve(train_size);
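    // Note: 'T' and 'offset' are defined elsewhere in this translation unit; presumably
    // T is Eigen::Triplet<double> and offset converts the 1-based indices in the data
    // files to 0-based indices (an assumption, not shown in this excerpt).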

    for(int i=0;i<train_size;i++){
	train_file >> Ix[i];
	train_file >> Jx[i];
	train_file >> xx[i];
        Ix[i] = Ix[i] - offset;
        Jx[i] = Jx[i] - offset;
        train_list.push_back(T(Ix[i], Jx[i], xx[i]));
        cc[Jx[i]] = cc[Jx[i]] + 1;
        rc[Ix[i]]= rc[Ix[i]] + 1;
    }
    Eigen::SparseMatrix<double> R(row, col);
    R.setFromTriplets(train_list.begin(), train_list.end());



    Eigen::SparseMatrix<double> R_tsp = R.transpose();
    int i_count = 0;
    for(int i=0;i<R_tsp.outerSize(); i++){
        for(Eigen::SparseMatrix<double>::InnerIterator it(R_tsp, i); it; ++it){
            Iy[i_count] = it.row();
            Jy[i_count] = it.col();
            yy[i_count] = it.value();
            i_count++;
        }
    }
    
    //std::cout<<"cc(1)=2->>: "<<cc[1]<<" cc(14)=1->>: "<<cc[14]<<" cc(32)=8->>: "<<cc[32]<<"\n";
    //std::cout<<"xx(0)=4->>: "<<xx[0]<<" xx(7)=5->>: "<<xx[7]<<" xx(38)=3->>: "<<xx[7]<<"\n";
    //std::cout<<"rc(4)=23->>: "<<rc[4]<<" rc(6)=1->>: "<<rc[6]<<" rc[12]=148->>: "<<rc[12]<<"\n";
    //std::cout<<"yy(2)=5->>: "<<yy[2]<<" yy(8)=4->>: "<<yy[8]<<" yy(19)=3->>: "<<yy[19]<<"\n";

    //read from testing file, and construct matrix
    
    int *Ixt = new int[test_size];
    int *Jxt = new int[test_size];
    double *xxt = new double[test_size];

    std::ifstream test_file(test_dir.c_str());
    std::vector<T> test_list;
    test_list.reserve(test_size);

    for(int i=0;i<test_size;i++){
    	test_file >> Ixt[i];
        test_file >> Jxt[i];
        test_file >> xxt[i];
        Ixt[i] = Ixt[i] - offset;
        Jxt[i] = Jxt[i] - offset;
        test_list.push_back(T(Ixt[i], Jxt[i], xxt[i]));
    }
    Eigen::SparseMatrix<double> Rt(row, col);
    Rt.setFromTriplets(test_list.begin(), test_list.end());

    int maxiter = 10;

    Eigen::MatrixXd U(k, row);
    Eigen::MatrixXd M(k, col);

    //generate random numbers within 0 and 1 for U, and M
    #pragma omp parallel for
    for(int i=0;i<k;i++){
        for(int j=0;j<row;j++){
            U(i,j) = (double) rand() / (double) RAND_MAX;
        }
        for(int j=0;j<col;j++){
            M(i,j) = (double) rand() / (double) RAND_MAX;
        }
    }
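    // Caveat: rand() is called from inside an OpenMP parallel region; depending on the C
    // library this is either serialized by an internal lock or a data race. Per-thread
    // generators (e.g. rand_r with private seeds, or <random> engines) avoid the issue.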
    //preprocessing for parallelization
    int *cci = new int[col];
    int *rci = new int[row];
    int pre_count;

    pre_count = 0;
    for(int i=0;i<col;i++){
        cci[i] = pre_count;
        pre_count = cci[i] + cc[i];
    }

    pre_count = 0;
    for(int i=0;i<row;i++){
       rci[i] = pre_count;
       pre_count = rci[i] + rc[i];
    }
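    // cci[i] / rci[i] are exclusive prefix sums of cc / rc: the offset into (Ix, Jx, xx)
    // where column i's ratings start, and the offset into (Iy, Jy, yy) where row i's
    // ratings start. The column offsets are only valid if the training file lists its
    // entries grouped by column in ascending order, which the subU gather below relies on.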


    std::cout<<"walltime spent on preprocessing data: "<<omp_get_wtime() - wall_timer<<"\n";

    //std::cout<<"R_tsp: "<<R_tsp.size()<<" R_tsp(4999, 1825) = 3, the output is: "<<R_tsp.coeffRef(4999,1825)<<"\n";

    //for small
    //std::cout<<"R size: "<<R.size()<<" R(1825, 4999) = 3, the output is: "<<R.coeffRef(1825,4999)<<"\n";
    //std::cout<<"Rt size: "<<Rt.size()<<" Rt(1395, 4999) = 3, the output is: "<<Rt.coeffRef(1395,4999)<<"\n";

    //for medium
    //std::cout<<"R size: "<<R.size()<<" R(4750, 3951) is 4, the output is:  "<<R.coeffRef(4750,3951)<<"\n";
    //std::cout<<"Rt size: "<<Rt.size()<<" R(2128, 3951) is 3, the output is: "<<Rt.coeffRef(2128,3951)<<"\n";

//------------------------------begin processing----------------------------------------------//
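    // Baseline error before any ALS iteration:
    //   RMSE = sqrt( (1/N) * sum over observed (i,j) of (u_i^T m_j - r_ij)^2 ),
    // where u_i is column i of U and m_j is column j of M, evaluated on train and test.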
    double accu_sum=0;
    double rmse_test=0;
    double rmse_train=0;
    Eigen::MatrixXd U_tps = U.transpose();
    #pragma omp parallel for reduction(+:accu_sum) 
    for(int i=0;i<train_size;i++){
        accu_sum += pow(U_tps.row(Ix[i])*M.col(Jx[i]) - xx[i], 2);
    }
    rmse_train = sqrt(accu_sum/train_size);

    accu_sum = 0;
    #pragma omp parallel for reduction(+:accu_sum)
    for(int i=0;i<test_size;i++){
        accu_sum += pow(U_tps.row(Ixt[i])*M.col(Jxt[i]) - xxt[i], 2);
    }
    rmse_test = sqrt(accu_sum/test_size);

    Eigen::MatrixXd iden = Eigen::MatrixXd::Identity(k,k);

    std::cout<<"start with rmse on train: "<< rmse_train <<" rmse on test: "<< rmse_test << " n_threads: "<<n_threads<<std::endl;
    double total_timer = 0.0, end_timer = 0.0;
    for(int t=0;t<maxiter;t++){
	printf("iter: %d\n",t+1);

	printf("Minimize M while fixing U ...");
        wall_timer = omp_get_wtime();
        //minimize M while fixing U
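        // For each column (item) j with observed ratings r_j and the corresponding user
        // factors gathered into U_j (k x cc[j]), the regularized least-squares update is
        //   m_j = (lambda*I + U_j * U_j^T)^{-1} * (U_j * r_j),
        // solved below with a Cholesky factorization (llt). The U update further down is
        // the symmetric step with the roles of rows and columns exchanged.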
	#pragma omp parallel for schedule(dynamic, 1)
        for(int i=0;i<col;i++){
            if( cc[i]>0 ){
                //construct subU, and subR
                Eigen::MatrixXd subU(k, cc[i]);
                Eigen::VectorXd subR(cc[i]);
		int j=cci[i];
                for(int l=0; l<cc[i];l++){
                    subU.col(l) = U.col(Ix[j+l]);
		    subR[l] = xx[j+l];
                }
                M.col(i) = (lambda*iden+subU*subU.transpose()).llt().solve((subU*subR));
            }else{
                M.col(i) = Eigen::VectorXd::Zero(k);
            }
        
        }
        end_timer = omp_get_wtime();
	total_timer += end_timer-wall_timer;	
        printf("%0.2f seconds\n", end_timer - wall_timer);

	printf("Minimize U whilt fixing M ...");
	wall_timer = omp_get_wtime();
        //minimize U while fixing M
        #pragma omp parallel for schedule(dynamic, 1)
        for(int i=0;i<row;i++){
            if( rc[i] > 0){
                //construct subM, and subR
                Eigen::MatrixXd subM(k, rc[i]);
                Eigen::VectorXd subR(rc[i]);
		int j=rci[i];
                for(int l=0;l<rc[i];l++){
                    subM.col(l) = M.col(Iy[j+l]);
		    subR[l] = yy[j+l];
                }
                U.col(i) = (lambda*iden+subM*subM.transpose()).llt().solve((subM*subR));
            }else{
                U.col(i) = Eigen::VectorXd::Zero(k);
            }
        }
	end_timer = omp_get_wtime();
	total_timer += end_timer-wall_timer;
	printf("%0.2f seconds\n", end_timer - wall_timer);

	Eigen::MatrixXd U_tps = U.transpose();
    
        accu_sum = 0;
        #pragma omp parallel for reduction(+:accu_sum)
        for(int i=0;i<train_size;i++){
            accu_sum += pow(U_tps.row(Ix[i])*M.col(Jx[i]) - xx[i], 2);
        }
        rmse_train = sqrt(accu_sum/train_size);
        
        accu_sum = 0;
        #pragma omp parallel for reduction(+:accu_sum)
        for(int i=0;i<test_size;i++){
            accu_sum += pow(U_tps.row(Ixt[i])*M.col(Jxt[i]) - xxt[i], 2);
        }
        rmse_test = sqrt(accu_sum/test_size);

        printf("rmse on train: %0.6f, rmse on test: %0.6f\n",rmse_train, rmse_test);
    }
    printf("total running time: %0.2f\n",total_timer);

    //free variables
    delete[] Ix;
    delete[] Jx;
    delete[] xx;
    delete[] Iy;
    delete[] Jy;
    delete[] yy;
    delete[] Ixt;
    delete[] Jxt;
    delete[] xxt;
    delete[] rci;
    delete[] cci;
    delete[] cc;
    delete[] rc;
}
Example #26
0
void Assembler::cbnz(Instruction* at, const Register& rt, int imm19) {
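  // CBNZ: compare the register against zero and branch if it is non-zero. SF(rt) selects
  // the 32/64-bit variant, ImmCmpBranch(imm19) encodes the PC-relative offset, and Rt(rt)
  // places the tested register in the Rt field of the instruction word.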
  EmitBranch(at, SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}
//\fn void ExtrinsicParam::computeRtMatrix(double pan, double tilt, cv::Mat image);
///\brief This function computes the rotation matrix and the translation vector of the extrinsic parameters.
///\param pan Value of the camera panoramique when the image has been taken.
///\param tilt Value of the camera tilt when the image has been taken.
///\param image The image from which the rotation matrix and the translation vector should be computed.
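///\details The user clicks six reference points whose mocap-frame coordinates are known.
/// Each click fills one column of Pi with the depth-scaled homogeneous pixel, s*(u, v, 1),
/// and one column of Pr with the homogeneous world point in millimetres. The extrinsic
/// matrix is then recovered as [R|t] = K^-1 * Pi * pinv(Pr), from which the rotation block
/// and the translation column are extracted.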
void ExtrinsicParam::computeRtMatrix(double pan, double tilt, cv::Mat image)
{
  // Save the value of the pan and the tilt in order to modify the extrinsic parameters when the camera position change
  this->pan = pan;
  this->tilt = tilt;
  cv::Mat img = image.clone();
  cv::Mat initImg = image.clone();
  int cnt = 0;

  double fx = K(0,0), fy = K(1,1), cx = K(0,2), cy = K(1,2);
  double u, v, x, y;
  double dist_to_point;

  Eigen::MatrixXd Pr, Pi;
  Pr.resize(4,6);
  Pr.setZero();
  Pi.resize(3,6);
  Pi.setZero();

  pp.cnt = 0;

  // Create a window of the scene
  cv::namedWindow("Extrinsic Image",CV_WINDOW_AUTOSIZE);
  cvSetMouseCallback("Extrinsic Image",On_Mouse,0);
  cv::waitKey(10);
  cv::imshow("Extrinsic Image", img);
  cv::waitKey(10);

  // We need 6 points to compute the translation vector and the rotation matrix
  while (pp.cnt <= 6)
    {
      if (cnt > pp.cnt) // Case where we do a right click in order to remove the last point inserted
	{
	  switch (cnt)
	    {
	    case 1:
	      {
		img = image.clone();
		cnt = pp.cnt;
	      }
	      break;
	      
	    case 2:
	      {
		img = image.clone();
		cv::circle(img,pp.p1[0],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,0)",pp.p1[0],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cnt = pp.cnt;
	      }
	      break;

	    case 3:
	      {
		img = image.clone();
		cv::circle(img,pp.p1[0],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,0)",pp.p1[0],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[1],3,cv::Scalar(255,0,0));
		cv::putText(img,"(1.5,0,0)",pp.p1[1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cnt = pp.cnt;
	      }
	      break;

	    case 4:
	      {
		img = image.clone();
		cv::circle(img,pp.p1[0],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,0)",pp.p1[0],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[1],3,cv::Scalar(255,0,0));
		cv::putText(img,"(1.5,0,0)",pp.p1[1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[2],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,1.5,0)",pp.p1[2],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cnt = pp.cnt;
	      }
	      break;

	    case 5:
	      {
		img = image.clone();
		cv::circle(img,pp.p1[0],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,0)",pp.p1[0],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[1],3,cv::Scalar(255,0,0));
		cv::putText(img,"(1.5,0,0)",pp.p1[1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[2],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,1.5,0)",pp.p1[2],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[3],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,1)",pp.p1[3],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cnt = pp.cnt;
	      }
	      break;

	    case 6:
	      {
		img = image.clone();
		cv::circle(img,pp.p1[0],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,0)",pp.p1[0],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[1],3,cv::Scalar(255,0,0));
		cv::putText(img,"(1.5,0,0)",pp.p1[1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[2],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,1.5,0)",pp.p1[2],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[3],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,1)",pp.p1[3],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[4],3,cv::Scalar(255,0,0));
		cv::putText(img,"(1.5,0,1)",pp.p1[4],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cnt = pp.cnt;
	      }
	      break;

	    default:
	      break;
	    }
	  cv::imshow("Extrinsic Image", img);
	  cv::waitKey(10);
	}
      if (pp.clicked) // Case where we do a left click in order to insert a point
	{
	  cv::circle(img,pp.p1[pp.cnt-1],3,cv::Scalar(255,0,0));
	  
	  if (pp.cnt == 1) // First point to insert (0,0,0) (on the mocap basis)
	    {
	      cv::putText(img,"(0,0,0)",pp.p1[pp.cnt-1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
	      // Get the image coordinates
	      u = pp.p1[0].x;
	      v = pp.p1[0].y;
	      Eigen::Vector3d undist;
	      undist(0) = u;
	      undist(1) = v;
	      undist(2) = 1.;
	      
	      // Get the distance between the camera and the 3d point (the scale s)
	      if (cam == 1)
		{
		  dist_to_point = DIST_TO_000_CAM1;
		}
	      else if (cam == 2)
		{
		  dist_to_point= DIST_TO_000_CAM2;
		}
	      Pi(0,0) = u*dist_to_point;
	      Pi(1,0) = v*dist_to_point;
	      Pi(2,0) = dist_to_point;
	      Pr(3,0) = 1.;

	    } else if (pp.cnt == 2) // Second point to insert (1500mm,0,0) (on the mocap basis)
	    {
	      cv::putText(img,"(1.5,0,0)",pp.p1[pp.cnt-1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
	      u = pp.p1[1].x;
	      v = pp.p1[1].y;
	      Eigen::Vector3d undist;
	      undist(0) = u;
	      undist(1) = v;
	      undist(2) = 1.;
	      
	      if (cam == 1)
		{
		  dist_to_point = DIST_TO_0500_CAM1;
		}
	      if (cam == 2)
		{
		  dist_to_point = DIST_TO_0500_CAM2;
		}

	      x = (u)*(dist_to_point);
	      y = (v)*(dist_to_point);

	      Pi(0,1) = x;
	      Pi(1,1) = y;
	      Pi(2,1) = dist_to_point;
	      Pr(0,1) = 1500.;
	      Pr(3,1) = 1.;

	    } else if (pp.cnt == 3) // Third point to insert (0,1500mm,0) (on the mocap basis)
	    {
	      cv::putText(img,"(0,1.5,0)",pp.p1[pp.cnt-1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
	      u = pp.p1[2].x;
	      v = pp.p1[2].y;
	      Eigen::Vector3d undist;
	      undist(0) = u;
	      undist(1) = v;
	      undist(2) = 1.;
	      
	      if (cam == 1)
		{
		  dist_to_point = DIST_TO_0050_CAM1;
		}
	      if (cam == 2)
		{
		  dist_to_point = DIST_TO_0050_CAM2;
		}
	      x = (u)*(dist_to_point);
	      y = (v)*(dist_to_point);
	      Pi(0,2) = x;
	      Pi(1,2) = y;
	      Pi(2,2) = dist_to_point;
	      Pr(1,2) = 1500.;
	      Pr(3,2) = 1.;

	    } else if (pp.cnt == 4) // Fourth point to insert (0,0,1000mm) (on the mocap basis)
	    {
	      cv::putText(img,"(0,0,1)",pp.p1[pp.cnt-1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
	      u = pp.p1[3].x;
	      v = pp.p1[3].y;
	      Eigen::Vector3d undist;
	      undist(0) = u;
	      undist(1) = v;
	      undist(2) = 1.;
	      
	      if (cam == 1)
		{
		  dist_to_point = DIST_TO_001_CAM1;
		}
	      if (cam == 2)
		{
		  dist_to_point = DIST_TO_001_CAM2;
		}

	      x = (u)*(dist_to_point);
	      y = (v)*(dist_to_point);

	      Pi(0,3) = x;
	      Pi(1,3) = y;
	      Pi(2,3) = dist_to_point;
	      Pr(2,3) = 1000.;
	      Pr(3,3) = 1.;
	    }else if (pp.cnt == 5) // Fifth point to insert (1500mm,0,1000mm) (on the mocap basis)
	    {
	      cv::putText(img,"(1.5,0,1)",pp.p1[pp.cnt-1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
	      u = pp.p1[4].x;
	      v = pp.p1[4].y;
	      Eigen::Vector3d undist;
	      undist(0) = u;
	      undist(1) = v;
	      undist(2) = 1.;
	      
	      if (cam == 1)
		{
		  dist_to_point = 4856.439;
		}
	      if (cam == 2)
		{
		  dist_to_point = 6333.64;
		}

	      x = (u)*(dist_to_point);
	      y = (v)*(dist_to_point);

	      Pi(0,4) = x;
	      Pi(1,4) = y;
	      Pi(2,4) = dist_to_point;
	      Pr(0,4) = 1500.;
	      Pr(2,4) = 1000.;
	      Pr(3,4) = 1.;
	    } else if (pp.cnt == 6) // Sixth point to insert (0,1500mm,1000mm) (on the mocap basis)
	    {
	      cv::putText(img,"(0,1.5,1)",pp.p1[pp.cnt-1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
	      u = pp.p1[5].x;
	      v = pp.p1[5].y;
	      Eigen::Vector3d undist;
	      undist(0) = u;
	      undist(1) = v;
	      undist(2) = 1.;
	      
	      if (cam == 1)
		{
		  dist_to_point = 5142.713;
		}
	      if (cam == 2)
		{
		  dist_to_point = 4389.19;
		}

	      x = (u)*(dist_to_point);
	      y = (v)*(dist_to_point);

	      Pi(0,5) = x;
	      Pi(1,5) = y;
	      Pi(2,5) = dist_to_point;
	      Pr(1,5) = 1500.;
	      Pr(2,5) = 1000.;
	      Pr(3,5) = 1.;
	    }
	  cnt = pp.cnt;
	  pp.clicked = false;
	}
      // Keep showing the image and the modification on it
      cv::imshow("Extrinsic Image", img);
      cv::waitKey(10);
    }

  // Compute the rotation matrix and the translation vector
  Eigen::MatrixXd Prinv, Rt;
  pseudoInverse(Pr,Prinv);
  Rt.noalias() = (K.inverse())*Pi*(Prinv);

  rotation(0,0) = Rt(0,0);rotation(0,1) = Rt(0,1);rotation(0,2) = Rt(0,2);
  rotation(1,0) = Rt(1,0);rotation(1,1) = Rt(1,1);rotation(1,2) = Rt(1,2);
  rotation(2,0) = Rt(2,0);rotation(2,1) = Rt(2,1);rotation(2,2) = Rt(2,2);
  translation(0) = Rt(0,3);
  translation(1) = Rt(1,3);
  translation(2) = Rt(2,3);

  // Save the values in files
  if (cam == 1)
    {
      if (!fprintMatrix(rotation, "rotation_cam1.txt"))
	std::cout << "Error writing in rotation_cam1.txt" << std::endl;
      if (!fprintMatrix(translation, "translation_cam1.txt"))
	std::cout << "Error writing in rotation_cam1.txt" << std::endl;
      if(!fprintPT(pan,tilt, "PT_cam1.txt"))
	std::cout << "Error writing file PT_cam1.txt" << std::endl;
    } else if (cam == 2)
    {
      if (!fprintMatrix(rotation, "rotation_cam2.txt"))
	std::cout << "Error writing in rotation_cam2.txt" << std::endl;
      if (!fprintMatrix(translation, "translation_cam2.txt"))
	std::cout << "Error writing in rotation_cam2.txt" << std::endl;
      if(!fprintPT(pan,tilt, "PT_cam2.txt"))
	std::cout << "Error writing file PT_cam2.txt" << std::endl;
    }

  initial_rotation.noalias() = rotation;
  initial_translation.noalias() = translation;
  rotation_computed = true;

  // Destroy the window
  cv::destroyWindow("Extrinsic Image");
}
int main( ){

  mtsTaskManager* taskManager = mtsTaskManager::GetInstance();

  cmnLogger::SetMask( CMN_LOG_ALLOW_ALL );
  cmnLogger::SetMaskFunction( CMN_LOG_ALLOW_ALL );
  cmnLogger::SetMaskDefaultLog( CMN_LOG_ALLOW_ALL );

  // Create the OSG world
  osg::ref_ptr< osaOSGWorld > world = new osaOSGWorld;

  // Create OSG camera
  int x = 0, y = 0;
  int width = 640, height = 480;
  double Znear = 0.1, Zfar = 10.0;

  osg::Node::NodeMask maskleft  = 0x01;
  osg::Node::NodeMask maskright = 0x02;
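  // Node masks: nodes tagged with maskleft are rendered only by the left camera and those
  // tagged with maskright only by the right camera (via the cull masks set just below),
  // so each video plane created later on is visible to a single eye of the stereo rig.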

  mtsOSGStereo* camera;
  camera = new mtsOSGStereo( "camera",
			     world,
			     x, y,
			     width, height,
			     55.0, ((double)width)/((double)height),
			     Znear, Zfar,
			     0.1 );
  camera->setCullMask( maskleft, osaOSGStereo::LEFT );
  camera->setCullMask( maskright, osaOSGStereo::RIGHT );
  taskManager->AddComponent( camera );

  // create the hubble motion
  HubbleMotion hmotion;
  taskManager->AddComponent( &hmotion );


  // create hubble
  cmnPath path;
  path.AddRelativeToCisstShare("/models/hubble");
  path.AddRelativeToCisstShare("/movies");

  vctFrame4x4<double> Rt( vctMatrixRotation3<double>(),
			  vctFixedSizeVector<double,3>( 0.0, 0.0, 0.5 ) );
  mtsOSGBody* hubble;
  hubble = new mtsOSGBody( "hubble", path.Find("hst.3ds"), world, Rt, 1.0, .5 );
  taskManager->AddComponent( hubble );


  // connect the motion to hubble
  taskManager->Connect( hubble->GetName(), "Input",
  			hmotion.GetName(), "Output" );


  // start the components
  taskManager->CreateAll();
  taskManager->WaitForStateAll( mtsComponentState::READY );
  taskManager->StartAll();
  taskManager->WaitForStateAll( mtsComponentState::ACTIVE );


  // Start the svl stuff
  svlInitialize();

  // Creating SVL objects
  svlStreamManager streamleft;
  svlFilterSourceVideoFile sourceleft(1);
  svlOSGImage imageleft( -0.5, -0.5, 1, 1, world );

  // Configure the filters
  sourceleft.SetFilePath( path.Find( "left.mpg" ) );
  imageleft.setNodeMask( maskleft );

  streamleft.SetSourceFilter( &sourceleft );
  sourceleft.GetOutput()->Connect( imageleft.GetInput() );

  svlStreamManager streamright;
  svlFilterSourceVideoFile sourceright(1);
  svlOSGImage imageright( -0.5, -0.5, 1, 1, world );

  sourceright.SetFilePath( path.Find( "right.mpg" ) );
  imageright.setNodeMask( maskright );

  streamright.SetSourceFilter( &sourceright );
  sourceright.GetOutput()->Connect( imageright.GetInput() );

  // start the streams
  if (streamleft.Play() != SVL_OK)
    { std::cerr << "Cannot start left stream." <<std::endl; }

  if (streamright.Play() != SVL_OK)
    { std::cerr << "Cannot start right stream." <<std::endl; }

  std::cout << "ENTER to exit." << std::endl;
  cmnGetChar();
  cmnGetChar();

  streamleft.Release();
  streamright.Release();

  taskManager->KillAll();
  taskManager->Cleanup();

  return 0;

}