Example #1
void ChompOptimizer::getJacobian(int trajectory_point,
                                 Eigen::Vector3d& collision_point_pos,
                                 std::string& joint_name,
                                 Eigen::MatrixBase<Derived>& jacobian) const
{
  for(int j = 0; j < num_joints_; j++)
  {
    if(isParent(joint_name, joint_names_[j]))
    {
      Eigen::Vector3d column = joint_axes_[trajectory_point][j].cross(collision_point_pos - joint_positions_[trajectory_point][j]);

      jacobian.col(j)[0] = column.x();
      jacobian.col(j)[1] = column.y();
      jacobian.col(j)[2] = column.z();
    }
    else
    {
      jacobian.col(j)[0] = 0.0;
      jacobian.col(j)[1] = 0.0;
      jacobian.col(j)[2] = 0.0;
    }
  }
}
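Like most snippets on this page, the example above omits its enclosing template <typename Derived> declaration. A minimal, self-contained sketch of the same output-parameter pattern, with the ChompOptimizer members replaced by hypothetical stand-alone arguments:

#include <Eigen/Core>
#include <Eigen/Geometry>

// Hypothetical stand-alone analogue of getJacobian() above: one Jacobian
// column is joint_axis x (collision_point - joint_origin); columns for
// joints that are not ancestors of the collision link would stay zero.
template <typename Derived>
void fillJacobianColumn(const Eigen::Vector3d& joint_axis,
                        const Eigen::Vector3d& joint_origin,
                        const Eigen::Vector3d& collision_point,
                        int column,
                        Eigen::MatrixBase<Derived>& jacobian)
{
  jacobian.col(column) = joint_axis.cross(collision_point - joint_origin);
}

int main()
{
  Eigen::Matrix<double, 3, 7> jacobian = Eigen::Matrix<double, 3, 7>::Zero();
  fillJacobianColumn(Eigen::Vector3d::UnitZ(), Eigen::Vector3d::Zero(),
                     Eigen::Vector3d(0.5, 0.0, 1.0), 0, jacobian);
  // jacobian.col(0) is now (0, 0.5, 0).
  return 0;
}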
Example #2
File: CameraModel.cpp  Project: obiou/FPL
         bool CameraModel::_project_intrinsic( const Eigen::MatrixBase<Derived1>& mP3D,
                                               Eigen::MatrixBase<Derived2>& mP2D,
                                               Eigen::MatrixBase<Derived3>& mdP2DI
                                               ) const { 
     if( !project( mP3D, mP2D ) ) {
         return false;
     }
     const unsigned int nNumPoints = mP3D.cols();
     const unsigned int nNumCamParams = get_num_parameters();
     // Compute finite-diff Jacobians
     Derived3 mdP2Dfd( 2, nNumPoints );
     double dEps = 1e-4;
     Derived2 mP2DP = mP2D;
     Derived2 mP2DM = mP2D;
     { 
         // Compute intrinsic Jacobian
         // Horrible const_cast ... difficult to avoid
         for( size_t kk=0; kk<nNumCamParams; kk++ ) {
             std::string sVarName = parameter_index_to_name( kk );
             double dVal = get<double>( sVarName );
             const_cast<CameraModel*>( this )->set<double>( sVarName, dVal + dEps );
             project( mP3D, mP2DP );
             const_cast<CameraModel*>( this )->set<double>( sVarName, dVal - dEps );
             project( mP3D, mP2DM );
             mdP2Dfd = ( mP2DP - mP2DM ) / (2*dEps);
             const_cast<CameraModel*>( this )->set<double>( sVarName, dVal );
             for( size_t ll=0; ll<nNumPoints; ll++ ) {
                 mdP2DI.col( nNumCamParams*ll + kk ) = mdP2Dfd.col( ll );
             }
         }
     }
     return true;
 }
Example #3
File: CameraModel.cpp  Project: obiou/FPL
         bool CameraModel::_project_extrinsic( const Eigen::MatrixBase<Derived1>& mP3D,
                                               Eigen::MatrixBase<Derived2>& mP2D,
                                               Eigen::MatrixBase<Derived3>& mdP2DE
                                               ) const {
     if( !project( mP3D, mP2D ) ) {
         return false;
     }
     const unsigned int nNum3DDims = 3;
     const unsigned int nNumPoints = mP3D.cols();
     // Compute finite-diff Jacobians
     typename Eigen::internal::traits<Derived1>::Scalar dEps = 1e-4;
     Derived2 mP2DP = mP2D;
     Derived2 mP2DM = mP2D;
     { 
         // Compute extrinsic Jacobian
         for( size_t kk=0; kk<nNum3DDims; kk++ ) {
             Derived1 mP3DP = mP3D;
             Derived1 mP3DM = mP3D;
             mP3DP.row( kk ).array() += dEps;
             mP3DM.row( kk ).array() -= dEps;
             project( mP3DP, mP2DP );
             project( mP3DM, mP2DM );
             Derived2 mdP2Dfd = ( mP2DP - mP2DM ) / (2*dEps);
             for( size_t ll=0; ll<nNumPoints; ll++ ) {
                 mdP2DE.col( nNum3DDims*ll + kk ) = mdP2Dfd.col( ll );
             }
         }
     }
     return true;
 }
Example #4
inline void makeCoordinateSystem(Eigen::MatrixBase<Derived>& v1,
                                 Eigen::MatrixBase<DerivedOther1>& v2,
                                 Eigen::MatrixBase<DerivedOther2>& v3)
{
    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived, 3);
    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(DerivedOther1, 3);
    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(DerivedOther2, 3);
    /*ASSERTMSG(  v1.rows() == 3 && v1.cols()==1 &&
               v2.rows() == 3 && v2.cols() == 1 &&
               v3.rows() == 3 && v3.cols() == 1
               , "IN: "<< v1.rows()<<","<<v1.cols()<<" OUT: "<<
       v2.rows()<<","<<v2.cols()<<"/"<<v3.rows()<<","<<v3.cols() );*/

    typedef typename Derived::Scalar PREC;

    using std::abs;
    using std::sqrt;

    v1.normalize();

    if (abs(v1(0)) > abs(v1(2)))
    {
        PREC invLen = 1.0 / sqrt(v1(0) * v1(0) + v1(2) * v1(2));
        v2          = typename MyMatrix<PREC>::Vector3(-v1(2) * invLen, 0, v1(0) * invLen);
    }
    else
    {
        PREC invLen = 1.0 / sqrt(v1(1) * v1(1) + v1(2) * v1(2));
        v2          = typename MyMatrix<PREC>::Vector3(0, v1(2) * invLen, -v1(1) * invLen);
    }
    v3 = v1.cross(v2);

    v2.normalize();
    v3.normalize();
}
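A hypothetical call site for the function above, assuming MyMatrix<PREC>::Vector3 is Eigen::Matrix<PREC, 3, 1> and the omitted template header is in scope:

void makeCoordinateSystem_demo()
{
  Eigen::Vector3d e1(1.0, 2.0, 0.5), e2, e3;
  makeCoordinateSystem(e1, e2, e3);
  // e1 is now unit length, and (e1, e2, e3) is a right-handed orthonormal basis.
}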
Example #5
inline Eigen::Matrix<typename Eigen::internal::traits<Derived_T>::Scalar, 3, 3> crossMx(
    Eigen::MatrixBase<Derived_T> const & v)
{
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Eigen::MatrixBase<Derived_T>, 3);
  assert((v.cols()==3 && v.rows()==1)||(v.rows()==3 && v.cols()==1));
  return crossMx(v(0), v(1), v(2));
}
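The three-scalar overload that the snippet delegates to is not shown here; presumably it builds the usual skew-symmetric cross-product matrix, along these lines:

#include <Eigen/Core>

// Sketch of the assumed scalar overload: crossMx(v) * w == v.cross(w).
template <typename Scalar>
Eigen::Matrix<Scalar, 3, 3> crossMx(Scalar x, Scalar y, Scalar z)
{
  Eigen::Matrix<Scalar, 3, 3> C;
  C << Scalar(0),        -z,         y,
               z, Scalar(0),        -x,
              -y,         x, Scalar(0);
  return C;
}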
Example #6
IGL_INLINE void igl::per_face_normals(
  const Eigen::MatrixBase<DerivedV>& V,
  const Eigen::MatrixBase<DerivedF>& F,
  const Eigen::MatrixBase<DerivedZ> & Z,
  Eigen::PlainObjectBase<DerivedN> & N)
{
  N.resize(F.rows(),3);
  // loop over faces
  int Frows = F.rows();
#pragma omp parallel for if (Frows>10000)
  for(int i = 0; i < Frows;i++)
  {
    const Eigen::Matrix<typename DerivedV::Scalar, 1, 3> v1 = V.row(F(i,1)) - V.row(F(i,0));
    const Eigen::Matrix<typename DerivedV::Scalar, 1, 3> v2 = V.row(F(i,2)) - V.row(F(i,0));
    N.row(i) = v1.cross(v2);//.normalized();
    typename DerivedV::Scalar r = N.row(i).norm();
    if(r == 0)
    {
      N.row(i) = Z;
    }else
    {
      N.row(i) /= r;
    }
  }
}
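A hypothetical call site for igl::per_face_normals; the Z argument is the normal assigned to degenerate (zero-area) faces:

#include <igl/per_face_normals.h>
#include <Eigen/Dense>

void per_face_normals_demo()
{
  Eigen::MatrixXd V(3, 3), N;
  Eigen::MatrixXi F(1, 3);
  V << 0, 0, 0,
       1, 0, 0,
       0, 1, 0;
  F << 0, 1, 2;
  Eigen::RowVector3d Z(0, 0, 1);      // fallback normal for degenerate faces
  igl::per_face_normals(V, F, Z, N);  // N is 1 x 3, here (0, 0, 1)
}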
Example #7
typename Gradient<Eigen::Matrix<typename DerivedR::Scalar, RPY_SIZE, 1>, DerivedDR::ColsAtCompileTime>::type drotmat2rpy(
    const Eigen::MatrixBase<DerivedR>& R,
    const Eigen::MatrixBase<DerivedDR>& dR)
{
  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Eigen::MatrixBase<DerivedR>, SPACE_DIMENSION, SPACE_DIMENSION);
  EIGEN_STATIC_ASSERT(Eigen::MatrixBase<DerivedDR>::RowsAtCompileTime == RotmatSize, THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE);

  const int nq = dR.cols();
  typedef typename DerivedR::Scalar Scalar;
  typedef typename Gradient<Eigen::Matrix<Scalar, RPY_SIZE, 1>, DerivedDR::ColsAtCompileTime>::type ReturnType;
  ReturnType drpy(RPY_SIZE, nq);

  auto dR11_dq = getSubMatrixGradient(dR, 0, 0, R.rows());
  auto dR21_dq = getSubMatrixGradient(dR, 1, 0, R.rows());
  auto dR31_dq = getSubMatrixGradient(dR, 2, 0, R.rows());
  auto dR32_dq = getSubMatrixGradient(dR, 2, 1, R.rows());
  auto dR33_dq = getSubMatrixGradient(dR, 2, 2, R.rows());

  Scalar sqterm = R(2,1) * R(2,1) + R(2,2) * R(2,2);

  using namespace std;
  // droll_dq
  drpy.row(0) = (R(2, 2) * dR32_dq - R(2, 1) * dR33_dq) / sqterm;

  // dpitch_dq
  Scalar sqrt_sqterm = sqrt(sqterm);
  drpy.row(1) = (-sqrt_sqterm * dR31_dq + R(2, 0) / sqrt_sqterm * (R(2, 1) * dR32_dq + R(2, 2) * dR33_dq)) / (R(2, 0) * R(2, 0) + R(2, 1) * R(2, 1) + R(2, 2) * R(2, 2));

  // dyaw_dq
  sqterm = R(0, 0) * R(0, 0) + R(1, 0) * R(1, 0);
  drpy.row(2) = (R(0, 0) * dR21_dq - R(1, 0) * dR11_dq) / sqterm;
  return drpy;
}
Example #8
 void jacobian(const double t, const std::vector<double>& y,
               Eigen::MatrixBase<Derived1>& dy_dt,
               Eigen::MatrixBase<Derived2>& Jy,
               Eigen::MatrixBase<Derived2>& Jtheta) const {
   using Eigen::Dynamic;
   using Eigen::Map;
   using Eigen::Matrix;
   using Eigen::RowVectorXd;
   using stan::math::var;
   using std::vector;
   vector<double> grad(y.size() + theta_.size());
   Map<RowVectorXd> grad_eig(&grad[0], y.size() + theta_.size());
   try {
     start_nested();
     vector<var> y_var(y.begin(), y.end());
     vector<var> theta_var(theta_.begin(), theta_.end());
     vector<var> z_var;
     z_var.reserve(y.size() + theta_.size());
     z_var.insert(z_var.end(), y_var.begin(), y_var.end());
     z_var.insert(z_var.end(), theta_var.begin(), theta_var.end());
     vector<var> dy_dt_var = f_(t, y_var, theta_var, x_, x_int_, msgs_);
     for (size_t i = 0; i < dy_dt_var.size(); ++i) {
       dy_dt(i) = dy_dt_var[i].val();
       set_zero_all_adjoints_nested();
       dy_dt_var[i].grad(z_var, grad);
       Jy.row(i) = grad_eig.leftCols(y.size());
       Jtheta.row(i) = grad_eig.rightCols(theta_.size());
     }
   } catch (const std::exception& e) {
     recover_memory_nested();
     throw;
   }
   stan::math::recover_memory_nested();
 }
Example #9
void MSF_MeasurementBase<EKFState_T>::calculateAndApplyCorrection(
    shared_ptr<EKFState_T> state, MSF_Core<EKFState_T>& core,
    const Eigen::MatrixBase<H_type>& H_delayed,
    const Eigen::MatrixBase<Res_type> & res_delayed,
    const Eigen::MatrixBase<R_type>& R_delayed) {

  EIGEN_STATIC_ASSERT_FIXED_SIZE (H_type);
  EIGEN_STATIC_ASSERT_FIXED_SIZE (R_type);

  // Get measurements.
  /// Correction from EKF update.
  Eigen::Matrix<double, MSF_Core<EKFState_T>::nErrorStatesAtCompileTime, 1> correction_;

  R_type S;
  Eigen::Matrix<double, MSF_Core<EKFState_T>::nErrorStatesAtCompileTime,
      R_type::RowsAtCompileTime> K;
  typename MSF_Core<EKFState_T>::ErrorStateCov & P = state->P;

  S = H_delayed * P * H_delayed.transpose() + R_delayed;
  K = P * H_delayed.transpose() * S.inverse();

  correction_ = K * res_delayed;
  const typename MSF_Core<EKFState_T>::ErrorStateCov KH =
      (MSF_Core<EKFState_T>::ErrorStateCov::Identity() - K * H_delayed);
  P = KH * P * KH.transpose() + K * R_delayed * K.transpose();

  // Make sure P stays symmetric.
  P = 0.5 * (P + P.transpose());

  core.applyCorrection(state, correction_);
}
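The covariance update above is the Joseph form. A self-contained sketch of the same sequence with plain dynamic matrices (all names here are illustrative, not part of the msf framework):

#include <Eigen/Dense>

void joseph_form_update_demo()
{
  Eigen::MatrixXd P = Eigen::MatrixXd::Identity(2, 2);  // state covariance
  Eigen::MatrixXd H(1, 2), R(1, 1);
  H << 1, 0;
  R << 0.1;
  Eigen::VectorXd res(1);
  res << 0.3;

  Eigen::MatrixXd S = H * P * H.transpose() + R;        // innovation covariance
  Eigen::MatrixXd K = P * H.transpose() * S.inverse();  // Kalman gain
  Eigen::VectorXd correction = K * res;                 // state correction
  Eigen::MatrixXd KH = Eigen::MatrixXd::Identity(2, 2) - K * H;
  P = KH * P * KH.transpose() + K * R * K.transpose();  // Joseph-form update
  P = 0.5 * (P + P.transpose()).eval();                 // re-symmetrize (eval() avoids aliasing P)
}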
Example #10
IGL_INLINE void igl::internal_angles_using_edge_lengths(
  const Eigen::MatrixBase<DerivedL>& L,
  Eigen::PlainObjectBase<DerivedK> & K)
{
  // Note:
  //   Usage of internal_angles_using_squared_edge_lengths() is preferred to internal_angles_using_edge_lengths()
  //   This function is deprecated and probably will be removed in future versions
  typedef typename DerivedL::Index Index;
  assert(L.cols() == 3 && "Edge-lengths should come from triangles");
  const Index m = L.rows();
  K.resize(m,3);
  parallel_for(
    m,
    [&L,&K](const Index f)
    {
      for(size_t d = 0;d<3;d++)
      {
        const auto & s1 = L(f,d);
        const auto & s2 = L(f,(d+1)%3);
        const auto & s3 = L(f,(d+2)%3);
        K(f,d) = acos((s3*s3 + s2*s2 - s1*s1)/(2.*s3*s2));
      }
    },
    1000l);
}
Example #11
IGL_INLINE void igl::project_to_line_segment(
  const Eigen::MatrixBase<DerivedP> & P,
  const Eigen::MatrixBase<DerivedS> & S,
  const Eigen::MatrixBase<DerivedD> & D,
  Eigen::PlainObjectBase<Derivedt> & t,
  Eigen::PlainObjectBase<DerivedsqrD> & sqrD)
{
  project_to_line(P,S,D,t,sqrD);
  const int np = P.rows();
  // loop over points and fix those that projected beyond endpoints
#pragma omp parallel for if (np>10000)
  for(int p = 0;p<np;p++)
  {
    const DerivedP Pp = P.row(p);
    if(t(p)<0)
    {
      sqrD(p) = (Pp-S).squaredNorm();
      t(p) = 0;
    }else if(t(p)>1)
    {
      sqrD(p) = (Pp-D).squaredNorm();
      t(p) = 1;
    }
  }
}
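A hypothetical call site based on the signature above, projecting two query points onto the segment from (0,0,0) to (1,0,0):

#include <igl/project_to_line_segment.h>
#include <Eigen/Dense>

void project_to_line_segment_demo()
{
  Eigen::MatrixXd P(2, 3);
  P << 0.5, 1.0, 0.0,
       2.0, 0.0, 0.0;
  Eigen::RowVector3d S(0, 0, 0), D(1, 0, 0);
  Eigen::VectorXd t, sqrD;
  igl::project_to_line_segment(P, S, D, t, sqrD);
  // t    = (0.5, 1)  -- the second point is clamped to the D endpoint
  // sqrD = (1, 1)
}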
Example #12
File: hessian.cpp  Project: dukecyto/libigl
IGL_INLINE void igl::hessian(
                             const Eigen::MatrixBase<DerivedV> & V,
                             const Eigen::MatrixBase<DerivedF> & F,
                             Eigen::SparseMatrix<Scalar>& H)
{
    typedef typename DerivedV::Scalar denseScalar;
    typedef typename Eigen::Matrix<denseScalar, Eigen::Dynamic, 1> VecXd;
    typedef typename Eigen::SparseMatrix<Scalar> SparseMat;
    typedef typename Eigen::DiagonalMatrix
                       <Scalar, Eigen::Dynamic, Eigen::Dynamic> DiagMat;
    
    int dim = V.cols();
    assert((dim==2 || dim==3) &&
           "The dimension of the vertices should be 2 or 3");
    
    //Construct the combined gradient matrix
    SparseMat G;
    igl::grad(Eigen::PlainObjectBase<DerivedV>(V),
              Eigen::PlainObjectBase<DerivedF>(F),
              G, false);
    SparseMat GG(F.rows(), dim*V.rows());
    GG.reserve(G.nonZeros());
    for(int i=0; i<dim; ++i)
        GG.middleCols(i*G.cols(),G.cols()) = G.middleRows(i*F.rows(),F.rows());
    SparseMat D;
    igl::repdiag(GG,dim,D);
    
    //Compute area matrix
    VecXd areas;
    igl::doublearea(V, F, areas);
    DiagMat A = (0.5*areas).replicate(dim,1).asDiagonal();
    
    //Compute FEM Hessian
    H = D.transpose()*A*G;
}
Example #13
bool checkRange(const Eigen::MatrixBase<DerivedA>& mm,
  typename DerivedA::Scalar minVal, typename DerivedA::Scalar maxVal)
{
    assert(minVal < maxVal);

    // Go through each element in the matrix and ensure they are not
    // NaN and fall within the specified min and max values.
    for (int ii = 0; ii < mm.rows(); ii++)
    {
        for (int jj = 0; jj < mm.cols(); jj++)
        {
            if (std::isnan(mm(ii, jj)))
            {
                std::stringstream ss;
                ss << "NaN detected at index (" << ii << ", " << jj << "), returning false!";
                ROS_WARN_STREAM(ss.str());
                return false;
            }
            if (mm(ii, jj) > maxVal || mm(ii, jj) < minVal)
            {
                std::stringstream ss;
                ss << "Value of out range at index (" << ii << ", " << jj << "), returning false.\n"
                     " - value = " << mm(ii, jj) << "\n"
                     " - minVal = " << minVal << "\n"
                     " - maxVal = " << maxVal;
                ROS_WARN_STREAM(ss.str());
                return false;
            }
        }
    }

    return true;
}
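A hypothetical call site, assuming the omitted template <typename DerivedA> header so that checkRange above is visible:

void checkRange_demo()
{
  Eigen::Matrix2d m;
  m <<  0.1, 0.5,
       -0.2, 0.9;
  bool ok = checkRange(m, -1.0, 1.0);  // true: no NaNs, all entries within [-1, 1]
  (void)ok;
}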
Example #14
void SLHA_io::set_block_imag(const std::string& name,
                             const Eigen::MatrixBase<Derived>& matrix,
                             const std::string& symbol, double scale)
{
   std::ostringstream ss;
   ss << "Block " << name;
   if (scale != 0.)
      ss << " Q= " << FORMAT_SCALE(scale);
   ss << '\n';

   const int rows = matrix.rows();
   const int cols = matrix.cols();
   for (int i = 1; i <= rows; ++i) {
      if (cols == 1) {
         ss << boost::format(vector_formatter) % i % Im(matrix(i-1,0))
            % ("Im(" + symbol + "(" + ToString(i) + "))");
      } else {
         for (int k = 1; k <= cols; ++k) {
            ss << boost::format(mixing_matrix_formatter) % i % k % Im(matrix(i-1,k-1))
               % ("Im(" + symbol + "(" + ToString(i) + "," + ToString(k) + "))");
         }
      }
   }

   set_block(ss);
}
Example #15
double SLHA_io::read_vector(const std::string& block_name, Eigen::MatrixBase<Derived>& vector) const
{
   if (vector.cols() != 1) throw SetupError("Vector has more than 1 column");

   auto block = data.find(data.cbegin(), data.cend(), block_name);

   const int rows = vector.rows();
   double scale = 0.;

   while (block != data.cend()) {
      for (const auto& line: *block) {
         if (!line.is_data_line()) {
            // read scale from block definition
            if (line.size() > 3 &&
                to_lower(line[0]) == "block" && line[2] == "Q=")
               scale = convert_to<double>(line[3]);
            continue;
         }

         if (line.size() >= 2) {
            const int i = convert_to<int>(line[0]) - 1;
            if (0 <= i && i < rows) {
               const double value = convert_to<double>(line[1]);
               vector(i,0) = value;
            }
         }
      }

      ++block;
      block = data.find(block, data.cend(), block_name);
   }

   return scale;
}
Example #16
	MatrixSuccessCode LUPartialPivotDecompositionSuccessful(Eigen::MatrixBase<Derived> const& LU)
	{
		#ifndef BERTINI_DISABLE_ASSERTS
			assert(LU.rows()==LU.cols() && "non-square matrix in LUPartialPivotDecompositionSuccessful");
			assert(LU.rows()>0 && "empty matrix in LUPartialPivotDecompositionSuccessful");
		#endif

			// this loop won't test entry (0,0).  it's tested separately after.
		for (unsigned int ii = LU.rows()-1; ii > 0; ii--)
		{
			if (IsSmallValue(LU(ii,ii))) 
			{
				return MatrixSuccessCode::SmallValue;
			}

			if (IsLargeChange(LU(ii-1,ii-1),LU(ii,ii)))
			{
				return MatrixSuccessCode::LargeChange;
			}
		}

		// this line is the reason for the above assert on non-empty matrix.
		if (IsSmallValue(LU(0,0))) 
		{
			return MatrixSuccessCode::SmallValue;
		}

		return MatrixSuccessCode::Success;
	}
Example #17
IGL_INLINE void igl::vertex_triangle_adjacency(
  const Eigen::MatrixBase<DerivedF> & F,
  const int n,
  Eigen::PlainObjectBase<DerivedVF> & VF,
  Eigen::PlainObjectBase<DerivedNI> & NI)
{
  typedef Eigen::Matrix<typename DerivedVF::Scalar,Eigen::Dynamic,1> VectorXI;
  // vfd  #V list so that vfd(i) contains the vertex-face degree (number of
  // faces incident on vertex i)
  VectorXI vfd = VectorXI::Zero(n);
  for (int i = 0; i < F.rows(); i++)
  {
    for (int j = 0; j < 3; j++)
    {
      vfd[F(i,j)]++;
    }
  }
  igl::cumsum(vfd,1,NI);
  // Prepend a zero
  NI = (DerivedNI(n+1)<<0,NI).finished();
  // vfd now acts as a counter
  vfd = NI;

  VF.derived()= Eigen::VectorXi(3*F.rows());
  for (int i = 0; i < F.rows(); i++)
  {
    for (int j = 0; j < 3; j++)
    {
      VF[vfd[F(i,j)]] = i;
      vfd[F(i,j)]++;
    }
  }
}
Example #18
	void Precision(Eigen::MatrixBase<Derived> & v, unsigned prec)
	{
		using bertini::Precision;
		
		for (int ii=0; ii<v.rows(); ++ii)
			for (int jj=0; jj<v.cols(); ++jj)
				Precision(v(ii,jj),prec);
	}
Example #19
 void ErrorTermFs<C>::setInvR(const Eigen::MatrixBase<DERIVED>& invR)
 {
   SM_ASSERT_EQ(Exception, invR.rows(), invR.cols(), "The covariance matrix must be square");
   SM_ASSERT_EQ(Exception, invR.rows(), (int)dimension(), "The covariance matrix does not match the size of the error");
   // http://eigen.tuxfamily.org/dox-devel/classEigen_1_1LDLT.html#details
   // LDLT seems to work on positive semidefinite matrices.
   sm::eigen::computeMatrixSqrt(invR, _sqrtInvR);
 }
Example #20
 template<typename Rhs> inline const Eigen::internal::solve_retval<MyJacobiPreconditioner, Rhs>
 solve(const Eigen::MatrixBase<Rhs>& b) const
 {
   eigen_assert(m_isInitialized && "MyJacobiPreconditioner is not initialized.");
   eigen_assert(m_invdiag.size()==b.rows()
             && "MyJacobiPreconditioner::solve(): invalid number of rows of the right hand side matrix b");
   return Eigen::internal::solve_retval<MyJacobiPreconditioner, Rhs>(*this, b.derived());
 }
Example #21
void assert_size(const Eigen::MatrixBase<Derived> &X, int size_expected)
{
    common_assert_msg_ex(
        X.size() == size_expected,
        "matrix size mismatch" << std::endl << 
            "actual: " << X.size() << std::endl << 
            "expected: " << size_expected,
        eigen_utilities::assert_error);
}
Example #22
void assert_size(const Eigen::MatrixBase<Derived> &X, int rows_expected, int cols_expected)
{
    common_assert_msg_ex(
            X.rows() == rows_expected && X.cols() == cols_expected,
            "matrix [row, col] mismatch" << std::endl << 
                "actual: [" << X.rows() << ", " << X.cols() << "]" << std::endl << 
                "expected: [" << rows_expected << ", " << cols_expected << "]",
            eigen_utilities::assert_error);
}
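Hypothetical call sites for the two overloads above (again assuming their omitted template <typename Derived> headers):

void assert_size_demo()
{
  Eigen::Vector3d v = Eigen::Vector3d::Zero();
  assert_size(v, 3);     // passes: v.size() == 3
  Eigen::MatrixXd M(4, 2);
  assert_size(M, 4, 2);  // passes: 4 rows, 2 columns
}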
Example #23
bool allGreaterEquals(const Eigen::MatrixBase<DerivedA> & A,
                      const Eigen::MatrixBase<DerivedB> & B,
                      double tolerance = math::NUMERICAL_ZERO_DIFFERENCE)
{
  assertion(A.rows() == B.rows(), "Matrices with different number of rows can't be compared.");
  assertion(A.cols() == B.cols(), "Matrices with different number of cols can't be compared.");
  
  return ((A-B).array() >= tolerance).all();
}
Example #24
File: EigsGen.cpp  Project: cran/rARPACK
SEXP returnRealIfPossible(Eigen::MatrixBase<Derived> &matvec)
{
    SEXP result;
    if(matvec.imag().isZero())
        result = wrap(matvec.real());
    else
        result = wrap(matvec);
    
    return result;
}
Example #25
 inline void
 computeRNEADerivatives(const ModelTpl<Scalar,Options,JointCollectionTpl> & model,
                        DataTpl<Scalar,Options,JointCollectionTpl> & data,
                        const Eigen::MatrixBase<ConfigVectorType> & q,
                        const Eigen::MatrixBase<TangentVectorType1> & v,
                        const Eigen::MatrixBase<TangentVectorType2> & a)
 {
   computeRNEADerivatives(model,data,q.derived(),v.derived(),a.derived(),
                          data.dtau_dq, data.dtau_dv, data.M);
 }
Example #26
File: ba81quad.cpp  Project: OpenMx/OpenMx
void ba81NormalQuad::layer::detectTwoTier(Eigen::ArrayBase<T1> &param,
					  Eigen::MatrixBase<T2> &mean, Eigen::MatrixBase<T3> &cov)
{
	if (mean.rows() < 3) return;

	std::vector<int> orthogonal;

	Eigen::Matrix<Eigen::DenseIndex, Eigen::Dynamic, 1>
		numCov((cov.array() != 0.0).matrix().colwise().count());
	std::vector<int> candidate;
	for (int fx=0; fx < numCov.rows(); ++fx) {
		if (numCov(fx) == 1) candidate.push_back(fx);
	}
	if (candidate.size() > 1) {
		std::vector<bool> mask(numItems());
		for (int cx=candidate.size() - 1; cx >= 0; --cx) {
			std::vector<bool> loading(numItems());
			for (int ix=0; ix < numItems(); ++ix) {
				loading[ix] = param(candidate[cx], itemsMap[ix]) != 0;
			}
			std::vector<bool> overlap(loading.size());
			std::transform(loading.begin(), loading.end(),
				       mask.begin(), overlap.begin(),
				       std::logical_and<bool>());
			if (std::find(overlap.begin(), overlap.end(), true) == overlap.end()) {
				std::transform(loading.begin(), loading.end(),
					       mask.begin(), mask.begin(),
					       std::logical_or<bool>());
				orthogonal.push_back(candidate[cx]);
			}
		}
	}
	std::reverse(orthogonal.begin(), orthogonal.end());

	if (orthogonal.size() == 1) orthogonal.clear();
	if (orthogonal.size() && orthogonal[0] != mean.rows() - int(orthogonal.size())) {
		mxThrow("Independent specific factors must be given after general dense factors");
	}

	numSpecific = orthogonal.size();

	if (numSpecific) {
		Sgroup.assign(numItems(), 0);
		for (int ix=0; ix < numItems(); ix++) {
			for (int dx=orthogonal[0]; dx < mean.rows(); ++dx) {
				if (param(dx, itemsMap[ix]) != 0) {
					Sgroup[ix] = dx - orthogonal[0];
					continue;
				}
			}
		}
		//Eigen::Map< Eigen::ArrayXi > foo(Sgroup.data(), param.cols());
		//mxPrintMat("sgroup", foo);
	}
}
Example #27
File: LSTM.hpp  Project: yehaibuaa/LSTMRNN
void LSTM::compute_intermediate(const Eigen::MatrixBase<Derived> &w_mat)
{

	for(int i = 0; i < intermediate.cols(); i++)
	{
		if( input(i) != -1 )
			intermediate.col(i) = w_mat.col( input(i) );
		else
			intermediate.col(i) = w_mat.col(0);
	}
}
Example #28
        friend typename Eigen::Matrix <typename Eigen::MatrixBase<D>::Scalar, 3, -1>
        operator*( const ConstraintTranspose &, const Eigen::MatrixBase<D> & F )
        {
          typedef Eigen::Matrix<typename Eigen::MatrixBase<D>::Scalar, 3, -1> MatrixReturnType;
          assert(F.rows()==6);

          MatrixReturnType result (3, F.cols ());
          result.template topRows <2> () = F.template topRows <2> ();
          result.template bottomRows <1> () = F.template bottomRows <1> ();
          return result;
        }
Example #29
IGL_INLINE void igl::per_face_normals_stable(
  const Eigen::MatrixBase<DerivedV>& V,
  const Eigen::MatrixBase<DerivedF>& F,
  Eigen::PlainObjectBase<DerivedN> & N)
{
  using namespace Eigen;
  typedef Matrix<typename DerivedV::Scalar,1,3> RowVectorV3;
  typedef typename DerivedV::Scalar Scalar;

  const size_t m = F.rows();

  N.resize(F.rows(),3);
  // Grab all points
  for(size_t f = 0;f<m;f++)
  {
    const RowVectorV3 p0 = V.row(F(f,0));
    const RowVectorV3 p1 = V.row(F(f,1));
    const RowVectorV3 p2 = V.row(F(f,2));
    const RowVectorV3 n0 = (p1 - p0).cross(p2 - p0);
    const RowVectorV3 n1 = (p2 - p1).cross(p0 - p1);
    const RowVectorV3 n2 = (p0 - p2).cross(p1 - p2);

    // careful sum
    for(int d = 0;d<3;d++)
    {
      // This is a little _silly_ in terms of complexity, but its recursive
      // implementation is clean looking...
      const std::function<Scalar(Scalar,Scalar,Scalar)> sum3 =
        [&sum3](Scalar a, Scalar b, Scalar c)->Scalar
      {
        if(fabs(c)>fabs(a))
        {
          return sum3(c,b,a);
        }
        // c < a
        if(fabs(c)>fabs(b))
        {
          return sum3(a,c,b);
        }
        // c < a, c < b
        if(fabs(b)>fabs(a))
        {
          return sum3(b,a,c);
        }
        return (a+b)+c;
      };

      N(f,d) = sum3(n0(d),n1(d),n2(d));
    }
    // sum had better not be zero, or else NaN
    N.row(f) /= N.row(f).norm();
  }

}
Example #30
	unsigned Precision(Eigen::MatrixBase<Derived> const & v)
	{
		auto a = Precision(v(0,0));
		for (int ii=0; ii<v.rows(); ++ii)
			for (int jj=0; jj<v.cols(); ++jj)
			{
				auto b = Precision(v(ii,jj));
				assert (a==b);
				a = b;
			}
		return a;
	}