Code Example #1
std::vector<double>
unit_vector_grad(Eigen::Matrix<double,Eigen::Dynamic,1>& y_dbl,
                 int k) {
  using Eigen::Matrix;
  using Eigen::Dynamic;
  using stan::math::var;
  Matrix<var,Dynamic,1> y(y_dbl.size());
  for (int i = 0; i < y.size(); ++i)
    y(i) = y_dbl(i);

  std::vector<var> x(y.size());
  for (size_t i = 0; i < x.size(); ++i)
    x[i] = y(i);

  var fx_k = stan::math::unit_vector_constrain(y)[k];
  std::vector<double> grad(y.size());
  fx_k.grad(x,grad);
  return grad;
}
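A quick way to sanity-check the helper above is a central finite-difference approximation of the same gradient. A minimal sketch, assuming stan::math::unit_vector_constrain is available; the helper name is introduced here for illustration only:

// Minimal sketch: central finite differences for the same gradient, to compare
// against the reverse-mode result of unit_vector_grad above.
// Assumes stan::math::unit_vector_constrain; finite_diff_unit_vector_grad is hypothetical.
std::vector<double>
finite_diff_unit_vector_grad(const Eigen::Matrix<double,Eigen::Dynamic,1>& y_dbl,
                             int k, double eps = 1e-6) {
  std::vector<double> grad(y_dbl.size());
  for (int i = 0; i < y_dbl.size(); ++i) {
    Eigen::Matrix<double,Eigen::Dynamic,1> y_plus = y_dbl;
    Eigen::Matrix<double,Eigen::Dynamic,1> y_minus = y_dbl;
    y_plus(i) += eps;
    y_minus(i) -= eps;
    grad[i] = (stan::math::unit_vector_constrain(y_plus)[k]
               - stan::math::unit_vector_constrain(y_minus)[k]) / (2.0 * eps);
  }
  return grad;
}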
Code Example #2
File: softmax_test.cpp Project: javaosos/stan
// compute grad using templated definition in math
// to check custom derivatives
std::vector<double>
softmax_grad(Eigen::Matrix<double,Eigen::Dynamic,1>& alpha_dbl,
             int k) {
  using Eigen::Matrix;
  using Eigen::Dynamic;
  using stan::agrad::var;
  Matrix<var,Dynamic,1> alpha(alpha_dbl.size());
  for (int i = 0; i < alpha.size(); ++i)
    alpha(i) = alpha_dbl(i);

  std::vector<var> x(alpha.size());
  for (size_t i = 0; i < x.size(); ++i)
    x[i] = alpha(i);
  
  var fx_k = stan::math::softmax(alpha)[k];
  std::vector<double> grad(alpha.size());
  fx_k.grad(x,grad);
  return grad;
}
Code Example #3
File: log_determinant_spd.hpp Project: javaosos/stan
    inline var log_determinant_spd(const Eigen::Matrix<var,R,C>& m) {
      using stan::math::domain_error;
      using Eigen::Matrix;

      math::check_square("log_determinant_spd", "m", m);

      Matrix<double,R,C> m_d(m.rows(),m.cols());
      for (int i = 0; i < m.size(); ++i)
        m_d(i) = m(i).val();

      Eigen::LDLT<Matrix<double,R,C> > ldlt(m_d);
      if (ldlt.info() != Eigen::Success) {
        double y = 0;
        domain_error("log_determinant_spd",
                "matrix argument", y,
                "failed LDLT factorization");
      }

       // compute the inverse of A (needed for the derivative)
      m_d.setIdentity(m.rows(), m.cols());
      ldlt.solveInPlace(m_d);
          
      if (ldlt.isNegative() || (ldlt.vectorD().array() <= 1e-16).any()) {
        double y = 0;
        domain_error("log_determinant_spd",
                "matrix argument", y,
                "matrix is negative definite");
      }

      double val = ldlt.vectorD().array().log().sum();

      if (!boost::math::isfinite(val)) {
        double y = 0;
        domain_error("log_determinant_spd",
                "matrix argument", y,
                "log determininant is infinite");
      }

      vari** operands = ChainableStack::memalloc_
        .alloc_array<vari*>(m.size());
      for (int i = 0; i < m.size(); ++i)
        operands[i] = m(i).vi_;

      double* gradients = ChainableStack::memalloc_
        .alloc_array<double>(m.size());
      for (int i = 0; i < m.size(); ++i)
        gradients[i] = m_d(i);

      return var(new precomputed_gradients_vari(val,m.size(),operands,gradients));
    }
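Since d log|A| / dA = A^{-1} for a symmetric positive-definite A, the precomputed gradient above is exactly the inverse produced by the LDLT solve. A minimal sketch of a gradient helper in the style of the earlier examples, assuming stan::math::var and stan::math::log_determinant_spd; the function name is hypothetical:

// Minimal sketch: the reverse-mode gradient of log|A| should match inv(A) entrywise
// (column-major order). Follows the pattern of the grad helpers above.
std::vector<double>
log_determinant_spd_grad(const Eigen::Matrix<double,Eigen::Dynamic,Eigen::Dynamic>& a_dbl) {
  using stan::math::var;
  Eigen::Matrix<var,Eigen::Dynamic,Eigen::Dynamic> a(a_dbl.rows(), a_dbl.cols());
  for (int i = 0; i < a.size(); ++i)
    a(i) = a_dbl(i);

  std::vector<var> x(a.data(), a.data() + a.size());
  var fx = stan::math::log_determinant_spd(a);

  std::vector<double> grad(a.size());
  fx.grad(x, grad);
  return grad;  // expected to agree with a_dbl.inverse() up to rounding
}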
Code Example #4
File: simplex_free.hpp Project: stan-dev/math
    Eigen::Matrix<T, Eigen::Dynamic, 1>
    simplex_free(const Eigen::Matrix<T, Eigen::Dynamic, 1>& x) {
      using Eigen::Dynamic;
      using Eigen::Matrix;
      using std::log;

      typedef typename index_type<Matrix<T, Dynamic, 1> >::type size_type;

      check_simplex("stan::math::simplex_free",
                    "Simplex variable", x);
      int Km1 = x.size() - 1;
      Eigen::Matrix<T, Eigen::Dynamic, 1> y(Km1);
      T stick_len(x(Km1));
      for (size_type k = Km1; --k >= 0; ) {
        stick_len += x(k);
        T z_k(x(k) / stick_len);
        y(k) = logit(z_k) + log(Km1 - k);
        // note: log(Km1 - k) = logit(1.0 / (Km1 + 1 - k));
      }
      return y;
    }
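A round trip against the constraining transform is the natural check here: simplex_constrain(simplex_free(x)) should reproduce x up to rounding. A minimal sketch, assuming stan::math::simplex_constrain is available with the matching vector signature (requires <cassert> and <cmath>):

// Minimal sketch: free then constrain should reproduce the original simplex.
void check_simplex_round_trip(const Eigen::Matrix<double, Eigen::Dynamic, 1>& x) {
  Eigen::Matrix<double, Eigen::Dynamic, 1> y = stan::math::simplex_free(x);
  Eigen::Matrix<double, Eigen::Dynamic, 1> x_rt = stan::math::simplex_constrain(y);
  assert(x_rt.size() == x.size());
  for (int i = 0; i < x.size(); ++i)
    assert(std::fabs(x_rt(i) - x(i)) < 1e-8);
}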
Code Example #5
File: positive_ordered_free.hpp Project: alyst/math
    Eigen::Matrix<T, Eigen::Dynamic, 1>
    positive_ordered_free(const Eigen::Matrix<T, Eigen::Dynamic, 1>& y) {
      using Eigen::Matrix;
      using Eigen::Dynamic;
      using stan::math::index_type;

      typedef typename index_type<Matrix<T, Dynamic, 1> >::type size_type;

      stan::math::check_positive_ordered("stan::math::positive_ordered_free",
                                                   "Positive ordered variable",
                                                   y);

      size_type k = y.size();
      Matrix<T, Dynamic, 1> x(k);
      if (k == 0)
        return x;
      x[0] = log(y[0]);
      for (size_type i = 1; i < k; ++i)
        x[i] = log(y[i] - y[i-1]);
      return x;
    }
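For reference, the constraining transform this inverts maps an unconstrained vector back to a positive, strictly increasing one by accumulating exponentials. A standalone sketch of that inverse mapping (illustration only, not the library's implementation):

// Minimal sketch of the inverse transform: y(0) = exp(x(0)),
// y(i) = y(i-1) + exp(x(i)), which yields a strictly increasing positive vector.
template <typename T>
Eigen::Matrix<T, Eigen::Dynamic, 1>
positive_ordered_constrain_sketch(const Eigen::Matrix<T, Eigen::Dynamic, 1>& x) {
  using std::exp;
  Eigen::Matrix<T, Eigen::Dynamic, 1> y(x.size());
  if (x.size() == 0)
    return y;
  y(0) = exp(x(0));
  for (int i = 1; i < x.size(); ++i)
    y(i) = y(i - 1) + exp(x(i));
  return y;
}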
Code Example #6
File: print_ijv.cpp Project: azer89/BBW
IGL_INLINE void igl::print_ijv(
  const Eigen::SparseMatrix<T>& X,
  const int offset)
{
  Eigen::Matrix<int,Eigen::Dynamic,1> I;
  Eigen::Matrix<int,Eigen::Dynamic,1> J;
  Eigen::Matrix<T,Eigen::Dynamic,1> V;
  igl::find(X,I,J,V);
  // Concatenate I,J,V
  Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> IJV(I.size(),3);
  IJV.col(0) = I.cast<T>();
  IJV.col(1) = J.cast<T>();
  IJV.col(2) = V;
  // Offset
  if(offset != 0)
  {
    IJV.col(0).array() += offset;
    IJV.col(1).array() += offset;
  }
  std::cout<<IJV;
}
Code Example #7
File: MitsubishiH7.cpp Project: roboticslibrary/rl
		void
		MitsubishiH7::setMotorPulse(const ::Eigen::Matrix< ::std::int32_t, ::Eigen::Dynamic, 1>& p)
		{
			assert(p.size() >= this->getDof());
			
			this->out.dat2.pls.p1 = 0;
			this->out.dat2.pls.p2 = 0;
			this->out.dat2.pls.p3 = 0;
			this->out.dat2.pls.p4 = 0;
			this->out.dat2.pls.p5 = 0;
			this->out.dat2.pls.p6 = 0;
			this->out.dat2.pls.p7 = 0;
			this->out.dat2.pls.p8 = 0;
			
			// intentional fall-through: starting at the robot's DOF, every lower-numbered pulse is set as well
			switch (this->getDof())
			{
			case 8:
				this->out.dat2.pls.p8 = p(7);
			case 7:
				this->out.dat2.pls.p7 = p(6);
			case 6:
				this->out.dat2.pls.p6 = p(5);
			case 5:
				this->out.dat2.pls.p5 = p(4);
			case 4:
				this->out.dat2.pls.p4 = p(3);
			case 3:
				this->out.dat2.pls.p3 = p(2);
			case 2:
				this->out.dat2.pls.p2 = p(1);
			case 1:
				this->out.dat2.pls.p1 = p(0);
			default:
				break;
			}
			
			this->out.command = MXT_COMMAND_MOVE;
			this->out.sendType = MXT_TYPE_PULSE;
		}
Code Example #8
File: grad_hessian.hpp Project: stan-dev/math
 void
 grad_hessian(const F& f,
              const Eigen::Matrix<double, Eigen::Dynamic, 1>& x,
              double& fx,
              Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>& H,
              std::vector<Eigen::Matrix<double,
              Eigen::Dynamic, Eigen::Dynamic> >&
              grad_H) {
   using Eigen::Matrix;
   using Eigen::Dynamic;
   fx = f(x);
   int d = x.size();
   H.resize(d, d);
   grad_H.resize(d, Matrix<double, Dynamic, Dynamic>(d, d));
   try {
     for (int i = 0; i < d; ++i) {
       for (int j = i; j < d; ++j) {
         start_nested();
         Matrix<fvar<fvar<var> >, Dynamic, 1> x_ffvar(d);
         for (int k = 0; k < d; ++k)
           x_ffvar(k) = fvar<fvar<var> >(fvar<var>(x(k), i == k),
                                         fvar<var>(j == k, 0));
         fvar<fvar<var> > fx_ffvar = f(x_ffvar);
         H(i, j) = fx_ffvar.d_.d_.val();
         H(j, i) = H(i, j);
         grad(fx_ffvar.d_.d_.vi_);
         for (int k = 0; k < d; ++k) {
           grad_H[i](j, k) = x_ffvar(k).val_.val_.adj();
           grad_H[j](i, k) = grad_H[i](j, k);
         }
         recover_memory_nested();
       }
     }
   } catch (const std::exception& e) {
     recover_memory_nested();
     throw;
   }
 }
Code Example #9
File: quad_form_diag.hpp Project: stan-dev/math
 inline Eigen::Matrix <
   typename boost::math::tools::promote_args<T1, T2>::type,
   Eigen::Dynamic, Eigen::Dynamic>
 quad_form_diag(const Eigen::Matrix<T1, Eigen::Dynamic, Eigen::Dynamic>& mat,
                const Eigen::Matrix<T2, R, C>& vec) {
   using boost::math::tools::promote_args;
   check_vector("quad_form_diag", "vec", vec);
   check_square("quad_form_diag", "mat", mat);
   int size = vec.size();
   check_equal("quad_form_diag", "matrix size", mat.rows(),
               size);
   Eigen::Matrix<typename promote_args<T1, T2>::type,
                 Eigen::Dynamic, Eigen::Dynamic> result(size, size);
   for (int i = 0; i < size; i++) {
     result(i, i) = vec(i)*vec(i)*mat(i, i);
     for (int j = i+1; j < size; ++j) {
       typename promote_args<T1, T2>::type temp = vec(i)*vec(j);
       result(j, i) = temp*mat(j, i);
       result(i, j) = temp*mat(i, j);
     }
   }
   return result;
 }
Code Example #10
File: sort_indices_test.cpp Project: aseyboldt/math
void test_sort_indices_desc3(Eigen::Matrix<T,R,C> val) {

  typedef Eigen::Matrix<fvar<fvar<double> >,R,C> AVEC;
  
  const size_t size = val.size();

  AVEC x(size);
  for(size_t i=0U; i<size; i++)
    x.data()[i] = fvar<fvar<double> >(val[i]);
  
  std::vector<int> val_sorted = sort_indices_desc(val);
  std::vector<int> x_sorted = sort_indices_desc(x);
  
  for(size_t i=0U; i<size; i++)
    EXPECT_EQ(val_sorted.data()[i],x_sorted.data()[i]);

  for(size_t i=0U; i<size; i++)
    for(size_t j=0U; j<size; j++)
      if(val_sorted.data()[i] == val.data()[j])
        EXPECT_EQ(x_sorted.data()[i],x.data()[j]);
      else
        EXPECT_FALSE(x_sorted.data()[i]==x.data()[j]);
}
Code Example #11
File: check_simplex.hpp Project: alyst/math
    bool check_simplex(const char* function,
                       const char* name,
                       const Eigen::Matrix<T_prob, Eigen::Dynamic, 1>& theta) {
      using Eigen::Dynamic;
      using Eigen::Matrix;
      using stan::math::index_type;

      typedef typename index_type<Matrix<T_prob, Dynamic, 1> >::type size_t;

      check_nonzero_size(function, name, theta);
      if (!(fabs(1.0 - theta.sum()) <= CONSTRAINT_TOLERANCE)) {
        std::stringstream msg;
        T_prob sum = theta.sum();
        msg << "is not a valid simplex.";
        msg.precision(10);
        msg << " sum(" << name << ") = " << sum
            << ", but should be ";
        std::string msg_str(msg.str());
        domain_error(function, name, 1.0,
                     msg_str.c_str());
        return false;
      }
      for (size_t n = 0; n < theta.size(); n++) {
        if (!(theta[n] >= 0)) {
          std::ostringstream msg;
          msg << "is not a valid simplex. "
              << name << "[" << n + stan::error_index::value << "]"
              << " = ";
          std::string msg_str(msg.str());
          domain_error(function, name, theta[n],
                       msg_str.c_str(),
                       ", but should be greater than or equal to 0");
          return false;
        }
      }
      return true;
    }
Code Example #12
File: sort_indices_test.cpp Project: stan-dev/math
void test_sort_indices_desc(Eigen::Matrix<T,R,C> val) {
  using stan::math::sort_indices_desc;

  typedef Eigen::Matrix<AVAR,R,C> AVEC;
  
  const size_t size = val.size();

  AVEC x(size);
  for(size_t i=0U; i<size; i++)
    x.data()[i] = AVAR(val[i]);
  
  std::vector<int> val_sorted = sort_indices_desc(val);
  std::vector<int> x_sorted = sort_indices_desc(x);
  
  for(size_t i=0U; i<size; i++)
    EXPECT_EQ(val_sorted.data()[i],x_sorted.data()[i]);

  for(size_t i=0U; i<size; i++)
    for(size_t j=0U; j<size; j++)
      if(val_sorted.data()[i] == val.data()[j])
        EXPECT_EQ(x_sorted.data()[i],x.data()[j]);
      else
        EXPECT_FALSE(x_sorted.data()[i]==x.data()[j]);
}
Code Example #13
File: categorical_logit.hpp Project: HerraHuu/stan
    typename boost::math::tools::promote_args<T_prob>::type
    categorical_logit_log(const std::vector<int>& ns, 
                          const Eigen::Matrix<T_prob,Eigen::Dynamic,1>& beta) {
      static const char* function = "stan::prob::categorical_logit_log(%1%)";

      using stan::math::check_bounded;
      using stan::math::check_finite;
      using stan::math::log_softmax;
      using stan::math::sum;

      double lp = 0.0;
      for (size_t k = 0; k < ns.size(); ++k)
        if (!check_bounded(function, ns[k], 1, beta.size(),
                           "categorical outcome out of support",
                           &lp))
          return lp;

      if (!check_finite(function, beta, "log odds parameter", &lp))
        return lp;

      if (!include_summand<propto,T_prob>::value)
        return 0.0;
      
      if (ns.size() == 0)
        return 0.0;
        
      Eigen::Matrix<T_prob,Eigen::Dynamic,1> log_softmax_beta
        = log_softmax(beta);

      // FIXME:  replace with more efficient sum()
      Eigen::Matrix<typename boost::math::tools::promote_args<T_prob>::type,
                    Eigen::Dynamic,1> results(ns.size());
      for (size_t i = 0; i < ns.size(); ++i)
        results[i] = log_softmax_beta(ns[i] - 1);
      return sum(results);
    }
Code Example #14
File: categorical_logit.hpp Project: HerraHuu/stan
    typename boost::math::tools::promote_args<T_prob>::type
    categorical_logit_log(int n, 
                          const Eigen::Matrix<T_prob,Eigen::Dynamic,1>& beta) {
      static const char* function = "stan::prob::categorical_logit_log(%1%)";

      using stan::math::check_bounded;
      using stan::math::check_finite;
      using stan::math::log_sum_exp;

      double lp = 0.0;
      if (!check_bounded(function, n, 1, beta.size(),
                         "categorical outcome out of support",
                         &lp))
        return lp;

      if (!check_finite(function, beta, "log odds parameter", &lp))
        return lp;

      if (!include_summand<propto,T_prob>::value)
        return 0.0;
        
      // FIXME:  wasteful vs. creating term (n-1) if not vectorized
      return beta(n-1) - log_sum_exp(beta); // == log_softmax(beta)(n-1);
    }
Code Example #15
File: softmax.hpp Project: alyst/math
    inline
    Eigen::Matrix<fvar<T>, Eigen::Dynamic, 1>
    softmax(const Eigen::Matrix<fvar<T>, Eigen::Dynamic, 1>& alpha) {
      using stan::math::softmax;
      using Eigen::Matrix;
      using Eigen::Dynamic;

      Matrix<T, Dynamic, 1> alpha_t(alpha.size());
      for (int k = 0; k < alpha.size(); ++k)
        alpha_t(k) = alpha(k).val_;

      Matrix<T, Dynamic, 1> softmax_alpha_t = softmax(alpha_t);

      Matrix<fvar<T>, Dynamic, 1> softmax_alpha(alpha.size());
      for (int k = 0; k < alpha.size(); ++k) {
        softmax_alpha(k).val_ = softmax_alpha_t(k);
        softmax_alpha(k).d_ = 0;
      }

      // for each input position
      for (int m = 0; m < alpha.size(); ++m) {
        // for each output position
        T negative_alpha_m_d_times_softmax_alpha_t_m
          = - alpha(m).d_ * softmax_alpha_t(m);
        for (int k = 0; k < alpha.size(); ++k) {
          // chain from input to output
          if (m == k) {
            softmax_alpha(k).d_
              += softmax_alpha_t(k)
              * (alpha(m).d_
                 + negative_alpha_m_d_times_softmax_alpha_t_m);
          } else {
            softmax_alpha(k).d_
              += negative_alpha_m_d_times_softmax_alpha_t_m
              * softmax_alpha_t(k);
          }
        }
      }

      return softmax_alpha;
    }
Code Example #16
File: softmax.hpp Project: stan-dev/math
inline
Eigen::Matrix<fvar<T>, Eigen::Dynamic, 1>
softmax(const Eigen::Matrix<fvar<T>, Eigen::Dynamic, 1>& alpha) {
    using Eigen::Matrix;
    using Eigen::Dynamic;

    Matrix<T, Dynamic, 1> alpha_t(alpha.size());
    for (int k = 0; k < alpha.size(); ++k)
        alpha_t(k) = alpha(k).val_;

    Matrix<T, Dynamic, 1> softmax_alpha_t = softmax(alpha_t);

    Matrix<fvar<T>, Dynamic, 1> softmax_alpha(alpha.size());
    for (int k = 0; k < alpha.size(); ++k) {
        softmax_alpha(k).val_ = softmax_alpha_t(k);
        softmax_alpha(k).d_ = 0;
    }

    for (int m = 0; m < alpha.size(); ++m) {
        T negative_alpha_m_d_times_softmax_alpha_t_m
            = - alpha(m).d_ * softmax_alpha_t(m);
        for (int k = 0; k < alpha.size(); ++k) {
            if (m == k) {
                softmax_alpha(k).d_
                += softmax_alpha_t(k)
                   * (alpha(m).d_
                      + negative_alpha_m_d_times_softmax_alpha_t_m);
            } else {
                softmax_alpha(k).d_
                += negative_alpha_m_d_times_softmax_alpha_t_m
                   * softmax_alpha_t(k);
            }
        }
    }

    return softmax_alpha;
}
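The two fvar overloads above propagate tangents through the softmax Jacobian, d softmax(alpha)(k) / d alpha(m) = softmax(alpha)(k) * ((k == m ? 1 : 0) - softmax(alpha)(m)). A minimal double-only sketch of that Jacobian using plain Eigen; the helper name is hypothetical:

// Minimal sketch: dense softmax Jacobian, J(k, m) = s(k) * (delta(k, m) - s(m)),
// which is exactly the chain rule the fvar loops above accumulate.
Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>
softmax_jacobian(const Eigen::Matrix<double, Eigen::Dynamic, 1>& alpha) {
  Eigen::Matrix<double, Eigen::Dynamic, 1> s
    = (alpha.array() - alpha.maxCoeff()).exp().matrix();  // shift by max for stability
  s /= s.sum();
  Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> J(alpha.size(), alpha.size());
  for (int k = 0; k < alpha.size(); ++k)
    for (int m = 0; m < alpha.size(); ++m)
      J(k, m) = s(k) * ((k == m ? 1.0 : 0.0) - s(m));
  return J;
}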
Code Example #17
File: sort_desc.hpp Project: alyst/math
 inline typename Eigen::Matrix<var, R, C>
 sort_desc(Eigen::Matrix<var, R, C> xs) {
   std::sort(xs.data(), xs.data()+xs.size(), std::greater<var>());
   return xs;
 }
Code Example #18
File: gaussian.cpp Project: lenalebt/libmusic
 double GaussianFullCov<ScalarType>::calculateNoMeanValue(const Eigen::Matrix<ScalarType, Eigen::Dynamic, 1>& dataVector)
 {
     assert(dataVector.size() == mean.size());
     return preFactor * std::exp(-0.5 * (dataVector.transpose() * ldlt.solve(dataVector))(0));
 }
Code Example #19
File: gaussian.cpp Project: lenalebt/libmusic
 double GaussianFullCov<ScalarType>::calculateValueWithoutWeights(const Eigen::Matrix<ScalarType, Eigen::Dynamic, 1>& dataVector)
 {
     assert(dataVector.size() == mean.size());
     Eigen::Matrix<ScalarType, Eigen::Dynamic, 1> dist = dataVector - mean;
     return preFactorWithoutWeights * std::exp(-0.5 * (dist.transpose() * ldlt.solve(dist))(0));
 }
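Both evaluators above plug a cached LDLT factorization into exp(-0.5 * d' Sigma^{-1} d) and scale by a precomputed normalization constant (preFactor / preFactorWithoutWeights, members of the surrounding class). A standalone sketch of the corresponding log-density using only Eigen, for orientation; gaussian_log_density is a hypothetical helper (requires <cmath>):

// Minimal sketch: log N(x | mu, Sigma)
//   = -0.5 * (d * log(2*pi) + log|Sigma| + (x - mu)' Sigma^{-1} (x - mu)).
double gaussian_log_density(const Eigen::VectorXd& x,
                            const Eigen::VectorXd& mu,
                            const Eigen::MatrixXd& cov) {
  const double log_two_pi = std::log(2.0 * std::acos(-1.0));
  Eigen::LDLT<Eigen::MatrixXd> ldlt(cov);
  Eigen::VectorXd dist = x - mu;
  Eigen::VectorXd solved = ldlt.solve(dist);            // Sigma^{-1} (x - mu)
  double log_det = ldlt.vectorD().array().log().sum();  // log |Sigma|
  return -0.5 * (x.size() * log_two_pi + log_det + dist.dot(solved));
}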
Code Example #20
TEST(Matrices,fromMatlabStringFormat)
{
	const char* mat1 = "[1 2 3;-3 -6 -5]";
	const double vals1[] = {1,2,3,-3,-6,-5};

	const char* mat2 = " [ 	  -8.2	 9.232 ; -2e+2		+6 ; 1.000  7 ] ";    // With tabs and spaces...
	const double vals2[] = {-8.2, 9.232, -2e+2, +6, 1.000 ,7};

	const char* mat3 = "[9]";
	const char* mat4 = "[1 2 3 4 5 6 7 9 10  ; 1 2 3 4 5 6 7 8 9 10 11]";   // An invalid matrix
	const char* mat5 = "[  ]";  // Empty
	const char* mat6 = "[ -405.200 42.232 ; 1219.600    -98.696 ]";  // M1 * M2

	const char* mat13 = "[9 8 7]";
	const char* mat31 = "[9; 8; 7]";

	CMatrixDouble	M1,M2,M3, M4, M5, M6;

	if (! M1.fromMatlabStringFormat(mat1) ||
		(CMatrixFixedNumeric<double,2,3>(vals1)-M1).array().abs().sum() > 1e-4 )
		GTEST_FAIL() << mat1;

	{
		CMatrixFixedNumeric<double,2,3> M1b;
		if (! M1b.fromMatlabStringFormat(mat1) ||
			(CMatrixFixedNumeric<double,2,3>(vals1)-M1b).array().abs().sum() > 1e-4 )
			GTEST_FAIL() << mat1;
	}

	if (! M2.fromMatlabStringFormat(mat2) ||
		M2.cols()!=2 || M2.rows()!=3 ||
		(CMatrixFixedNumeric<double,3,2>(vals2)-M2).array().abs().sum() > 1e-4 )
		GTEST_FAIL() << mat2;

	{
		CMatrixFixedNumeric<double,3,2> M2b;
		if (! M2b.fromMatlabStringFormat(mat2) ||
			(CMatrixFixedNumeric<double,3,2>(vals2)-M2b).array().abs().sum() > 1e-4 )
			GTEST_FAIL() << mat2;
	}

	if (! M3.fromMatlabStringFormat(mat3) )
		GTEST_FAIL() << mat3;

	{
		CVectorDouble m;
		if (! m.fromMatlabStringFormat(mat3) || m.size()!=1 ) GTEST_FAIL() << "CVectorDouble:" << mat3;
	}
	{
		CArrayDouble<1> m;
		if (! m.fromMatlabStringFormat(mat3) ) GTEST_FAIL() << "CArrayDouble<1>:" << mat3;
	}

	{
		CVectorDouble m;
		if (! m.fromMatlabStringFormat(mat31) || m.size()!=3 ) GTEST_FAIL() << "CVectorDouble:" << mat31;
	}
	{
		CArrayDouble<3> m;
		if (! m.fromMatlabStringFormat(mat31) ) GTEST_FAIL() << "CArrayDouble<3>:" << mat31;
	}

	{
		Eigen::Matrix<double,1,3> m;
		if (! m.fromMatlabStringFormat(mat13) ) GTEST_FAIL() << "Matrix<double,1,3>:" << mat13;
	}
	{
		Eigen::Matrix<double,1,Eigen::Dynamic> m;
		if (! m.fromMatlabStringFormat(mat13) || m.size()!=3 ) GTEST_FAIL() << "Matrix<double,1,Dynamic>:" << mat13;
	}

	// This one MUST BE detected as WRONG:
	if ( M4.fromMatlabStringFormat(mat4, NULL /*don't dump errors to cerr*/) )
		GTEST_FAIL() << mat4;

	if (! M5.fromMatlabStringFormat(mat5) || size(M5,1)!=0 || size(M5,2)!=0 )
		GTEST_FAIL() << mat5;

	if (! M6.fromMatlabStringFormat(mat6) )
		GTEST_FAIL() << mat6;

	// Check correct values loaded:
	CMatrixDouble RES = M1*M2;

	EXPECT_NEAR(0,(M6 - M1*M2).array().square().sum(), 1e-3);
}
Code Example #21
    typename return_type<
      T_y,
      typename return_type<T_F,T_G,T_V,T_W,T_m0,T_C0>::type >::type
    gaussian_dlm_obs_log(const Eigen::Matrix<T_y,Eigen::Dynamic,Eigen::Dynamic>& y,
                     const Eigen::Matrix<T_F,Eigen::Dynamic,Eigen::Dynamic>& F,
                     const Eigen::Matrix<T_G,Eigen::Dynamic,Eigen::Dynamic>& G,
                     const Eigen::Matrix<T_V,Eigen::Dynamic,Eigen::Dynamic>& V,
                     const Eigen::Matrix<T_W,Eigen::Dynamic,Eigen::Dynamic>& W,
                     const Eigen::Matrix<T_m0,Eigen::Dynamic,1>& m0,
                     const Eigen::Matrix<T_C0,Eigen::Dynamic,Eigen::Dynamic>& C0) {
      static const char* function = "stan::prob::gaussian_dlm_obs_log(%1%)";
      typedef typename return_type<
        T_y,
        typename return_type<T_F,T_G,T_V,T_W,T_m0,T_C0>::type  >::type T_lp;
      T_lp lp(0.0);

      using stan::math::add;
      using stan::math::check_cov_matrix;
      using stan::math::check_finite;
      using stan::math::check_not_nan;
      using stan::math::check_size_match;
      using stan::math::check_spsd_matrix;
      using stan::math::inverse_spd;
      using stan::math::log_determinant_spd;
      using stan::math::multiply;
      using stan::math::quad_form_sym;
      using stan::math::subtract;
      using stan::math::trace_quad_form;
      using stan::math::transpose;

      int r = y.rows(); // number of variables
      int T = y.cols(); // number of observations
      int n = G.rows(); // number of states

      // check y
      if (!check_finite(function, y, "y", &lp))
        return lp;
      if (!check_not_nan(function, y, "y", &lp))
        return lp;
      // check F
      if (!check_size_match(function,
                            F.cols(), "columns of F",
                            y.rows(), "rows of y",
                            &lp))
        return lp;
      if (!check_size_match(function,
                            F.rows(), "rows of F",
                            G.rows(), "rows of G",
                            &lp))
        return lp;
      if (!check_finite(function, F, "F", &lp))
        return lp;
      if (!check_not_nan(function, F, "F", &lp))
        return lp;
      // check G
      if (!check_size_match(function,
                            G.rows(), "rows of G",
                            G.cols(), "columns of G",
                            &lp))
        return lp;
      if (!check_finite(function, G, "G", &lp))
        return lp;
      if (!check_not_nan(function, G, "G", &lp))
        return lp;
      // check V
      if (!check_size_match(function,
                            V.rows(), "rows of V",
                            y.rows(), "rows of y",
                            &lp))
        return lp;
      // TODO: incorporate support for infinite V
      if (!check_finite(function, V, "V", &lp))
        return lp;
      if (!check_not_nan(function, V, "V", &lp))
        return lp;
      if (!check_spsd_matrix(function, V, "V", &lp))
        return lp;
      // check W
      if (!check_size_match(function,
                            W.rows(), "rows of W",
                            G.rows(), "rows of G",
                            &lp))
        return lp;
      // TODO: incorporate support for infinite W
      if (!check_finite(function, W, "W", &lp))
        return lp;
      if (!check_not_nan(function, W, "W", &lp))
        return lp;
      if (!check_spsd_matrix(function, W, "W", &lp))
        return lp;
      // check m0
      if (!check_size_match(function,
                            m0.size(), "size of m0",
                            G.rows(), "rows of G",
                            &lp))
        return lp;
      if (!check_finite(function, m0, "m0", &lp))
        return lp;
      if (!check_not_nan(function, m0, "m0", &lp))
        return lp;
      // check C0
      if (!check_size_match(function,
                            C0.rows(), "rows of C0",
                            G.rows(), "rows of G",
                            &lp))
        return lp;
      if (!check_cov_matrix(function, C0, "C0", &lp))
        return lp;
      if (!check_finite(function, C0, "C0", &lp))
        return lp;
      if (!check_not_nan(function, C0, "C0", &lp))
        return lp;

      if (y.cols() == 0 || y.rows() == 0)
        return lp;

      if (include_summand<propto>::value) {
        lp -= 0.5 * LOG_TWO_PI * r * T;
      }

      if (include_summand<propto,T_y,T_F,T_G,T_V,T_W,T_m0,T_C0>::value) {
        Eigen::Matrix<T_lp,Eigen::Dynamic, 1> m(n);
        Eigen::Matrix<T_lp,Eigen::Dynamic, Eigen::Dynamic> C(n, n);

        // TODO: how to recast matrices
        for (int i = 0; i < m0.size(); i ++ ) {
          m(i) = m0(i);
        }
        for (int i = 0; i < C0.rows(); i ++ ) {
          for (int j = 0; j < C0.cols(); j ++ ) {
            C(i, j) = C0(i, j);
          }
        }

        Eigen::Matrix<typename return_type<T_y>::type,
                      Eigen::Dynamic, 1> yi(r);
        Eigen::Matrix<T_lp,Eigen::Dynamic, 1> a(n);
        Eigen::Matrix<T_lp,Eigen::Dynamic, Eigen::Dynamic> R(n, n);
        Eigen::Matrix<T_lp,Eigen::Dynamic, 1> f(r);
        Eigen::Matrix<T_lp,Eigen::Dynamic, Eigen::Dynamic> Q(r, r);
        Eigen::Matrix<T_lp,Eigen::Dynamic, Eigen::Dynamic> Q_inv(r, r);
        Eigen::Matrix<T_lp,Eigen::Dynamic, 1> e(r);
        Eigen::Matrix<T_lp,Eigen::Dynamic, Eigen::Dynamic> A(n, r);

        for (int i = 0; i < y.cols(); i++) {
          yi = y.col(i);
          // // Predict state
          // a_t = G_t m_{t-1}
          a = multiply(G, m);
          // R_t = G_t C_{t-1} G_t' + W_t
          R = add(quad_form_sym(C, transpose(G)), W);
          // // predict observation
          // f_t = F_t' a_t
          f = multiply(transpose(F), a);
          // Q_t = F'_t R_t F_t + V_t
          Q = add(quad_form_sym(R, F), V);
          Q_inv = inverse_spd(Q);
          // // filtered state
          // e_t = y_t - f_t
          e = subtract(yi, f);
          // A_t = R_t F_t Q^{-1}_t
          A = multiply(multiply(R, F), Q_inv);
          // m_t = a_t + A_t e_t
          m = add(a, multiply(A, e));
          // C = R_t - A_t Q_t A_t'
          C = subtract(R, quad_form_sym(Q, transpose(A)));
          lp -= 0.5 * (log_determinant_spd(Q) + trace_quad_form(Q_inv, e));
        }
      }
      return lp;
    }
Code Example #22
File: dump.hpp Project: actuariat/stan
 void dims(Eigen::Matrix<T,1,Eigen::Dynamic> rv,
           std::vector<size_t>& ds) {  // ds by reference, so the appended dimension reaches the caller
   ds.push_back(rv.size());
 }
Code Example #23
File: initialize_variable.hpp Project: javaosos/stan
 inline void initialize_variable(Eigen::Matrix<var,R,C>& matrix, const var& value) {
   for (int i = 0; i < matrix.size(); ++i)
     matrix(i) = value;
 }
Code Example #24
    typename boost::math::tools::promote_args<T_y,T_loc,T_covar>::type
    multi_normal_cholesky_log(const Eigen::Matrix<T_y,Eigen::Dynamic,1>& y,
                              const Eigen::Matrix<T_loc,Eigen::Dynamic,1>& mu,
                              const Eigen::Matrix<T_covar,Eigen::Dynamic,Eigen::Dynamic>& L) {
      static const char* function = "stan::prob::multi_normal_cholesky_log(%1%)";

      using stan::math::mdivide_left_tri_low;
      using stan::math::dot_self;
      using stan::math::multiply;
      using stan::math::subtract;
      using stan::math::sum;
      
      using stan::math::check_size_match;
      using stan::math::check_finite;
      using stan::math::check_not_nan;
      using stan::math::check_cov_matrix;
      using boost::math::tools::promote_args;

      typename promote_args<T_y,T_loc,T_covar>::type lp(0.0);

      if (!check_size_match(function, 
                            y.size(), "Size of random variable",
                            mu.size(), "size of location parameter",
                            &lp))
        return lp;
      if (!check_size_match(function, 
                            y.size(), "Size of random variable",
                            L.rows(), "rows of covariance parameter",
                            &lp))
        return lp;
      if (!check_size_match(function, 
                            y.size(), "Size of random variable",
                            L.cols(), "columns of covariance parameter",
                            &lp))
        return lp;
      if (!check_finite(function, mu, "Location parameter", &lp)) 
        return lp;
      if (!check_not_nan(function, y, "Random variable", &lp)) 
        return lp;

      if (y.rows() == 0)
        return lp;
      
      if (include_summand<propto>::value) 
        lp += NEG_LOG_SQRT_TWO_PI * y.rows();
      
      if (include_summand<propto,T_covar>::value) {
        Eigen::Matrix<T_covar,Eigen::Dynamic,1> L_log_diag = L.diagonal().array().log().matrix();
        lp -= sum(L_log_diag);
      }

      if (include_summand<propto,T_y,T_loc,T_covar>::value) {
        Eigen::Matrix<typename 
          boost::math::tools::promote_args<T_y,T_loc>::type,
          Eigen::Dynamic, 1> y_minus_mu(y.size());
        for (int i = 0; i < y.size(); i++)
          y_minus_mu(i) = y(i)-mu(i);
        Eigen::Matrix<typename 
          boost::math::tools::promote_args<T_covar,T_loc,T_y>::type,
          Eigen::Dynamic, 1> 
          half(mdivide_left_tri_low(L,y_minus_mu));
        // FIXME: this code does not compile. revert after fixing subtract()
        // Eigen::Matrix<typename 
        //               boost::math::tools::promote_args<T_covar,T_loc,T_y>::type,
        //               Eigen::Dynamic, 1> 
        //   half(mdivide_left_tri_low(L,subtract(y,mu)));
        lp -= 0.5 * dot_self(half);
      }
      return lp;
    }
Code Example #25
File: variance.hpp Project: HerraHuu/stan
 var variance(const Eigen::Matrix<var,R,C>& m) {
   stan::math::validate_nonzero_size(m,"variance");
   if (m.size() == 1) return 0;
   return calc_variance(m.size(), &m(0));
 }
Code Example #26
IGL_INLINE void igl::slice(
  const Eigen::SparseMatrix<TX>& X,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & C,
  Eigen::SparseMatrix<TY>& Y)
{
#if 1
  int xm = X.rows();
  int xn = X.cols();
  int ym = R.size();
  int yn = C.size();

  // special case when R or C is empty
  if(ym == 0 || yn == 0)
  {
    Y.resize(ym,yn);
    return;
  }

  assert(R.minCoeff() >= 0);
  assert(R.maxCoeff() < xm);
  assert(C.minCoeff() >= 0);
  assert(C.maxCoeff() < xn);

  // Build reindexing maps for columns and rows, -1 means not in map
  std::vector<std::vector<int> > RI;
  RI.resize(xm);
  for(int i = 0;i<ym;i++)
  {
    RI[R(i)].push_back(i);
  }
  std::vector<std::vector<int> > CI;
  CI.resize(xn);
  // initialize to -1
  for(int i = 0;i<yn;i++)
  {
    CI[C(i)].push_back(i);
  }
  // Resize output
  Eigen::DynamicSparseMatrix<TY, Eigen::RowMajor> dyn_Y(ym,yn);
  // Take a guess at the number of nonzeros (this assumes uniform distribution
  // not banded or heavily diagonal)
  dyn_Y.reserve((X.nonZeros()/(X.rows()*X.cols())) * (ym*yn));
  // Iterate over outside
  for(int k=0; k<X.outerSize(); ++k)
  {
    // Iterate over inside
    for(typename Eigen::SparseMatrix<TX>::InnerIterator it (X,k); it; ++it)
    {
      std::vector<int>::iterator rit, cit;
      for(rit = RI[it.row()].begin();rit != RI[it.row()].end(); rit++)
      {
        for(cit = CI[it.col()].begin();cit != CI[it.col()].end(); cit++)
        {
          dyn_Y.coeffRef(*rit,*cit) = it.value();
        }
      }
    }
  }
  Y = Eigen::SparseMatrix<TY>(dyn_Y);
#else

  // Alec: This is _not_ valid for arbitrary R,C since they don't necessarily
  // represent a strict permutation of the rows and columns: rows or
  // columns could be removed or replicated. The removal of rows seems to be
  // handled here (although it's not clear if there is a performance gain when
  // the #removals >> #remains). If this is sufficiently faster than the
  // correct code above, one could test whether all entries in R and C are
  // unique and apply the permutation version if appropriate.
  //

  int xm = X.rows();
  int xn = X.cols();
  int ym = R.size();
  int yn = C.size();

  // special case when R or C is empty
  if(ym == 0 || yn == 0)
  {
    Y.resize(ym,yn);
    return;
  }

  assert(R.minCoeff() >= 0);
  assert(R.maxCoeff() < xm);
  assert(C.minCoeff() >= 0);
  assert(C.maxCoeff() < xn);

  // initialize row and col permutation vectors
  Eigen::VectorXi rowIndexVec = Eigen::VectorXi::LinSpaced(xm,0,xm-1);
  Eigen::VectorXi rowPermVec  = Eigen::VectorXi::LinSpaced(xm,0,xm-1);
  for(int i=0;i<ym;i++)
  {
    int pos = rowIndexVec.coeffRef(R(i));
    if(pos != i)
    {
      int& val = rowPermVec.coeffRef(i);
      std::swap(rowIndexVec.coeffRef(val),rowIndexVec.coeffRef(R(i)));
      std::swap(rowPermVec.coeffRef(i),rowPermVec.coeffRef(pos));
    }
  }
  Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic,int> rowPerm(rowIndexVec);

  Eigen::VectorXi colIndexVec = Eigen::VectorXi::LinSpaced(xn,0,xn-1);
  Eigen::VectorXi colPermVec =  Eigen::VectorXi::LinSpaced(xn,0,xn-1);
  for(int i=0;i<yn;i++)
  {
    int pos = colIndexVec.coeffRef(C(i));
    if(pos != i)
    {
      int& val = colPermVec.coeffRef(i);
      std::swap(colIndexVec.coeffRef(val),colIndexVec.coeffRef(C(i)));
      std::swap(colPermVec.coeffRef(i),colPermVec.coeffRef(pos));
    }
  }
  Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic,int> colPerm(colPermVec);

  Eigen::SparseMatrix<TX> M = (rowPerm * X);
  Y = (M * colPerm).block(0,0,ym,yn);
#endif
}
Code Example #27
File: ransac_applications.cpp Project: gamman/MRPT
void mrpt::math::ransac_detect_3D_planes(
	const Eigen::Matrix<NUMTYPE,Eigen::Dynamic,1>  &x,
	const Eigen::Matrix<NUMTYPE,Eigen::Dynamic,1>  &y,
	const Eigen::Matrix<NUMTYPE,Eigen::Dynamic,1>  &z,
	vector<pair<size_t,TPlane> >   &out_detected_planes,
	const double           threshold,
	const size_t           min_inliers_for_valid_plane
	)
{
	MRPT_START

	ASSERT_(x.size()==y.size() && x.size()==z.size())

	out_detected_planes.clear();

	if (x.empty())
		return;

	// The running lists of remaining points after each plane, as a matrix:
	CMatrixTemplateNumeric<NUMTYPE> remainingPoints( 3, x.size() );
	remainingPoints.insertRow(0,x);
	remainingPoints.insertRow(1,y);
	remainingPoints.insertRow(2,z);


	// ---------------------------------------------
	// For each plane:
	// ---------------------------------------------
	for (;;)
	{
		mrpt::vector_size_t				this_best_inliers;
		CMatrixTemplateNumeric<NUMTYPE> this_best_model;

		math::RANSAC_Template<NUMTYPE>::execute(
			remainingPoints,
			ransac3Dplane_fit,
			ransac3Dplane_distance,
			ransac3Dplane_degenerate,
			threshold,
			3,  // Minimum set of points
			this_best_inliers,
			this_best_model,
			true, // Verbose
			0.999  // Prob. of good result
			);

		// Is this plane good enough?
		if (this_best_inliers.size()>=min_inliers_for_valid_plane)
		{
			// Add this plane to the output list:
			out_detected_planes.push_back(
				std::make_pair<size_t,TPlane>(
					this_best_inliers.size(),
					TPlane( this_best_model(0,0), this_best_model(0,1),this_best_model(0,2),this_best_model(0,3) )
					) );

			out_detected_planes.rbegin()->second.unitarize();

			// Discard the selected points so they are not used again for finding subsequent planes:
			remainingPoints.removeColumns(this_best_inliers);
		}
		else
		{
			break; // Do not search for more planes.
		}
	}

	MRPT_END
}
Code Example #28
    typename return_type<
      T_y,
      typename return_type<T_F,T_G,T_V,T_W,T_m0,T_C0>::type >::type
    gaussian_dlm_obs_log(const Eigen::Matrix<T_y,Eigen::Dynamic,Eigen::Dynamic>& y,
                     const Eigen::Matrix<T_F,Eigen::Dynamic,Eigen::Dynamic>& F,
                     const Eigen::Matrix<T_G,Eigen::Dynamic,Eigen::Dynamic>& G,
                     const Eigen::Matrix<T_V,Eigen::Dynamic,1>& V,
                     const Eigen::Matrix<T_W,Eigen::Dynamic,Eigen::Dynamic>& W,
                     const Eigen::Matrix<T_m0,Eigen::Dynamic,1>& m0,
                     const Eigen::Matrix<T_C0,Eigen::Dynamic,Eigen::Dynamic>& C0) {
      static const char* function = "stan::prob::gaussian_dlm_obs_log(%1%)";
      typedef typename return_type<
        T_y,
        typename return_type<T_F,T_G,T_V,T_W,T_m0,T_C0>::type  >::type T_lp;
      T_lp lp(0.0);

      using stan::math::add;
      using stan::math::check_cov_matrix;
      using stan::math::check_finite;
      using stan::math::check_nonnegative;
      using stan::math::check_not_nan;
      using stan::math::check_size_match;
      using stan::math::check_spsd_matrix;
      using stan::math::dot_product;
      using stan::math::multiply;
      using stan::math::quad_form_sym;
      using stan::math::subtract;
      using stan::math::tcrossprod;
      using stan::math::trace_quad_form;
      using stan::math::transpose;

      int r = y.rows(); // number of variables
      int T = y.cols(); // number of observations
      int n = G.rows(); // number of states

      // check y
      if (!check_finite(function, y, "y", &lp))
        return lp;
      if (!check_not_nan(function, y, "y", &lp))
        return lp;
      // check F
      if (!check_size_match(function,
                            F.cols(), "columns of F",
                            y.rows(), "rows of y",
                            &lp))
        return lp;
      if (!check_size_match(function,
                            F.rows(), "rows of F",
                            G.rows(), "rows of G",
                            &lp))
        return lp;
      if (!check_finite(function, F, "F", &lp))
        return lp;
      if (!check_not_nan(function, F, "F", &lp))
        return lp;
      // check G
      if (!check_size_match(function,
                            G.rows(), "rows of G",
                            G.cols(), "columns of G",
                            &lp))
        return lp;
      if (!check_finite(function, G, "G", &lp))
        return lp;
      if (!check_not_nan(function, G, "G", &lp))
        return lp;
      // check V
      if (!check_nonnegative(function, V, "V", &lp))
        return lp;
      if (!check_size_match(function,
                            V.size(), "size of V",
                            y.rows(), "rows of y",
                            &lp))
        return lp;
      // TODO: support infinite V
      if (!check_finite(function, V, "V", &lp))
        return lp;
      if (!check_not_nan(function, V, "V", &lp))
        return lp;
      // check W
      if (!check_spsd_matrix(function, W, "W", &lp))
        return lp;
      if (!check_size_match(function,
                            W.rows(), "rows of W",
                            G.rows(), "rows of G",
                            &lp))
        return lp;
      // TODO: support infinite W
      if (!check_finite(function, W, "W", &lp))
        return lp;
      if (!check_not_nan(function, W, "W", &lp))
        return lp;
      // check m0
      if (!check_size_match(function,
                            m0.size(), "size of m0",
                            G.rows(), "rows of G",
                            &lp))
        return lp;
      if (!check_finite(function, m0, "m0", &lp))
        return lp;
      if (!check_not_nan(function, m0, "m0", &lp))
        return lp;
      // check C0
      if (!check_cov_matrix(function, C0, "C0", &lp))
        return lp;
      if (!check_size_match(function,
                            C0.rows(), "rows of C0",
                            G.rows(), "rows of G",
                            &lp))
        return lp;
      if (!check_finite(function, C0, "C0", &lp))
        return lp;
      if (!check_not_nan(function, C0, "C0", &lp))
        return lp;

      if (y.cols() == 0 || y.rows() == 0)
        return lp;

      if (include_summand<propto>::value) {
        lp += 0.5 * NEG_LOG_TWO_PI * r * T;
      }

      if (include_summand<propto,T_y,T_F,T_G,T_V,T_W,T_m0,T_C0>::value) {
        T_lp f;
        T_lp Q;
        T_lp Q_inv;
        T_lp e;
        Eigen::Matrix<T_lp, Eigen::Dynamic, 1> A(n);
        Eigen::Matrix<T_lp, Eigen::Dynamic, 1> Fj(n);
        Eigen::Matrix<T_lp, Eigen::Dynamic, 1> m(n);
        Eigen::Matrix<T_lp, Eigen::Dynamic, Eigen::Dynamic> C(n, n);

        // TODO: how to recast matrices
        for (int i = 0; i < m0.size(); i ++ ) {
          m(i) = m0(i);
        }
        for (int i = 0; i < C0.rows(); i ++ ) {
          for (int j = 0; j < C0.cols(); j ++ ) {
            C(i, j) = C0(i, j);
          }
        }

        for (int i = 0; i < y.cols(); i++) {
          // Predict state
          // reuse m and C instead of using a and R
          m = multiply(G, m);
          C = add(quad_form_sym(C, transpose(G)), W);
          for (int j = 0; j < y.rows(); ++j) {
            // predict observation
            T_lp yij(y(j, i));
            // dim Fj = (n, 1)
            for (int k = 0; k < F.rows(); ++k) {
              Fj(k) = F(k, j);
            }
            // // f_{t,i} = F_{t,i}' m_{t,i-1}
            f = dot_product(Fj, m);
            Q = trace_quad_form(C, Fj) + V(j);
            Q_inv = 1.0 / Q;
            // // filtered observation
            // // e_{t,i} = y_{t,i} - f_{t,i}
            e = yij - f;
            // // A_{t,i} = C_{t,i-1} F_{t,i} Q_{t,i}^{-1}
            A = multiply(multiply(C, Fj), Q_inv);
            // // m_{t,i} = m_{t,i-1} + A_{t,i} e_{t,i}
            m += multiply(A, e);
            // // c_{t,i} = C_{t,i-1} - Q_{t,i} A_{t,i} A_{t,i}'
            // // // tcrossprod throws an error (ambiguous)
            // C = subtract(C, multiply(Q, tcrossprod(A)));
            C -= multiply(Q, multiply(A, transpose(A)));
            C = 0.5 * add(C, transpose(C));
            lp -= 0.5 * (log(Q) + pow(e, 2) * Q_inv);
          }
        }
      }
      return lp;
    }
Code Example #29
File: max.hpp Project: alyst/math
 inline T max(const Eigen::Matrix<T, R, C>& m) {
   if (m.size() == 0)
     return -std::numeric_limits<double>::infinity();
   return m.maxCoeff();
 }
Code Example #30
    typename boost::math::tools::promote_args<T_y,T_loc,T_covar>::type
    multi_normal_cholesky_log(const Eigen::Matrix<T_y,Eigen::Dynamic,Eigen::Dynamic>& y,
                              const Eigen::Matrix<T_loc,Eigen::Dynamic,1>& mu,
                              const Eigen::Matrix<T_covar,Eigen::Dynamic,Eigen::Dynamic>& L) {
      static const char* function = "stan::prob::multi_normal_cholesky_log(%1%)";

      using stan::math::mdivide_left_tri_low;
      using stan::math::columns_dot_self;
      using stan::math::multiply;
      using stan::math::subtract;
      using stan::math::sum;
      using stan::math::log;
      
      using stan::math::check_size_match;
      using stan::math::check_finite;
      using stan::math::check_not_nan;
      using stan::math::check_cov_matrix;
      using boost::math::tools::promote_args;

      typename promote_args<T_y,T_loc,T_covar>::type lp(0.0);

      if (!check_size_match(function, 
                            y.cols(), "Columns of random variable",
                            mu.rows(), "rows of location parameter",
                            &lp))
        return lp;
      if (!check_size_match(function, 
                            y.cols(), "Columns of random variable",
                            L.rows(), "rows of covariance parameter",
                            &lp))
        return lp;
      if (!check_size_match(function, 
                            y.cols(), "Columns of random variable",
                            L.cols(), "columns of covariance parameter",
                            &lp))
        return lp;
      if (!check_finite(function, mu, "Location parameter", &lp)) 
        return lp;
      if (!check_not_nan(function, y, "Random variable", &lp)) 
        return lp;

      if (y.cols() == 0)
        return lp;

      if (include_summand<propto>::value) 
        lp += NEG_LOG_SQRT_TWO_PI * y.cols() * y.rows();

      if (include_summand<propto,T_covar>::value) {
        Eigen::Matrix<T_covar,Eigen::Dynamic,1> L_log_diag = L.diagonal().array().log().matrix();
        lp -= sum(L_log_diag) * y.rows();
      }

      if (include_summand<propto,T_y,T_loc,T_covar>::value) {
        Eigen::Matrix<T_loc, Eigen::Dynamic, Eigen::Dynamic> MU(y.rows(),y.cols());
        for (typename Eigen::Matrix<T_loc, Eigen::Dynamic, Eigen::Dynamic>::size_type i = 0; 
             i < y.rows(); i++)
          MU.row(i) = mu;
  
        Eigen::Matrix<typename
          boost::math::tools::promote_args<T_loc,T_y>::type,
          Eigen::Dynamic,Eigen::Dynamic>
          y_minus_MU(y.rows(), y.cols());
        for (int i = 0; i < y.size(); i++)
          y_minus_MU(i) = y(i)-MU(i);

        Eigen::Matrix<typename 
          boost::math::tools::promote_args<T_loc,T_y>::type,
          Eigen::Dynamic,Eigen::Dynamic> 
          z(y_minus_MU.transpose()); // was = 
        
        // FIXME: revert this code when subtract() is fixed.
        // Eigen::Matrix<typename 
        //               boost::math::tools::promote_args<T_loc,T_y>::type,
        //               Eigen::Dynamic,Eigen::Dynamic> 
        //   z(subtract(y,MU).transpose()); // was = 
                
        Eigen::Matrix<typename 
          boost::math::tools::promote_args<T_covar,T_loc,T_y>::type,
          Eigen::Dynamic,Eigen::Dynamic> 
          half(mdivide_left_tri_low(L,z));
          
        lp -= 0.5 * sum(columns_dot_self(half));
      }
      return lp;
    }