Code Example #1
File: SOGP.cpp  Project: nttputus/MLvisualdemos
//Log probability of this data
double SOGP::log_prob(const ColumnVector& in, const ColumnVector& out){
  static const double ls2pi= log(sqrt(2*M_PI));  //Only compute once
  double sigma;
  double out2;
  if(current_size == 0){         //mu = zero, sigma = kappa.
    sigma=sqrt(m_params.m_kernel->kstar(in)+m_params.s20); //Is this right?  V_0=kstar, v_1 = s20
    out2=out.SumSquare();
  }    
  else{
    ColumnVector mu = predict(in,sigma);
    mu-=out;
    out2=mu.SumSquare();
  }
  return(-ls2pi -log(sigma) -.5*out2/(sigma*sigma));
}
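For a single output dimension, the value returned above is the log-density of a Gaussian with predictive mean mu and standard deviation sigma, evaluated at the residual; in the empty-model branch the mean is zero and sigma^2 = kstar(in) + s20. As a formula (my notation, not from the source):

\log p(y \mid x) = -\log\sqrt{2\pi} \;-\; \log\sigma \;-\; \frac{(y-\mu)^2}{2\sigma^2}

For vector-valued out the code sums the squared residuals into out2 and uses that sum in the last term.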
Code Example #2
File: hholder.cpp  Project: CalebVDW/smr-motion
// Matrix A's first n columns are orthonormal
// so A.Columns(1,n).t() * A.Columns(1,n) is the identity matrix.
// Fill out the remaining columns of A to make them orthonormal
// so A.t() * A is the identity matrix 
void extend_orthonormal(Matrix& A, int n)
{
   REPORT
   Tracer et("extend_orthonormal");
   int nr = A.nrows(); int nc = A.ncols();
   if (nc > nr) Throw(IncompatibleDimensionsException(A));
   if (n > nc) Throw(IncompatibleDimensionsException(A));
   ColumnVector SSR;
   { Matrix A1 = A.Columns(1,n); SSR = A1.sum_square_rows(); }
   for (int i = n; i < nc; ++i)
   {
      // pick row with smallest SSQ
      int k; SSR.minimum1(k);
      // orthogonalise column with 1 at element k, 0 elsewhere
      // next line is rather inefficient
      ColumnVector X = - A.Columns(1, i) * A.SubMatrix(k, k, 1, i).t();
      X(k) += 1.0;
      // normalise
      X /= sqrt(X.SumSquare());
      // update row sums of squares
      for (k = 1; k <= nr; ++k) SSR(k) += square(X(k));
      // load new column into matrix
      A.Column(i+1) = X;
   }
}
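A minimal usage sketch (my own, not from the source project), assuming the Newmat headers are on the include path and extend_orthonormal is declared as above: start with a matrix whose first column is already a unit vector and let the function fill in the rest.

#include <iostream>
#include <iomanip>
#include "newmat.h"
#include "newmatio.h"
using namespace std;

void demo_extend_orthonormal()
{
   Matrix A(4, 2); A = 0.0;
   A(1, 1) = 1.0;                 // column 1 is the unit vector e1, hence already orthonormal
   extend_orthonormal(A, 1);      // fill column 2 so that A.t() * A is the identity
   cout << setw(9) << setprecision(3) << (A.t() * A);   // expect the 2x2 identity matrix
}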
Code Example #3
File: example.cpp  Project: 151706061/sofa
void test1(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
   cout << "\n\nTest 1 - traditional, bad\n";

   // traditional sum of squares and products method of calculation
   // but not adjusting means; may be subject to round-off error

   // make matrix of predictor values with 1s into col 1 of matrix
   int npred1 = npred+1;        // number of cols including col of ones.
   Matrix X(nobs,npred1);
   X.Column(1) = 1.0;

   // load x1 and x2 into X
   //    [use << rather than = when loading arrays]
   X.Column(2) << x1;  X.Column(3) << x2;

   // vector of Y values
   ColumnVector Y(nobs); Y << y;

   // form sum of squares and product matrix
   //    [use << rather than = for copying Matrix into SymmetricMatrix]
   SymmetricMatrix SSQ; SSQ << X.t() * X;

   // calculate estimate
   //    [bracket last two terms to force this multiplication first]
   //    [ .i() means inverse, but inverse is not explicitly calculated]
   ColumnVector A = SSQ.i() * (X.t() * Y);

   // Get variances of estimates from diagonal elements of inverse of SSQ
   // get inverse of SSQ - we need it for finding D
   DiagonalMatrix D; D << SSQ.i();
   ColumnVector V = D.AsColumn();

   // Calculate fitted values and residuals
   ColumnVector Fitted = X * A;
   ColumnVector Residual = Y - Fitted;
   Real ResVar = Residual.SumSquare() / (nobs-npred1);

   // Get diagonals of Hat matrix (an expensive way of doing this)
   DiagonalMatrix Hat;  Hat << X * (X.t() * X).i() * X.t();

   // print out answers
   cout << "\nEstimates and their standard errors\n\n";

   // make vector of standard errors
   ColumnVector SE(npred1);
   for (int i=1; i<=npred1; i++) SE(i) = sqrt(V(i)*ResVar);
   // use concatenation function to form matrix and use matrix print
   // to get two columns
   cout << setw(11) << setprecision(5) << (A | SE) << endl;

   cout << "\nObservations, fitted value, residual value, hat value\n";

   // use concatenation again; select only columns 2 to 3 of X
   cout << setw(9) << setprecision(3) <<
     (X.Columns(2,3) | Y | Fitted | Residual | Hat.AsColumn());
   cout << "\n\n";
}
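In matrix form, test1 solves the unadjusted normal equations directly; with n = nobs observations and npred predictors plus a column of ones:

\hat A = (X^\top X)^{-1} X^\top Y, \qquad \widehat{\operatorname{Var}}(\hat A_i) = \hat\sigma^2\,[(X^\top X)^{-1}]_{ii}, \qquad \hat\sigma^2 = \frac{\lVert Y - X\hat A\rVert^2}{nobs - npred - 1}

and the hat (leverage) values printed in the last column are the diagonal of X (X^\top X)^{-1} X^\top.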
Code Example #4
/** 
 * Compute a distance metric between two columns of a
 * matrix. <b>Note that the indexes are *1* based (not 0) as that is
 * Newmat's convention</b>. Note that dist(M,i,j) must equal dist(M,j,i);
 * 
 * @param M - Matrix whose columns represent individual items to be clustered.
 * @param col1Ix - Column index to be compared (1 based).
 * @param col2Ix - Column index to be compared (1 based).
 * 
 * @return - "Distance" or "dissimilarity" metric between two columns of matrix.
 */
double GuassianRadial::dist(const Matrix &M, int col1Ix, int col2Ix) const {
  double dist = 0;
  if(col1Ix == col2Ix) 
    return 0;
  ColumnVector V = M.Column(col1Ix) - M.Column(col2Ix);
  dist = V.SumSquare() / (2 * m_Sigma * m_Sigma);
  dist = exp(-1 * dist);
  return dist;
}
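For col1Ix != col2Ix, the value computed above is the Gaussian radial-basis similarity between the two columns, with sigma = m_Sigma:

\operatorname{dist}(M, i, j) = \exp\!\left(-\,\frac{\lVert M_{\cdot i} - M_{\cdot j}\rVert^2}{2\sigma^2}\right)

so, despite the name, values near 1 indicate very similar columns and values near 0 very dissimilar ones.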
Code Example #5
File: example.cpp  Project: 151706061/sofa
void test3(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
   cout << "\n\nTest 3 - Cholesky\n";

   // traditional sum of squares and products method of calculation
   // with subtraction of means - using Cholesky decomposition

   Matrix X(nobs,npred);
   X.Column(1) << x1;  X.Column(2) << x2;
   ColumnVector Y(nobs); Y << y;
   ColumnVector Ones(nobs); Ones = 1.0;
   RowVector M = Ones.t() * X / nobs;
   Matrix XC(nobs,npred);
   XC = X - Ones * M;
   ColumnVector YC(nobs);
   Real m = Sum(Y) / nobs;  YC = Y - Ones * m;
   SymmetricMatrix SSQ; SSQ << XC.t() * XC;

   // Cholesky decomposition of SSQ
   LowerTriangularMatrix L = Cholesky(SSQ);

   // calculate estimate
   ColumnVector A = L.t().i() * (L.i() * (XC.t() * YC));

   // calculate estimate of constant term
   Real a = m - (M * A).AsScalar();

   // Get variances of estimates from diagonal elements of inverse of SSQ
   DiagonalMatrix D; D << L.t().i() * L.i();
   ColumnVector V = D.AsColumn();
   Real v = 1.0/nobs + (L.i() * M.t()).SumSquare();

   // Calculate fitted values and residuals
   int npred1 = npred+1;
   ColumnVector Fitted = X * A + a;
   ColumnVector Residual = Y - Fitted;
   Real ResVar = Residual.SumSquare() / (nobs-npred1);

   // Get diagonals of Hat matrix (an expensive way of doing this)
   Matrix X1(nobs,npred1); X1.Column(1)<<Ones; X1.Columns(2,npred1)<<X;
   DiagonalMatrix Hat;  Hat << X1 * (X1.t() * X1).i() * X1.t();

   // print out answers
   cout << "\nEstimates and their standard errors\n\n";
   cout.setf(ios::fixed, ios::floatfield);
   cout << setw(11) << setprecision(5)  << a << " ";
   cout << setw(11) << setprecision(5)  << sqrt(v*ResVar) << endl;
   ColumnVector SE(npred);
   for (int i=1; i<=npred; i++) SE(i) = sqrt(V(i)*ResVar);
   cout << setw(11) << setprecision(5) << (A | SE) << endl;
   cout << "\nObservations, fitted value, residual value, hat value\n";
   cout << setw(9) << setprecision(3) <<
      (X | Y | Fitted | Residual | Hat.AsColumn());
   cout << "\n\n";
}
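The Cholesky step factors the centred sum-of-squares matrix as SSQ = L L^\top with L lower triangular, so the normal equations are solved by two triangular solves instead of an explicit inverse:

\hat A = (L^\top)^{-1} L^{-1} (X_C^\top Y_C), \qquad [(X_C^\top X_C)^{-1}]_{ii} = [(L^\top)^{-1} L^{-1}]_{ii}

which is exactly what the lines computing A and D above do.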
Code Example #6
File: example.cpp  Project: 151706061/sofa
void test5(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
   cout << "\n\nTest 5 - singular value\n";

   // Singular value decomposition method
 
   // load data - 1s into col 1 of matrix
   int npred1 = npred+1;
   Matrix X(nobs,npred1); ColumnVector Y(nobs);
   X.Column(1) = 1.0;  X.Column(2) << x1;  X.Column(3) << x2;  Y << y;

   // do SVD
   Matrix U, V; DiagonalMatrix D;
   SVD(X,D,U,V);                              // X = U * D * V.t()
   ColumnVector Fitted = U.t() * Y;
   ColumnVector A = V * ( D.i() * Fitted );
   Fitted = U * Fitted;
   ColumnVector Residual = Y - Fitted;
   Real ResVar = Residual.SumSquare() / (nobs-npred1);

   // get variances of estimates
   D << V * (D * D).i() * V.t();

   // Get diagonals of Hat matrix
   DiagonalMatrix Hat;  Hat << U * U.t();

   // print out answers
   cout << "\nEstimates and their standard errors\n\n";
   ColumnVector SE(npred1);
   for (int i=1; i<=npred1; i++) SE(i) = sqrt(D(i)*ResVar);
   cout << setw(11) << setprecision(5) << (A | SE) << endl;
   cout << "\nObservations, fitted value, residual value, hat value\n";
   cout << setw(9) << setprecision(3) << 
      (X.Columns(2,3) | Y | Fitted | Residual | Hat.AsColumn());
   cout << "\n\n";
}
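With the thin SVD X = U D V^\top (U having orthonormal columns), the quantities used above simplify to:

\hat A = V D^{-1} U^\top Y, \qquad (X^\top X)^{-1} = V D^{-2} V^\top, \qquad \operatorname{Hat} = X (X^\top X)^{-1} X^\top = U U^\top

which is why the hat diagonals can be read off U * U.t() and the coefficient variances off the diagonal of V * (D * D).i() * V.t().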
Code Example #7
File: example.cpp  Project: 151706061/sofa
void test2(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
   cout << "\n\nTest 2 - traditional, OK\n";

   // traditional sum of squares and products method of calculation
   // with subtraction of means - less subject to round-off error
   // than test1

   // make matrix of predictor values
   Matrix X(nobs,npred);

   // load x1 and x2 into X
   //    [use << rather than = when loading arrays]
   X.Column(1) << x1;  X.Column(2) << x2;

   // vector of Y values
   ColumnVector Y(nobs); Y << y;

   // make vector of 1s
   ColumnVector Ones(nobs); Ones = 1.0;

   // calculate means (averages) of x1 and x2 [ .t() takes transpose]
   RowVector M = Ones.t() * X / nobs;

   // and subtract means from x1 and x2
   Matrix XC(nobs,npred);
   XC = X - Ones * M;

   // do the same to Y [use Sum to get sum of elements]
   ColumnVector YC(nobs);
   Real m = Sum(Y) / nobs;  YC = Y - Ones * m;

   // form sum of squares and product matrix
   //    [use << rather than = for copying Matrix into SymmetricMatrix]
   SymmetricMatrix SSQ; SSQ << XC.t() * XC;

   // calculate estimate
   //    [bracket last two terms to force this multiplication first]
   //    [ .i() means inverse, but inverse is not explicitly calculated]
   ColumnVector A = SSQ.i() * (XC.t() * YC);

   // calculate estimate of constant term
   //    [AsScalar converts 1x1 matrix to Real]
   Real a = m - (M * A).AsScalar();

   // Get variances of estimates from diagonal elements of inverse of SSQ
   //    [ we are taking inverse of SSQ - we need it for finding D ]
   Matrix ISSQ = SSQ.i(); DiagonalMatrix D; D << ISSQ;
   ColumnVector V = D.AsColumn();
   Real v = 1.0/nobs + (M * ISSQ * M.t()).AsScalar();  // for the variance of the constant term

   // Calculate fitted values and residuals
   int npred1 = npred+1;
   ColumnVector Fitted = X * A + a;
   ColumnVector Residual = Y - Fitted;
   Real ResVar = Residual.SumSquare() / (nobs-npred1);

   // Get diagonals of Hat matrix (an expensive way of doing this)
   Matrix X1(nobs,npred1); X1.Column(1)<<Ones; X1.Columns(2,npred1)<<X;
   DiagonalMatrix Hat;  Hat << X1 * (X1.t() * X1).i() * X1.t();

   // print out answers
   cout << "\nEstimates and their standard errors\n\n";
   cout.setf(ios::fixed, ios::floatfield);
   cout << setw(11) << setprecision(5)  << a << " ";
   cout << setw(11) << setprecision(5)  << sqrt(v*ResVar) << endl;
   // make vector of standard errors
   ColumnVector SE(npred);
   for (int i=1; i<=npred; i++) SE(i) = sqrt(V(i)*ResVar);
   // use concatenation function to form matrix and use matrix print
   // to get two columns
   cout << setw(11) << setprecision(5) << (A | SE) << endl;
   cout << "\nObservations, fitted value, residual value, hat value\n";
   cout << setw(9) << setprecision(3) <<
     (X | Y | Fitted | Residual | Hat.AsColumn());
   cout << "\n\n";
}
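Writing \bar x for the vector of predictor means M and \bar y for m, the centred computation in test2 (and test3) recovers the same fit as test1 but with better numerical conditioning:

\hat A = (X_C^\top X_C)^{-1} X_C^\top Y_C, \qquad \hat a = \bar y - \bar x^\top \hat A, \qquad \widehat{\operatorname{Var}}(\hat a) = \hat\sigma^2 \left( \frac{1}{nobs} + \bar x^\top (X_C^\top X_C)^{-1} \bar x \right)

the last expression being exactly the quantity v computed above.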