Example #1
double MultiKernel::calculate(arma::mat &x, int r1, arma::mat &x2, int r2){
    int dim1 = x.n_rows;
    int dim2 = x2.n_rows;
    int l = 0, h = 0;
    double result = 0;

    for (size_t i = 0; i < features.size(); i++) {
        h += features[i]->calculateFeatureDimension();

        // Extract the column block belonging to feature i from both inputs.
        arma::mat x_1_part = x.submat(0, l, dim1 - 1, h - 1);
        arma::mat x_2_part = x2.submat(0, l, dim2 - 1, h - 1);

        // Sum the per-feature kernel evaluations.
        result += this->kernels[i]->calculate(x_1_part, r1, x_2_part, r2);
        l = h;
    }

    return result;
}
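A minimal sketch of the column-block slicing this loop performs, using made-up feature widths; note that submat(first_row, first_col, last_row, last_col) takes inclusive corner indices:

#include <armadillo>
#include <vector>

int main() {
    arma::mat x(4, 6, arma::fill::randu);  // 4 samples, 6 feature columns
    std::vector<int> widths = {2, 3, 1};   // hypothetical per-feature dimensions

    int l = 0, h = 0;
    for (size_t i = 0; i < widths.size(); ++i) {
        h += widths[i];
        // Columns [l, h-1] of every row: the block belonging to feature i.
        arma::mat block = x.submat(0, l, x.n_rows - 1, h - 1);
        block.print("block");
        l = h;
    }
    return 0;
}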
Example #2
/**
 * The CrossValidation function runs k-fold cross validation on the training
 * data by dividing it into k equal disjoint subsets. The model is trained on
 * k-1 of these subsets and the remaining subset is used as validation data.
 *
 * This process is repeated k times, so that each subset serves as the
 * validation data exactly once.
 *
 * @param trainData The data available for training.
 * @param trainLabels The labels corresponding to the training data.
 * @param k The parameter k in k-fold cross validation.
 * @param hiddenLayerSize Hidden layer size.
 * @param maxEpochs Maximum number of epochs.
 * @param trainError Error of predictions on training data.
 * @param validationError Validation error of predictions.
 */
void CrossValidation(arma::mat& trainData,
                     const arma::mat& trainLabels,
                     const size_t k,
                     const size_t hiddenLayerSize,
                     const size_t maxEpochs,
                     double& trainError,
                     double& validationError)
{
  // Number of datapoints in each subset in K-fold CV.
  size_t validationDataSize = trainData.n_cols / k;
  trainError = validationError = 0.0;

  for (size_t i = 0; i < trainData.n_cols; i = i + validationDataSize)
  {
    validationDataSize = trainData.n_cols / k;

    // The collection of the k-1 subsets to be used in training in a particular
    // iteration.
    arma::mat validationTrainData(trainData.n_rows, trainData.n_cols);

    // The labels corresponding to training data.
    arma::mat validationTrainLabels(trainLabels.n_rows, trainLabels.n_cols);

    // The data subset which is used as validation data in a particular
    // iteration.
    arma::mat validationTestData(trainData.n_rows, validationDataSize);

    // The labels corresponding to the validation data.
    arma::mat validationTestLabels(trainLabels.n_rows, validationDataSize);

    if (i + validationDataSize > trainData.n_cols)
    {
      validationDataSize = trainData.n_cols - i;
    }

    validationTestData = trainData.submat(0, i, trainData.n_rows - 1,
        i + validationDataSize - 1);

    validationTestLabels = trainLabels.submat(0, i, trainLabels.n_rows - 1,
        i + validationDataSize - 1);

    validationTrainData = trainData;
    validationTrainData.shed_cols(i, i + validationDataSize - 1);

    validationTrainLabels = trainLabels;
    validationTrainLabels.shed_cols(i, i + validationDataSize - 1);

    double tError, vError;

    BuildVanillaNetwork(validationTrainData, validationTrainLabels,
        validationTestData, validationTestLabels, hiddenLayerSize, maxEpochs,
        validationTrainLabels.n_rows, tError, vError);

    trainError += tError;
    validationError += vError;
  }

  trainError /= k;
  validationError /= k;
}
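Example #3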
  void EvalParams(arma::mat const &parameters, size_t l1, size_t l2, size_t l3,
                  std::true_type /* unused */)
  {
    // w1, w2, b1 and b2 are not extracted separately, 'parameters' is directly
    // used in their place to avoid copying data. The following representations
    // are used:
    // w1 <- parameters.submat(0, 0, l1-1, l2-1)
    // w2 <- parameters.submat(l1, 0, l3-1, l2-1).t()
    // b1 <- parameters.submat(0, l2, l1-1, l2)
    // b2 <- parameters.submat(l3, 0, l3, l2-1).t()

    // Compute activations of the hidden and output layers.
    arma::mat tempInput = parameters.submat(0, 0, l1 - 1, l2 - 1) * data +
                          arma::repmat(parameters.submat(0, l2, l1 - 1, l2), 1, data.n_cols);
    hiddenLayerFunc.Forward(tempInput,
                            hiddenLayer);

    tempInput = parameters.submat(l1, 0, l3 - 1, l2 - 1).t() * hiddenLayer +
                arma::repmat(parameters.submat(l3, 0, l3, l2 - 1).t(), 1, data.n_cols);
    outputLayerFunc.Forward(tempInput,
                            outputLayer);

    // Average activations of the hidden layer.
    rhoCap = arma::sum(hiddenLayer, 1) / static_cast<double>(data.n_cols);
    // Difference between the reconstructed data and the original data.
    diff = outputLayer - data;
  }
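The packing convention documented in the comment above can be exercised directly. A small sketch with hypothetical sizes (l1 hidden units, l2 visible units, and l3 = 2 * l1, which is an assumption of this sketch):

#include <armadillo>

int main() {
  const arma::uword l1 = 3, l2 = 5, l3 = 2 * l1;
  arma::mat parameters(l3 + 1, l2 + 1, arma::fill::randu);

  // The four views named in the comment above.
  arma::mat w1 = parameters.submat(0, 0, l1 - 1, l2 - 1);       // l1 x l2
  arma::mat w2 = parameters.submat(l1, 0, l3 - 1, l2 - 1).t();  // l2 x (l3 - l1)
  arma::mat b1 = parameters.submat(0, l2, l1 - 1, l2);          // l1 x 1
  arma::mat b2 = parameters.submat(l3, 0, l3, l2 - 1).t();      // l2 x 1

  w1.print("w1");
  b2.print("b2");
  return 0;
}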
Example #4
/**
 * The AvgCrossValidation function takes a dataset, runs CrossValidation "iter"
 * times, and returns the average training and validation error. It shuffles
 * the dataset in every iteration.
 *
 * @param dataset The dataset inclusive of the labels. It is assumed that the
 *                last "numLabels" rows are the labels which the model has to
 *                predict.
 * @param numLabels Number of rows which are the output labels in the dataset.
 * @param iter The number of times cross validation has to be run.
 * @param hiddenLayerSize The number of nodes in the hidden layer.
 * @param maxEpochs The maximum number of epochs for the training.
 * @param avgTrainError Average training error.
 * @param avgValidationError Average validation error.
 */
void AvgCrossValidation(arma::mat& dataset,
                        const size_t numLabels,
                        const size_t iter,
                        const size_t hiddenLayerSize,
                        const size_t maxEpochs,
                        double& avgTrainError,
                        double& avgValidationError)
{
  avgValidationError = avgTrainError = 0.0;

  for (size_t i = 0; i < iter; ++i)
  {
    dataset = arma::shuffle(dataset, 1);

    arma::mat trainData = dataset.submat(0, 0, dataset.n_rows - 1 - numLabels,
        dataset.n_cols - 1);
    arma::mat trainLabels = dataset.submat(dataset.n_rows - numLabels, 0,
        dataset.n_rows - 1, dataset.n_cols - 1);

    double trainError, validationError;
    CrossValidation(trainData, trainLabels, 10, hiddenLayerSize, maxEpochs,
        trainError, validationError);

    avgTrainError += trainError;
    avgValidationError += validationError;
  }

  avgTrainError /= iter;
  avgValidationError /= iter;
}
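A hedged usage sketch for the two helpers above. It assumes they are compiled in the same translation unit together with the BuildVanillaNetwork routine they depend on; the dataset shape and hyperparameters are made up:

#include <armadillo>
#include <iostream>

int main() {
  // Hypothetical dataset: 4 feature rows plus 1 label row, 100 samples.
  arma::mat dataset(5, 100, arma::fill::randu);

  double avgTrainError = 0.0, avgValidationError = 0.0;
  // 1 label row, 5 repetitions, 8 hidden nodes, 50 epochs (all made up).
  AvgCrossValidation(dataset, 1, 5, 8, 50, avgTrainError, avgValidationError);

  std::cout << "train: " << avgTrainError
            << " validation: " << avgValidationError << std::endl;
  return 0;
}
Example #5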
void subspaceIdMoor::buildRhsLhsMatrix(arma::mat const &gam_inv, arma::mat const &gamm_inv, arma::mat const &R_,
	arma::uword i, arma::uword n, arma::uword ny, arma::uword nu, arma::mat &RHS, arma::mat &LHS){
	mat RhsUpper = join_horiz(gam_inv * R_.submat((2 * nu + ny)*i, 0, 2 * (nu + ny)*i - 1, (2 * nu + ny)*i - 1), zeros(n, ny));
	mat RhsLower = R_.submat(nu*i, 0, 2 * nu*i - 1, (2 * nu + ny)*i + ny - 1);
	RHS = join_vert(RhsUpper, RhsLower);
	mat LhsUpper = gamm_inv*R_.submat((2 * nu + ny)*i + ny, 0, 2 * (nu + ny)*i - 1, (2 * nu + ny)*i + ny - 1);
	mat LhsLower = R_.submat((2 * nu + ny)*i, 0, (2 * nu + ny)*i + ny - 1, (2 * nu + ny)*i + ny - 1);
	LHS = join_vert(LhsUpper, LhsLower);
}
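Example #6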
void subspaceIdMoor::buildNMatrix(arma::uword k, arma::mat const &M, arma::mat const &L1, arma::mat const &L2, arma::mat const &X,
	arma::uword i, arma::uword n, arma::uword ny, arma::mat &N){
	mat Upper, Lower;
	Upper = join_horiz(M.cols((k - 1)*ny, ny*i - 1) - L1.cols((k-1)*ny, ny*i - 1), zeros(n, (k-1)*ny));
	Lower = join_horiz(-L2.cols((k - 1) * ny, ny*i - 1), zeros(ny, (k - 1)*ny));
	N = join_vert(Upper, Lower);
	if (k == 1)
		N.submat(n, 0, n + ny - 1, ny - 1) = eye(ny, ny) + N.submat(n, 0, n + ny - 1, ny - 1);
	N = N * X;
}
Example #7
vmat::vmat(const arma::mat &x, const arma::uvec &rc1, const arma::uvec &rc2) {
  arma::mat x11 = x.submat(rc1,rc1);
  arma::mat x12 = x.submat(rc1,rc2);
  arma::mat x21 = x.submat(rc2,rc1);
  arma::mat x22 = x.submat(rc2,rc2);

  proj = x12*arma::inv(x22);
  vcov = x11-proj*x21;
  inv = arma::inv_sympd(vcov);
  loginvsqdet = log(1/sqrt(arma::det(vcov)));
}
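What this constructor builds is the standard conditional (Schur-complement) decomposition of a Gaussian covariance matrix. With \Sigma_{11} = x.submat(rc1, rc1), \Sigma_{12} = x.submat(rc1, rc2), and so on:

\mathrm{proj} = \Sigma_{12}\Sigma_{22}^{-1}, \qquad
\mathrm{vcov} = \Sigma_{11} - \Sigma_{12}\Sigma_{22}^{-1}\Sigma_{21}, \qquad
\mathrm{loginvsqdet} = \log\frac{1}{\sqrt{\det(\mathrm{vcov})}} = -\tfrac{1}{2}\log\det(\mathrm{vcov}).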
Example #8
void ColumnsToBlocks::Transform(const arma::mat& maximalInputs,
                                arma::mat& output)
{
  if (!IsPerfectSquare(maximalInputs.n_rows))
  {
    throw std::runtime_error("maximalInputs.n_rows should be perfect square");
  }

  if (blockHeight == 0 || blockWidth == 0)
  {
    size_t const squareRows =
        static_cast<size_t>(std::sqrt(maximalInputs.n_rows));
    blockHeight = squareRows;
    blockWidth = squareRows;
  }

  if (blockHeight * blockWidth != maximalInputs.n_rows)
  {
    throw std::runtime_error("blockHeight * blockWidth should "
                             "equal to maximalInputs.n_rows");
  }

  const size_t rowOffset = blockHeight+bufSize;
  const size_t colOffset = blockWidth+bufSize;
  output.ones(bufSize + rows * rowOffset,
              bufSize + cols * colOffset);
  output *= bufValue;

  size_t k = 0;
  const size_t maxSize = std::min(rows * cols, (size_t) maximalInputs.n_cols);
  for (size_t i = 0; i != rows; ++i)
  {
    for (size_t j = 0; j != cols; ++j)
    {
      // Now, copy the elements of the row to the output submatrix.
      const size_t minRow = bufSize + i * rowOffset;
      const size_t minCol = bufSize + j * colOffset;
      const size_t maxRow = i * rowOffset + blockHeight;
      const size_t maxCol = j * colOffset + blockWidth;

      output.submat(minRow, minCol, maxRow, maxCol) =
          arma::reshape(maximalInputs.col(k++), blockHeight, blockWidth);

      if (k >= maxSize)
        break;
    }
  }

  if (scale)
  {
    const double max = output.max();
    const double min = output.min();
    if ((max - min) != 0)
    {
      output = (output - min) / (max - min) * (maxRange - minRange) + minRange;
    }
  }
}
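A hedged usage sketch for the Transform method above. The class is mlpack's ColumnsToBlocks, but the exact header path and namespace vary across mlpack versions, so this assumes the mlpack 3.x layout:

#include <mlpack/core.hpp>

int main() {
  // 16 rows = 4x4 pixels per block (n_rows must be a perfect square here);
  // 6 columns to be tiled as a 2x3 grid of blocks.
  arma::mat maximalInputs(16, 6, arma::fill::randu);

  mlpack::math::ColumnsToBlocks ctb(2, 3); // 2 block-rows, 3 block-columns
  arma::mat output;
  ctb.Transform(maximalInputs, output);
  output.print("tiled output");
  return 0;
}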
Example #9
void MaximalInputs(const arma::mat& parameters, arma::mat& output)
{
  arma::mat paramTemp(parameters.submat(0, 0, (parameters.n_rows - 1) / 2 - 1,
                                        parameters.n_cols - 2).t());
  double const mean = arma::mean(arma::mean(paramTemp));
  paramTemp -= mean;

  NormalizeColByMax(paramTemp, output);
}
Example #10
unsigned int optCoef(arma::mat& weights, const arma::icube& obs, const arma::cube& emission,
    const arma::mat& initk, const arma::cube& beta, const arma::mat& scales, arma::mat& coef,
    const arma::mat& X, const arma::ivec& cumsumstate, const arma::ivec& numberOfStates,
    int trace) {

  int iter = 0;
  double change = 1.0;
  while ((change > 1e-10) && (iter < 100)) {
    arma::vec tmpvec(X.n_cols * (weights.n_rows - 1));
    bool solve_ok = arma::solve(tmpvec, hCoef(weights, X),
        gCoef(obs, beta, scales, emission, initk, weights, X, cumsumstate, numberOfStates));
    if (solve_ok == false) {
      return (4);
    }

    arma::mat coefnew(coef.n_rows, coef.n_cols - 1);
    for (unsigned int i = 0; i < (weights.n_rows - 1); i++) {
      coefnew.col(i) = coef.col(i + 1) - tmpvec.subvec(i * X.n_cols, (i + 1) * X.n_cols - 1);
    }
    change = arma::accu(arma::abs(coef.submat(0, 1, coef.n_rows - 1, coef.n_cols - 1) - coefnew))
        / coefnew.n_elem;
    coef.submat(0, 1, coef.n_rows - 1, coef.n_cols - 1) = coefnew;
    iter++;
    if (trace == 3) {
      Rcout << "coefficient optimization iter: " << iter;
      Rcout << " new coefficients: " << std::endl << coefnew << std::endl;
      Rcout << " relative change: " << change << std::endl;
    }
    weights = exp(X * coef).t();
    if (!weights.is_finite()) {
      return (5);
    }
    weights.each_row() /= sum(weights, 0);

  }
  return (0);
}
Example #11
double mlpack::cf::SVDWrapper<DummyClass>::Apply(const arma::mat& V,
                                     size_t r,
                                     arma::mat& W,
                                     arma::mat& H) const
{
  // check if the given rank is valid
  if(r > V.n_rows || r > V.n_cols)
  {
    Log::Info << "Rank " << r << ", given for decomposition is invalid." << std::endl;
    r = (V.n_rows > V.n_cols) ? V.n_cols : V.n_rows;
    Log::Info << "Setting decomposition rank to " << r << std::endl;
  }

  // get svd factorization
  arma::vec sigma;
  arma::svd(W, sigma, H, V);

  // remove the part of W and H depending upon the value of rank
  W = W.submat(0, 0, W.n_rows - 1, r - 1);
  H = H.submat(0, 0, H.n_cols - 1, r - 1);

  // take only required eigenvalues
  sigma = sigma.subvec(0, r - 1);

  // eigenvalue matrix is multiplied to W
  // it can either be multiplied to H matrix
  W = W * arma::diagmat(sigma);

  // take transpose of the matrix H as required by CF module
  H = arma::trans(H);

  // reconstruct the matrix
  arma::mat V_rec = W * H;

  // return the normalized frobenius norm
  return arma::norm(V - V_rec, "fro") / arma::norm(V, "fro");
}
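A self-contained sketch of the same rank-r truncation in plain Armadillo, on a made-up matrix; it mirrors the steps above (full SVD, keep the leading r singular triplets, fold the singular values into W, report the relative Frobenius error):

#include <armadillo>
#include <iostream>

int main() {
  arma::mat V(20, 15, arma::fill::randu);
  const arma::uword r = 5; // made-up target rank

  arma::mat U, Vsv;
  arma::vec s;
  arma::svd(U, s, Vsv, V);

  // Keep only the leading r singular triplets.
  arma::mat W = U.cols(0, r - 1) * arma::diagmat(s.subvec(0, r - 1));
  arma::mat H = Vsv.cols(0, r - 1).t();

  std::cout << "relative error: "
            << arma::norm(V - W * H, "fro") / arma::norm(V, "fro") << std::endl;
  return 0;
}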
Example #12
// [[Rcpp::export]]
double loglikelihoodLogitCpp_t(const arma::vec& beta, const arma::mat& sigma, const arma::vec& sigmaType, const arma::vec& u, 
const arma::vec& df, const arma::vec& kKi, const arma::vec& kLh, const arma::vec& kLhi, const arma::vec& kY, const arma::mat& kX, const arma::mat& kZ) {
  double value = 0; /** The value to be returned */
  
  int nObs = kY.n_elem;
  int kP = kX.n_cols;  /** Dimension of Beta */
  int kK = kZ.n_cols;  /** Dimension of U */
  int kR = kKi.n_elem; /** Number of variance components */
  // int kL = sum(kLh);   /** Number of subvariance components */
  
  
  
  /** Sum over observations of y_ij * w_ij - log(1 + exp(w_ij)):
   *  the conditional log-likelihood of the responses given the
   *  random effects u.
   */
  for (int i = 0; i < nObs; i++) {
    double wij = 0;
    for (int j = 0; j < kP; j++) {
      wij += kX(i, j) * beta(j);
    }
    
    for (int j = 0; j < kK; j++) {
      wij += kZ(i, j) * u(j);
    }
    value += kY(i) * wij - log(1 + exp(wij));
  }
  
  int from = 0;
  int to = - 1;
  int counter = 0;
  for (int i = 0; i < kR; i++) {
    for (int j = 0; j < kLh(i); j++) {
      // std::cout<<i<<"\n";
      to += kLhi(counter);
      // std::cout<<"from:"<<from<<'\n';
      // std::cout<<"to:"<<to<<'\n';
      // std::cout<<sigmaType(i)<<"\n";
      // std::cout<<kron(arma::mat(kLhi(counter), kLhi(counter), arma::fill::eye), getSigma(sigma.row(i).t()))<<"\n";
      value += ldmt(u.subvec(from, to), df(counter), sigma.submat(from, from, to, to), sigmaType(i));
      from = to + 1;
      counter += 1;
    }
  }
  
  
  return value;
}
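In formula form, the two loops above accumulate the conditional log-likelihood of a logistic mixed model plus the blockwise log-density of the random effects (a multivariate t density, judging by the ldmt call and the df argument):

\ell(\beta, u) = \sum_{i=1}^{n}\bigl[y_i w_i - \log(1 + e^{w_i})\bigr]
               + \sum_{b}\log f\bigl(u_b \mid \nu_b, \Sigma_b\bigr),
\qquad w_i = x_i^{\top}\beta + z_i^{\top}u,

where each block u_b = u.subvec(from, to) and \Sigma_b = sigma.submat(from, from, to, to) is cut out with subvec/submat.
Example #13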
// get a power spectrum from the samples
arma::mat getPowerSpec(arma::mat samples)
{
	int winpts = round(wintime * data.sampleRate); // points in a window
	int steppts = round(steptime * data.sampleRate); // points in a step
	int winnum = (data.totalFrames - winpts) / steppts + 1; // how many windows
	int nfft = pow(2.0, ceil(log((double)winpts) / log(2.0))); // fft numbers

	arma::mat powerSpec(nfft, winnum);
	arma::mat hamming = makeHamming(winpts);

	// for each window, do hamming window and get the power spectrum
	for (int i = 0; i < winnum; i++) {
		int start = i * steppts;
		int end = start + winpts - 1;
		arma::mat winsamples = samples.submat(0, start, 0, end);
		winsamples = winsamples % hamming; // element-wise multiplication, do hamming window
		powerSpec.col(i) = powerFFT(trans(winsamples), nfft); // do fft and get power spectrum
	}
	return powerSpec;
}
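Example #14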
arma::mat InverseKinematicJacobian::getJacobianForTasks(KinematicTree const& tree, std::vector<const Task*> const& tasks, arma::mat &jacobianRAW, bool normalize) const
{
	const uint32_t numTasks = tasks.size();
	uint numCols = tree.getMotorCnt();

	arma::mat jacobian = arma::zeros(1, numCols);
	if ((0 < numTasks) &&
		(0 < numCols)) /* at least one task and at least one motor */
	{
		/* calculate the size of the jacobian and the task vector */
		uint32_t numRows = 0;
		for (const Task* const &task : tasks)
		{
			if (task->hasTarget()) {
				numRows += task->getDimensionCnt();
			}
		}

		/* build the "big" jacobian */
		jacobian = arma::zeros(numRows, numCols);
		jacobianRAW = arma::zeros(numRows, numCols);

		uint32_t beginRow = 0;
		for (const Task *const &task : tasks)
		{
			if (task->hasTarget())
			{
				const uint32_t endRow = beginRow + task->getDimensionCnt() - 1;
				arma::mat jacobianRawSub;
				jacobian.submat(beginRow, 0, endRow, numCols - 1) = task->getJacobianForTask(tree, jacobianRawSub, normalize);
				jacobianRAW.submat(beginRow, 0, endRow, numCols - 1) = jacobianRawSub;

				beginRow = endRow + 1;
			}
		}
	}

	return jacobian;
}
Example #15
arma::rowvec MultiKernel::predictAll(arma::mat &newX, std::vector<supportData*>& S, int B){
    using namespace arma;

    // preprocess first
    preprocess(S, B);

    int n = newX.n_rows;

    mat y_hat(1, 1, fill::zeros);
    mat y(1, 1, fill::zeros);

    rowvec scores(n, fill::ones);

    int dim1 = newX.n_rows;
    int l = 0, h = 0;

    // Split the query matrix into one column block per feature.
    std::vector<arma::mat> new_x;
    for (size_t i = 0; i < this->features.size(); i++) {
        h += features[i]->calculateFeatureDimension();
        arma::mat x_1_part = newX.submat(0, l, dim1 - 1, h - 1);
        new_x.push_back(x_1_part);
        l = h;
    }

    for (int k = 0; k < n; ++k) {
        y(0) = k;
        double current = 0;

        for (size_t i = 0; i < S.size(); ++i) {
            int dim2 = S[i]->x->n_rows;
            l = 0;
            h = 0;

            // Split the support matrix into the same per-feature blocks.
            std::vector<arma::mat> old_x;
            for (size_t j = 0; j < this->features.size(); j++) {
                h += features[j]->calculateFeatureDimension();
                arma::mat x_2_part = S[i]->x->submat(0, l, dim2 - 1, h - 1);
                old_x.push_back(x_2_part);
                l = h;
            }

            for (int yhat = 0; yhat < (int) S[i]->x->n_rows; ++yhat) {
                y_hat(0) = yhat;

                if ((*S[i]->beta)[yhat] != 0) {
                    // the below has to be multiplied by the velocities kernel
                    // current += (*S[i]->beta)[yhat] *
                    //     calculate(newX, y(0), *S[i]->x, y_hat(0));

                    for (size_t j = 0; j < this->features.size(); j++) {
                        current += (*S[i]->beta)[yhat] *
                            this->kernels[j]->calculate(new_x[j], y(0), old_x[j], y_hat(0));
                    }
                }
            }
        }

        scores[k] = current;
    }

    return scores;
}
Example #16
Rotation3D::Rotation3D(const arma::mat& m) {
    *this = m.submat(0, 0, 2, 2);
    *this = this->orthogonalise();
}
Example #17
arma::vec DIIS::get_w_diis_wrk(const arma::mat & errs) const {
  // Size of LA problem
  int N=(int) errs.n_cols;
  
  // DIIS weights
  arma::vec sol(N);
  sol.zeros();
  
  if(c1diis) {
    // Original, Pulay's DIIS (C1-DIIS)
    
    // Array holding the errors
    arma::mat B(N+1,N+1);
    B.zeros();
    // Compute errors
    for(int i=0;i<N;i++)
      for(int j=0;j<N;j++) {
	B(i,j)=arma::dot(errs.col(i),errs.col(j));
      }
    // Fill in the rest of B
    for(int i=0;i<N;i++) {
      B(i,N)=-1.0;
      B(N,i)=-1.0;
    }
    
    // RHS vector
    arma::vec A(N+1);
    A.zeros();
    A(N)=-1.0;
    
    // Solve B*X = A
    arma::vec X;
    bool succ;
    
    succ=arma::solve(X,B,A);
    
    if(succ) {
      // Solution is (last element of X is DIIS error)
      sol=X.subvec(0,N-1);
      
      // Check that weights are within tolerance
      if(arma::max(arma::abs(sol))>=MAXWEIGHT) {
	printf("Large coefficient produced by DIIS. Reducing to %i matrices.\n",N-1);
	arma::vec w0=get_w_diis_wrk(errs.submat(1,1,errs.n_rows-1,errs.n_cols-1));
	
	// Helper vector
	arma::vec w(N);
	w.zeros();
	w.subvec(w.n_elem-w0.n_elem,w.n_elem-1)=w0;
	return w;
      }
    }
    
    if(!succ) {
      // Failed to invert matrix. Use the two last iterations instead.
      printf("C1-DIIS was not succesful, mixing matrices instead.\n");
      sol.zeros();
      sol(0)=0.5;
      sol(1)=0.5;
    }
  } else {
    // C2-DIIS

    // Array holding the errors
    arma::mat B(N,N);
    B.zeros();
    // Compute errors
    for(int i=0;i<N;i++)
      for(int j=0;j<N;j++) {
	B(i,j)=arma::dot(errs.col(i),errs.col(j));
      }

    // Solve eigenvectors of B
    arma::mat Q;
    arma::vec lambda;
    eig_sym_ordered(lambda,Q,B);

    // Normalize weights
    for(int i=0;i<N;i++) {
      Q.col(i)/=arma::sum(Q.col(i));
    }

    // Choose solution by picking out solution with smallest error
    arma::vec errors(N);
    arma::mat eQ=errs*Q;
    // The weighted error is
    for(int i=0;i<N;i++) {
      errors(i)=arma::norm(eQ.col(i),2);
    }

    // Find minimal error
    double mine=DBL_MAX;
    int minloc=-1;
    for(int i=0;i<N;i++) {
      if(errors[i]<mine) {
	// Check weights
	bool ok=arma::max(arma::abs(Q.col(i)))<MAXWEIGHT;
	if(ok) {
	  mine=errors(i);
	  minloc=i;
	}
      }
    }

    if(minloc!=-1) {
      // Solution is
      sol=Q.col(minloc);
    } else {
      printf("C2-DIIS did not find a suitable solution. Mixing matrices instead.\n");
      
      sol.zeros();
      sol(0)=0.5;
      sol(1)=0.5;
    }
  }
  
  return sol;
}
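For reference, the C1-DIIS branch solves Pulay's constrained least-squares problem: minimize the norm of the extrapolated error \sum_i c_i e_i subject to \sum_i c_i = 1. Introducing a Lagrange multiplier \lambda yields exactly the bordered linear system assembled in B and A above:

\begin{pmatrix} B & -\mathbf{1} \\ -\mathbf{1}^{\top} & 0 \end{pmatrix}
\begin{pmatrix} \mathbf{c} \\ \lambda \end{pmatrix}
=
\begin{pmatrix} \mathbf{0} \\ -1 \end{pmatrix},
\qquad B_{ij} = \langle e_i, e_j \rangle.
Example #18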
ss subspaceIdMoor::subidKnownOrder(arma::uword ny, arma::uword nu,  arma::mat const &R, arma::mat const &Usvd, arma::vec const &singval,
	arma::uword i, arma::uword n){
	ss ssout;

	mat U1 = Usvd.cols(0, n - 1);
	/* STEP 4 in Subspace Identification*/
	/*Determine gam and gamm*/
	mat gam = U1 * diagmat(sqrt(singval.subvec(0, n - 1)));
	mat gamm = gam.rows(0, ny*(i - 1) - 1);
	mat gam_inv = pinv(gam); /*pseudo inverse*/
	mat gamm_inv = pinv(gamm); /*pseudo inverse*/

	/* STEP 5*/
	mat Rhs, Lhs;
	buildRhsLhsMatrix(gam_inv, gamm_inv, R, i, n, ny, nu, Rhs, Lhs);

	/* Solve least square*/
	mat solls;
	solls = solve(Rhs.t(), Lhs.t()).t();

	/* Extract system matrix:*/
	mat A, C;
	A = solls.submat(0, 0, n - 1, n - 1);
	C = solls.submat(n, 0, n + ny - 1, n - 1);
	mat res = Lhs - solls*Rhs;

	/* Recompute gamma from A and C:*/
	gam.zeros();
	gam.rows(0, ny - 1) = C;
	for (uword k = 2; k <= i; k++){
		gam.rows((k - 1)*ny, k*ny - 1) = gam.rows((k-2)*ny, (k-1)*ny - 1) * A;
	}
	gamm = gam.rows(0, ny*(i - 1) - 1);
	gam_inv = pinv(gam);
	gamm_inv = pinv(gamm);

	/* Recompute the states with the new gamma:*/
	buildRhsLhsMatrix(gam_inv, gamm_inv, R, i, n, ny, nu, Rhs, Lhs);

	/* STEP 6:*/
	/* Computing system Matrix B and D*/
	/*ref pag 125 for P and Q*/
	mat P = Lhs - join_vert(A, C) * Rhs.rows(0, n - 1);
	P = P.cols(0, 2*nu*i - 1);
	mat Q = R.submat(nu*i, 0, 2 * nu*i - 1, 2 * nu*i - 1); /*Future inputs*/

	/* Matrix L1, L2 and M as on page 119*/
	mat L1 = A * gam_inv;
	mat L2 = C * gam_inv;
	mat M = join_horiz(zeros(n, ny), gamm_inv);
	mat X = join_vert(join_horiz(eye(ny, ny), zeros(ny, n)), join_horiz(zeros(ny*(i-1), ny), gamm));

	/* Calculate N and the Kronecker products (page 126)*/
	mat N;
	uword kk = 1;
	buildNMatrix(kk, M, L1, L2, X, i, n, ny, N);
	mat totm = kron(Q.rows((kk-1)*nu, kk*nu - 1).t(), N);
	for (kk = 2; kk <= i; kk++){
		buildNMatrix(kk, M, L1, L2, X, i, n, ny, N);
		totm = totm + kron(Q.rows((kk - 1)*nu, kk*nu - 1).t(), N);
	}

	/* Solve Least Squares: */
	mat Pvec = vectorise(P);
	mat sollsq2 = solve(totm, Pvec);

	/*Mount B and D*/
	sollsq2.reshape(n + ny, nu);
	mat D = sollsq2.rows(0, ny - 1);
	mat B = sollsq2.rows(ny, ny+n - 1);

	/* STEP 7: Compute sys Matrix G, L0*/
	mat covv, Qs, Ss, Rs, sig, G, L0, K, Ro;
	if (norm(res) > 1e-10){ /*Determine QSR from the residuals*/
		covv = res*res.t();
		Qs = covv.submat(0, 0, n - 1, n - 1);
		Ss = covv.submat(0, n, n - 1, n + ny - 1);
		Rs = covv.submat(n, n, n+ny - 1, n+ny - 1);
		simple_dlyap(A, Qs, sig); /*solves discrete lyapunov matrix equation*/
		G = A*sig*C.t() + Ss;
		L0 = C*sig*C.t() + Rs;

		/* Determine K and Ro*/
		g12kr(A, G, C, L0, K, Ro);
	}

	/* Set to ss structure:*/
	ssout.A = A;
	ssout.B = B;
	ssout.C = C;
	ssout.D = D;
	// Stopped here: add the stochastic-related matrices (G, L0, K, Ro) to ss later.

	return ssout;
}
Example #19
double
HMM::baumWelchCached(const arma::mat & data, const std::vector<GMM> & B, unsigned int seed = 0) {

  init(B, seed);
  allocateMemory(data.n_cols);


  cacheProbabilities(data);
  double logprobc = forwardProcedureCached();
  double logprobprev = 0.0;
  double delta;
  do {
    backwardProcedureCached();
    computeXiCached();
    computeGamma();
    logprobprev = logprobc;
    //update state probabilities pi
    pi_ = gamma_.col(0).t();
    for (unsigned int i = 0; i < N_; ++i) {
      arma::rowvec xi_i = arma::sum(xi_.slice(i));
      double shortscale = 1./arma::accu(gamma_.submat(arma::span(i),arma::span(0,T_-2)));
      for (unsigned int j = 0; j < N_; ++j) {
        A_(i,j) = xi_i(j) * shortscale;
      }
      // and update distributions

      unsigned int numComponents = (unsigned int) BModels_[i].getNumComponents();
      arma::mat gamma_lt = arma::mat(T_, numComponents);
      gamma_lt = (gamma_.row(i).t() * arma::ones(1, numComponents)) % gammaLts_[i];

      for(unsigned int t = 0; t < T_ ; ++t) {
        double sumProb = B_(t,i);
        if (sumProb != 0.) {
          gamma_lt.row(t) /= sumProb; 
        }
      }

      double scale = 1./arma::accu(gamma_.row(i));
      for (unsigned int l = 0; l < numComponents; ++l) {
        double sumGammaLt = arma::accu(gamma_lt.col(l));
        double newWeight = scale * sumGammaLt;
        arma::vec newMu = data * gamma_lt.col(l) / sumGammaLt;
        arma::mat tempMat = data - newMu * arma::ones(1, T_);
        unsigned int d = data.n_rows;
        arma::mat newSigma = arma::zeros(d, d);
        for (unsigned int t = 0; t < T_ ; ++t) {
          arma::vec diff = data.col(t) - newMu;
          newSigma += diff * diff.t() * gamma_lt(t, l)/ sumGammaLt;
        }

        try{
          BModels_[i].updateGM(l, newWeight, newMu, newSigma);
        }
        catch (const std::runtime_error&) {
          tempMat.print("tempMat");
          gamma_lt.col(l).print("gamma_lt");
          throw;  // rethrow the original exception
        }
      }
      arma::uvec indices = BModels_[i].cleanupGMs();
      gammaLts_[i] = gammaLts_[i].cols(indices);
    }


    cacheProbabilities(data);
    logprobc = forwardProcedureCached();
    std::cout << logprobc << " " << logprobprev << std::endl;
    delta = logprobc - logprobprev;

  } 
  while (delta >= eps_ * std::abs(logprobprev));

  return logprobc;

}
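The updates inside the loop are the standard Baum-Welch re-estimation formulas. With \gamma_t(i) and \xi_t(i, j) as produced by computeGamma and computeXiCached:

\hat{\pi}_i = \gamma_1(i), \qquad
\hat{A}_{ij} = \frac{\sum_{t=1}^{T-1}\xi_t(i,j)}{\sum_{t=1}^{T-1}\gamma_t(i)},

which matches pi_ = gamma_.col(0).t() and A_(i, j) = xi_i(j) * shortscale, where shortscale is one over the gamma sum for state i taken up to index T_ - 2.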
Example #20
// Subroutine
double LogLikMWNOUPairs(arma::mat x, double t, arma::vec mu, arma::vec alpha, arma::vec sigma, int maxK = 2, double etrunc = 50) {
  
  /*
  * Description: Computation of the loglikelihood for a MWN-OU diffusion (with diagonal diffusion matrix)
  *              from a sample of initial and final pairs of dihedrals
  * 
  * Arguments:
  *
  * - x: a n x 4 matrix of initial and final pairs of dihedrals. Each row is an observation containing 
  *      the angles (phi_0, psi_0, phi_t, psi_t). They must be in [-PI, PI) so that the truncated wrapping by 
  *      maxK windings is able to capture periodicity.
  * - t: the common time between the observed pairs.
  * - mu: a vector of length 2 with the mean parameter of the MWN-OU process. The mean of the MWN stationary distribution.
  * - alpha: vector of length 3 containing the A matrix of the drift of the MWN-OU process in the following codification: 
  *          A = [alpha[0], alpha[2] * sqrt(sigma[0] / sigma[1]); alpha[2] * sqrt(sigma[1] / sigma[0]), alpha[1]]. 
  *          This enforces that A^(-1) * Sigma is symmetric. Positive definiteness is guaranteed if 
  *          alpha[0] * alpha[1] > alpha[2] * alpha[2] and this is checked during the computation of the loglikelihood.
  * - sigma: vector of length 2 containing the diagonal of Sigma, the diffusion matrix. Note that these are the *squares*
  *          (i.e. variances) of the diffusion coefficients that multiply the Wiener process.
  * - maxK: maximum number of windings considered in the computation of the approximated transition probability density.
  * - etrunc: truncation for exponential. exp(x) with x <= -etrunc is set to zero.
  * 
  * Warning: 
  * 
  *  - A combination of small etrunc (< 30) and low maxK (<= 1) can lead to NaNs produced by 0 / 0 in the weight computation. 
  *    This is especially dangerous if sigma is large and there are values in x or x0 outside [-PI, PI).
  *    
  * Value: 
  * 
  * - loglik: final loglikelihood, defined as the sum of the loglikelihood of the initial dihedrals according 
  *           to the stationary density and the loglikelihood of the transitions from initial to final dihedrals.
  *           If positive definiteness is violated, the loglikelihood is computed from a close alpha ensuring 
  *           positive definiteness and a negative penalty is added. If the output value is Inf, -100 * N is 
  *           returned instead.
  * 
  * Author: Eduardo García-Portugués ([email protected]) 
  * 
  */
  
  /*
  * Create basic objects
  */
  
  // Number of pairs
  int N = x.n_rows;
  
  // Create log-likelihoods
  double loglikinitial = 0;
  double logliktpd = 0;
  double loglik = 0;
  
  // Create and initialize A
  double quo = sqrt(sigma(0) / sigma(1));
  arma::mat A(2, 2); 
  A(0, 0) = alpha(0); 
  A(1, 1) = alpha(1); 
  A(0, 1) = alpha(2) * quo;
  A(1, 0) = alpha(2) / quo;
  
  // Create and initialize Sigma
  arma::mat Sigma = diagmat(sigma);
  
  // Sequence of winding numbers
  const int lk = 2 * maxK + 1;
  arma::vec twokpi = arma::linspace<arma::vec>(-maxK * 2 * M_PI, maxK * 2 * M_PI, lk);
  
  // Bivariate vector (2 * K1 * PI, 2 * K2 * PI) for weighting
  arma::vec twokepivec(2);
  
  // Bivariate vector (2 * K1 * PI, 2 * K2 * PI) for wrapping
  arma::vec twokapivec(2);
  
  /*
  * Check for symmetry and positive definiteness of A^(-1) * Sigma
  */
  
  // Add a penalty to the loglikelihood in case any assumption is violated
  double penalty = 0;
  
  // Only positive definiteness can be violated with the parametrization of A
  double testalpha = alpha(0) * alpha(1) - alpha(2) * alpha(2);
  
  // Check positive definiteness 
  if(testalpha <= 0) {
    
    // Add a penalty
    penalty = -testalpha * 10000 + 10;
    
    // Update alpha(2), keeping its sign, such that testalpha > 0
    alpha(2) = (std::signbit(alpha(2)) ? -1.0 : 1.0) * sqrt(alpha(0) * alpha(1)) * 0.9999;
    
    // Reset A to a matrix with positive determinant
    A(0, 1) = alpha(2) * quo;
    A(1, 0) = alpha(2) / quo;
    
  }
  
  // Inverse of 1/2 * A^(-1) * Sigma: 2 * Sigma^(-1) * A
  arma::mat invSigmaA = 2 * diagmat(1 / diagvec(Sigma)) * A;
  
  // Log-normalizing constant for the Gaussian with covariance SigmaA
  double lognormconstSigmaA = -log(2 * M_PI) + log(det(invSigmaA)) / 2;
  
  /*
  * Computation of Gammat and exp(-t * A) analytically
  */
  
  // A * Sigma
  arma::mat ASigma = A * Sigma;
  
  // A * Sigma * A^T
  arma::mat ASigmaA = ASigma * A.t();
  
  // Update with A * Sigma + Sigma * A^T
  ASigma += ASigma.t();
  
  // Quantities for computing exp(-t * A)
  double s = trace(A) / 2;
  double q = sqrt(fabs(det(A - s * arma::eye<arma::mat>(2, 2))));
  
  // Avoid the 0/0 indeterminacy in sinh(q * t) / q when q == 0
  if(q == 0){
    
    q = 1e-6;
    
  }
  
  // Repeated terms in the analytic integrals
  double q2 = q * q;
  double s2 = s * s;
  double est = exp(s * t);
  double e2st = est * est;
  double inve2st = 1 / e2st;
  double c2st = exp(2 * q * t);
  double s2st = (c2st - 1/c2st) / 2;
  c2st = (c2st + 1/c2st) / 2;
  
  // Integrals
  double cte = inve2st / (4 * q2 * s * (s2 - q2));
  double integral1 = cte * (- s2 * (3 * q2 + s2) * c2st - q * s * (q2 + 3 * s2) * s2st - q2 * (q2 - 5 * s2) * e2st + (q2 - s2) *  (q2 - s2));
  double integral2 = cte * s * ((q2 + s2) * c2st + 2 * q * s * s2st - 2 * q2 * e2st + q2 - s2);
  double integral3 = cte * (- s * (s * c2st + q * s2st) + (e2st - 1) * q2 + s2);
  
  // Gammat    
  arma::mat Gammat = integral1 * Sigma + integral2 * ASigma + integral3 * ASigmaA;
  
  // Matrix exp(-t*A)
  double eqt = exp(q * t);
  double cqt = (eqt + 1/eqt) / 2;
  double sqt = (eqt - 1/eqt) / 2;
  arma::mat ExptA = ((cqt + s * sqt / q) * arma::eye<arma::mat>(2, 2) - sqt / q * A) / est;
  
  // Inverse and log-normalizing constant for the Gammat
  arma::mat invGammat = inv_sympd(Gammat);
  double lognormconstGammat = -log(2 * M_PI) + log(det(invGammat)) / 2;
  
  /* 
  * Weights of the winding numbers for each data point
  */
  
  // We store the weights in a matrix so the null weights can be skipped later in the computation of the tpd
  arma::mat weightswindsinitial(N, lk * lk);
  
  // Loop in the data
  for(int i = 0; i < N; i++){
    
    // Compute the factors in the exponent that do not depend on the windings
    arma::vec xmu = x.submat(i, 0, i, 1).t() - mu;
    arma::vec xmuinvSigmaA = invSigmaA * xmu;
    double xmuinvSigmaAxmudivtwo = dot(xmuinvSigmaA, xmu) / 2;
    
    // Loop in the winding weight K1
    for(int wek1 = 0; wek1 < lk; wek1++){
      
      // 2 * K1 * PI
      twokepivec(0) = twokpi(wek1); 
      
      // Compute once the index
      int wekl1 = wek1 * lk;
      
      // Loop in the winding weight K2  
      for(int wek2 = 0; wek2 < lk; wek2++){
        
        // 2 * K2 * PI
        twokepivec(1) = twokpi(wek2);
        
        // Decomposition of the exponent
        double exponent = xmuinvSigmaAxmudivtwo + dot(xmuinvSigmaA, twokepivec) + dot(invSigmaA * twokepivec, twokepivec) / 2 - lognormconstSigmaA;
        
        // Truncate the negative exponential
        if(exponent > etrunc){
          
          weightswindsinitial(i, wekl1 + wek2) = 0;
          
        }else{
          
          weightswindsinitial(i, wekl1 + wek2) = exp(-exponent);
          
        }
        
      }
      
    }
    
  }
  
  // The unstandardized weights of the tpd give the required wrappings for the initial loglikelihood
  loglikinitial = accu(log(sum(weightswindsinitial, 1)));
  
  // Standardize weights for the tpd
  weightswindsinitial.each_col() /= sum(weightswindsinitial, 1);
  
  /*
  * Computation of the tpd: wrapping + weighting
  */
  
  // The evaluations of the tpd are stored in a vector, no need to keep track of wrappings
  arma::vec tpdfinal(N); tpdfinal.zeros();
  
  // Loop in the data
  for(int i = 0; i < N; i++){
    
    // Initial point x0 varying with i
    arma::vec x0 = x.submat(i, 0, i, 1).t();
    
    // Loop on the winding weight K1
    for(int wek1 = 0; wek1 < lk; wek1++){
      
      // 2 * K1 * PI
      twokepivec(0) = twokpi(wek1); 
      
      // Compute once the index
      int wekl1 = wek1 * lk;
      
      // Loop on the winding weight K2  
      for(int wek2 = 0; wek2 < lk; wek2++){
        
        // Skip zero weights
        if(weightswindsinitial(i, wekl1 + wek2) > 0){
          
          // 2 * K2 * PI
          twokepivec(1) = twokpi(wek2); 
          
          // mut
          arma::vec mut = mu + ExptA * (x0 + twokepivec - mu);
          
          // Compute the factors in the exponent that do not depend on the windings
          arma::vec xmut = x.submat(i, 2, i, 3).t() - mut;
          arma::vec xmutinvGammat = invGammat * xmut;
          double xmutinvGammatxmutdiv2 = dot(xmutinvGammat, xmut) / 2;
          
          // Loop in the winding wrapping K1
          for(int wak1 = 0; wak1 < lk; wak1++){
            
            // 2 * K1 * PI
            twokapivec(0) = twokpi(wak1); 
            
            // Loop in the winding wrapping K2
            for(int wak2 = 0; wak2 < lk; wak2++){
              
              // 2 * K2 * PI
              twokapivec(1) = twokpi(wak2);
              
              // Decomposition of the exponent
              double exponent = xmutinvGammatxmutdiv2 + dot(xmutinvGammat, twokapivec) + dot(invGammat * twokapivec, twokapivec) / 2 - lognormconstGammat;
              
              // Truncate the negative exponential
              if(exponent < etrunc){
                
                tpdfinal(i) += exp(-exponent) * weightswindsinitial(i, wekl1 + wek2);
                
              }
              
            }
            
          }
          
        }
        
      }
      
    }
    
  }
  
  // Logarithm of tpd
  tpdfinal = log(tpdfinal);
  
  // Set log(0) to -etrunc, as this is the truncation of the negative exponentials
  tpdfinal.elem(find_nonfinite(tpdfinal)).fill(-etrunc);
  
  // Log-likelihood from tpd
  logliktpd = sum(tpdfinal);
  
  // Final loglikelihood
  loglik = loglikinitial + logliktpd;
  
  // Check if it is finite
  if(!std::isfinite(loglik)) loglik = -100 * N;
  
  return loglik - penalty;
  
}