Example No. 1
    template < class xpr, class Out> void
    padeapproximantofdegree(const xpr & a, const size_t & m, Out &f)
    {
      //  padeapproximantofdegree  Padé approximant to the exponential.
      //     padeapproximantofdegree(a, m, f) stores in f the degree-m diagonal
      //     Padé approximant to exp(a), where m = 3, 5, 7, 9 or 13.
      //     The series are evaluated in decreasing order of powers, which is
      //     in approximately increasing order of the maximum norms of the terms.

      typedef typename xpr::value_type                      value_type;
      typedef typename meta::as_real<value_type>::type          base_t;
      typedef expm_helper<base_t>                                  h_t;
      typedef table <value_type>                                 tab_t;

      size_t n = nt2::length(a);
      tab_t  c = h_t::getpadecoefficients(m);
      std::vector<tab_t> apowers(m/2+1);
      // Evaluate the Padé approximant: u collects the odd powers, v the even powers.
      tab_t  u = nt2::zeros(n, n, meta::as_<value_type>());
      tab_t  v = u;
      switch (m)
      {
      case 3:
      case 5:
      case 7:
      case 9:
        apowers[0] = nt2::eye(n, meta::as_<value_type>());
        apowers[1] = nt2::mtimes(a, a);
        for (size_t j = 2; j < m/2+1; j++)
        {
          apowers[j] = nt2::mtimes(apowers[j-1], apowers[1]);
        }

        for(ptrdiff_t j=m+1; j >= 2 ; j-= 2)
        {
          //              u = u+ c(j)*apowers[j/2-1];
          u = nt2::fma(c(j),apowers[j/2-1], u);
        }
        u = mtimes(a, u);
        for(ptrdiff_t j=m; j >= 1 ; j-= 2)
        {
          //              v = v + c(j)*apowers[(j+1)/2-1];
          v = nt2::fma(c(j),apowers[(j+1)/2-1], v);
        }
        break;
      case 13:
        {
          // For optimal evaluation a different formula is needed for m >= 12.
          tab_t a2 = nt2::mtimes(a, a);
          tab_t a4 = nt2::mtimes(a2, a2);
          tab_t a6 = nt2::mtimes(a2, a4);
          u = mtimes(a, (mtimes(a6,(c(14)*a6 + c(12)*a4 + c(10)*a2))+
                         c(8)*a6 + c(6)*a4 + nt2::fma(c(4), a2, c(2))));
          v = mtimes(a6,c(13)*a6 + c(11)*a4 + c(9)*a2)
            + c(7)*a6 + c(5)*a4 + nt2::fma(c(3), a2, c(1));
          break;
        }
      }
      // Solve (v - u) f = (u + v) for the approximant f.
      f = nt2::linsolve((-u+v), (u+v));
    }
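
For intuition, here is the same construction in the scalar case: the degree-3 diagonal Padé approximant to exp(x) splits the coefficient polynomial into an odd part u and an even part v, so that exp(x) ~ (v + u)/(v - u). A minimal standalone C++ sketch (independent of nt2; the coefficients 120, 60, 12, 1 are the standard m = 3 Padé table entries):

    #include <cmath>
    #include <cstdio>

    // Degree-3 diagonal Pade approximant to exp(x), scalar case.
    // u holds the odd-power terms, v the even-power terms, so that
    // exp(x) ~= (v + u) / (v - u), mirroring linsolve(-u+v, u+v) above.
    double expm_pade3(double x)
    {
      const double c[4] = {120.0, 60.0, 12.0, 1.0};
      double x2 = x * x;
      double u  = x * (c[3] * x2 + c[1]);  // odd part
      double v  = c[2] * x2 + c[0];        // even part
      return (v + u) / (v - u);
    }

    int main()
    {
      std::printf("pade3(1) = %.6f  exp(1) = %.6f\n",
                  expm_pade3(1.0), std::exp(1.0));
    }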
Example No. 2
int main(void) {

	/*Matrix& A0 = *new DenseMatrix(new double[4] {1, 2, 3, 4}, 4, 1);
	disp(A0);
	Matrix& A1 = reshape(A0, new int[2] {2, 2});
	disp(A1);*/

	int m = 8;
	int r = m / 4;

	Matrix& L = randn(m, r);
	Matrix& R = randn(m, r);

	Matrix& A_star = mtimes(L, R.transpose());
	Matrix& E_star0 = zeros(size(A_star));
	// Corrupt 5% of the entries: pick nz random positions (randperm is 1-based).
	int* indices = randperm(m * m);
	int nz = m * m / 20;
	int* nz_indices = new int[nz];
	for (int i = 0; i < nz; i++) {
		nz_indices[i] = indices[i] - 1;
	}
	Matrix& E_vec = vec(E_star0);
	Matrix& Temp = (minus(rand(nz, 1), 0.5).times(100)); // uniform noise in [-50, 50]
	// disp(Temp);
	setSubMatrix(E_vec, nz_indices, nz, new int[1] {0}, 1, Temp);
	// disp(E_vec);
	Matrix& E_star = reshape(E_vec, size(E_star0));
	// disp(E_star);

	// Input: the observed matrix D = A* + E*.
	Matrix& D = A_star.plus(E_star);
	double lambda = 1 * pow(m, -0.5); // lambda = 1 / sqrt(m)
	RobustPCA& robustPCA = *new RobustPCA(lambda);
	robustPCA.feedData(D);
	tic();
	robustPCA.run();
	fprintf("Elapsed time: %.2f seconds.\n", toc());

	// Output
	Matrix& A_hat = robustPCA.GetLowRankEstimation();
	Matrix& E_hat = robustPCA.GetErrorMatrix();

	fprintf("A*:\n");
	disp(A_star, 4);
	fprintf("A^:\n");
	disp(A_hat, 4);
	fprintf("E*:\n");
	disp(E_star, 4);
	fprintf("E^:\n");
	disp(E_hat, 4);
	fprintf("rank(A*): %d\n", rank(A_star));
	fprintf("rank(A^): %d\n", rank(A_hat));
	fprintf("||A* - A^||_F: %.4f\n", norm(A_star.minus(A_hat), "fro"));
	fprintf("||E* - E^||_F: %.4f\n", norm(E_star.minus(E_hat), "fro"));

	return EXIT_SUCCESS;

}
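
The program recovers the low-rank A* and the sparse E* from D alone. Assuming this RobustPCA class implements the standard convex Robust PCA program (e.g. via an augmented-Lagrangian method), run() solves

    \min_{A,E} \; \|A\|_{*} + \lambda \|E\|_{1}
    \quad \text{subject to} \quad D = A + E,

where \|\cdot\|_{*} is the nuclear norm, \|\cdot\|_{1} the entrywise l1 norm, and \lambda = 1/\sqrt{m} matches the lambda set above.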
Example No. 3
  void QpToNlp::init(const Dict& opts) {
    // Initialize the base classes
    Qpsol::init(opts);

    // Default options
    string nlpsol_plugin;
    Dict nlpsol_options;

    // Read user options
    for (auto&& op : opts) {
      if (op.first=="nlpsol") {
        nlpsol_plugin = op.second.to_string();
      } else if (op.first=="nlpsol_options") {
        nlpsol_options = op.second;
      }
    }

    // Create a symbolic matrix for the decision variables
    SX X = SX::sym("X", n_, 1);

    // Parameters to the problem
    SX H = SX::sym("H", sparsity_in(QPSOL_H));
    SX G = SX::sym("G", sparsity_in(QPSOL_G));
    SX A = SX::sym("A", sparsity_in(QPSOL_A));

    // Put parameters in a vector
    std::vector<SX> par;
    par.push_back(H.nonzeros());
    par.push_back(G.nonzeros());
    par.push_back(A.nonzeros());

    // The NLP is a direct mathematical transcription of the QP:
    //   f(X; H, G) = G'X + 1/2 X'HX,   g(X; A) = AX
    SXDict nlp = {{"x", X}, {"p", vertcat(par)},
                  {"f", mtimes(G.T(), X) + 0.5*mtimes(mtimes(X.T(), H), X)},
                  {"g", mtimes(A, X)}};

    // Create an Nlpsol instance
    casadi_assert_message(!nlpsol_plugin.empty(), "'nlpsol' option has not been set");
    solver_ = nlpsol("nlpsol", nlpsol_plugin, nlp, nlpsol_options);
    alloc(solver_);

    // Allocate storage for the NLP solver parameters
    alloc_w(solver_.nnz_in(NLPSOL_P), true);
  }
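
In effect the QP is handed unchanged to a general NLP solver, with the nonzeros of H, G and A packed into the parameter vector p so the same solver instance can be reused for every solve:

    \min_{X} \; G^{T} X + \tfrac{1}{2}\, X^{T} H X,

with the usual qpsol bound semantics (lbx <= X <= ubx, lbg <= AX <= ubg, as spelled out in Example No. 11) imposed at solve time.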
Example No. 4
NT2_TEST_CASE_TPL(tr_result, NT2_REAL_TYPES)
{
    typedef nt2::table<T> t_t;
    // A lower-triangular, diagonally dominant test matrix.
    t_t a = nt2::tril( nt2::ones(4, 4, nt2::meta::as_<T>())
                     + T(10)*nt2::eye(4, 4, nt2::meta::as_<T>()));
    t_t b = nt2::ones(4, 1, nt2::meta::as_<T>());
    nt2::display("a     ", a);
    nt2::display("b     ", b);
    // LAPACK-style triangular solve flags: uplo = 'L', trans = 'N', diag = 'N'.
    nt2::details::tr_solve_result<t_t> f(a, b, 'L', 'N', 'N');

    nt2::display("x", f.x());
    NT2_DISPLAY(a);
    // x should reproduce b through a*x to within 2 ulps.
    NT2_TEST(nt2::isulpequal(b, mtimes(a, f.x()), T(2.0)));
}
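
Because a is lower triangular, the solve reduces to forward substitution. A minimal standalone sketch of that algorithm (plain C++ with a hypothetical row-major array in place of an nt2 table):

    #include <cstddef>

    // Forward substitution: solve L*x = b for a non-singular
    // lower-triangular n-by-n matrix L stored row-major.
    void tr_solve_lower(const double* L, const double* b,
                        double* x, std::size_t n)
    {
      for (std::size_t i = 0; i < n; ++i) {
        double s = b[i];
        for (std::size_t j = 0; j < i; ++j)
          s -= L[i*n + j] * x[j];  // subtract already-solved components
        x[i] = s / L[i*n + i];     // divide by the diagonal pivot
      }
    }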
Example No. 5
/**
 * Generate random samples chosen from the multivariate Gaussian
 * distribution with mean MU and covariance SIGMA.
 *
 * X ~ N(u, Lambda) => Y = B * X + v ~ N(B * u + v, B * Lambda * B')
 * Therefore, if X ~ N(0, Lambda),
 * then Y = B * X + MU ~ N(MU, B * Lambda * B').
 * We only need to do the eigen decomposition: SIGMA = B * Lambda * B'.
 *
 * @param MU 1 x d mean vector
 *
 * @param SIGMA covariance matrix
 *
 * @param cases number of d dimensional random samples
 *
 * @return cases-by-d sample matrix subject to the multivariate
 *         Gaussian distribution N(MU, SIGMA)
 *
 */
Matrix& mvnrnd(Matrix& MU, Matrix& SIGMA, int cases) {

	int d = MU.getColumnDimension();

	if (MU.getRowDimension() != 1) {
		errf("MU is expected to be a 1 x %d matrix!\n", d);
	}

	if (norm(SIGMA.transpose().minus(SIGMA)) > 1e-10)
		errf("SIGMA should be a %d x %d real symmetric matrix!\n", d, d);

	Matrix** eigenDecomposition = eigs(SIGMA, d, "lm");

	Matrix& B = *eigenDecomposition[0];
	Matrix& Lambda = *eigenDecomposition[1];

	/*disp(B);
	disp(Lambda);*/

	// Draw X with independent N(0, Lambda_ii) entries in row i.
	Matrix& X = *new DenseMatrix(d, cases);
	std::default_random_engine generator(time(NULL));
	std::normal_distribution<double> normal(0.0, 1.0);
	double sigma = 0;
	for (int i = 0; i < d; i++) {
		sigma = Lambda.getEntry(i, i);
		if (sigma == 0) {
			X.setRowMatrix(i, zeros(1, cases));
			continue;
		}
		if (sigma < 0) {
			errf("Covariance matrix should be positive semi-definite!\n");
			exit(1);
		}
		for (int n = 0; n < cases; n++) {
			X.setEntry(i, n, normal(generator) * pow(sigma, 0.5)); // std. dev. = sqrt(eigenvalue)
		}
	}

	// Y = (B*X + MU')' ~ N(MU, B * Lambda * B') = N(MU, SIGMA).
	Matrix& Y = plus(mtimes(B, X), repmat(MU.transpose(), 1, cases)).transpose();

	return Y;

}
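
A hypothetical usage sketch, reusing the DenseMatrix(double*, rows, cols) constructor seen in Example No. 2 (that constructor signature and a row-major layout are assumptions):

    // 1000 samples from a 2-D Gaussian with mean (1, 2) and a
    // symmetric positive-definite covariance.
    Matrix& MU = *new DenseMatrix(new double[2] {1.0, 2.0}, 1, 2);
    Matrix& SIGMA = *new DenseMatrix(new double[4] {2.0, 0.5,
                                                    0.5, 1.0}, 2, 2);
    Matrix& X = mvnrnd(MU, SIGMA, 1000); // 1000 x 2 sample matrix
    disp(X, 4);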
Example No. 6
 static void chsp(A0& c, size_t n,  size_t k)
 {
   typedef container::table<V> tab_t;
   // Chebyshev spectral differentiation matrix.
   // The k = 1 case is obtained from the k = 0 case with n one larger.
   --n;
   BOOST_AUTO_TPL(x, nt2::colvect(nt2::cospi(nt2::linspace(Zero<V>(), nt2::One<V>(), n+1))));
   tab_t d = nt2::ones(n+1,1,T());
   d(1) = Two<V>();
   d(n+1) = Two<V>();
   BOOST_AUTO_TPL(c1, mtimes(d, nt2::rowvect(nt2::rec(d))));
   BOOST_AUTO_TPL(c2, nt2::sx(nt2::tag::minus_(), x, nt2::rowvect(x)));
   c = c1/c2;
   //  Now fix diagonal and signs.
   c(1) = (nt2::Two<V>()*nt2::sqr(n)+nt2::One<V>())/nt2::Six<V>();
   for (size_t i=2; i <= n+1;  ++i)
   {
     if(nt2::is_even(i))
     {
       c(_, i) = -c(_, i);
       c(i, _) = -c(i, _);
     }
     if(i <= n)
     {
       c(i,i) = -x(i)/(Two<V>()*(nt2::oneminus(nt2::sqr(x(i)))));
     }
     else
     {
       c(i, i) = -c(1, 1);
     }
   }
   if (k == 1)
   {
     // Copy through a temporary: c aliases the sub-view on the right-hand side.
     tab_t c1_ = c(nt2::_(2, n+1), nt2::_(2, n+1));
     c = c1_;
   }
 }
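
This reproduces the classical Chebyshev spectral differentiation matrix (MATLAB's gallery('chebspec'); see Trefethen, Spectral Methods in MATLAB). With nodes x_j = cos(j*pi/N) and weights d_0 = d_N = 2, d_j = 1 otherwise, the entries are

    D_{00} = \frac{2N^2 + 1}{6}, \qquad D_{NN} = -\frac{2N^2 + 1}{6},
    \qquad D_{jj} = \frac{-x_j}{2(1 - x_j^2)},
    \qquad D_{ij} = \frac{d_i}{d_j} \cdot \frac{(-1)^{i+j}}{x_i - x_j} \quad (i \ne j);

the sign flips on even rows and columns in the loop above implement the (-1)^{i+j} factor.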
Example No. 7
File: svd.hpp Project: KWMalik/nt2
 // Moore-Penrose pseudo-inverse from the SVD a_ = u_ * diag(w_) * vt_.
 tab_t pinv(base_t epsi = -1) const
 {
   // Default tolerance: machine epsilon at the largest singular value w_(1).
   epsi = epsi < 0 ? nt2::eps(w_(1)) : epsi;
   // Invert the singular values above the threshold, zero out the rest.
   tab_t w1 = nt2::if_else(gt(w_, length(a_)*epsi), nt2::rec(w_), Zero<base_t>());
   return mtimes(trans(vt_), mtimes(from_diag(w1), trans(u_)));
 }
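
With A = U \Sigma V^{T}, the member computes the truncated pseudo-inverse

    A^{+} = V \Sigma^{+} U^{T}, \qquad
    (\Sigma^{+})_{ii} =
      \begin{cases} 1/\sigma_i & \text{if } \sigma_i > \mathrm{length}(A)\,\epsilon \\
                    0          & \text{otherwise,} \end{cases}

the usual rank-revealing regularization that keeps tiny singular values from blowing up the inverse.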
Example No. 8
File: svd.hpp Project: KWMalik/nt2
// Minimum-norm least-squares solve via the SVD: x = pinv(a_) * b.
template<class XPR> result_type solve(const XPR & b,
                                      base_t epsi = Mone<base_t>()) const
{
  epsi = epsi < 0 ? nt2::eps(w_(1)) : epsi;
  tab_t w1 = nt2::if_else(gt(w_, length(a_)*epsi), nt2::rec(w_), Zero<base_t>());
  return mtimes(trans(vt_), mtimes(from_diag(w1), mtimes(trans(u_), b)));
}
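
Evaluated right-to-left, this never forms the pseudo-inverse explicitly: it computes x = V \Sigma^{+} (U^{T} b), which minimizes \|Ax - b\|_2 and, among all minimizers, has the smallest \|x\|_2.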
Example No. 9
 // Repeatedly form a1' * a2 (e.g. as a timing kernel).
 void run() const
 {
   for(int i = 0; i < 100; ++i)
     a3 = mtimes(trans(a1), a2);
 }
Example No. 10
 // Repeatedly form a1 * a2 (e.g. as a timing kernel).
 void operator()()
 {
   for(int i = 0; i < 100; ++i)
     a3 = mtimes(a1, a2);
 }
Example No. 11
  Function qpsol_nlp(const std::string& name, const std::string& solver,
                     const std::map<std::string, M>& qp, const Dict& opts) {
    // We have: minimize    f(x) = 1/2 * x' H x + c'x
    //          subject to  lbx <= x <= ubx
    //                      lbg <= g(x) = A x + b <= ubg
    M x, p, f, g;
    for (auto&& i : qp) {
      if (i.first=="x") {
        x = i.second;
      } else if (i.first=="p") {
        p = i.second;
      } else if (i.first=="f") {
        f = i.second;
      } else if (i.first=="g") {
        g = i.second;
      } else {
        casadi_error("No such field: " + i.first);
      }
    }
    if (g.is_empty(true)) g = M(0, 1); // workaround: treat an absent g as zero constraints

    // Gradient of the objective: gf == H x + c
    M gf = M::gradient(f, x);

    // Identify the linear term in the objective
    M c = substitute(gf, x, M::zeros(x.sparsity()));

    // Identify the quadratic term in the objective
    M H = M::jacobian(gf, x, true);

    // Identify the constant term in the constraints
    M b = substitute(g, x, M::zeros(x.sparsity()));

    // Identify the linear term in the constraints
    M A = M::jacobian(g, x);

    // Create a function for calculating the required matrices and vectors
    Function prob(name + "_qp", {x, p}, {H, c, A, b});

    // Create the QP solver
    Function conic_f = conic(name + "_qpsol", solver,
                             {{"h", H.sparsity()}, {"a", A.sparsity()}}, opts);

    // Create an MXFunction with the right signature
    vector<MX> ret_in(NLPSOL_NUM_IN);
    ret_in[NLPSOL_X0] = MX::sym("x0", x.sparsity());
    ret_in[NLPSOL_P] = MX::sym("p", p.sparsity());
    ret_in[NLPSOL_LBX] = MX::sym("lbx", x.sparsity());
    ret_in[NLPSOL_UBX] = MX::sym("ubx", x.sparsity());
    ret_in[NLPSOL_LBG] = MX::sym("lbg", g.sparsity());
    ret_in[NLPSOL_UBG] = MX::sym("ubg", g.sparsity());
    ret_in[NLPSOL_LAM_X0] = MX::sym("lam_x0", x.sparsity());
    ret_in[NLPSOL_LAM_G0] = MX::sym("lam_g0", g.sparsity());
    vector<MX> ret_out(NLPSOL_NUM_OUT);

    // Get expressions for the QP matrices and vectors
    vector<MX> v(NL_NUM_IN);
    v[NL_X] = ret_in[NLPSOL_X0];
    v[NL_P] = ret_in[NLPSOL_P];
    v = prob(v);

    // Call the QP solver
    vector<MX> w(CONIC_NUM_IN);
    w[CONIC_H] = v.at(0);
    w[CONIC_G] = v.at(1);
    w[CONIC_A] = v.at(2);
    w[CONIC_LBX] = ret_in[NLPSOL_LBX];
    w[CONIC_UBX] = ret_in[NLPSOL_UBX];
    w[CONIC_LBA] = ret_in[NLPSOL_LBG] - v.at(3);
    w[CONIC_UBA] = ret_in[NLPSOL_UBG] - v.at(3);
    w[CONIC_X0] = ret_in[NLPSOL_X0];
    w[CONIC_LAM_X0] = ret_in[NLPSOL_LAM_X0];
    w[CONIC_LAM_A0] = ret_in[NLPSOL_LAM_G0];
    w = conic_f(w);

    // Get expressions for the solution
    ret_out[NLPSOL_X] = w[CONIC_X];
    ret_out[NLPSOL_F] = w[CONIC_COST];
    ret_out[NLPSOL_G] = mtimes(v.at(2), w[CONIC_X]) + v.at(3);
    ret_out[NLPSOL_LAM_X] = w[CONIC_LAM_X];
    ret_out[NLPSOL_LAM_G] = w[CONIC_LAM_A];
    ret_out[NLPSOL_LAM_P] = MX::nan(p.sparsity());
    return Function(name, ret_in, ret_out, nlpsol_in(), nlpsol_out(),
                    {{"default_in", nlpsol_default_in()}});
  }
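
The extraction above relies on f being quadratic and g affine in x, so every piece can be recovered by evaluation and differentiation:

    c = \nabla_x f\big|_{x=0}, \qquad H = \frac{\partial (\nabla_x f)}{\partial x}, \qquad
    b = g\big|_{x=0}, \qquad A = \frac{\partial g}{\partial x},

giving f(x) = \tfrac{1}{2} x^{T} H x + c^{T} x + f(0) and g(x) = Ax + b. The constant f(0) does not affect the minimizer, and b is folded into the bounds passed as CONIC_LBA and CONIC_UBA.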
Example No. 12
Matrix& LASSO::train(Matrix& X, Matrix& Y, Options& options) {

	int p = size(X, 2);
	int ny = size(Y, 2);
	double epsilon = options.epsilon;
	int maxIter = options.maxIter;
	double lambda = options.lambda;
	bool calc_OV = options.calc_OV;
	bool verbose = options.verbose;

	/*XNX = [X, -X];
	  H_G = XNX' * XNX;
	  D = repmat(diag(H_G), [1, n_y]);
	  XNXTY = XNX' * Y;
	  A = (X' * X + lambda * eye(p)) \ (X' * Y);*/

	Matrix& XNX = horzcat(2, &X, &uminus(X));
	Matrix& H_G = XNX.transpose().mtimes(XNX);
	double* Q = new double[size(H_G, 1)];
	for (int i = 0; i < size(H_G, 1); i++) {
		Q[i] = H_G.getEntry(i, i); // Q holds diag(H_G)
	}
	Matrix& XNXTY = XNX.transpose().mtimes(Y);
	Matrix& A = mldivide(
			plus(X.transpose().mtimes(X), times(lambda, eye(p))),
			X.transpose().mtimes(Y)
	);

	/*AA = [subplus(A); subplus(-A)];
	  C = -XNXTY + lambda;
	  Grad = C + H_G * AA;
	  tol = epsilon * norm(Grad);
	  PGrad = zeros(size(Grad));*/

	Matrix& AA = vertcat(2, &subplus(A), &subplus(uminus(A)));
	Matrix& C = plus(uminus(XNXTY), lambda);
	Matrix& Grad = plus(C, mtimes(H_G, AA));
	double tol = epsilon * norm(Grad);
	Matrix& PGrad = zeros(size(Grad));

	std::list<double> J;
	double fval = 0;
	// J(1) = sum(sum((Y - X * A).^2)) / 2 + lambda * sum(sum(abs(A)));
	if (calc_OV) {
		fval = sum(sum(pow(minus(Y, mtimes(X, A)), 2))) / 2 +
				lambda * sum(sum(abs(A)));
		J.push_back(fval);
	}

	Matrix& I_k = Grad.copy();
	double d = 0;
	int k = 0;

	DenseVector& SFPlusCi = *new DenseVector(AA.getColumnDimension());
	Matrix& S = H_G;
	Vector** SRows = null;
	if (typeid(H_G) == typeid(DenseMatrix))
		SRows = denseMatrix2DenseRowVectors(S);
	else
		SRows = sparseMatrix2SparseRowVectors(S);

	Vector** CRows = null;
	if (typeid(C) == typeid(DenseMatrix))
		CRows = denseMatrix2DenseRowVectors(C);
	else
		CRows = sparseMatrix2SparseRowVectors(C);

	double** FData = ((DenseMatrix&) AA).getData();
	double* FRow = null;
	double* pr = null;
	int K = 2 * p;

	while (true) {

		/*I_k = Grad < 0 | AA > 0;
		  I_k_com = not(I_k);
		  PGrad(I_k) = Grad(I_k);
		  PGrad(I_k_com) = 0;*/

		_or(I_k, lt(Grad, 0), gt(AA, 0));
		Matrix& I_k_com = _not(I_k);
		assign(PGrad, Grad);
		logicalIndexingAssignment(PGrad, I_k_com, 0);

		d = norm(PGrad, inf);
		if (d < tol) {
			if (verbose)
				println("Converge successfully!");
			break;
		}

		/*for i = 1:2*p
		      AA(i, :) = max(AA(i, :) - (C(i, :) + H_G(i, :) * AA) ./ (D(i, :)), 0);
		  end
		  A = AA(1:p,:) - AA(p+1:end,:);*/

		for (int i = 0; i < K; i++) {
			// SFPlusCi = SRows[i].operate(AA);
			operate(SFPlusCi, *SRows[i], AA);
			plusAssign(SFPlusCi, *CRows[i]);
			timesAssign(SFPlusCi, 1 / Q[i]);
			pr = SFPlusCi.getPr();
			// F(i, :) = max(F(i, :) - (S(i, :) * F + C(i, :)) / Q[i], 0)
			// i.e. F(i, :) = max(F(i, :) - SFPlusCi, 0)
			FRow = FData[i];
			for (int j = 0; j < AA.getColumnDimension(); j++) {
				FRow[j] = max(FRow[j] - pr[j], 0);
			}
		}

		// Grad = plus(C, mtimes(H_G, AA));
		plus(Grad, C, mtimes(H_G, AA));

		k = k + 1;
		if (k > maxIter) {
			if (verbose)
				println("Maximal iterations");
			break;
		}

		if (calc_OV) {
			fval = sum(sum(pow(minus(Y, mtimes(XNX, AA)), 2))) / 2 +
					lambda * sum(sum(abs(AA)));
			J.push_back(fval);
		}

		if (k % 10 == 0 && verbose) {
			if (calc_OV)
				fprintf("Iter %d - ||PGrad||: %f, ofv: %f\n", k, d, J.back());
			else
				fprintf("Iter %d - ||PGrad||: %f\n", k, d);

		}

	}

	// A = AA(1:p, :) - AA(p+1:2p, :), recovering the signed coefficients.
	Matrix& res = minus(
			AA.getSubMatrix(0, p - 1, 0, ny - 1),
			AA.getSubMatrix(p, 2 * p - 1, 0, ny - 1)
	);

	return res;

}
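
The whole routine rests on the standard non-negative reformulation of the LASSO: writing A = A+ - A- with A+, A- >= 0 and XNX = [X, -X] turns

    \min_{A} \; \tfrac{1}{2}\|Y - XA\|_F^2 + \lambda\|A\|_1

into a smooth quadratic program over AA = [A+; A-] >= 0 with

    \mathrm{Grad} = C + H_G\,AA, \qquad H_G = XNX^{T} XNX, \qquad C = -XNX^{T} Y + \lambda.

The inner loop is cyclic coordinate descent with the closed-form update AA(i, :) <- max(AA(i, :) - Grad(i, :) / H_G(i, i), 0), and convergence is declared once the projected gradient norm d drops below tol.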