VectorXd HarmonicGradient(const vector<spring> &springlist,
                  const VectorXd &XY,
                  const double g11,
                  const double g12,
                  const double g22)
{
  const int num = XY.size()/2;
  VectorXd gradE = VectorXd::Zero(XY.size());
  VectorXd X = XY.head(num);
  VectorXd Y = XY.tail(num);

  for(size_t i=0; i<springlist.size(); i++){
    int one = springlist[i].one;
    int two = springlist[i].two;
    double dx=X(one)-(X(two)+springlist[i].wlr);
    double dy=Y(one)-(Y(two)+springlist[i].wud);
    double k=springlist[i].k;
    double L=springlist[i].rlen;
    double dist=sqrt( g11*dx*dx+ 2*g12*dx*dy+ g22*dy*dy );
   
    double gradx= k*(dist-L)*(g11*dx+g12*dy)/dist;
    double grady= k*(dist-L)*(g22*dy+g12*dx)/dist;

    gradE(one) += gradx;
    gradE(two) -= gradx;
    gradE(one+num) += grady;
    gradE(two+num) -= grady;
  }
  return gradE;
}
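A minimal, hypothetical driver for the function above (not from the original source; the `spring` layout is reconstructed from the fields HarmonicGradient reads, so the real struct may differ), showing how the coordinates are packed as XY = [X; Y] with head/tail:

#include <vector>
#include <Eigen/Dense>
using Eigen::VectorXd;

struct spring {            // fields and types inferred from the snippet above; the real struct may differ
    int one, two;          // endpoint node indices
    double wlr, wud;       // offsets added to X(two)/Y(two) in the gradient above (e.g. periodic wrap)
    double k, rlen;        // stiffness and rest length
};

int main() {
    std::vector<spring> springs{ {0, 1, 0.0, 0.0, 1.0, 1.0} };

    VectorXd XY(4);                 // packed as [x0, x1, y0, y1]
    XY << 0.0, 2.0, 0.0, 0.0;       // a single spring stretched along x

    // Euclidean metric: g11 = g22 = 1, g12 = 0
    VectorXd grad = HarmonicGradient(springs, XY, 1.0, 0.0, 1.0);
    // grad ~ [-1, 1, 0, 0]: the stretched spring pulls its endpoints together
    return 0;
}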
Example #2
 double Model::score(const VectorXd& psi) const
 {
   ROS_ASSERT(psi.rows() == eweights_.rows() + nweights_.rows());
   double val = psi.head(eweights_.rows()).dot(eweights_);
   val += psi.tail(nweights_.rows()).dot(nweights_);
   return val;
 }
Example #3
void Optimizer::augment_sparse_linear_system(SparseSystem& W,
    const Properties& prop) {
  if (prop.method == DOG_LEG) {
    // We're using the incremental version of Powell's Dog-Leg, so we need
    // to form the updated gradient.
    const VectorXd& f_new = W.rhs();

    // Augment the running count for the sum-of-squared errors at the current
    // linearization point.
    current_SSE_at_linpoint += f_new.squaredNorm();

    // Allocate the new gradient vector
    VectorXd g_new(W.num_cols());

    // Compute W^T \cdot f_new
    VectorXd increment = mul_SparseMatrixTrans_Vector(W, f_new);

    // Set g_new = (g_old 0)^T + W^T f_new.
    g_new.head(gradient.size()) = gradient + increment.head(gradient.size());
    g_new.tail(W.num_cols() - gradient.size()) = increment.tail(
        W.num_cols() - gradient.size());

    // Cache the new gradient vector
    gradient = g_new;
  }

  // Apply Givens to QR factorize the newly augmented sparse system.
  for (int i = 0; i < W.num_rows(); i++) {
    SparseVector new_row = W.get_row(i);
    function_system._R.add_row_givens(new_row, W.rhs()(i));
  }
}
bool inBounds(const VectorXd& state) {
	Vector2d pos = state.head(2);
	Vector2d vel = state.tail(2);
	return inside_rectangular_obs(pos, setup_values.x_min, setup_values.x_max,
			setup_values.x_min, setup_values.x_max) &&
	       inside_rectangular_obs(vel, setup_values.v_min, setup_values.v_max,
			setup_values.v_min, setup_values.v_max);
}
Example #5
void linear::poisson(const VectorXd& lapl,  VectorXd& f) {

  if(stiffp1.size()==0)  fill_stiff();
  if(mass.size()==0)     fill_mass();

  VectorXd massl = mass * lapl;

  int N = massl.size();

  VectorXd massll = massl.tail(N-1);

  VectorXd ff = solver_stiffp1.solve(massll);

  f(0) = 0;
  f.tail(N-1) = ff;

  // zero mean
  f = f.array() - f.mean();
}
// [[Rcpp::export()]]
VectorXd sample_delta_c_Eigen(
    VectorXd delta,
    VectorXd tauh,
    Map<MatrixXd> Lambda_prec,
    double delta_1_shape,
    double delta_1_rate,
    double delta_2_shape,
    double delta_2_rate,
    Map<MatrixXd> randg_draws,  // all done with rate = 1;
    Map<MatrixXd> Lambda2
) {
  int times = randg_draws.rows();
  int k = tauh.size();

  MatrixXd scores_mat = Lambda_prec.cwiseProduct(Lambda2);
  VectorXd scores = scores_mat.colwise().sum();

  double rate,delta_old;
  for(int i = 0; i < times; i++){
    delta_old = delta(0);
    rate = delta_1_rate + 0.5 * (1/delta(0)) * tauh.dot(scores);
    delta(0) = randg_draws(i,0) / rate;
    // tauh = cumprod(delta);
    tauh *= delta(0)/delta_old;   // replaces re-calculating cumprod

    for(int h = 1; h < k-1; h++) {
      delta_old = delta(h);
      rate = delta_2_rate + 0.5*(1/delta(h))*tauh.tail(k-h).dot(scores.tail(k-h));
      delta(h) = randg_draws(i,h) / rate;
      // tauh = cumprod(delta);
      tauh.tail(k-h) *= delta(h)/delta_old; // replaces re-calculating cumprod
      // Rcout << (tauh - cumprod(delta)).sum() << std::endl;
    }
  }
  return(delta);
}
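The in-place tauh updates above avoid recalculating cumprod(delta) on every draw; a small standalone sketch (not part of the original file) of the cumulative product they keep consistent:

// Since tauh(j) = delta(0)*delta(1)*...*delta(j), changing delta(h) from
// delta_old to delta_new multiplies every tauh(j) with j >= h by the same
// factor, so  tauh.tail(k-h) *= delta(h)/delta_old  reproduces cumprod(delta)
// without recomputing it from scratch.
#include <Eigen/Dense>
using Eigen::VectorXd;

VectorXd cumprod(const VectorXd& delta) {
    VectorXd tauh(delta.size());
    double acc = 1.0;
    for (int j = 0; j < delta.size(); ++j) {
        acc *= delta(j);
        tauh(j) = acc;
    }
    return tauh;
}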
/* Implementation of byfree */
void byfree(VectorXd target_r, VectorXd target_i, VectorXd zetaf_r, VectorXd zetaf_i, VectorXd gammaf, VectorXd& pot_r, VectorXd& pot_i){

    /*
    timeval time1, time2;
    double ttl1, ttl2, diffl12;
    gettimeofday(&time1, NULL); ttl1 = (time1.tv_sec * 1000000 + time1.tv_usec);
    */

    int nt = target_r.size(), ns = zetaf_i.size();

    MatrixXd dx(nt,ns), dy(nt,ns), dz(nt,ns), F_r(nt,ns-1), F_i(nt,ns-1);
    VectorXd distr, distr_r(ns-1), distr_i(ns-1);

    distr = -(gammaf.tail(ns-1) - gammaf.head(ns-1));
    for (int k=0; k<ns-1; k++) {
        double dxnorm = (zetaf_r(k+1) - zetaf_r(k))*(zetaf_r(k+1) - zetaf_r(k)) + (zetaf_i(k+1) - zetaf_i(k))*(zetaf_i(k+1) - zetaf_i(k));
        distr_r(k) = (zetaf_r(k+1) - zetaf_r(k))*distr(k)/dxnorm;
        distr_i(k) = -(zetaf_i(k+1) - zetaf_i(k))*distr(k)/dxnorm;
    }

    for (int j=0; j<nt; j++) {
        for (int k=0; k<ns; k++) {
            dx(j,k) = target_r(j) - zetaf_r(k);
            dy(j,k) = target_i(j) - zetaf_i(k);
            dz(j,k) = dx(j,k)*dx(j,k) + dy(j,k)*dy(j,k);
        }
    }

    for (int j=0; j<nt; j++) {
        for (int k=0; k<ns-1; k++) {
            F_r(j,k) = log(dz(j,k)/dz(j,k+1))/2;
            F_i(j,k) = atan2(-dx(j,k)*dy(j,k+1) + dy(j,k)*dx(j,k+1), dx(j,k)*dx(j,k+1) + dy(j,k)*dy(j,k+1));
        }
    }

    pot_r = F_r*distr_r - F_i*distr_i;
    pot_i = F_i*distr_r + F_r*distr_i;

    /*
    gettimeofday(&time2, NULL); ttl2 = (time2.tv_sec * 1000000 + time2.tv_usec);
    diffl12 = (double)(ttl2 - ttl1)/1000000;
    printf("Elapsed %f seconds!\n", diffl12);
    */

    return ;
}
Example #8
        void addConstraint(const measurement& _meas)
        {
            t_managing_ = clock();

            assert(_meas.jacobians.size() == _meas.nodes_idx.size());
            assert(_meas.error.size() == _meas.dim);

            n_measurements++;
            measurements_.push_back(_meas);
            measurements_.back().location = A_.rows();

            // Resize problem
            A_.conservativeResize(A_.rows() + _meas.dim, A_.cols());
            b_.conservativeResize(b_.size() + _meas.dim);
            A_nodes_.conservativeResize(n_measurements,n_nodes_);

            // ADD MEASUREMENTS
            first_ordered_node_ = n_nodes_;
            for (unsigned int j = 0; j < _meas.nodes_idx.size(); j++)
            {
                assert(acc_node_permutation_.indices()(_meas.nodes_idx.at(j)) == nodes_.at(_meas.nodes_idx.at(j)).order);

                int ordered_node = nodes_.at(_meas.nodes_idx.at(j)).order;//acc_permutation_nodes_.indices()(_nodes_idx.at(j));

                addSparseBlock(_meas.jacobians.at(j), A_, A_.rows()-_meas.dim, nodes_.at(_meas.nodes_idx.at(j)).location);

                A_nodes_.coeffRef(A_nodes_.rows()-1, ordered_node) = 1;


                assert(_meas.jacobians.at(j).cols() == nodes_.at(_meas.nodes_idx.at(j)).dim);
                assert(_meas.jacobians.at(j).rows() == _meas.dim);

                // store minimum ordered node
                if (first_ordered_node_ > ordered_node)
                    first_ordered_node_ = ordered_node;
            }

            // error
            b_.tail(_meas.dim) = _meas.error;

            time_managing_ += ((double) clock() - t_managing_) / CLOCKS_PER_SEC;
        }
void MotionModel::predict_state_and_covariance(VectorXd x_k_k, MatrixXd p_k_k, string type, double std_a,
	double std_alpha, VectorXd *X_km1_k, MatrixXd *P_km1_k)
{
	size_t x_k_size = x_k_k.size(), p_k_size = p_k_k.rows();
	double delta_t = 1, linear_acceleration_noise_covariance, angular_acceleration_noise_covariance;
	VectorXd Xv_km1_k(18), Pn_diag(6);
	MatrixXd F = MatrixXd::Identity(18,18), // The camera state size is assumed to be 18
		Pn, Q, G;

	// Camera motion prediction
	fv(x_k_k.head(18), delta_t, type, std_a, std_alpha, &Xv_km1_k);

	// Feature prediction
	(*X_km1_k).resize(x_k_size);
	// Add the feature points to the state vector after cam motion prediction
	(*X_km1_k) << Xv_km1_k, x_k_k.tail(x_k_size - 18);

	// State transition equation derivatives
	dfv_by_dxv(x_k_k.head(18), delta_t, type, &F);

	// State noise
	linear_acceleration_noise_covariance = (std_a * delta_t) * (std_a * delta_t);
	angular_acceleration_noise_covariance = (std_alpha * delta_t) * (std_alpha * delta_t);
	Pn_diag << linear_acceleration_noise_covariance, linear_acceleration_noise_covariance, linear_acceleration_noise_covariance, 
		angular_acceleration_noise_covariance, angular_acceleration_noise_covariance, angular_acceleration_noise_covariance;
	Pn = Pn_diag.asDiagonal();
	func_Q(x_k_k.head(18), Pn, delta_t, type, &G, &Q);

	// P_km1_k resize and update.
	// By symmetry of the covariance matrix, only Pxx (the camera-state covariance)
	// and Pxy (the camera/feature cross covariance) need to be computed explicitly.
	(*P_km1_k).resize(p_k_size,p_k_size);
	(*P_km1_k).block(0,0,18,18) = F * p_k_k.block(0,0,18,18) * F.transpose() + Q;
	(*P_km1_k).block(0,18,18,p_k_size - 18) = F * p_k_k.block(0,18,18,p_k_size - 18);
	(*P_km1_k).block(18,0,p_k_size - 18,18) = p_k_k.block(18,0,p_k_size - 18,18) * F.transpose();
	(*P_km1_k).block(18,18,p_k_size - 18,p_k_size - 18) = p_k_k.block(18,18,p_k_size - 18,p_k_size - 18);

}
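In block form, with F the 18x18 camera-state Jacobian from dfv_by_dxv and Q the process noise from func_Q, the covariance update implemented above is

	P_km1_k = | F*Pxx*F^T + Q    F*Pxy |
	          | Pxy^T*F^T        Pyy   |

where Pxx is the camera block of p_k_k, Pxy the camera/feature cross block, and Pyy the feature block (copied unchanged); only the first row of blocks needs to be computed because the matrix is symmetric.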
Example #10
void MyWindow::setState(const VectorXd& newState) {
    mDofVels = newState.tail(mDofVels.size());
    mDofs = newState.head(mDofs.size());
}
Example #11
        bool solve(const int mode)
        {
            bool batch = (mode !=2 || first_ordered_node_ == 0);
            bool order = (mode !=0 && n_nodes_ > 1);

            // BATCH
            if (batch)
            {
                // REORDER
                if (order)
                    ordering(0);

                //print_problem();

                // SOLVE
                t_solving_ = clock();
                A_.makeCompressed();
                solver_.compute(A_);
                if (solver_.info() != Success)
                {
                    std::cout << "decomposition failed" << std::endl;
                    return false;
                }
                x_incr_ = solver_.solve(b_);
                R_ = solver_.matrixR();
                //std::cout << "R" << std::endl << MatrixXd::Identity(R_.cols(), R_.cols()) * R_ << std::endl;
                time_solving_ += ((double) clock() - t_solving_) / CLOCKS_PER_SEC;
            }
            // INCREMENTAL
            else
            {
                // REORDER SUBPROBLEM
                ordering(first_ordered_node_);
                //print_problem();

                // SOLVE ORDERED SUBPROBLEM
                t_solving_= clock();
                A_nodes_.makeCompressed();
                A_.makeCompressed();

                // finding measurements block
                SparseMatrix<int> measurements_to_initial = A_nodes_.col(first_ordered_node_);
        //        std::cout << "measurements_to_initial " << measurements_to_initial << std::endl;
        //        std::cout << "measurements_to_initial.innerIndexPtr()[measurements_to_initial.outerIndexPtr()[0]] " << measurements_to_initial.innerIndexPtr()[measurements_to_initial.outerIndexPtr()[0]] << std::endl;
                int first_ordered_measurement = measurements_to_initial.innerIndexPtr()[measurements_to_initial.outerIndexPtr()[0]];
                int ordered_measurements = A_.rows() - measurements_.at(first_ordered_measurement).location;
                int ordered_variables = A_.cols() - nodes_.at(first_ordered_node_).location;
                int unordered_variables = nodes_.at(first_ordered_node_).location;

                SparseMatrix<double, ColMajor> A_partial = A_.bottomRightCorner(ordered_measurements, ordered_variables);
                solver_.compute(A_partial);
                if (solver_.info() != Success)
                {
                    std::cout << "decomposition failed" << std::endl;
                    return false;
                }
                //std::cout << "R new" << std::endl << MatrixXd::Identity(A_partial.cols(), A_partial.cols()) * solver_.matrixR() << std::endl;
                x_incr_.tail(ordered_variables) = solver_.solve(b_.tail(ordered_measurements));

                // store new part of R
                eraseSparseBlock(R_, unordered_variables, unordered_variables, ordered_variables, ordered_variables);
                //std::cout << "R" << std::endl << MatrixXd::Identity(R_.rows(), R_.rows()) * R_ << std::endl;
                addSparseBlock(solver_.matrixR(), R_, unordered_variables, unordered_variables);
                //std::cout << "R" << std::endl << MatrixXd::Identity(R_.rows(), R_.rows()) * R_ << std::endl;
                R_.makeCompressed();

                // solving not ordered subproblem
                if (unordered_variables > 0)
                {
                    //std::cout << "--------------------- solving unordered part" << std::endl;
                    SparseMatrix<double, ColMajor> R1 = R_.topLeftCorner(unordered_variables, unordered_variables);
                    //std::cout << "R1" << std::endl << MatrixXd::Identity(R1.rows(), R1.rows()) * R1 << std::endl;
                    SparseMatrix<double, ColMajor> R2 = R_.topRightCorner(unordered_variables, ordered_variables);
                    //std::cout << "R2" << std::endl << MatrixXd::Identity(R2.rows(), R2.rows()) * R2 << std::endl;
                    solver_.compute(R1);
                    if (solver_.info() != Success)
                    {
                        std::cout << "decomposition failed" << std::endl;
                        return false;
                    }
                    x_incr_.head(unordered_variables) = solver_.solve(b_.head(unordered_variables) - R2 * x_incr_.tail(ordered_variables));
                }

            }
            // UNDO ORDERING FOR RESULT
            PermutationMatrix<Dynamic, Dynamic, int> acc_permutation(A_.cols());
            nodePermutation2VariablesPermutation(acc_node_permutation_, acc_permutation); // TODO via pointers
            x_incr_ = acc_permutation.inverse() * x_incr_;

            time_solving_ += ((double) clock() - t_solving_) / CLOCKS_PER_SEC;

            return true;
        }
Example #12
MatrixXd SMRFilter::expandingTPS(MatrixXd cx, MatrixXd cy, MatrixXd cz)
{
    log()->get(LogLevel::Info) << "TPS: Reticulating splines...\n";

    MatrixXd S = cz;

    int num_nan_detect(0);
    int num_nan_replace(0);

    for (auto outer_col = 0; outer_col < m_numCols; ++outer_col)
    {
        for (auto outer_row = 0; outer_row < m_numRows; ++outer_row)
        {
            if (!std::isnan(S(outer_row, outer_col)))
                continue;

            num_nan_detect++;

            // Further optimizations are achieved by estimating only the
            // interpolated surface within a local neighbourhood (e.g. a 7 x 7
            // neighbourhood is used in our case) of the cell being filtered.
            int radius = 3;
            bool solution = false;

            while (!solution)
            {
                // std::cerr << radius;
                int cs = Utils::clamp(outer_col-radius, 0, m_numCols-1);
                int ce = Utils::clamp(outer_col+radius, 0, m_numCols-1);
                int col_size = ce - cs + 1;
                int rs = Utils::clamp(outer_row-radius, 0, m_numRows-1);
                int re = Utils::clamp(outer_row+radius, 0, m_numRows-1);
                int row_size = re - rs + 1;

                MatrixXd Xn = cx.block(rs, cs, row_size, col_size);
                MatrixXd Yn = cy.block(rs, cs, row_size, col_size);
                MatrixXd Hn = cz.block(rs, cs, row_size, col_size);

                int nsize = Hn.size();
                VectorXd T = VectorXd::Zero(nsize);
                MatrixXd P = MatrixXd::Zero(nsize, 3);
                MatrixXd K = MatrixXd::Zero(nsize, nsize);

                int numK(0);
                for (auto id = 0; id < Hn.size(); ++id)
                {
                    double xj = Xn(id);
                    double yj = Yn(id);
                    double zj = Hn(id);
                    if (std::isnan(zj))
                        continue;
                    numK++;
                    T(id) = zj;
                    P.row(id) << 1, xj, yj;
                    for (auto id2 = 0; id2 < Hn.size(); ++id2)
                    {
                        if (id == id2)
                            continue;
                        double xk = Xn(id2);
                        double yk = Yn(id2);
                        double rsqr = (xj - xk) * (xj - xk) + (yj - yk) * (yj - yk);
                        if (rsqr == 0.0)
                            continue;
                        K(id, id2) = rsqr * std::log10(std::sqrt(rsqr));
                    }
                }

                // if (numK < 20)
                //     continue;

                MatrixXd A = MatrixXd::Zero(nsize+3, nsize+3);
                A.block(0,0,nsize,nsize) = K;
                A.block(0,nsize,nsize,3) = P;
                A.block(nsize,0,3,nsize) = P.transpose();

                VectorXd b = VectorXd::Zero(nsize+3);
                b.head(nsize) = T;

                VectorXd x = A.fullPivHouseholderQr().solve(b);

                Vector3d a = x.tail(3);
                VectorXd w = x.head(nsize);

                double sum = 0.0;
                double xi2 = cx(outer_row, outer_col);
                double yi2 = cy(outer_row, outer_col);
                for (auto j = 0; j < nsize; ++j)
                {
                    double xj = Xn(j);
                    double yj = Yn(j);
                    double rsqr = (xj - xi2) * (xj - xi2) + (yj - yi2) * (yj - yi2);
                    if (rsqr == 0.0)
                        continue;
                    sum += w(j) * rsqr * std::log10(std::sqrt(rsqr));
                }

                double val = a(0) + a(1)*xi2 + a(2)*yi2 + sum;
                solution = !std::isnan(val);

                if (!solution)
                {
                    std::cerr << "..." << radius << std::endl;;
                    ++radius;
                    continue;
                }

                S(outer_row, outer_col) = val;
                num_nan_replace++;

                // std::cerr << std::endl;

                // std::cerr << std::fixed;
                // std::cerr << std::setprecision(3)
                //           << std::left
                //           << "S(" << outer_row << "," << outer_col << "): "
                //           << std::setw(10)
                //           << S(outer_row, outer_col)
                //           // << std::setw(3)
                //           // << "\tz: "
                //           // << std::setw(10)
                //           // << zi
                //           // << std::setw(7)
                //           // << "\tzdiff: "
                //           // << std::setw(5)
                //           // << zi - S(outer_row, outer_col)
                //           // << std::setw(7)
                //           // << "\txdiff: "
                //           // << std::setw(5)
                //           // << xi2 - xi
                //           // << std::setw(7)
                //           // << "\tydiff: "
                //           // << std::setw(5)
                //           // << yi2 - yi
                //           << std::setw(7)
                //           << "\t# pts: "
                //           << std::setw(3)
                //           << nsize
                //           << std::setw(5)
                //           << "\tsum: "
                //           << std::setw(10)
                //           << sum
                //           << std::setw(9)
                //           << "\tw.sum(): "
                //           << std::setw(5)
                //           << w.sum()
                //           << std::setw(6)
                //           << "\txsum: "
                //           << std::setw(5)
                //           << w.dot(P.col(1))
                //           << std::setw(6)
                //           << "\tysum: "
                //           << std::setw(5)
                //           << w.dot(P.col(2))
                //           << std::setw(3)
                //           << "\ta: "
                //           << std::setw(8)
                //           << a.transpose()
                //           << std::endl;
            }
        }
    }

    double frac = static_cast<double>(num_nan_replace);
    frac /= static_cast<double>(num_nan_detect);
    log()->get(LogLevel::Info) << "TPS: Filled " << num_nan_replace << " of "
                               << num_nan_detect << " holes ("
                               << frac * 100.0 << "%)\n";

    return S;
}
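For reference, the value assembled from the solved weights at the bottom of the radius loop is the standard thin plate spline interpolant, written with the same base-10 logarithm as the code:

    S(x, y) = a(0) + a(1)*x + a(2)*y + sum_j w(j) * r_j^2 * log10(r_j),   with r_j^2 = (x - x_j)^2 + (y - y_j)^2,

where the affine coefficients a come from x.tail(3) and the weights w from x.head(nsize).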
Example #13
double drwnLPSolver::solve()
{
    DRWN_FCN_TIC;
    const int n = _A.cols();
    const int m = _A.rows();

    // find feasible starting point
    if (_x.rows() == 0) {
        _x = VectorXd::Zero(n);
        for (int i = 0; i < n; i++) {
            if (isUnbounded(i)) continue;

            if (_ub[i] == DRWN_DBL_MAX) {
                _x[i] = _lb[i] + 1.0;
            } else if (_lb[i] == -DRWN_DBL_MAX) {
                _x[i] = _ub[i] - 1.0;
            } else {
                _x[i] = 0.5 * (_lb[i] + _ub[i]);
            }
        }
    }

    // initialize kkt system variables
    MatrixXd F = MatrixXd::Zero(n + m, n + m);
    VectorXd g = VectorXd::Zero(n + m);

    F.block(n, 0, m, n) = _A;
    F.block(0, n, n, m) = _A.transpose();

    // initialize dual variables (if needed)
    VectorXd nu = VectorXd::Zero(m);

    // iterate on interior point method
    double t = t0;
    while (1) {
        // determine feasibility
        const bool bFeasible = ((_b - _A * _x).squaredNorm() < eps);
        if (!bFeasible) {
            DRWN_LOG_VERBOSE("...finding feasible point");
        }

        // centering step and update
        for (unsigned iter = 0; iter < maxiters; iter++) {

            //! \todo solve with blockwise elimination
            // construct KKT system, Fx = g
            // | H A^T | | dx | = |  - g   |
            // | A  0  | | w  |   | b - Ax |

            for (int i = 0; i < n; i++) {
                F(i, i) = 0.0;
                if (_ub[i] != DRWN_DBL_MAX) {
                    F(i, i) += 1.0 / ((_ub[i] - _x[i]) * (_ub[i] - _x[i]));
                }
                if (_lb[i] != -DRWN_DBL_MAX) {
                    F(i, i) += 1.0 / ((_x[i] - _lb[i]) * (_x[i] - _lb[i]));
                }
            }

            for (int i = 0; i < n; i++) {
                g[i] = - t * _c[i];
                if (_ub[i] != DRWN_DBL_MAX) {
                    g[i] += 1.0 / (_x[i] - _ub[i]);
                }
                if (_lb[i] != -DRWN_DBL_MAX) {
                    g[i] += 1.0 / (_x[i] - _lb[i]);
                }
            }
            g.tail(m) = _b - _A * _x;

            // check terminating condition
            const double r_primal = g.tail(m).squaredNorm();
            const double r_dual = (_A.transpose() * nu - g.head(n)).squaredNorm();
            //if (!bFeasible && (r_primal + r_dual < drwnLPSolver::eps)) break;
            if (!bFeasible && (r_primal < drwnLPSolver::eps)) break;

            // solve KKT system
            const VectorXd dxnu = F.fullPivLu().solve(g);

            const double lambda_sqr = g.head(n).dot(dxnu.head(n));
            if (bFeasible && (0.5 * lambda_sqr < 1.0e-6)) break;

            if (bFeasible) {
                // feasible line search
                const double f_prev = t * _c.dot(_x) + barrierFunction(_x);
                // restrict g to its first n entries (the gradient part) so the sizes match dxnu.head(n)
                const double delta_f = alpha * g.head(n).dot(dxnu.head(n));

                double step = 1.0;
                while (1) {
                    const VectorXd nx = _x + step * dxnu.head(n);
                    if (isWithinBounds(nx)) {
                        const double f = t * _c.dot(nx) + barrierFunction(nx);
                        if (f - f_prev < step * delta_f) {
                            _x = nx;
                            break;
                        }
                    }
                    step *= beta;
                }
            } else {
                // infeasible start line search
                double step = 1.0;
                while (1) {
                    const VectorXd nx = _x + step * dxnu.head(n);
                    if (isWithinBounds(nx)) {
                        const VectorXd nnu = (1.0 - step) * nu + step * dxnu.tail(m);
                        const double r = (_A.transpose() * nnu - g.head(n)).squaredNorm() +
                            (_b - _A * nx).squaredNorm();
                        if (r <= (1.0 - alpha * step) * (r_primal + r_dual)) {
                            _x = nx;
                            nu = nnu;
                            break;
                        }
                    }
                    step *= beta;
                }
            }
        }

        // check if feasible point was found
        if (!bFeasible && ((_b - _A * _x).squaredNorm() > eps)) {
            DRWN_LOG_WARNING("...could not find a feasible point (residual norm is " <<
                (_b - _A * _x).norm() << ")");
            DRWN_FCN_TOC;
            return DRWN_DBL_MAX;
        }

        DRWN_LOG_VERBOSE("...objective is " << _c.dot(_x));

        // check stopping criteria
        if (m < eps * t) break;

        // update barrier function multiplier
        t *= mu;
    }

    // compute true objective and return
    DRWN_FCN_TOC;
    return _c.dot(_x);
}
Example #14
double drwnSparseLPSolver::solve()
{
    DRWN_FCN_TIC;
    const int n = _A.cols();
    const int m = _A.rows();

    // find feasible starting point
    if (_x.rows() == 0) {
        _x = VectorXd::Zero(n);
        for (int i = 0; i < n; i++) {
            if (isUnbounded(i)) continue;

            if (_ub[i] == DRWN_DBL_MAX) {
                _x[i] = _lb[i] + 1.0;
            } else if (_lb[i] == -DRWN_DBL_MAX) {
                _x[i] = _ub[i] - 1.0;
            } else {
                _x[i] = 0.5 * (_lb[i] + _ub[i]);
            }
        }
    }

    // initialize kkt system variables
    SparseMatrix<double> F(n + m, n + m);
    VectorXd g = VectorXd::Zero(n + m);

    vector<Eigen::Triplet<double> > entries;
    entries.reserve(n + 2 * _A.nonZeros());
    for (int i = 0; i < n; i++) {
        entries.push_back(Eigen::Triplet<double>(i, i, 1.0));
    }

    for (int k = 0; k < _A.outerSize(); k++) {
        for (SparseMatrix<double>::InnerIterator it(_A, k); it; ++it) {
            entries.push_back(Eigen::Triplet<double>(n + it.row(), it.col(), it.value()));
            entries.push_back(Eigen::Triplet<double>(it.col(), n + it.row(), it.value()));
        }
    }

    F.setFromTriplets(entries.begin(), entries.end());

    // initialize dual variables (if needed)
    VectorXd nu = VectorXd::Zero(m);

    // iterate on interior point method
    double t = drwnLPSolver::t0;
    while (1) {
        // determine feasibility
        const bool bFeasible = ((_b - _A * _x).squaredNorm() < drwnLPSolver::eps);
        if (!bFeasible) {
            DRWN_LOG_VERBOSE("...finding feasible point");
        }

        // centering step and update
        for (unsigned iter = 0; iter < drwnLPSolver::maxiters; iter++) {

            //! \todo solve with blockwise elimination
            // construct KKT system, Fx = g
            // | H A^T | | dx | = |  - g   |
            // | A  0  | | w  |   | b - Ax |

            for (int i = 0; i < n; i++) {
                F.coeffRef(i, i) = 0.0;
                if (_ub[i] != DRWN_DBL_MAX) {
                    F.coeffRef(i, i) += 1.0 / ((_ub[i] - _x[i]) * (_ub[i] - _x[i]));
                }
                if (_lb[i] != -DRWN_DBL_MAX) {
                    F.coeffRef(i, i) += 1.0 / ((_x[i] - _lb[i]) * (_x[i] - _lb[i]));
                }
            }

            for (int i = 0; i < n; i++) {
                g[i] = - t * _c[i];
                if (_ub[i] != DRWN_DBL_MAX) {
                    g[i] += 1.0 / (_x[i] - _ub[i]);
                }
                if (_lb[i] != -DRWN_DBL_MAX) {
                    g[i] += 1.0 / (_x[i] - _lb[i]);
                }
            }
            g.tail(m) = _b - _A * _x;

            // check terminating condition
            const double r_primal = g.tail(m).squaredNorm();
            const double r_dual = (_A.transpose() * nu - g.head(n)).squaredNorm();
            //if (!bFeasible && (r_primal + r_dual < drwnLPSolver::eps)) break;
            if (!bFeasible && (r_primal < drwnLPSolver::eps)) break;

            // solve KKT system
            SimplicialLDLT<SparseMatrix<double> > chol(F);
            const VectorXd dxnu = chol.solve(g);

            const double lambda_sqr = g.head(n).dot(dxnu.head(n));
            if (bFeasible && (0.5 * lambda_sqr < 1.0e-6)) break;

            if (bFeasible) {
                // feasible line search
                const double f_prev = t * _c.dot(_x) + barrierFunction(_x);
                // restrict g to its first n entries (the gradient part) so the sizes match dxnu.head(n)
                const double delta_f = drwnLPSolver::alpha * g.head(n).dot(dxnu.head(n));

                double step = 1.0;
                while (1) {
                    const VectorXd nx = _x + step * dxnu.head(n);
                    if (isWithinBounds(nx)) {
                        const double f = t * _c.dot(nx) + barrierFunction(nx);
                        if (f - f_prev < step * delta_f) {
                            _x = nx;
                            break;
                        }
                    }
                    step *= drwnLPSolver::beta;
                }
            } else {
                // infeasible start line search
                DRWN_LOG_DEBUG("...iteration " << iter << ", primal residual " << r_primal
                    << ", dual residual " << r_dual);

                double step = 1.0;
                while (1) {
                    const VectorXd nx = _x + step * dxnu.head(n);
                    if (isWithinBounds(nx)) {
                        const VectorXd nnu = (1.0 - step) * nu + step * dxnu.tail(m);
                        const double r = (_A.transpose() * nnu - g.head(n)).squaredNorm() +
                            (_b - _A * nx).squaredNorm();
                        if (r <= (1.0 - drwnLPSolver::alpha * step) * (r_primal + r_dual)) {
                            _x = nx;
                            nu = nnu;
                            break;
                        }
                    }
                    step *= drwnLPSolver::beta;
                }
            }
        }

        // check if feasible point was found
        if (!bFeasible && ((_b - _A * _x).squaredNorm() > drwnLPSolver::eps)) {
            DRWN_LOG_WARNING("...could not find a feasible point (residual norm is " <<
                (_b - _A * _x).norm() << ")");
            DRWN_FCN_TOC;
            return DRWN_DBL_MAX;
        }

        DRWN_LOG_VERBOSE("...objective is " << _c.dot(_x));

        // check stopping criteria
        if (m < drwnLPSolver::eps * t) break;

        // update barrier function multiplier
        t *= drwnLPSolver::mu;
    }

    // compute true objective and return
    DRWN_FCN_TOC;
    return _c.dot(_x);
}
Example #15
inline void update_z(VectorXd& z, const MatrixXd& J, const VectorXd& d, int iq)
{
  // z = J2 * d2: the trailing z.size()-iq columns of J times the trailing d.size()-iq entries of d
  z = J.rightCols(z.size()-iq) * d.tail(d.size()-iq);
}
Example #16
VectorXd optimum_lorentzian_calc_a1acta3(const VectorXd x, VectorXd y, double H_l, double fc_l, double f_s, double eta, double a3, 
		double b, double alpha, double asym, double gamma_l, const int l, VectorXd V, double step, const double c){
/*
	Function that calculates the Lorentzian on an optimized range of frequency. It returns a vector of the same
	size as the original vector x that contains the Lorentzian model.
	BEWARE: USES build_l_mode_asym_act() ==> includes mode asymmetry and the effect of activity
*/
	//const double c=20.;
	double pmin, pmax;
	long imin, imax;
	VectorXd m0, x_l;
	//VectorXd mall(x.size());

	if(gamma_l >= 1 && f_s >= 1){
		if(l != 0){
			pmin=fc_l - c*(l*f_s + gamma_l);
			pmax=fc_l + c*(l*f_s + gamma_l);
		} else{
			pmin=fc_l - c*gamma_l*2.2;
			pmax=fc_l + c*gamma_l*2.2;
		}
	}
	if(gamma_l <= 1 && f_s >= 1){
		if(l !=0){
			pmin=fc_l -c*(l*f_s + 1);
			pmax=fc_l + c*(l*f_s + 1);
		} else{
			pmin=fc_l - c*2.2;
			pmax=fc_l + c*2.2;
		}
	}
	if(gamma_l >= 1 && f_s <= 1){
		if(l != 0){
			pmin=fc_l - c*(l + gamma_l);
			pmax=fc_l + c*(l + gamma_l);
		} else{
			pmin=fc_l - c*2.2*gamma_l;
			pmax=fc_l + c*2.2*gamma_l;
		}
	}
	if(gamma_l <= 1 && f_s <= 1){
		if(l !=0){
			pmin=fc_l - c*(l+1);
			pmax=fc_l + c*(l+1);
		} else{
			pmin=fc_l -c*2.2;
			pmax=fc_l +c*2.2;
		}
	}
	// ---------- Handling boundaries ----------
	//     case when the proposed values lead to (pmax - step) < min(x)...
	if( (pmax - step) < x.head(1)(0)){
		pmax=x.head(1)(0) + c;  // boundary = first value of x plus c
	}
	//     case when the proposed values lead to (pmin + step) >= max(x)...
	if( (pmin + step) >= x.tail(1)(0)){
		pmin=x.tail(1)(0) - c; // boundary = last value of x minus c
	}

	imin=floor((pmin-x.head(1)(0))/step); // Here a regular grid WITHOUT HOLES is assumed (CANNOT USE a .FILT file)
	imax=ceil((pmax-x.head(1)(0))/step);

	if(imin < 0){imin=0;} // ensure that we always stay within the vector x
	if(imax > x.size()){imax=x.size();} // ensure that we always stay within the vector x
	if(imax-imin <= 0){
		std::cout << "Warning imax -imin <= 0 : imin=" << imin << "   imax=" << imax << std::endl;
		std::cout << " - pmin=" << pmin << "   pmax=" << pmax << std::endl;
		std::cout << " - step=" << step << std::endl;
		std::cout << " --------" << std::endl;
		std::cout << " - l=" << l << std::endl;
		std::cout << " - fc_l=" << fc_l << std::endl;
		std::cout << " - H_l=" << H_l << std::endl;
		std::cout << " - gamma_l=" << gamma_l << std::endl;
		std::cout << " - f_s=" << f_s << std::endl;
		std::cout << " - eta=" << eta << std::endl;
		std::cout << " - a3=" << a3 << std::endl;
		std::cout << " - b=" << b << std::endl;
		std::cout << " - alpha=" << alpha << std::endl;
		std::cout << " - asym=" << asym << std::endl;
		std::cout << " --------" << std::endl;
		exit(EXIT_FAILURE);
	}
	
	//std::cout << "xmin=" << x.head(1) << "(microHz)" << std::endl;
	//std::cout << "xmax=" << x.tail(1) << "(microHz)" << std::endl;
	
	//std::cout << "step=" << step << std::endl;
	//std::cout << "pmin=" << pmin << "(microHz)  pmax=" << pmax << "(microHz)" << std::endl;
	//std::cout << "imin=" << imin << "  imax=" << imax << std::endl;
    //std::cin.ignore();

	x_l=x.segment(imin, imax-imin);
 
	//m0=build_l_mode(x_l, H_l, fc_l, f_s, a2, a3, gamma_l, l, V);
	m0=build_l_mode_asym_act(x_l, H_l, fc_l, f_s, eta, a3, b, alpha, asym, gamma_l, l, V);
	//mall.setZero();
	//mall.segment(imin, imax-imin)=m0;
	y.segment(imin, imax-imin) += m0;
	return y;
}
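All of the examples above lean on the same three Eigen block operations on VectorXd; a minimal, self-contained sketch of their semantics:

#include <iostream>
#include <Eigen/Dense>
using Eigen::VectorXd;

int main() {
    VectorXd v(6);
    v << 1, 2, 3, 4, 5, 6;

    // head(n) / tail(n) are views of the first / last n coefficients
    std::cout << v.head(2).transpose() << std::endl;        // 1 2
    std::cout << v.tail(3).transpose() << std::endl;        // 4 5 6

    // segment(i, n) is the general form: n coefficients starting at index i
    std::cout << v.segment(2, 2).transpose() << std::endl;  // 3 4

    // all three return writable views, so they also work on the left-hand side
    v.tail(3).setZero();                                    // v is now 1 2 3 0 0 0
    return 0;
}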