Example 1
  Function IdasInterface::getJ(bool backward) const {
    vector<MatType> a = MatType::get_input(oracle_);
    vector<MatType> r = const_cast<Function&>(oracle_)(a);
    MatType cj = MatType::sym("cj");

    // Get the Jacobian in the Newton iteration
    if (backward) {
      MatType jac = MatType::jacobian(r[DE_RODE], a[DE_RX]) + cj*MatType::eye(nrx_);
      if (nrz_>0) {
        jac = horzcat(vertcat(jac,
                              MatType::jacobian(r[DE_RALG], a[DE_RX])),
                      vertcat(MatType::jacobian(r[DE_RODE], a[DE_RZ]),
                              MatType::jacobian(r[DE_RALG], a[DE_RZ])));
      }
      return Function("jacB", {a[DE_T], a[DE_RX], a[DE_RZ], a[DE_RP],
                               a[DE_X], a[DE_Z], a[DE_P], cj}, {jac});
    } else {
      MatType jac = MatType::jacobian(r[DE_ODE], a[DE_X]) - cj*MatType::eye(nx_);
      if (nz_>0) {
        jac = horzcat(vertcat(jac,
                              MatType::jacobian(r[DE_ALG], a[DE_X])),
                      vertcat(MatType::jacobian(r[DE_ODE], a[DE_Z]),
                              MatType::jacobian(r[DE_ALG], a[DE_Z])));
      }
      return Function("jacF", {a[DE_T], a[DE_X], a[DE_Z], a[DE_P], cj}, {jac});
    }
  }
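The nested horzcat(vertcat(...), vertcat(...)) call assembles the 2x2 block Jacobian, with the cj-shifted d(ode)/dx block in the top-left, d(alg)/dx below it, and the d(ode)/dz and d(alg)/dz blocks forming the second block column. Below is a minimal standalone sketch of that block-assembly pattern on plain row-major matrices; the Mat type and the vertcat/horzcat helpers are illustrative stand-ins, not CasADi's API.

#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

using Mat = std::vector<std::vector<double>>;  // row-major dense matrix

// Stack a on top of b (both must have the same column count).
Mat vertcat(const Mat& a, const Mat& b) {
  assert(a.empty() || b.empty() || a[0].size() == b[0].size());
  Mat r = a;
  r.insert(r.end(), b.begin(), b.end());
  return r;
}

// Place a to the left of b (both must have the same row count).
Mat horzcat(const Mat& a, const Mat& b) {
  assert(a.size() == b.size());
  Mat r = a;
  for (std::size_t i = 0; i < r.size(); ++i)
    r[i].insert(r[i].end(), b[i].begin(), b[i].end());
  return r;
}

int main() {
  // Four 1x1 blocks standing in for d(ode)/dx, d(ode)/dz, d(alg)/dx, d(alg)/dz.
  Mat J_xx{{1}}, J_xz{{2}}, J_zx{{3}}, J_zz{{4}};
  // Same pattern as the snippet: horzcat(vertcat(xx, zx), vertcat(xz, zz)).
  Mat J = horzcat(vertcat(J_xx, J_zx), vertcat(J_xz, J_zz));
  for (const auto& row : J) {
    for (double v : row) std::cout << v << ' ';
    std::cout << '\n';  // prints "1 2" then "3 4"
  }
  return 0;
}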
Example 2
  void LrDpleToDple::init() {
    // Initialize the base classes
    LrDpleInternal::init();

    MX As = MX::sym("As", input(LR_DPLE_A).sparsity());
    MX Vs = MX::sym("Vs", input(LR_DPLE_V).sparsity());
    MX Cs = MX::sym("Cs", input(LR_DPLE_C).sparsity());
    MX Hs = MX::sym("Hs", input(LR_DPLE_H).sparsity());

    int n_ = A_[0].size1();

    // Chop-up the arguments
    std::vector<MX> As_ = horzsplit(As, n_);
    std::vector<MX> Vs_ = horzsplit(Vs, V_[0].size2());
    std::vector<MX> Cs_ = horzsplit(Cs, V_[0].size2());
    std::vector<MX> Hss_ = horzsplit(Hs, Hsi_);

    std::vector<MX> V_(Vs_.size());

    for (int k=0;k<V_.size();++k) {
      V_[k] = mul(Cs_[k], mul(Vs_[k], Cs_[k].T()));
    }

    std::vector<Sparsity> Vsp(Vs_.size());
    for (int k=0;k<V_.size();++k) {
      Vsp[k] = V_[k].sparsity();
    }

    // Solver options
    Dict options;
    if (hasSetOption(optionsname())) {
      options = getOption(optionsname());
    }

    // Create a DpleSolver instance
    std::map<std::string, std::vector<Sparsity> > tmp;
    tmp["a"] = A_;
    tmp["v"] = Vsp;
    solver_ = DpleSolver("solver", getOption(solvername()), tmp, options);

    MX P = solver_(make_map("a", horzcat(As_), "v", horzcat(V_))).at("p");
    std::vector<MX> Ps_ = horzsplit(P, n_);

    std::vector<MX> HPH(K_);

    for (int k=0;k<K_;++k) {
      std::vector<MX> hph = horzsplit(Hss_[k], cumsum0(Hs_[k]));

      for (int kk=0;kk<hph.size();++kk) {
        hph[kk] = mul(hph[kk].T(), mul(Ps_[k], hph[kk]));
      }
      HPH[k] = diagcat(hph);
    }


    f_ = MXFunction(name_, lrdpleIn("a", As, "v", Vs, "c", Cs, "h", Hs),
                    lrdpleOut("y", horzcat(HPH)));

    Wrapper<LrDpleToDple>::checkDimensions();
  }
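In effect the wrapper converts the low-rank problem to a standard DPLE and projects the solution back. As a reading of the construction above (not additional solver output): the factored noise terms are expanded, the standard periodic Lyapunov recursion is solved by the inner DpleSolver, and the outputs are the H-projected diagonal blocks,

  \tilde V_k = C_k V_k C_k^{\top}, \qquad
  P_{k+1} = A_k P_k A_k^{\top} + \tilde V_k, \qquad
  Y_k = \operatorname{diag}_j\big(H_{k,j}^{\top} P_k H_{k,j}\big),

where the H_{k,j} are the column blocks of H_k defined by the Hs option (the horzsplit with cumsum0(Hs_[k])).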
Example 3
bool SRUKF::measurementUpdate(const Matrix& measurement, const Matrix& measurementNoise, const Matrix& predictedMeasurementSigmas, const Matrix& stateEstimateSigmas)
{

    int numberOfSigmaPoints = stateEstimateSigmas.getn();
    debug << "Predicted measurement sigmas:" << std::endl << predictedMeasurementSigmas;

    Matrix predictedMeasurement = CalculateMeanFromSigmas(predictedMeasurementSigmas);

    Matrix Mz(m_numStates,numberOfSigmaPoints, false);
    Matrix Mx(m_numStates,numberOfSigmaPoints, false);

    for(int i = 0; i < numberOfSigmaPoints; i++)
    {
        Mz.setCol(i, m_sqrtSigmaWeights[0][i] * (predictedMeasurementSigmas.getCol(i) - predictedMeasurement));
        Mx.setCol(i, m_sqrtSigmaWeights[0][i] * (stateEstimateSigmas.getCol(i) - m_mean));
    }

    Matrix Sz = horzcat(Mz,measurementNoise);
    Matrix Pxz = Mx*Mz.transp();
    Matrix K = Pxz * Invert22(Sz*Sz.transp());

    debug << "K:" << std::endl << K;
    m_mean  = m_mean + K * (measurement - predictedMeasurement);

    m_sqrtCovariance = HT(horzcat(Mx-K*Mz,K*measurementNoise));
    //m_covariance = HT(horzcat(sigmaPoints-m_mean*m_sigmaWeights - K*predictedObservationSigmas +
    //                          K*predictedObservation*m_sigmaWeights,K*measurementNoise));
    return true;
}
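The final assignment is the square-root form of the covariance update: instead of forming the covariance explicitly, the weighted deviation columns and the scaled noise factor are concatenated and re-triangularized by the Householder transform HT, which leaves the product S S^T unchanged. A sketch of the algebra, assuming measurementNoise holds a square-root factor \sqrt{R} of the measurement covariance (consistent with Sz = [Mz, measurementNoise] above):

  S^{+} = \mathrm{HT}\big([\,M_x - K M_z, \;\; K\sqrt{R}\,]\big), \qquad
  S^{+}(S^{+})^{\top} = (M_x - K M_z)(M_x - K M_z)^{\top} + K R K^{\top},

and expanding the right-hand side with K = P_{xz} P_{zz}^{-1}, P_{xz} = M_x M_z^{\top}, P_{zz} = M_z M_z^{\top} + R recovers the usual posterior covariance P^{+} = P - K P_{zz} K^{\top}.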
Example 4
  std::vector<Sparsity>
  LrDpleInternal::getSparsity(const std::map<std::string, std::vector<Sparsity> >& st,
                              const std::vector< std::vector<int> > &Hs_) {
    // Chop-up the arguments
    std::vector<Sparsity> As, Vs, Cs, Hs;
    if (st.count("a")) As = st.at("a");
    if (st.count("v")) Vs = st.at("v");
    if (st.count("c")) Cs = st.at("c");
    if (st.count("h")) Hs = st.at("h");

    bool with_H = !Hs.empty();

    int K = As.size();

    Sparsity A;
    if (K==1) {
      A = As[0];
    } else {
      Sparsity AL = diagcat(vector_slice(As, range(As.size()-1)));

      Sparsity AL2 = horzcat(AL, Sparsity(AL.size1(), As[0].size2()));
      Sparsity AT = horzcat(Sparsity(As[0].size1(), AL.size2()), As.back());
      A = vertcat(AT, AL2);
    }

    Sparsity V = diagcat(Vs.back(), diagcat(vector_slice(Vs, range(Vs.size()-1))));
    Sparsity C;

    if (!Cs.empty()) {
      C = diagcat(Cs.back(), diagcat(vector_slice(Cs, range(Cs.size()-1))));
    }
    Sparsity H;
    std::vector<int> Hs_agg;

    std::vector<int> Hi(1, 0);
    if (Hs_.size()>0 && with_H) {
      H = diagcat(Hs.back(), diagcat(vector_slice(Hs, range(Hs.size()-1))));
      casadi_assert(K==Hs_.size());
      for (int k=0;k<K;++k) {
        Hs_agg.insert(Hs_agg.end(), Hs_[k].begin(), Hs_[k].end());
        int sum=0;
        for (int i=0;i<Hs_[k].size();++i) {
          sum+= Hs_[k][i];
        }
        Hi.push_back(Hi.back()+sum);
      }
    }

    Sparsity res = LrDleInternal::getSparsity(make_map("a", A, "v", V, "c", C, "h", H), Hs_agg);

    if (with_H) {
      return diagsplit(res, Hi);
    } else {
      return diagsplit(res, As[0].size2());
    }
  }
Example 5
  void CondensingIndefDpleInternal::init() {
    // Initialize the base classes
    DpleInternal::init();

    casadi_assert_message(!pos_def_,
      "pos_def option set to True: Solver only handles the indefinite case.");
    casadi_assert_message(const_dim_,
      "const_dim option set to False: Solver only handles the True case.");

    n_ = A_[0].size1();


    MX As = MX::sym("A", horzcat(A_));
    MX Vs = MX::sym("V", horzcat(V_));

    std::vector< MX > Vss = horzsplit(Vs, n_);
    std::vector< MX > Ass = horzsplit(As, n_);

    for (int k=0;k<K_;++k) {
      Vss[k] = (Vss[k]+Vss[k].T())/2;
    }

    MX R = MX::zeros(n_, n_);

    for (int k=0;k<K_;++k) {
      R = mul(mul(Ass[k], R), Ass[k].T()) + Vss[k];
    }

    std::vector< MX > Assr(K_);
    std::reverse_copy(Ass.begin(), Ass.end(), Assr.begin());

    MX Ap = mul(Assr);

    // Create a DleSolver instance
    solver_ = DleSolver(getOption(solvername()), dleStruct("a", Ap.sparsity(), "v", R.sparsity()));
    solver_.setOption(getOption(optionsname()));

    // Initialize the DLE solver
    solver_.init();

    std::vector<MX> Pr = solver_.call(dpleIn("a", Ap, "v", R));

    std::vector<MX> Ps(K_);
    Ps[0] = Pr[0];

    for (int k=0;k<K_-1;++k) {
      Ps[k+1] = mul(mul(Ass[k], Ps[k]), Ass[k].T()) + Vss[k];
    }

    f_ = MXFunction(dpleIn("a", As, "v", Vs), dpleOut("p", horzcat(Ps)));
    f_.init();

    Wrapper::checkDimensions();

  }
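The two loops implement the condensing step: the K coupled periodic equations are folded into a single discrete Lyapunov equation for P_0, which the inner DleSolver handles, and the remaining P_k are recovered by the forward recursion at the end. A sketch of what Ap and R represent, writing \Phi_k = A_{K-1} \cdots A_{k+1} (a reading of the loops, not extra computation):

  \bar A = A_{K-1} \cdots A_0, \qquad
  R = \sum_{k=0}^{K-1} \Phi_k V_k \Phi_k^{\top}, \qquad
  P_0 = \bar A P_0 \bar A^{\top} + R, \qquad
  P_{k+1} = A_k P_k A_k^{\top} + V_k.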
Example 6
T cross(const GenericMatrix<T> &a, const GenericMatrix<T> &b, int dim) {
  casadi_assert_message(a.size1()==b.size1() && a.size2()==b.size2(),
    "cross(a,b): Inconsistent dimensions. Dimension of a (" << a.dimString()
    << ") must equal that of b (" << b.dimString() << ").");
  casadi_assert_message(a.size1()==3 || a.size2()==3,
    "cross(a,b): One of the dimensions of a should have length 3, but got "
    << a.dimString() << ".");
  casadi_assert_message(dim==-1 || dim==1 || dim==2,
    "cross(a,b,dim): Dim must be 1, 2 or -1 (automatic).");

  std::vector<T> ret(3);

  // Take the cross product along rows if the first dimension has length 3,
  // unless an explicit dim overrides the choice.
  bool t = a.size1()==3;
  if (dim==1) t = true;
  if (dim==2) t = false;

  T a1 = t ? a(0,ALL) : a(ALL,0);
  T a2 = t ? a(1,ALL) : a(ALL,1);
  T a3 = t ? a(2,ALL) : a(ALL,2);

  T b1 = t ? b(0,ALL) : b(ALL,0);
  T b2 = t ? b(1,ALL) : b(ALL,1);
  T b3 = t ? b(2,ALL) : b(ALL,2);

  ret[0] = a2*b3-a3*b2;
  ret[1] = a3*b1-a1*b3;
  ret[2] = a1*b2-a2*b1;

  return t ? vertcat(ret) : horzcat(ret);
}
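The three component assignments above are the standard cross-product formulas. For reference, the same arithmetic on plain 3-vectors, as a self-contained check that is independent of the GenericMatrix/ALL slicing machinery:

#include <array>
#include <iostream>

using Vec3 = std::array<double, 3>;

// Component formulas identical to ret[0..2] in the snippet above.
Vec3 cross3(const Vec3& a, const Vec3& b) {
  return {a[1]*b[2] - a[2]*b[1],
          a[2]*b[0] - a[0]*b[2],
          a[0]*b[1] - a[1]*b[0]};
}

int main() {
  Vec3 c = cross3({1, 0, 0}, {0, 1, 0});
  std::cout << c[0] << ' ' << c[1] << ' ' << c[2] << '\n';  // 0 0 1
  return 0;
}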
Example 7
  void Horzsplit::evalAdj(const std::vector<pv_MX>& adjSeed, const std::vector<pv_MX>& adjSens) {
    int nadj = adjSeed.size();
    int nx = offset_.size()-1;

    // Get column offsets
    vector<int> col_offset;
    col_offset.reserve(offset_.size());
    col_offset.push_back(0);
    for (std::vector<Sparsity>::const_iterator it=output_sparsity_.begin();
        it!=output_sparsity_.end();
        ++it) {
      col_offset.push_back(col_offset.back() + it->size2());
    }

    for (int d=0; d<nadj; ++d) {
      if (adjSens[d][0]!=0) {
        vector<MX> v;
        for (int i=0; i<nx; ++i) {
          MX* x_i = adjSeed[d][i];
          if (x_i!=0) {
            v.push_back(*x_i);
            *x_i = MX();
          } else {
            v.push_back(MX(output_sparsity_[i].shape()));
          }
        }
        adjSens[d][0]->addToSum(horzcat(v));
      }
    }
  }
Example 8
  void SymbolicQr::evaluateSXGen(const SXPtrV& input, SXPtrV& output, bool tr) {
    // Get arguments
    casadi_assert(input.at(0)!=0);
    SX r = *input.at(0);
    casadi_assert(input.at(1)!=0);
    SX A = *input.at(1);

    // Number of right hand sides
    int nrhs = r.size2();

    // Factorize A
    vector<SX> v = fact_fcn_(A);

    // Select solve function
    Function& solv = tr ? solv_fcn_T_ : solv_fcn_N_;

    // Solve for every right hand side
    vector<SX> resv;
    v.resize(3);
    for (int i=0; i<nrhs; ++i) {
      v[2] = r(Slice(), i);
      resv.push_back(solv(v).at(0));
    }

    // Collect the right hand sides
    casadi_assert(output[0]!=0);
    *output.at(0) = horzcat(resv);
  }
Example 9
  void QcqpToSocp::init() {
    // Initialize the base classes
    QcqpSolverInternal::init();

    // Collection of sparsities that will make up SOCP_SOLVER_G
    std::vector<Sparsity> socp_g;

    // Allocate Cholesky solvers
    cholesky_.push_back(LinearSolver("csparsecholesky", st_[QCQP_STRUCT_H]));
    for (int i=0;i<nq_;++i) {
      cholesky_.push_back(
        LinearSolver("csparsecholesky",
          DMatrix(st_[QCQP_STRUCT_P])(range(i*n_, (i+1)*n_), ALL).sparsity()));
    }

    for (int i=0;i<nq_+1;++i) {
      // Initialize the Cholesky solver
      cholesky_[i].init();

      // Harvest Cholesky sparsity patterns
      // Note that we add an extra scalar to make room for the epigraph-reformulation variable
      socp_g.push_back(blkdiag(
        cholesky_[i].getFactorizationSparsity(false), Sparsity::dense(1, 1)));
    }

    // Create an SocpSolver instance
    solver_ = SocpSolver(getOption(solvername()),
                         socpStruct("g", horzcat(socp_g),
                                    "a", horzcat(input(QCQP_SOLVER_A).sparsity(),
                                                 Sparsity::sparse(nc_, 1))));
    //solver_.setQCQPOptions();
    if (hasSetOption(optionsname())) solver_.setOption(getOption(optionsname()));

    std::vector<int> ni(nq_+1);
    for (int i=0;i<nq_+1;++i) {
      ni[i] = n_+1;
    }
    solver_.setOption("ni", ni);

    // Initialize the SocpSolver
    solver_.init();
  }
Example 10
Matrix GaussJordanInverse(const Matrix& mat)
{
    // Augment the matrix with the identity to form [mat | I]
    Matrix A = horzcat(mat, Matrix(mat.getm(), mat.getn(), true));

    int i,j;
    unsigned int i_max = 0;
    int k = 0;
    double C;
    for (k = 0; k < A.getm(); ++k)
    {
        // find max pivot.
        i_max = k;
        for (i = k; i < A.getm(); ++i)
        {
            if(fabs(A[i_max][k]) < fabs(A[i][k]))
                i_max = i;
        }

        // Check if singular
        if(A[i_max][k] == 0)
            std::cout << "Matrix is singular" << std::endl;

        A.swapRows(k, i_max);
        for(i = k+1; i < A.getm(); ++i)
        {
            C = A[i][k] / A[k][k];
            for(j = k+1; j < A.getn(); ++j)
            {
                A[i][j] -= A[k][j] * C;
            }
            A[i][k] = 0;
        }
    }
    // Back substitution
    for(k = A.getm()-1; k >= 0; --k)
    {
        C = A[k][k];
        for(i=0; i < k; ++i)
        {
            for(j=A.getn()-1; j > k-1; --j)
            {
                A[i][j] -= A[k][j] * A[i][k] / C;
            }
        }
        A.setRow(k, A.getRow(k) / C);
    }

    // Un-augment: extract the inverse from the right half of [I | result]
    Matrix result(mat.getm(), mat.getn(), false);
    for(i=0; i < mat.getn(); ++i)
        result.setCol(i, A.getCol(i + mat.getn()));
    return result;
}
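The routine above is Gauss-Jordan elimination with partial pivoting on the augmented system [mat | I]; note that on a singular pivot it only prints a message and carries on. Below is a minimal self-contained check of the same procedure on a fixed 2x2 example, using plain arrays and folding the elimination and back substitution into a single sweep (the numbers are chosen so the exact inverse is easy to verify):

#include <cmath>
#include <cstdio>
#include <utility>

int main() {
  const int n = 2;
  // Augmented system [A | I] for A = [[4, 7], [2, 6]]; the exact inverse is [[0.6, -0.7], [-0.2, 0.4]].
  double A[n][2 * n] = {{4, 7, 1, 0},
                        {2, 6, 0, 1}};
  for (int k = 0; k < n; ++k) {
    // Partial pivoting: bring the largest |A[i][k]| up to row k.
    int i_max = k;
    for (int i = k + 1; i < n; ++i)
      if (std::fabs(A[i][k]) > std::fabs(A[i_max][k])) i_max = i;
    for (int j = 0; j < 2 * n; ++j) std::swap(A[k][j], A[i_max][j]);
    // Normalize the pivot row, then eliminate the pivot column in every other row.
    double piv = A[k][k];
    for (int j = 0; j < 2 * n; ++j) A[k][j] /= piv;
    for (int i = 0; i < n; ++i) {
      if (i == k) continue;
      double c = A[i][k];
      for (int j = 0; j < 2 * n; ++j) A[i][j] -= c * A[k][j];
    }
  }
  // The right half now holds A^{-1}.
  std::printf("%g %g\n%g %g\n", A[0][2], A[0][3], A[1][2], A[1][3]);
  return 0;
}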
Example 11
  void Horzcat::evaluateMX(const MXPtrV& input, MXPtrV& output, const MXPtrVV& fwdSeed,
                           MXPtrVV& fwdSens, const MXPtrVV& adjSeed, MXPtrVV& adjSens,
                           bool output_given) {
    int nfwd = fwdSens.size();
    int nadj = adjSeed.size();

    // Non-differentiated output
    if (!output_given) {
      *output[0] = horzcat(getVector(input));
    }

    // Forward sensitivities
    for (int d = 0; d<nfwd; ++d) {
      *fwdSens[d][0] = horzcat(getVector(fwdSeed[d]));
    }

    // Quick return?
    if (nadj==0) return;

    // Get offsets for each column
    vector<int> col_offset(ndep()+1, 0);
    for (int i=0; i<ndep(); ++i) {
      int ncol = dep(i).sparsity().size2();
      col_offset[i+1] = col_offset[i] + ncol;
    }

    // Adjoint sensitivities
    for (int d=0; d<nadj; ++d) {
      MX& aseed = *adjSeed[d][0];
      vector<MX> s = horzsplit(aseed, col_offset);
      aseed = MX();
      for (int i=0; i<ndep(); ++i) {
        adjSens[d][i]->addToSum(s[i]);
      }
    }
  }
Example 12
  void SimpleIndefDpleInternal::init() {

    DpleInternal::init();

    casadi_assert_message(!pos_def_,
      "pos_def option set to True: Solver only handles the indefinite case.");
    casadi_assert_message(const_dim_,
      "const_dim option set to False: Solver only handles the True case.");

    n_ = A_[0].size1();


    MX As = MX::sym("A", n_, K_*n_);
    MX Vs = MX::sym("V", n_, K_*n_);

    std::vector< MX > Vss = horzsplit(Vs, n_);
    std::vector< MX > Ass = horzsplit(As, n_);

    for (int k=0;k<K_;++k) {
      Vss[k]=(Vss[k]+Vss[k].T())/2;
    }

    std::vector< MX > AA_list(K_);
    for (int k=0;k<K_;++k) {
      AA_list[k] = kron(Ass[k], Ass[k]);
    }

    MX AA = blkdiag(AA_list);

    MX A_total = DMatrix::eye(n_*n_*K_) -
        vertcat(AA(range(K_*n_*n_-n_*n_, K_*n_*n_), range(K_*n_*n_)),
                AA(range(K_*n_*n_-n_*n_), range(K_*n_*n_)));

    std::vector<MX> Vss_shift;
    Vss_shift.push_back(Vss.back());
    Vss_shift.insert(Vss_shift.end(), Vss.begin(), Vss.begin()+K_-1);

    MX Pf = solve(A_total, vec(horzcat(Vss_shift)), getOption("linear_solver"));
    MX P = reshape(Pf, n_, K_*n_);

    std::vector<MX> v_in;
    v_in.push_back(As);
    v_in.push_back(Vs);
    f_ = MXFunction(v_in, P);
    f_.setInputScheme(SCHEME_DPLEInput);
    f_.setOutputScheme(SCHEME_DPLEOutput);
    f_.init();
  }
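The kron/blkdiag construction is the vectorization of the periodic Lyapunov recursion. Using the standard identity \operatorname{vec}(A P A^{\top}) = (A \otimes A)\operatorname{vec}(P), each step becomes linear in \operatorname{vec}(P_k),

  \operatorname{vec}(P_{k+1}) - (A_k \otimes A_k)\operatorname{vec}(P_k) = \operatorname{vec}(V_k), \qquad k \bmod K,

and stacking the K relations, with the cyclic index shift encoded by the reordered vertcat of AA and by Vss_shift, yields exactly the sparse linear system that solve(A_total, vec(horzcat(Vss_shift)), ...) handles (a sketch of the identity, not solver code).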
Example 13
  void Horzsplit::evalAdj(const std::vector<std::vector<MX> >& aseed,
                          std::vector<std::vector<MX> >& asens) {
    int nadj = aseed.size();

    // Get column offsets
    vector<int> col_offset;
    col_offset.reserve(offset_.size());
    col_offset.push_back(0);
    for (std::vector<Sparsity>::const_iterator it=output_sparsity_.begin();
         it!=output_sparsity_.end(); ++it) {
      col_offset.push_back(col_offset.back() + it->size2());
    }

    for (int d=0; d<nadj; ++d) {
      asens[d][0] += horzcat(aseed[d]);
    }
  }
Example 14
void UKF4::timeUpdate(float timePassed)
{

  updateUncertainties[0][2] = timePassed; // x velx
  updateUncertainties[1][3] = timePassed; // y vely



  //-----------------------Update for velocity
  // Estimated velocity and position, combined with an estimated decay on velocity (friction) is used to predict the new position
  stateEstimates[0][0] += stateEstimates[2][0]*timePassed; // Update x position by x velocity.
  stateEstimates[1][0] += stateEstimates[3][0]*timePassed; // Update y position by y velocity.
  stateEstimates[2][0] *= ukfParams.vel_decay_rate; // Reduce x velocity assuming deceleration
  stateEstimates[3][0] *= ukfParams.vel_decay_rate; // Reduce y velocity assuming deceleration

  // Householder transform. Unscented KF algorithm. Takes a while.
  stateStandardDeviations=HT(horzcat(updateUncertainties*stateStandardDeviations, sqrtOfProcessNoise));

  return;
}
Example 15
  void StabilizedQpToQp::init() {
    // Initialize the base classes
    StabilizedQpSolverInternal::init();

    // Form augmented QP
    Sparsity H_sparsity_qp = diagcat(st_[QP_STRUCT_H], Sparsity::diag(nc_));
    Sparsity A_sparsity_qp = horzcat(st_[QP_STRUCT_A], Sparsity::diag(nc_));
    std::string qp_solver_name = getOption("qp_solver");
    qp_solver_ = QpSolver(qp_solver_name,
                          qpStruct("h", H_sparsity_qp, "a", A_sparsity_qp));

    // Pass options if provided
    if (hasSetOption("qp_solver_options")) {
      Dictionary qp_solver_options = getOption("qp_solver_options");
      qp_solver_.setOption(qp_solver_options);
    }

    // Initialize the QP solver
    qp_solver_.init();
  }
Example 16
  void ImplicitFunctionInternal::evaluateMX(
      MXNode* node, const MXPtrV& arg, MXPtrV& res,
      const MXPtrVV& fseed, MXPtrVV& fsens, const MXPtrVV& aseed, MXPtrVV& asens,
      bool output_given) {
    // Evaluate non-differentiated
    vector<MX> argv = MXNode::getVector(arg);
    MX z; // the solution to the system of equations
    if (output_given) {
      z = *res[iout_];
    } else {
      vector<MX> resv = callSelf(argv);
      for (int i=0; i<resv.size(); ++i) {
        if (res[i]!=0) *res[i] = resv[i];
      }
      z = resv[iout_];
    }

    // Quick return if no derivatives
    int nfwd = fsens.size();
    int nadj = aseed.size();
    if (nfwd==0 && nadj==0) return;

    // Temporaries
    vector<int> col_offset(1, 0);
    vector<MX> rhs;
    vector<int> rhs_loc;

    // Arguments when calling f/f_der
    vector<MX> v;
    v.reserve(getNumInputs()*(1+nfwd) + nadj);
    v.insert(v.end(), argv.begin(), argv.end());
    v[iin_] = z;

    // Get an expression for the Jacobian
    MX J = jac_.call(v).front();

    // Directional derivatives of f
    Function f_der = f_.derivative(nfwd, nadj);

    // Forward sensitivities, collect arguments for calling f_der
    for (int d=0; d<nfwd; ++d) {
      argv = MXNode::getVector(fseed[d]);
      argv[iin_] = MX::zeros(input(iin_).sparsity());
      v.insert(v.end(), argv.begin(), argv.end());
    }

    // Adjoint sensitivities, solve to get arguments for calling f_der
    if (nadj>0) {
      for (int d=0; d<nadj; ++d) {
        for (int i=0; i<getNumOutputs(); ++i) {
          if (aseed[d][i]!=0) {
            if (i==iout_) {
              rhs.push_back(*aseed[d][i]);
              col_offset.push_back(col_offset.back()+1);
              rhs_loc.push_back(v.size()); // where to store it
              v.push_back(MX());
            } else {
              v.push_back(*aseed[d][i]);
            }
          }
          *aseed[d][i] = MX();
        }
      }

      // Solve for all right-hand-sides at once
      rhs = horzsplit(J->getSolve(horzcat(rhs), true, linsol_), col_offset);
      for (int d=0; d<rhs.size(); ++d) {
        v[rhs_loc[d]] = rhs[d];
      }
      col_offset.resize(1);
      rhs.clear();
    }

    // Propagate through the implicit function
    v = f_der.call(v);
    vector<MX>::const_iterator v_it = v.begin();

    // Discard non-differentiated evaluation (change?)
    v_it += getNumOutputs();

    // Forward directional derivatives
    if (nfwd>0) {
      for (int d=0; d<nfwd; ++d) {
        for (int i=0; i<getNumOutputs(); ++i) {
          if (i==iout_) {
            // Collect the arguments
            rhs.push_back(*v_it++);
            col_offset.push_back(col_offset.back()+1);
          } else {
            // Auxiliary output
            if (fsens[d][i]!=0) {
              *fsens[d][i] = *v_it++;
            }
          }
        }
      }

      // Solve for all the forward derivatives at once
      rhs = horzsplit(J->getSolve(horzcat(rhs), false, linsol_), col_offset);
      for (int d=0; d<nfwd; ++d) {
        if (fsens[d][iout_]!=0) {
          *fsens[d][iout_] = -rhs[d];
        }
      }

      col_offset.resize(1);
      rhs.clear();
    }

    // Collect adjoint sensitivities
    for (int d=0; d<nadj; ++d) {
      for (int i=0; i<asens[d].size(); ++i, ++v_it) {
        if (i!=iin_ && asens[d][i]!=0 && !v_it->isNull()) {
          *asens[d][i] += - *v_it;
        }
      }
    }
    casadi_assert(v_it==v.end());
  }
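The two getSolve calls realize the implicit-function-theorem sensitivities of the solution z of f(z, p) = 0, with J = \partial f/\partial z evaluated at the solution. Schematically, and up to CasADi's conventions for the transposed solve:

  J\,\dot z = -\frac{\partial f}{\partial p}\,\dot p
  \;\Rightarrow\; \dot z = -J^{-1}\frac{\partial f}{\partial p}\,\dot p, \qquad
  \bar p \leftarrow \bar p - \Big(\frac{\partial f}{\partial p}\Big)^{\top} J^{-\top}\bar z.

The forward seeds are pushed through f_der and then mapped by the non-transposed solve (with the sign flip applied to rhs), while the adjoint seeds are first mapped by the transposed solve and then pulled back through f_der.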
Example 17
// update based on an opponent detection
ukf4UpdateResult UKF4::opponentDetection(float obsDistance, float obsBearing, float distanceErrorOffset, float distanceErrorRelative, float bearingError)
{

  WorldObject* self = &(worldObjects->objects_[robotState->WO_SELF]);

  oppLog((70, "update using self %i, loc (%5.3f, %5.3f), orient %5.3f, sd (%5.5f, %5.5f), sd orient %5.5f", robotState->WO_SELF, self->loc.x, self->loc.y, self->orientation, self->sd.x, self->sd.y, self->sdOrientation));

  // TODO: incorporate robot's own sd in x,y,theta
  float oppX = obsDistance * cos(obsBearing + self->orientation) + self->loc.x;
  float oppY = obsDistance * sin(obsBearing + self->orientation) + self->loc.y;

  oppLog((70, "update opp filter with robot at dist %5.3f, bear %5.3f, global (%5.3f, %5.3f)", obsDistance, obsBearing, oppX, oppY));

  // if robot is off field.... return outlier
  if (fabs(oppX) > (GRASS_X/20.0) || fabs(oppY) > (GRASS_Y/20.0)){
    oppLog((10, "observed opponent off field, outlier"));
    return UKF4_OUTLIER;
  }

  float R_range = distanceErrorOffset + distanceErrorRelative * powf(obsDistance, 2);
  float R_bearing = bearingError;

  // Calculate update uncertainties - S_obj_rel & R_obj_rel
  NMatrix S_obj_rel = NMatrix(2,5,false);

  // Todd: convert dist/bearing/locx/y/theta error to abs x/y error
  S_obj_rel[0][0] = cos(obsBearing + self->orientation) * sqrtf(R_range);              // oppX/drange   * r_range
  S_obj_rel[0][1] = -sin(obsBearing + self->orientation)*obsDistance*sqrtf(R_bearing);   // oppX/dbear    * r_bear
  S_obj_rel[0][2] = self->sd.x;             // oppX/locx     * r_locx
  S_obj_rel[0][3] = 0.0;             // oppX/locy     * r_locy
  S_obj_rel[0][4] = -sin(obsBearing+self->orientation)*obsDistance*self->sdOrientation;             // oppX/theta    * r_theta

  S_obj_rel[1][0] = sin(obsBearing + self->orientation) * sqrtf(R_range);  // oppY/drange   * r_range
  S_obj_rel[1][1] = cos(obsBearing + self->orientation) * obsDistance * sqrtf(R_bearing);             // oppY/dbear    * r_bear
  S_obj_rel[1][2] = 0.0;             // oppY/locx     * r_locx
  S_obj_rel[1][3] = self->sd.y;             // oppY/locy     * r_locy
  S_obj_rel[1][4] = cos(obsBearing + self->orientation) * obsDistance * self->sdOrientation;             // oppY/theta    * r_theta

  //  oppLog((10, "sd range %5.5f, sd bearing %5.5f, sdx %5.5f, sdy %5.5f, sdtheta %5.5f", R_range, R_bearing, self->sd.x, self->sd.y, self->sdOrientation));
  //oppLog((10, "from those errors to globalx: %5.3f, %5.3f, %5.3f, %5.3f, %5.3f", S_obj_rel[0][0], S_obj_rel[0][1], S_obj_rel[0][2], S_obj_rel[0][3], S_obj_rel[0][4]));
  //oppLog((10, "from those errors to globaly: %5.3f, %5.3f, %5.3f, %5.3f, %5.3f", S_obj_rel[1][0], S_obj_rel[1][1], S_obj_rel[1][2], S_obj_rel[1][3], S_obj_rel[1][4]));

  NMatrix R_obj_rel = S_obj_rel * S_obj_rel.transp(); // R = S^2

  // Unscented KF Stuff.
  NMatrix yBar;                                    //reset
  NMatrix Py;
  NMatrix Pxy=NMatrix(4, 2, false);                    //Pxy=[0;0;0];
  NMatrix scriptX=NMatrix(stateEstimates.getm(), 2 * nStates + 1, false);
  scriptX.setCol(0, stateEstimates);                         //scriptX(:,1)=Xhat;

  //----------------Saturate ScriptX angle sigma points to not wrap
  for(int i=1;i<nStates+1;i++){//hack to make test points distributed
    // Addition Portion.
    scriptX.setCol(i, stateEstimates + sqrtf((float)nStates + ukfParams.kappa) * stateStandardDeviations.getCol(i - 1));
    // Subtraction Portion.
    scriptX.setCol(nStates + i,stateEstimates - sqrtf((float)nStates + ukfParams.kappa) * stateStandardDeviations.getCol(i - 1));
  }
  //----------------------------------------------------------------
  NMatrix scriptY = NMatrix(2, 2 * nStates + 1, false);
  NMatrix temp = NMatrix(2, 1, false);

  for(int i = 0; i < 2 * nStates + 1; i++){
    // expected values for each sigma point (abs x/y location)
    temp[0][0] = scriptX[0][i];
    temp[1][0] = scriptX[1][i];
    scriptY.setCol(i, temp.getCol(0));
  }
  NMatrix Mx = NMatrix(scriptX.getm(), 2 * nStates + 1, false);
  NMatrix My = NMatrix(scriptY.getm(), 2 * nStates + 1, false);
  for(int i = 0; i < 2 * nStates + 1; i++){
    Mx.setCol(i, sqrtOfTestWeightings[0][i] * scriptX.getCol(i));
    My.setCol(i, sqrtOfTestWeightings[0][i] * scriptY.getCol(i));
  }

  NMatrix M1 = sqrtOfTestWeightings;
  yBar = My * M1.transp(); // Predicted Measurement.
  Py = (My - yBar * M1) * (My - yBar * M1).transp();
  Pxy = (Mx - stateEstimates * M1) * (My - yBar * M1).transp();

  NMatrix K = Pxy * Invert22(Py + R_obj_rel); // K = Kalman filter gain.

  NMatrix y = NMatrix(2,1,false); // Measurement, in terms of global (x, y).
  y[0][0] = oppX;
  y[1][0] = oppY; 
  //end of standard ukf stuff
  //RHM: 20/06/08 Outlier rejection.
  float innovation2 = convDble((yBar - y).transp() * Invert22(Py + R_obj_rel) * (yBar - y));

  // Update Alpha
  float innovation2measError = convDble((yBar - y).transp() * Invert22(R_obj_rel) * (yBar - y));
  alpha *= 1 / (1 + innovation2measError);

  locLog(70, "Y: %5.3f, %5.3f, Ybar: %5.3f, %5.3f, yBar-y: %5.3f, %5.3f", y[0][0], y[1][0], yBar[0][0], yBar[1][0], yBar[0][0]-y[0][0], yBar[1][0]-y[1][0]);

  oppLog((70, "innovation2: %5.3f, outlier thresh: %5.3f", innovation2, ukfParams.outlier_rejection_thresh));

  lastInnov2 = innovation2;
  lastStateEstimates = stateEstimates;
  lastStateStandardDeviations = stateStandardDeviations;
  lastFrameUpdated = frameUpdated;


  if (innovation2 > ukfParams.outlier_rejection_thresh){
    return UKF4_OUTLIER;
  }
  if (innovation2 < 0){
    //std::cout<<"**********************************"<<std::endl;
    //Py.print();
    //R_obj_rel.print();
    //std::cout<<"**********************************"<<std::endl;
  }


  // only update this from actual seen observations
  frameUpdated = frameInfo->frame_id;


  oppLog((70, "Y: %5.3f, %5.3f, Ybar: %5.3f, %5.3f, yBar-y: %5.3f, %5.3f", y[0][0], y[1][0], yBar[0][0], yBar[1][0], yBar[0][0]-y[0][0], yBar[1][0]-y[1][0]));

  NMatrix change = NMatrix(4,1,false) - K*(yBar - y);
  //  oppLog((70, "Change: %5.3f, %5.3f, %5.3f, %5.3f", change[0][0], change[1][0], change[2][0], change[3][0]));

  stateStandardDeviations = HT( horzcat(Mx - stateEstimates*M1 - K*My + K*yBar*M1, K*S_obj_rel) );
  stateEstimates = stateEstimates - K*(yBar - y);
  return UKF4_OK;
}
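Here scriptX holds the 2*nStates+1 sigma points, scriptY their image under the (linear) measurement model, and Mx/My their columns scaled by the square-root weights, so the matrix products that follow evaluate to the standard unscented-transform statistics. Writing w_i for the squared entries of sqrtOfTestWeightings (a reading of the code, not additional computation):

  \bar y = \sum_i w_i\,\mathcal{Y}_i, \qquad
  P_{yy} = \sum_i w_i(\mathcal{Y}_i - \bar y)(\mathcal{Y}_i - \bar y)^{\top}, \qquad
  P_{xy} = \sum_i w_i(\mathcal{X}_i - \hat x)(\mathcal{Y}_i - \bar y)^{\top}, \qquad
  K = P_{xy}\,(P_{yy} + R)^{-1},

followed by the innovation test (\bar y - y)^{\top}(P_{yy} + R)^{-1}(\bar y - y) against the outlier threshold.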
Example 18
  void DpleInternal::init() {

    const_dim_ = getOption("const_dim");
    pos_def_ = getOption("pos_def");
    error_unstable_ = getOption("error_unstable");
    eps_unstable_ = getOption("eps_unstable");
    A_ = st_[Dple_STRUCT_A];
    V_ = st_[Dple_STRUCT_V];

    // Dimension sanity checks
    casadi_assert_message(A_.size()==V_.size(), "A and V arguments must be of same length, but got "
                          << A_.size() << " and " << V_.size() << ".");
    K_ = A_.size();
    for (int k=0;k<K_;++k) {
      casadi_assert_message(V_[k].issymmetric(), "V_i must be symmetric but got "
                            << V_[k].dimString() << " for i = " << k << ".");

      casadi_assert_message(A_[k].size1()==V_[k].size1(), "First dimension of A ("
                            << A_[k].size1() << ") must match dimension of symmetric V_i ("
                            << V_[k].size1() << ")" << " for i = " << k << ".");
    }

    if (const_dim_) {
      int n = A_[0].size1();
       for (int k=1;k<K_;++k) {
         casadi_assert_message(A_[k].size1()==n, "You have set const_dim option, but found "
                               "an A_i with dimension ( " << A_[k].dimString()
                               << " ) deviating from n = " << n << " at i = " << k << ".");
      }
    }

    // Allocate inputs
    ibuf_.resize(1+nrhs_);

    if (const_dim_) {
      input(0)  = DMatrix::zeros(horzcat(A_));
    } else {
      input(0)  = DMatrix::zeros(diagcat(A_));
    }

    for (int i=0;i<nrhs_;++i) {
      if (const_dim_) {
        input(1+i)  = DMatrix::zeros(horzcat(V_));
      } else {
        input(1+i)  = DMatrix::zeros(diagcat(V_));
      }
    }

    // Allocate outputs
    std::map<std::string, std::vector<Sparsity> > tmp;
    tmp["a"] = A_;
    tmp["v"] = V_;
    std::vector<Sparsity> P = LrDpleInternal::getSparsity(tmp);
    obuf_.resize(nrhs_);
    for (int i=0;i<nrhs_;++i) {
      if (const_dim_) {
        output(i) = DMatrix::zeros(horzcat(P));
      } else {
        output(i) = DMatrix::zeros(diagcat(P));
      }
    }

    FunctionInternal::init();

  }
Example 19
  void SDPSDQPInternal::init() {
    // Initialize the base classes
    SdqpSolverInternal::init();

    cholesky_ = LinearSolver("csparsecholesky", st_[SDQP_STRUCT_H]);
    cholesky_.init();

    MX g_socp = MX::sym("x", cholesky_.getFactorizationSparsity(true));
    MX h_socp = MX::sym("h", n_);

    MX f_socp = sqrt(inner_prod(h_socp, h_socp));
    MX en_socp = 0.5/f_socp;

    MX f_sdqp = MX::sym("f", input(SDQP_SOLVER_F).sparsity());
    MX g_sdqp = MX::sym("g", input(SDQP_SOLVER_G).sparsity());

    std::vector<MX> fi(n_+1);
    MX znp = MX::sparse(n_+1, n_+1);
    for (int k=0;k<n_;++k) {
      MX gk = vertcat(g_socp(ALL, k), DMatrix::sparse(1, 1));
      MX fk = -blockcat(znp, gk, gk.T(), DMatrix::sparse(1, 1));
      // TODO(Joel): replace with ALL
      fi.push_back(blkdiag(f_sdqp(ALL, Slice(f_sdqp.size1()*k, f_sdqp.size1()*(k+1))), fk));
    }
    MX fin = en_socp*DMatrix::eye(n_+2);
    fin(n_, n_+1) = en_socp;
    fin(n_+1, n_) = en_socp;

    fi.push_back(blkdiag(DMatrix::sparse(f_sdqp.size1(), f_sdqp.size1()), -fin));

    MX h0 = vertcat(h_socp, DMatrix::sparse(1, 1));
    MX g = blockcat(f_socp*DMatrix::eye(n_+1), h0, h0.T(), f_socp);

    g = blkdiag(g_sdqp, g);

    IOScheme mappingIn("g_socp", "h_socp", "f_sdqp", "g_sdqp");
    IOScheme mappingOut("f", "g");

    mapping_ = MXFunction(mappingIn("g_socp", g_socp, "h_socp", h_socp,
                                    "f_sdqp", f_sdqp, "g_sdqp", g_sdqp),
                          mappingOut("f", horzcat(fi), "g", g));
    mapping_.init();

    // Create an SdpSolver instance
    std::string sdpsolver_name = getOption("sdp_solver");
    sdpsolver_ = SdpSolver(sdpsolver_name,
                           sdpStruct("g", mapping_.output("g").sparsity(),
                                     "f", mapping_.output("f").sparsity(),
                                     "a", horzcat(input(SDQP_SOLVER_A).sparsity(),
                                                  Sparsity::sparse(nc_, 1))));

    if (hasSetOption("sdp_solver_options")) {
      sdpsolver_.setOption(getOption("sdp_solver_options"));
    }

    // Initialize the SDP solver
    sdpsolver_.init();

    sdpsolver_.input(SDP_SOLVER_C).at(n_)=1;

    // Output arguments
    setNumOutputs(SDQP_SOLVER_NUM_OUT);
    output(SDQP_SOLVER_X) = DMatrix::zeros(n_, 1);

    std::vector<int> r = range(input(SDQP_SOLVER_G).size1());
    output(SDQP_SOLVER_P) = sdpsolver_.output(SDP_SOLVER_P).isEmpty() ? DMatrix() :
        sdpsolver_.output(SDP_SOLVER_P)(r, r);
    output(SDQP_SOLVER_DUAL) = sdpsolver_.output(SDP_SOLVER_DUAL).isEmpty() ? DMatrix() :
        sdpsolver_.output(SDP_SOLVER_DUAL)(r, r);
    output(SDQP_SOLVER_COST) = 0.0;
    output(SDQP_SOLVER_DUAL_COST) = 0.0;
    output(SDQP_SOLVER_LAM_X) = DMatrix::zeros(n_, 1);
    output(SDQP_SOLVER_LAM_A) = DMatrix::zeros(nc_, 1);
  }
Example 20
//
//RHM: 26/6/08 New function, needed for shared ball stuff (and maybe others....)
//======================================================================
//
ukf4UpdateResult UKF4::linear2MeasurementUpdate(float Y1,float Y2, float SR11, float SR12, float SR22, int index1, int index2)
//
// KF update (called for example by shared ball) where a pair of measurements, [Y1;Y2];
// with variance Square Root of Covariance (SR) [SR11 SR12; 0 SR22]
// are predicted to be Xhat[index1][0] and Xhat[index2][0] respectively
// This is based on the function fieldObjectmeas, but simplified.
//
// Example Call (given data from wireless: ballX, ballY, SRballXX, SRballXY, SRballYY)
//      linear2MeasurementUpdate( ballX, ballY, SRballXX, SRballXY, SRballYY, 3, 4 )
{
  NMatrix SR = NMatrix(2,2,false);
  SR[0][0] = SR11;
  SR[0][1] = SR12;
  SR[1][1] = SR22;

  NMatrix R = NMatrix(2,2,false);
  R = SR * SR.transp();

  NMatrix Py = NMatrix(2, 2, false);
  NMatrix Pxy = NMatrix(4, 2, false);

  NMatrix CS = NMatrix(2, 4, false);
  CS.setRow(0, stateStandardDeviations.getRow(index1));
  CS.setRow(1, stateStandardDeviations.getRow(index2));

  Py = CS * CS.transp();
  Pxy = stateStandardDeviations * CS.transp();

  NMatrix K = Pxy * Invert22(Py + R);   //Invert22

  NMatrix y = NMatrix(2, 1, false);
  y[0][0] = Y1;
  y[1][0] = Y2;

  NMatrix yBar = NMatrix(2, 1, false); //Estimated values of the measurements Y1,Y2
  yBar[0][0] = stateEstimates[index1][0];
  yBar[1][0] = stateEstimates[index2][0];
  //RHM: (3) Outlier rejection.
  float innovation2 = convDble( (yBar - y).transp() * Invert22(Py + SR * SR.transp()) * (yBar - y) );

  //  oppLog((30, "innovation2: %5.3f, outlier thresh: %5.3f", innovation2, ukfParams.outlier_rejection_thresh));

  lastInnov2 = innovation2;
  lastStateEstimates = stateEstimates;
  lastStateStandardDeviations = stateStandardDeviations;
  lastFrameUpdated = frameUpdated;

  if (innovation2 > ukfParams.outlier_rejection_thresh){
    //std::cout<<"+++++++++++++++++not update+++++++++++++++++"<<std::endl;
    return UKF4_OUTLIER;
  }
  if(innovation2 < 0){
    //std::cout<<"**********************************"<<std::endl;
    //Py.print();
    //std::cout<<"**********************************"<<std::endl;
  }

  stateStandardDeviations = HT(horzcat(stateStandardDeviations - K * CS, K * SR));
  stateEstimates = stateEstimates - K * (yBar - y);
  return UKF4_OK;
}
Example 21
  void SdqpToSdp::init() {
    // Initialize the base classes
    SdqpSolverInternal::init();

    cholesky_ = LinearSolver("cholesky", "csparsecholesky", st_[SDQP_STRUCT_H]);

    MX g_socp = MX::sym("x", cholesky_.getFactorizationSparsity(true));
    MX h_socp = MX::sym("h", n_);

    MX f_socp = sqrt(inner_prod(h_socp, h_socp));
    MX en_socp = 0.5/f_socp;

    MX f_sdqp = MX::sym("f", input(SDQP_SOLVER_F).sparsity());
    MX g_sdqp = MX::sym("g", input(SDQP_SOLVER_G).sparsity());

    std::vector<MX> fi(n_+1);
    MX znp = MX(n_+1, n_+1);
    for (int k=0;k<n_;++k) {
      MX gk = vertcat(g_socp(ALL, k), MX(1, 1));
      MX fk = -blockcat(znp, gk, gk.T(), MX(1, 1));
      // TODO(Joel): replace with ALL
      fi.push_back(diagcat(f_sdqp(ALL, Slice(f_sdqp.size1()*k, f_sdqp.size1()*(k+1))), fk));
    }
    MX fin = en_socp*DMatrix::eye(n_+2);
    fin(n_, n_+1) = en_socp;
    fin(n_+1, n_) = en_socp;

    fi.push_back(diagcat(DMatrix(f_sdqp.size1(), f_sdqp.size1()), -fin));

    MX h0 = vertcat(h_socp, DMatrix(1, 1));
    MX g = blockcat(f_socp*DMatrix::eye(n_+1), h0, h0.T(), f_socp);

    g = diagcat(g_sdqp, g);

    Dict opts;
    opts["input_scheme"] = IOScheme("g_socp", "h_socp", "f_sdqp", "g_sdqp");
    opts["output_scheme"] = IOScheme("f", "g");
    mapping_ = MXFunction("mapping", make_vector(g_socp, h_socp, f_sdqp, g_sdqp),
                          make_vector(horzcat(fi), g), opts);

    Dict options;
    if (hasSetOption(optionsname())) options = getOption(optionsname());
    // Create an SdpSolver instance
    solver_ = SdpSolver("sdpsolver", getOption(solvername()),
                        make_map("g", mapping_.output("g").sparsity(),
                                 "f", mapping_.output("f").sparsity(),
                                 "a", horzcat(input(SDQP_SOLVER_A).sparsity(),
                                              Sparsity(nc_, 1))),
                        options);

    solver_.input(SDP_SOLVER_C).at(n_)=1;

    // Output arguments
    obuf_.resize(SDQP_SOLVER_NUM_OUT);
    output(SDQP_SOLVER_X) = DMatrix::zeros(n_, 1);

    std::vector<int> r = range(input(SDQP_SOLVER_G).size1());
    output(SDQP_SOLVER_P) = solver_.output(SDP_SOLVER_P).isempty() ? DMatrix() :
        solver_.output(SDP_SOLVER_P)(r, r);
    output(SDQP_SOLVER_DUAL) = solver_.output(SDP_SOLVER_DUAL).isempty() ? DMatrix() :
        solver_.output(SDP_SOLVER_DUAL)(r, r);
    output(SDQP_SOLVER_COST) = 0.0;
    output(SDQP_SOLVER_DUAL_COST) = 0.0;
    output(SDQP_SOLVER_LAM_X) = DMatrix::zeros(n_, 1);
    output(SDQP_SOLVER_LAM_A) = DMatrix::zeros(nc_, 1);
  }
Example 22
  void LiftingLrDpleInternal::init() {

    form_ = getOptionEnumValue("form");

    // Initialize the base classes
    LrDpleInternal::init();

    casadi_assert_message(!pos_def_,
      "pos_def option set to True: Solver only handles the indefinite case.");
    casadi_assert_message(const_dim_,
      "const_dim option set to False: Solver only handles the True case.");

    // We will construct an MXFunction to facilitate the calculation of derivatives

    MX As = MX::sym("As", input(LR_DLE_A).sparsity());
    MX Vs = MX::sym("Vs", input(LR_DLE_V).sparsity());
    MX Cs = MX::sym("Cs", input(LR_DLE_C).sparsity());
    MX Hs = MX::sym("Hs", input(LR_DLE_H).sparsity());

    n_ = A_[0].size1();

    // Chop-up the arguments
    std::vector<MX> As_ = horzsplit(As, n_);
    std::vector<MX> Vs_ = horzsplit(Vs, V_[0].size2());
    std::vector<MX> Cs_ = horzsplit(Cs, V_[0].size2());
    std::vector<MX> Hs_;
    if (with_H_) {
      Hs_ = horzsplit(Hs, Hsi_);
    }

    MX A;
    if (K_==1) {
      A = As;
    } else {
      if (form_==0) {
        MX AL = diagcat(vector_slice(As_, range(As_.size()-1)));

        MX AL2 = horzcat(AL, MX::sparse(AL.size1(), As_[0].size2()));
        MX AT = horzcat(MX::sparse(As_[0].size1(), AL.size2()), As_.back());
        A = vertcat(AT, AL2);
      } else {
        MX AL = diagcat(reverse(vector_slice(As_, range(As_.size()-1))));

        MX AL2 = horzcat(MX::sparse(AL.size1(), As_[0].size2()), AL);
        MX AT = horzcat(As_.back(), MX::sparse(As_[0].size1(), AL.size2()));
        A = vertcat(AL2, AT);
      }
    }

    MX V;
    MX C;

    MX H;

    if (form_==0) {
      V = diagcat(Vs_.back(), diagcat(vector_slice(Vs_, range(Vs_.size()-1))));
      if (with_C_) {
        C = diagcat(Cs_.back(), diagcat(vector_slice(Cs_, range(Cs_.size()-1))));
      }
    } else {
      V = diagcat(diagcat(reverse(vector_slice(Vs_, range(Vs_.size()-1)))), Vs_.back());
      if (with_C_) {
        C = diagcat(diagcat(reverse(vector_slice(Cs_, range(Cs_.size()-1)))), Cs_.back());
      }
    }

    if (with_H_) {
      H = diagcat(form_==0? Hs_ : reverse(Hs_));
    }

    // Create an LrDleSolver instance
    solver_ = LrDleSolver(getOption(solvername()),
                          lrdleStruct("a", A.sparsity(),
                                      "v", V.sparsity(),
                                      "c", C.sparsity(),
                                      "h", H.sparsity()));
    solver_.setOption("Hs", Hss_);
    if (hasSetOption(optionsname())) solver_.setOption(getOption(optionsname()));
    solver_.init();

    std::vector<MX> v_in(LR_DPLE_NUM_IN);
    v_in[LR_DLE_A] = As;
    v_in[LR_DLE_V] = Vs;
    if (with_C_) {
      v_in[LR_DLE_C] = Cs;
    }
    if (with_H_) {
      v_in[LR_DLE_H] = Hs;
    }

    std::vector<MX> Pr = solver_.call(lrdpleIn("a", A, "v", V, "c", C, "h", H));

    MX Pf = Pr[0];

    std::vector<MX> Ps = with_H_ ? diagsplit(Pf, Hsi_) : diagsplit(Pf, n_);

    if (form_==1) {
      Ps = reverse(Ps);
    }

    f_ = MXFunction(v_in, dpleOut("p", horzcat(Ps)));
    f_.setInputScheme(SCHEME_LR_DPLEInput);
    f_.setOutputScheme(SCHEME_LR_DPLEOutput);
    f_.init();

    Wrapper::checkDimensions();

  }
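Lifting turns the K periodic equations into one large discrete Lyapunov equation by placing the A_k on a block-cyclic shift. For form_ == 0 the construction above corresponds to (a sketch, with blocks ordered as in the code):

  \mathcal{A} =
  \begin{bmatrix}
    & & & A_{K-1}\\
    A_0 & & & \\
    & \ddots & & \\
    & & A_{K-2} &
  \end{bmatrix}, \qquad
  \mathcal{V} = \operatorname{diag}(V_{K-1}, V_0, \dots, V_{K-2}), \qquad
  \mathcal{P} = \mathcal{A}\,\mathcal{P}\,\mathcal{A}^{\top} + \mathcal{V},

whose block-diagonal solution stacks P_0, ..., P_{K-1}; form_ == 1 uses the reversed ordering, which is why Ps is reversed before the output is assembled.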
Example 23
// RHM 7/7/08: Additional function for resetting
void UKF4::Reset()
{
  // Add extra uncertainty
  stateStandardDeviations = HT(horzcat(stateStandardDeviations, sqrtOfProcessNoiseReset));
}
Example 24
  void LrDpleInternal::init() {

    const_dim_ = getOption("const_dim");
    pos_def_ = getOption("pos_def");
    error_unstable_ = getOption("error_unstable");
    eps_unstable_ = getOption("eps_unstable");
    Hs_ = getOption("Hs");

    casadi_assert(const_dim_);

    A_ = st_[LR_Dple_STRUCT_A];
    casadi_assert(A_.size()>0);
    int n = A_[0].size1();

    V_ = st_[LR_Dple_STRUCT_V];
    C_ = st_[LR_Dple_STRUCT_C];
    H_ = st_[LR_Dple_STRUCT_H];

    with_H_ = true;

    if (H_.empty()) {
      with_H_ = false;
    }



    with_C_ = true;
    if (C_.empty()) {
      with_C_ = false;
    }

    if (with_H_) {
      casadi_assert_message(A_.size()==H_.size(),
                          "A and H arguments must be of same length, but got "
                          << A_.size() << " and " << H_.size() << ".");
      casadi_assert_message(H_.size()==Hs_.size(),
                          "H and Hs arguments must be of same length, but got "
                          << H_.size() << " and " << Hs_.size() << ".");

      Hsi_.resize(1, 0);
      for (int k=0;k<H_.size();++k) {

        // Default hs: [H.size2()]
        if (Hs_[k].size()==0) {
          Hs_[k].push_back(H_[k].size2());
        }

        // Assert that sum of Hs entries match up to H.size2()
        int sum = 0;
        for (int i=0;i<Hs_[k].size();++i) {
          sum += Hs_[k][i];
        }

        Hsi_.push_back(Hsi_.back()+sum);

        casadi_assert_message(H_[k].size2()==sum,
                       "Number of columns in H @ k = " << k << "  (" << H_[k].size2() << "),"
                       << "must match sum(Hs[k]): " << sum << ".");
      }

    }


    // Dimension sanity checks
    casadi_assert_message(A_.size()==V_.size(), "A and V arguments must be of same length, but got "
                          << A_.size() << " and " << V_.size() << ".");
    K_ = A_.size();

    int m = with_C_? V_[0].size1(): n;

    for (int k=0;k<K_;++k) {
      casadi_assert_message(V_[k].issymmetric(), "V_i must be symmetric but got "
                            << V_[k].dimString() << " for i = " << k << ".");
      if (with_C_) {
        casadi_assert_message(n==C_[k].size1(), "Number of rows in C ("
                              << C_[k].size1() << ") must match dimension of square A @ k = "
                              << k << " (" << n << ")" << ".");
        casadi_assert_message(m==C_[k].size2(), "Number of columns in C ("
                              << C_[k].size2() << ") must match dimension of symmetric V @ k = "
                              << k << " (" << m << ")" << ".");
      } else {
          casadi_assert_message(A_[k].size1()==V_[k].size1(), "First dimension of A ("
                            << A_[k].size1() << ") must match dimension of symmetric V @ k = "
                            << k << " (" << V_[k].size1() << ")" << ".");
      }
    }



    if (const_dim_) {
      int n = A_[0].size1();
       for (int k=1;k<K_;++k) {
         casadi_assert_message(A_[k].size1()==n, "You have set const_dim option, but found "
                               "an A_i with dimension ( " << A_[k].dimString()
                               << " ) deviating from n = " << n << " at i = " << k << ".");
      }
    }

    Hss_.resize(0);

    if (Hs_.size()>0) {
      for (int k=0;k<K_;++k) {
        Hss_.insert(Hss_.end(), Hs_[k].begin(), Hs_[k].end());
      }
    }

    // Allocate inputs
    ibuf_.resize(LR_DPLE_NUM_IN);

    if (const_dim_) {
      input(LR_DPLE_A)  = DMatrix::zeros(horzcat(A_));

      if (with_C_) {
        input(LR_DPLE_C)  = DMatrix::zeros(horzcat(C_));
      }
      input(LR_DPLE_V)  = DMatrix::zeros(horzcat(V_));
      if (with_H_) {
        input(LR_DPLE_H)  = DMatrix::zeros(horzcat(H_));
      }
    } else {
      input(LR_DPLE_A)  = DMatrix::zeros(diagcat(A_));
    }

    /**for (int i=0;i<nrhs_;++i) {
      if (const_dim_) {
        input(1+i)  = DMatrix::zeros(horzcat(V_));
      } else {
        input(1+i)  = DMatrix::zeros(diagcat(V_));
      }
    }*/

    // Allocate outputs
    std::map<std::string, std::vector<Sparsity> > tmp;
    tmp["a"] = st_[LR_Dple_STRUCT_A];
    tmp["v"] = st_[LR_Dple_STRUCT_V];
    tmp["c"] = st_[LR_Dple_STRUCT_C];
    tmp["h"] = st_[LR_Dple_STRUCT_H];
    std::vector<Sparsity> P = getSparsity(tmp, Hs_);
    obuf_.resize(nrhs_*LR_DPLE_NUM_OUT);
    for (int i=0;i<nrhs_;++i) {
      if (const_dim_) {
        output(i) = DMatrix::zeros(horzcat(P));
      } else {
        output(i) = DMatrix::zeros(diagcat(P));
      }
    }

    FunctionInternal::init();

  }
Example 25
Matrix& LASSO::train(Matrix& X, Matrix& Y, Options& options) {

	int p = size(X, 2);
	int ny = size(Y, 2);
	double epsilon = options.epsilon;
	int maxIter = options.maxIter;
	double lambda = options.lambda;
	bool calc_OV = options.calc_OV;
	bool verbose = options.verbose;

	/*XNX = [X, -X];
		H_G = XNX' * XNX;
		D = repmat(diag(H_G), [1, n_y]);
		XNXTY = XNX' * Y;
	    A = (X' * X + lambda  * eye(p)) \ (X' * Y);*/

	Matrix& XNX = horzcat(2, &X, &uminus(X));
	Matrix& H_G = XNX.transpose().mtimes(XNX);
	double* Q = new double[size(H_G, 1)];
	for (int i = 0; i < size(H_G, 1); i++) {
		Q[i] = H_G.getEntry(i, i);
	}
	Matrix& XNXTY = XNX.transpose().mtimes(Y);
	Matrix& A = mldivide(
			plus(X.transpose().mtimes(X), times(lambda, eye(p))),
			X.transpose().mtimes(Y)
	);

	/*AA = [subplus(A); subplus(-A)];
		C = -XNXTY + lambda;
		Grad = C + H_G * AA;
		tol = epsilon * norm(Grad);
		PGrad = zeros(size(Grad));*/

	Matrix& AA = vertcat(2, &subplus(A), &subplus(uminus(A)));
	Matrix& C = plus(uminus(XNXTY), lambda);
	Matrix& Grad = plus(C, mtimes(H_G, AA));
	double tol = epsilon * norm(Grad);
	Matrix& PGrad = zeros(size(Grad));

	std::list<double> J;
	double fval = 0;
	// J(1) = sum(sum((Y - X * A).^2)) / 2 + lambda * sum(sum(abs(A)));
	if (calc_OV) {
		fval = sum(sum(pow(minus(Y, mtimes(X, A)), 2))) / 2 +
				lambda * sum(sum(abs(A)));
		J.push_back(fval);
	}

	Matrix& I_k = Grad.copy();
	double d = 0;
	int k = 0;

	DenseVector& SFPlusCi = *new DenseVector(AA.getColumnDimension());
	Matrix& S = H_G;
	Vector** SRows = nullptr;
	if (typeid(H_G) == typeid(DenseMatrix))
		SRows = denseMatrix2DenseRowVectors(S);
	else
		SRows = sparseMatrix2SparseRowVectors(S);

	Vector** CRows = nullptr;
	if (typeid(C) == typeid(DenseMatrix))
		CRows = denseMatrix2DenseRowVectors(C);
	else
		CRows = sparseMatrix2SparseRowVectors(C);

	double** FData = ((DenseMatrix&) AA).getData();
	double* FRow = nullptr;
	double* pr = nullptr;
	int K = 2 * p;

	while (true) {

		/*I_k = Grad < 0 | AA > 0;
		    I_k_com = not(I_k);
		    PGrad(I_k) = Grad(I_k);
		    PGrad(I_k_com) = 0;*/

		_or(I_k, lt(Grad, 0), gt(AA, 0));
		Matrix& I_k_com = _not(I_k);
		assign(PGrad, Grad);
		logicalIndexingAssignment(PGrad, I_k_com, 0);

		d = norm(PGrad, inf);
		if (d < tol) {
			if (verbose)
				println("Converge successfully!");
			break;
		}

		/*for i = 1:2*p
		            AA(i, :) = max(AA(i, :) - (C(i, :) + H_G(i, :) * AA) ./ (D(i, :)), 0);
		    end
		    A = AA(1:p,:) - AA(p+1:end,:);*/

		for (int i = 0; i < K; i++) {
			// SFPlusCi = SRows[i].operate(AA);
			operate(SFPlusCi, *SRows[i], AA);
			plusAssign(SFPlusCi, *CRows[i]);
			timesAssign(SFPlusCi, 1 / Q[i]);
			pr = SFPlusCi.getPr();
			// F(i, :) = max(F(i, :) - (S(i, :) * F + C(i, :)) / D[i]), 0);
			// F(i, :) = max(F(i, :) - SFPlusCi, 0)
			FRow = FData[i];
			for (int j = 0; j < AA.getColumnDimension(); j++) {
				FRow[j] = max(FRow[j] - pr[j], 0);
			}
		}

		// Grad = plus(C, mtimes(H_G, AA));
		plus(Grad, C, mtimes(H_G, AA));

		k = k + 1;
		if (k > maxIter) {
			if (verbose)
				println("Maximal iterations");
			break;
		}

		if (calc_OV) {
			fval = sum(sum(pow(minus(Y, mtimes(XNX, AA)), 2))) / 2 +
					lambda * sum(sum(abs(AA)));
			J.push_back(fval);
		}

		if (k % 10 == 0 && verbose) {
			if (calc_OV)
				fprintf("Iter %d - ||PGrad||: %f, ofv: %f\n", k, d, J.back());
			else
				fprintf("Iter %d - ||PGrad||: %f\n", k, d);

		}

	}

	Matrix& res = minus(
			AA.getSubMatrix(0, p - 1, 0, ny - 1),
			AA.getSubMatrix(p, 2 * p - 1, 0, ny - 1)
	);

	return res;

}
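The [X, -X] augmentation is the usual split of the coefficient matrix into nonnegative parts, which turns the l1 penalty into a linear term over the nonnegative orthant; the while-loop then runs projected coordinate descent on that reformulation. A sketch of the equivalence the Matlab-style comments allude to, with A = A^{+} - A^{-} and both parts entrywise nonnegative:

  \min_A \tfrac{1}{2}\|Y - XA\|_F^2 + \lambda\sum_{ij}|A_{ij}|
  \;=\;
  \min_{A^{+}, A^{-} \ge 0} \tfrac{1}{2}\Big\|Y - [X,\,-X]\begin{bmatrix}A^{+}\\ A^{-}\end{bmatrix}\Big\|_F^2
  + \lambda\,\mathbf{1}^{\top}(A^{+} + A^{-})\,\mathbf{1}.

The gradient of the smooth part is H_G * AA + C, exactly the Grad above, so each row update AA(i, :) = max(AA(i, :) - (C(i, :) + H_G(i, :)*AA)/Q[i], 0) is a projected coordinate step scaled by the diagonal entry Q[i] = H_G(i, i).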