Example #1
  void Sqpmethod::eval_g(const std::vector<double>& x, std::vector<double>& g) {
    try {
      double time1 = clock();

      // Quick return if no constraints
      if (ng_==0) return;

      // Pass the argument to the function
      nlp_.setInputNZ(x, NL_X);
      nlp_.setInput(input(NLP_SOLVER_P), NL_P);

      // Evaluate the function and tape
      nlp_.evaluate();

      // Get the result
      nlp_.output(NL_G).get(g);

      // Printing
      if (monitored("eval_g")) {
        userOut() << "x = " << nlp_.input(NL_X) << endl;
        userOut() << "g = " << nlp_.output(NL_G) << endl;
      }

      double time2 = clock();
      t_eval_g_ += (time2-time1)/CLOCKS_PER_SEC;
      n_eval_g_ += 1;
    } catch(exception& ex) {
      userOut<true, PL_WARN>() << "eval_g failed: " << ex.what() << endl;
      throw;
    }
  }
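The try/timing/monitor wrapper above is the same pattern used by all the eval_* callbacks in this file. A minimal, self-contained sketch of that pattern (names hypothetical, not CasADi API):

  #include <ctime>

  // Accumulated statistics, as in t_eval_g_ / n_eval_g_ above
  static double t_eval = 0;  // total CPU seconds spent
  static int n_eval = 0;     // number of calls

  void timed_eval() {
    double time1 = clock();
    // ... perform the actual evaluation here ...
    double time2 = clock();
    t_eval += (time2 - time1) / CLOCKS_PER_SEC;
    n_eval += 1;
  }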
Example #2
  void Sqpmethod::eval_f(const std::vector<double>& x, double& f) {
    try {
       // Log time
      double time1 = clock();

      // Pass the argument to the function
      nlp_.setInputNZ(x, NL_X);
      nlp_.setInput(input(NLP_SOLVER_P), NL_P);

      // Evaluate the function
      nlp_.evaluate();

      // Get the result
      nlp_.getOutput(f, NL_F);

      // Printing
      if (monitored("eval_f")) {
        userOut() << "x = " << nlp_.input(NL_X) << endl;
        userOut() << "f = " << f << endl;
      }
      double time2 = clock();
      t_eval_f_ += (time2-time1)/CLOCKS_PER_SEC;
      n_eval_f_ += 1;

    } catch(exception& ex) {
      userOut<true, PL_WARN>() << "eval_f failed: " << ex.what() << endl;
      throw;
    }
  }
Example #3
  void SXFunctionInternal::allocOpenCL() {
    // OpenCL return flag
    cl_int ret;

    // Generate the kernel source code
    stringstream ss;

    // Add kernel prefix
    ss << "__kernel ";

    // Generate the function
    CodeGenerator gen;
    generateFunction(ss, "evaluate", "__global const double*", "__global double*", "double", gen);

    // Form c-string
    std::string s = ss.str();
    if (verbose()) {
      userOut() << "Kernel source code for numerical evaluation:" << endl;
      userOut() << " ***** " << endl;
      userOut() << s;
      userOut() << " ***** " << endl;
    }
    const char* cstr = s.c_str();

    // Parse kernel source code
    program_ = clCreateProgramWithSource(sparsity_propagation_kernel_.context, 1,
                                         static_cast<const char **>(&cstr), 0, &ret);
    casadi_assert(ret == CL_SUCCESS);
    casadi_assert(program_ != 0);

    // Build Kernel Program
    compileProgram(program_);

    // Create OpenCL kernel for numerical evaluation
    kernel_ = clCreateKernel(program_, "evaluate", &ret);
    casadi_assert(ret == CL_SUCCESS);

    // Memory buffer for each of the input arrays
    input_memobj_.resize(nIn(), static_cast<cl_mem>(0));
    for (int i=0; i<input_memobj_.size(); ++i) {
      input_memobj_[i] = clCreateBuffer(sparsity_propagation_kernel_.context,
                                        CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                                        inputNoCheck(i).size() * sizeof(cl_double),
                                        static_cast<void*>(inputNoCheck(i).ptr()), &ret);
      casadi_assert(ret == CL_SUCCESS);
    }

    // Memory buffer for each of the output arrays
    output_memobj_.resize(nOut(), static_cast<cl_mem>(0));
    for (int i=0; i<output_memobj_.size(); ++i) {
      output_memobj_[i] = clCreateBuffer(sparsity_propagation_kernel_.context,
                                         CL_MEM_WRITE_ONLY | CL_MEM_USE_HOST_PTR,
                                         outputNoCheck(i).size() * sizeof(cl_double),
                                         static_cast<void*>(outputNoCheck(i).ptr()), &ret);
      casadi_assert(ret == CL_SUCCESS);
    }


  }
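The buffer creation above relies on CL_MEM_USE_HOST_PTR so the OpenCL runtime can work directly on the host arrays owned by the function instead of copying them. A minimal sketch of that idiom using only the standard OpenCL C API (error handling reduced to a null return):

  #include <CL/cl.h>
  #include <vector>

  // Wrap an existing host vector in a read-only OpenCL buffer without copying.
  cl_mem wrap_host_array(cl_context context, std::vector<double>& host) {
    cl_int ret;
    cl_mem buf = clCreateBuffer(context,
                                CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                                host.size() * sizeof(cl_double),
                                host.data(), &ret);
    return ret == CL_SUCCESS ? buf : 0;
  }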
Example #4
  void LapackLuDense::prepare() {
    double time_start=0;
    if (CasadiOptions::profiling && CasadiOptions::profilingBinary) {
      time_start = getRealTime(); // Start timer
      profileWriteEntry(CasadiOptions::profilingLog, this);
    }
    prepared_ = false;

    // Get the elements of the matrix, dense format
    input(0).get(mat_);

    if (equilibriate_) {
      // Calculate the col and row scaling factors
      double colcnd, rowcnd; // ratio of the smallest to the largest col/row scaling factor
      double amax; // absolute value of the largest matrix element
      int info = -100;
      dgeequ_(&ncol_, &nrow_, getPtr(mat_), &ncol_, getPtr(r_),
              getPtr(c_), &colcnd, &rowcnd, &amax, &info);
      if (info < 0)
          throw CasadiException("LapackLuDense::prepare: "
                                "dgeequ_ failed to calculate the scaling factors");
      if (info>0) {
        stringstream ss;
        ss << "LapackLuDense::prepare: ";
        if (info<=ncol_)  ss << (info-1) << "-th row (zero-based) is exactly zero";
        else             ss << (info-1-ncol_) << "-th col (zero-based) is exactly zero";

        userOut() << "Warning: " << ss.str() << endl;



        if (allow_equilibration_failure_)  userOut() << "Warning: " << ss.str() << endl;
        else                              casadi_error(ss.str());
      }

      // Equilibrate the matrix if scaling was successful
      if (info==0)
        dlaqge_(&ncol_, &nrow_, getPtr(mat_), &ncol_, getPtr(r_), getPtr(c_),
                &colcnd, &rowcnd, &amax, &equed_);
      else
        equed_ = 'N';
    }

    // Factorize the matrix
    int info = -100;
    dgetrf_(&ncol_, &ncol_, getPtr(mat_), &ncol_, getPtr(ipiv_), &info);
    if (info != 0) throw CasadiException("LapackLuDense::prepare: "
                                         "dgetrf_ failed to factorize the Jacobian");

    // Success if reached this point
    prepared_ = true;

    if (CasadiOptions::profiling && CasadiOptions::profilingBinary) {
      double time_stop = getRealTime(); // Stop timer
      profileWriteTime(CasadiOptions::profilingLog, this, 0, time_stop-time_start,
                       time_stop-time_start);
      profileWriteExit(CasadiOptions::profilingLog, this, time_stop-time_start);
    }
  }
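prepare() only factorizes; the corresponding solve step is not shown in this example. A minimal sketch of the standard LAPACK pairing, assuming the usual Fortran prototypes (dgetrs_ is the conventional companion routine, not taken from this file):

  extern "C" {
    void dgetrf_(int* m, int* n, double* a, int* lda, int* ipiv, int* info);
    void dgetrs_(char* trans, int* n, int* nrhs, double* a, int* lda,
                 int* ipiv, double* b, int* ldb, int* info);
  }

  // Factorize A (n-by-n, column-major, overwritten by its LU factors),
  // then solve A x = b in-place in b. Returns the LAPACK info code.
  int lu_solve(int n, double* a, int* ipiv, double* b) {
    int info = 0, nrhs = 1;
    dgetrf_(&n, &n, a, &n, ipiv, &info);
    if (info != 0) return info;  // zero pivot or invalid argument
    char trans = 'N';
    dgetrs_(&trans, &n, &nrhs, a, &n, ipiv, b, &n, &info);
    return info;
  }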
Example #5
  void Sqpmethod::reset_h() {
    // Initial Hessian approximation of BFGS
    if (!exact_hessian_) {
      Bk_.set(B_init_);
    }

    if (monitored("eval_h")) {
      userOut() << "x = " << x_ << endl;
      userOut() << "H = " << endl;
      Bk_.printSparse();
    }
  }
Example #6
  void SXFunctionInternal::evalSX(const SXElement** arg, SXElement** res,
                                  int* iw, SXElement* w) {
    if (verbose()) userOut() << "SXFunctionInternal::evalSXsparse begin" << endl;

    // Iterator to the binary operations
    vector<SXElement>::const_iterator b_it=operations_.begin();

    // Iterator to stack of constants
    vector<SXElement>::const_iterator c_it = constants_.begin();

    // Iterator to free variables
    vector<SXElement>::const_iterator p_it = free_vars_.begin();

    // Evaluate algorithm
    if (verbose()) {
      userOut() << "SXFunctionInternal::evalSXsparse evaluating algorithm forward" << endl;
    }
    for (vector<AlgEl>::const_iterator it = algorithm_.begin(); it!=algorithm_.end(); ++it) {
      switch (it->op) {
      case OP_INPUT:
        w[it->i0] = arg[it->i1]==0 ? 0 : arg[it->i1][it->i2];
        break;
      case OP_OUTPUT:
        if (res[it->i0]!=0) res[it->i0][it->i2] = w[it->i1];
        break;
      case OP_CONST:
        w[it->i0] = *c_it++;
        break;
      case OP_PARAMETER:
        w[it->i0] = *p_it++; break;
      default:
        {
          // Evaluate the function to a temporary value
          // (as it might overwrite the children in the work vector)
          SXElement f;
          switch (it->op) {
            CASADI_MATH_FUN_BUILTIN(w[it->i1], w[it->i2], f)
          }

          // If this new expression is identical to the expression used
          // to define the algorithm, then reuse
          const int depth = 2; // NOTE: a higher depth could possibly give more savings
          f.assignIfDuplicate(*b_it++, depth);

          // Finally save the function value
          w[it->i0] = f;
        }
      }
    }
    if (verbose()) userOut() << "SXFunctionInternal::evalSX end" << endl;
  }
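The loop above is a small register machine: every instruction reads work-vector slots i1/i2 and writes slot i0, which is how the topologically sorted expression graph is evaluated without recursion. A stripped-down sketch of the same dispatch (types and opcodes hypothetical):

  #include <vector>

  enum Op { OP_INPUT, OP_OUTPUT, OP_CONST, OP_ADD, OP_MUL };
  struct Instr { Op op; int i0, i1, i2; double d; };

  void run(const std::vector<Instr>& prog, const double* in, double* out,
           std::vector<double>& w) {
    for (std::vector<Instr>::const_iterator it = prog.begin();
         it != prog.end(); ++it) {
      switch (it->op) {
      case OP_INPUT:  w[it->i0] = in[it->i2]; break;   // read an input nonzero
      case OP_OUTPUT: out[it->i2] = w[it->i1]; break;  // write an output nonzero
      case OP_CONST:  w[it->i0] = it->d; break;        // load a constant
      case OP_ADD:    w[it->i0] = w[it->i1] + w[it->i2]; break;
      case OP_MUL:    w[it->i0] = w[it->i1] * w[it->i2]; break;
      }
    }
  }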
Example #7
  void CSparseCholeskyInternal::prepare() {

    prepared_ = false;

    // Get a reference to the nonzeros of the linear system
    const vector<double>& linsys_nz = input().data();

    // Make sure that all entries of the linear system are valid
    for (int k=0; k<linsys_nz.size(); ++k) {
      casadi_assert_message(!isnan(linsys_nz[k]), "Nonzero " << k << " is not-a-number");
      casadi_assert_message(!isinf(linsys_nz[k]), "Nonzero " << k << " is infinite");
    }

    if (verbose()) {
      userOut() << "CSparseCholeskyInternal::prepare: numeric factorization" << endl;
      userOut() << "linear system to be factorized = " << endl;
      input(0).printSparse();
    }

    if (L_) cs_nfree(L_);
    L_ = cs_chol(&AT_, S_);  // numeric Cholesky factorization
    if (L_==0) {
      DMatrix temp = input();
      temp.makeSparse();
      if (temp.sparsity().issingular()) {
        stringstream ss;
        ss << "CSparseCholeskyInternal::prepare: factorization failed due "
          "to matrix being singular. Matrix contains numerical zeros which are"
          " structurally non-zero. Promoting these zeros to be structural "
          "zeros, the matrix was found to be structurally rank deficient. "
          "sprank: " << sprank(temp.sparsity()) << " <-> " << temp.size2() << endl;
        if (verbose()) {
          ss << "Sparsity of the linear system: " << endl;
          input(LINSOL_A).sparsity().print(ss); // print detailed
        }
        throw CasadiException(ss.str());
      } else {
        stringstream ss;
        ss << "CSparseCholeskyInternal::prepare: factorization failed, "
            "check if Jacobian is singular" << endl;
        if (verbose()) {
          ss << "Sparsity of the linear system: " << endl;
          input(LINSOL_A).sparsity().print(ss); // print detailed
        }
        throw CasadiException(ss.str());
      }
    }
    casadi_assert(L_!=0);

    prepared_ = true;
  }
Example #8
  void CSparseCholeskyInternal::init() {
    // Call the init method of the base class
    LinearSolverInternal::init();

    AT_.nzmax = input().nnz();  // maximum number of entries
    AT_.m = input().size1(); // number of rows
    AT_.n = input().size2(); // number of columns
    AT_.p = const_cast<int*>(input().colind()); // column pointers (size n+1)
    AT_.i = const_cast<int*>(input().row()); // row indices (size nzmax)
    AT_.x = &input().front(); // numerical values (size nzmax)
    AT_.nz = -1; // number of entries in triplet form; -1 for compressed-column

    // Temporary
    temp_.resize(AT_.n);

    if (verbose()) {
      userOut() << "CSparseCholeskyInternal::prepare: symbolic factorization" << endl;
    }

    // Ordering and symbolic analysis
    int order = 0; // 0: natural ordering
    if (S_) cs_sfree(S_);
    S_ = cs_schol(order, &AT_);
  }
Example #9
 void SimpleIndefDleInternal::evaluate() {
   userOut() << "eval" << std::endl;
   input(DLE_A).printDense();
   for (int i=0;i<nIn();++i) {
     std::copy(input(i).begin(), input(i).end(), f_.input(i).begin());
   }
   f_.evaluate();
   for (int i=0;i<nOut();++i) {
     std::copy(f_.output(i).begin(), f_.output(i).end(), output(i).begin());
   }
 }
Example #10
  void Sqpmethod::solve_QP(const Matrix<double>& H, const std::vector<double>& g,
                             const std::vector<double>& lbx, const std::vector<double>& ubx,
                             const Matrix<double>& A, const std::vector<double>& lbA,
                             const std::vector<double>& ubA,
                             std::vector<double>& x_opt, std::vector<double>& lambda_x_opt,
                             std::vector<double>& lambda_A_opt) {

    // Pass data to QP solver
    qp_solver_.setInput(H, QP_SOLVER_H);
    qp_solver_.setInputNZ(g, QP_SOLVER_G);

    // Hot-starting if possible
    qp_solver_.setInputNZ(x_opt, QP_SOLVER_X0);

    //TODO(Joel): Fix hot-starting of dual variables
    //qp_solver_.setInput(lambda_A_opt, QP_SOLVER_LAMBDA_INIT);

    // Pass simple bounds
    qp_solver_.setInputNZ(lbx, QP_SOLVER_LBX);
    qp_solver_.setInputNZ(ubx, QP_SOLVER_UBX);

    // Pass linear bounds
    if (ng_>0) {
      qp_solver_.setInput(A, QP_SOLVER_A);
      qp_solver_.setInputNZ(lbA, QP_SOLVER_LBA);
      qp_solver_.setInputNZ(ubA, QP_SOLVER_UBA);
    }

    if (monitored("qp")) {
      userOut() << "H = " << endl;
      H.printDense();
      userOut() << "A = " << endl;
      A.printDense();
      userOut() << "g = " << g << endl;
      userOut() << "lbx = " << lbx << endl;
      userOut() << "ubx = " << ubx << endl;
      userOut() << "lbA = " << lbA << endl;
      userOut() << "ubA = " << ubA << endl;
    }

    // Solve the QP
    qp_solver_.evaluate();

    // Get the optimal solution
    qp_solver_.getOutputNZ(x_opt, QP_SOLVER_X);
    qp_solver_.getOutputNZ(lambda_x_opt, QP_SOLVER_LAM_X);
    qp_solver_.getOutputNZ(lambda_A_opt, QP_SOLVER_LAM_A);
    if (monitored("dx")) {
      userOut() << "dx = " << x_opt << endl;
    }
  }
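For reference, the QP assembled here is the local SQP subproblem in the step d = x_opt (notation mine; it matches the bound vectors qp_LBX_ = lbx - x etc. computed in Example #18):

  \min_d \; \tfrac{1}{2} d^T H d + g^T d
  \text{s.t.} \quad lbx - x \le d \le ubx - x
  \qquad\;\; lbA - g(x) \le A\,d \le ubA - g(x)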
Example #11
  XmlNode TinyXmlInterface::addNode(TiXmlNode* n) {
    if (!n) throw CasadiException("Error in TinyXmlInterface::addNode: Node is 0");
    XmlNode ret;

    // Save name
    ret.setName(n->Value());

    // Save attributes
    int type = n->Type();
    if (type == TiXmlNode::TINYXML_ELEMENT) {
      if (n->ToElement()!=0) {
        for (TiXmlAttribute* pAttrib=n->ToElement()->FirstAttribute();
             pAttrib;
             pAttrib=pAttrib->Next()) {
          ret.setAttribute(pAttrib->Name(), pAttrib->Value());
        }
      }
    } else if (type == TiXmlNode::TINYXML_DOCUMENT) {
      // do nothing
    } else {
      throw CasadiException("TinyXmlInterface::addNode");
    }

    // Count the number of children
    int num_children = 0;
    for (TiXmlNode* child = n->FirstChild(); child != 0; child= child->NextSibling()) {
      num_children++;
    }
    ret.children_.reserve(num_children);

    // add children
    int ch = 0;
    for (TiXmlNode* child = n->FirstChild(); child != 0; child= child->NextSibling(), ++ch) {
      int childtype = child->Type();

      if (childtype == TiXmlNode::TINYXML_ELEMENT) {
        XmlNode newnode = addNode(child);
        ret.children_.push_back(newnode);
        ret.child_indices_[newnode.getName()] = ch;
      } else if (childtype == TiXmlNode::TINYXML_COMMENT) {
        ret.comment_ = child->Value();
      } else if (childtype == TiXmlNode::TINYXML_TEXT) {
        ret.text_ = child->ToText()->Value();
      } else if (childtype == TiXmlNode::TINYXML_DECLARATION) {
        userOut() << "Warning: Skipped TiXmlNode::TINYXML_DECLARATION" << endl;
      } else {
        throw CasadiException("Error in TinyXmlInterface::addNode: Unknown node type");
      }
    }

    // Note: Return value optimization
    return ret;
  }
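The traversal pattern above (iterate FirstChild()/NextSibling(), branch on Type()) is the standard way to walk a TinyXML tree. A minimal sketch that counts element nodes with the same API:

  #include <tinyxml.h>

  // Recursively count element nodes below (and including) n.
  int count_elements(const TiXmlNode* n) {
    int count = (n->Type() == TiXmlNode::TINYXML_ELEMENT) ? 1 : 0;
    for (const TiXmlNode* child = n->FirstChild(); child != 0;
         child = child->NextSibling()) {
      count += count_elements(child);
    }
    return count;
  }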
Example #12
  void Sqpmethod::eval_grad_f(const std::vector<double>& x, double& f,
                                std::vector<double>& grad_f) {
    try {
      double time1 = clock();

      // Get function
      Function& gradF = this->gradF();

      // Pass the argument to the function
      gradF.setInputNZ(x, NL_X);
      gradF.setInput(input(NLP_SOLVER_P), NL_P);

      // Evaluate, adjoint mode
      gradF.evaluate();

      // Get the result
      gradF.output().get(grad_f);
      gradF.output(1+NL_X).get(f);

      // Printing
      if (monitored("eval_f")) {
        userOut() << "x = " << x << endl;
        userOut() << "f = " << f << endl;
      }

      if (monitored("eval_grad_f")) {
        userOut() << "x      = " << x << endl;
        userOut() << "grad_f = " << grad_f << endl;
      }
      double time2 = clock();
      t_eval_grad_f_ += (time2-time1)/CLOCKS_PER_SEC;
      n_eval_grad_f_ += 1;

    } catch(exception& ex) {
      userOut<true, PL_WARN>() << "eval_grad_f failed: " << ex.what() << endl;
      throw;
    }
  }
Example #13
  void Sqpmethod::eval_jac_g(const std::vector<double>& x, std::vector<double>& g,
                               Matrix<double>& J) {
    try {
      double time1 = clock();

      // Quick return if no constraints
      if (ng_==0) return;

      // Get function
      Function& jacG = this->jacG();

      // Pass the argument to the function
      jacG.setInputNZ(x, NL_X);
      jacG.setInput(input(NLP_SOLVER_P), NL_P);

      // Evaluate the function
      jacG.evaluate();

      // Get the output
      jacG.output(1+NL_G).get(g);
      jacG.output().get(J);

      if (monitored("eval_jac_g")) {
        userOut() << "x = " << x << endl;
        userOut() << "g = " << g << endl;
        userOut() << "J = " << endl;
        J.printSparse();
      }

      double time2 = clock();
      t_eval_jac_g_ += (time2-time1)/CLOCKS_PER_SEC;
      n_eval_jac_g_ += 1;

    } catch(exception& ex) {
      userOut<true, PL_WARN>() << "eval_jac_g failed: " << ex.what() << endl;
      throw;
    }
  }
Example #14
  void Sqpmethod::eval_h(const std::vector<double>& x, const std::vector<double>& lambda,
                           double sigma, Matrix<double>& H) {
    try {
      // Get function
      Function& hessLag = this->hessLag();

      // Pass the argument to the function
      hessLag.setInputNZ(x, HESSLAG_X);
      hessLag.setInput(input(NLP_SOLVER_P), HESSLAG_P);
      hessLag.setInput(sigma, HESSLAG_LAM_F);
      hessLag.setInputNZ(lambda, HESSLAG_LAM_G);

      // Evaluate
      hessLag.evaluate();

      // Get results
      hessLag.getOutput(H);

      if (monitored("eval_h")) {
        userOut() << "x = " << x << endl;
        userOut() << "H = " << endl;
        H.printSparse();
      }

      // Determine the regularization parameter using Gershgorin's theorem
      if (regularize_) {
        reg_ = getRegularization(H);
        if (reg_ > 0) {
          regularize(H, reg_);
        }
      }

    } catch(exception& ex) {
      userOut<true, PL_WARN>() << "eval_h failed: " << ex.what() << endl;
      throw;
    }
  }
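getRegularization() is not shown in this example; a plausible Gershgorin-based implementation (a sketch under that assumption, dense row-major storage, not the actual CasADi routine) computes the smallest diagonal shift that moves every Gershgorin disc onto the nonnegative axis:

  #include <cmath>
  #include <vector>

  // Lower-bound the smallest eigenvalue of symmetric H (n-by-n, row-major)
  // by Gershgorin's theorem and return the diagonal shift that makes the
  // bound nonnegative (0 if no shift is needed).
  double gershgorin_shift(const std::vector<double>& H, int n) {
    double lb = 0;
    for (int i = 0; i < n; ++i) {
      double radius = 0;
      for (int j = 0; j < n; ++j)
        if (j != i) radius += std::fabs(H[i*n + j]);
      if (H[i*n + i] - radius < lb) lb = H[i*n + i] - radius;
    }
    return -lb;  // adding shift*I keeps all discs in [0, infinity)
  }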
Example #15
  SX SXFunctionInternal::hess(int iind, int oind) {
    casadi_assert_message(output(oind).numel() == 1, "Function must be scalar");
    SX g = grad(iind, oind);
    g.makeDense();
    if (verbose())  userOut() << "SXFunctionInternal::hess: calculating gradient done " << endl;

    // Create function
    Dict opts;
    opts["verbose"] = getOption("verbose");
    SXFunction gfcn("gfcn", make_vector(inputv_.at(iind)),
                    make_vector(g), opts);

    // Calculate jacobian of gradient
    if (verbose()) {
      userOut() << "SXFunctionInternal::hess: calculating Jacobian " << endl;
    }
    SX ret = gfcn.jac(0, 0, false, true);
    if (verbose()) {
      userOut() << "SXFunctionInternal::hess: calculating Jacobian done" << endl;
    }

    // Return jacobian of the gradient
    return ret;
  }
Example #16
  void Conic::print_fstats(const ConicMemory* m) const {

    size_t maxNameLen=0;

    // Retrieve all qp keys
    std::vector<std::string> keys;
    for (auto &&s : m->fstats) {
      maxNameLen = max(s.first.size(), maxNameLen);
      keys.push_back(s.first);
    }

    // Print header
    std::stringstream s;
    std::string blankName(maxNameLen, ' ');
    s
      << blankName
      << "      proc           wall      num           mean             mean"
      << endl << blankName
      << "      time           time     evals       proc time        wall time";
    userOut() << s.str() << endl;

    std::sort(keys.begin(), keys.end());
    for (auto k : keys) {
      const FStats& fs = m->fstats.at(k);
      print_stats_line(maxNameLen, k, fs.n_call, fs.t_proc, fs.t_wall);
    }

    // Sum the previously printed stats
    double t_wall_all_previous = 0;
    double t_proc_all_previous = 0;
    for (auto k : keys) {
      const FStats& fs = m->fstats.at(k);
      t_proc_all_previous += fs.t_proc;
      t_wall_all_previous += fs.t_wall;
    }
    print_stats_line(maxNameLen, "all previous", -1, t_proc_all_previous, t_wall_all_previous);

  }
Example #17
  void SXFunctionInternal::init() {

    // Call the init function of the base class
    XFunctionInternal<SXFunction, SXFunctionInternal, SX, SXNode>::init();

    // Stack used to sort the computational graph
    stack<SXNode*> s;

    // All nodes
    vector<SXNode*> nodes;

    // Add the list of nodes
    int ind=0;
    for (vector<SX >::iterator it = outputv_.begin(); it != outputv_.end(); ++it, ++ind) {
      int nz=0;
      for (vector<SXElement>::iterator itc = it->begin(); itc != it->end(); ++itc, ++nz) {
        // Add outputs to the list
        s.push(itc->get());
        sort_depth_first(s, nodes);

        // A null pointer means an output instruction
        nodes.push_back(static_cast<SXNode*>(0));
      }
    }

    // Set the temporary variables to be the corresponding place in the sorted graph
    for (int i=0; i<nodes.size(); ++i) {
      if (nodes[i]) {
        nodes[i]->temp = i;
      }
    }

    // Sort the nodes by type
    constants_.clear();
    operations_.clear();
    for (vector<SXNode*>::iterator it = nodes.begin(); it != nodes.end(); ++it) {
      SXNode* t = *it;
      if (t) {
        if (t->isConstant())
          constants_.push_back(SXElement::create(t));
        else if (!t->isSymbolic())
          operations_.push_back(SXElement::create(t));
      }
    }

    // Use live variables?
    bool live_variables = getOption("live_variables");

    // Input instructions
    vector<pair<int, SXNode*> > symb_loc;

    // Current output and nonzero, start with the first one
    int curr_oind, curr_nz=0;
    for (curr_oind=0; curr_oind<outputv_.size(); ++curr_oind) {
      if (outputv_[curr_oind].nnz()!=0) {
        break;
      }
    }

    // Count the number of times each node is used
    vector<int> refcount(nodes.size(), 0);

    // Get the sequence of instructions for the virtual machine
    algorithm_.resize(0);
    algorithm_.reserve(nodes.size());
    for (vector<SXNode*>::iterator it=nodes.begin(); it!=nodes.end(); ++it) {
      // Current node
      SXNode* n = *it;

      // New element in the algorithm
      AlgEl ae;

      // Get operation
      ae.op = n==0 ? OP_OUTPUT : n->getOp();

      // Get instruction
      switch (ae.op) {
      case OP_CONST: // constant
        ae.d = n->getValue();
        ae.i0 = n->temp;
        break;
      case OP_PARAMETER: // a parameter or input
        symb_loc.push_back(make_pair(algorithm_.size(), n));
        ae.i0 = n->temp;
        break;
      case OP_OUTPUT: // output instruction
        ae.i0 = curr_oind;
        ae.i1 = outputv_[curr_oind].at(curr_nz)->temp;
        ae.i2 = curr_nz;

        // Go to the next nonzero
        curr_nz++;
        if (curr_nz>=outputv_[curr_oind].nnz()) {
          curr_nz=0;
          curr_oind++;
          for (; curr_oind<outputv_.size(); ++curr_oind) {
            if (outputv_[curr_oind].nnz()!=0) {
              break;
            }
          }
        }
        break;
      default:       // Unary or binary operation
        ae.i0 = n->temp;
        ae.i1 = n->dep(0).get()->temp;
        ae.i2 = n->dep(1).get()->temp;
      }

      // Number of dependencies
      int ndeps = casadi_math<double>::ndeps(ae.op);

      // Increase count of dependencies
      for (int c=0; c<ndeps; ++c) {
        refcount.at(c==0 ? ae.i1 : ae.i2)++;
      }
      // Add to algorithm
      algorithm_.push_back(ae);
    }

    // Place in the work vector for each of the nodes in the tree (overwrites the reference counter)
    vector<int> place(nodes.size());

    // Stack with unused elements in the work vector
    stack<int> unused;

    // Work vector size
    size_t worksize = 0;

    // Find a place in the work vector for the operation
    for (vector<AlgEl>::iterator it=algorithm_.begin(); it!=algorithm_.end(); ++it) {

      // Number of dependencies
      int ndeps = casadi_math<double>::ndeps(it->op);

      // decrease reference count of children
      // reverse order so that the first argument will end up at the top of the stack
      for (int c=ndeps-1; c>=0; --c) {
        int ch_ind = c==0 ? it->i1 : it->i2;
        int remaining = --refcount.at(ch_ind);
        if (remaining==0) unused.push(place[ch_ind]);
      }

      // Find a place to store the variable
      if (it->op!=OP_OUTPUT) {
        if (live_variables && !unused.empty()) {
          // Try to reuse a variable from the stack if possible (last in, first out)
          it->i0 = place[it->i0] = unused.top();
          unused.pop();
        } else {
          // Allocate a new variable
          it->i0 = place[it->i0] = worksize++;
        }
      }

      // Save the location of the children
      for (int c=0; c<ndeps; ++c) {
        if (c==0) {
          it->i1 = place[it->i1];
        } else {
          it->i2 = place[it->i2];
        }
      }

      // If binary, make sure that the second argument is the same as the first one
      // (in order to treat all operations as binary) NOTE: ugly
      if (ndeps==1 && it->op!=OP_OUTPUT) {
        it->i2 = it->i1;
      }
    }

    if (verbose()) {
      if (live_variables) {
        userOut() << "Using live variables: work array is "
             <<  worksize << " instead of " << nodes.size() << endl;
      } else {
        userOut() << "Live variables disabled." << endl;
      }
    }

    // Allocate work vectors (symbolic/numeric)
    alloc_w(worksize);
    alloc();
    s_work_.resize(worksize);

    // Reset the temporary variables
    for (int i=0; i<nodes.size(); ++i) {
      if (nodes[i]) {
        nodes[i]->temp = 0;
      }
    }

    // Now mark each input's place in the algorithm
    for (vector<pair<int, SXNode*> >::const_iterator it=symb_loc.begin();
         it!=symb_loc.end(); ++it) {
      it->second->temp = it->first+1;
    }

    // Add input instructions
    for (int ind=0; ind<inputv_.size(); ++ind) {
      int nz=0;
      for (vector<SXElement>::iterator itc = inputv_[ind].begin();
          itc != inputv_[ind].end();
          ++itc, ++nz) {
        int i = itc->getTemp()-1;
        if (i>=0) {
          // Mark as input
          algorithm_[i].op = OP_INPUT;

          // Location of the input
          algorithm_[i].i1 = ind;
          algorithm_[i].i2 = nz;

          // Mark input as read
          itc->setTemp(0);
        }
      }
    }

    // Locate free variables
    free_vars_.clear();
    for (vector<pair<int, SXNode*> >::const_iterator it=symb_loc.begin();
         it!=symb_loc.end(); ++it) {
      if (it->second->temp!=0) {
        // Save to list of free parameters
        free_vars_.push_back(SXElement::create(it->second));

        // Remove marker
        it->second->temp=0;
      }
    }

    // Initialize just-in-time compilation for numeric evaluation using OpenCL
    just_in_time_opencl_ = getOption("just_in_time_opencl");
    if (just_in_time_opencl_) {
#ifdef WITH_OPENCL
      freeOpenCL();
      allocOpenCL();
#else // WITH_OPENCL
      casadi_error("Option \"just_in_time_opencl\" true requires CasADi "
                   "to have been compiled with WITH_OPENCL=ON");
#endif // WITH_OPENCL
    }

    // Initialize just-in-time compilation for sparsity propagation using OpenCL
    just_in_time_sparsity_ = getOption("just_in_time_sparsity");
    if (just_in_time_sparsity_) {
#ifdef WITH_OPENCL
      spFreeOpenCL();
      spAllocOpenCL();
#else // WITH_OPENCL
      casadi_error("Option \"just_in_time_sparsity\" true requires CasADi to "
                   "have been compiled with WITH_OPENCL=ON");
#endif // WITH_OPENCL
    }

    if (CasadiOptions::profiling && CasadiOptions::profilingBinary) {

      profileWriteName(CasadiOptions::profilingLog, this, getOption("name"),
                       ProfilingData_FunctionType_SXFunction, algorithm_.size());
      int alg_counter = 0;

      // Iterator to free variables
      vector<SXElement>::const_iterator p_it = free_vars_.begin();

      std::stringstream stream;
      for (vector<AlgEl>::const_iterator it = algorithm_.begin(); it!=algorithm_.end(); ++it) {
        stream.str("");
        if (it->op==OP_OUTPUT) {
          stream << "output[" << it->i0 << "][" << it->i2 << "] = @" << it->i1;
        } else {
          stream << "@" << it->i0 << " = ";
          if (it->op==OP_INPUT) {
            stream << "input[" << it->i1 << "][" << it->i2 << "]";
          } else {
            if (it->op==OP_CONST) {
              stream << it->d;
            } else if (it->op==OP_PARAMETER) {
              stream << *p_it++;
            } else {
              int ndep = casadi_math<double>::ndeps(it->op);
              casadi_math<double>::printPre(it->op, stream);
              for (int c=0; c<ndep; ++c) {
                if (c==0) {
                  stream << "@" << it->i1;
                } else {
                  casadi_math<double>::printSep(it->op, stream);
                  stream << "@" << it->i2;
                }

              }
              casadi_math<double>::printPost(it->op, stream);
            }
          }
        }
        stream << std::endl;
        profileWriteSourceLine(CasadiOptions::profilingLog, this,
                               alg_counter++, stream.str(), it->op);
      }
    }

    // Print
    if (verbose()) {
      userOut() << "SXFunctionInternal::init Initialized " << getOption("name") << " ("
           << algorithm_.size() << " elementary operations)" << endl;
    }
  }
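The heart of the work-vector sizing above is a LIFO free-list: when a value's reference count drops to zero its slot is pushed on the `unused` stack, and the next result reuses it. Isolated as a sketch (function name hypothetical):

  #include <stack>

  // Return a work-vector slot for a new result: reuse the most recently
  // freed slot if live-variable reuse is enabled, otherwise grow the vector.
  int alloc_slot(std::stack<int>& unused, size_t& worksize, bool live_variables) {
    if (live_variables && !unused.empty()) {
      int slot = unused.top();
      unused.pop();
      return slot;
    }
    return static_cast<int>(worksize++);
  }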
Example #18
  void Sqpmethod::evaluate() {
    if (inputs_check_) checkInputs();
    checkInitialBounds();

    if (gather_stats_) {
      Dict iterations;
      iterations["inf_pr"] = std::vector<double>();
      iterations["inf_du"] = std::vector<double>();
      iterations["ls_trials"] = std::vector<double>();
      iterations["d_norm"] = std::vector<double>();
      iterations["obj"] = std::vector<double>();
      stats_["iterations"] = iterations;
    }


    // Get problem data
    const vector<double>& x_init = input(NLP_SOLVER_X0).data();
    const vector<double>& lbx = input(NLP_SOLVER_LBX).data();
    const vector<double>& ubx = input(NLP_SOLVER_UBX).data();
    const vector<double>& lbg = input(NLP_SOLVER_LBG).data();
    const vector<double>& ubg = input(NLP_SOLVER_UBG).data();

    // Set linearization point to initial guess
    copy(x_init.begin(), x_init.end(), x_.begin());

    // Initialize Lagrange multipliers of the NLP
    copy(input(NLP_SOLVER_LAM_G0).begin(), input(NLP_SOLVER_LAM_G0).end(), mu_.begin());
    copy(input(NLP_SOLVER_LAM_X0).begin(), input(NLP_SOLVER_LAM_X0).end(), mu_x_.begin());

    t_eval_f_ = t_eval_grad_f_ = t_eval_g_ = t_eval_jac_g_ = t_eval_h_ =
        t_callback_fun_ = t_callback_prepare_ = t_mainloop_ = 0;

    n_eval_f_ = n_eval_grad_f_ = n_eval_g_ = n_eval_jac_g_ = n_eval_h_ = 0;

    double time1 = clock();

    // Initial constraint Jacobian
    eval_jac_g(x_, gk_, Jk_);

    // Initial objective gradient
    eval_grad_f(x_, fk_, gf_);

    // Initialize or reset the Hessian or Hessian approximation
    reg_ = 0;
    if (exact_hessian_) {
      eval_h(x_, mu_, 1.0, Bk_);
    } else {
      reset_h();
    }

    // Evaluate the initial gradient of the Lagrangian
    copy(gf_.begin(), gf_.end(), gLag_.begin());
    if (ng_>0) casadi_mv_t(Jk_.ptr(), Jk_.sparsity(), getPtr(mu_), getPtr(gLag_));
    // gLag += mu_x_;
    transform(gLag_.begin(), gLag_.end(), mu_x_.begin(), gLag_.begin(), plus<double>());

    // Number of SQP iterations
    int iter = 0;

    // Number of line-search iterations
    int ls_iter = 0;

    // Last line-search successful
    bool ls_success = true;

    // Reset
    merit_mem_.clear();
    sigma_ = 0.;    // NOTE: Move this into the main optimization loop

    // Default stepsize
    double t = 0;

    // MAIN OPTIMIZATION LOOP
    while (true) {

      // Primal infeasibility
      double pr_inf = primalInfeasibility(x_, lbx, ubx, gk_, lbg, ubg);

      // Inf-norm of the Lagrangian gradient
      double gLag_norminf = norm_inf(gLag_);

      // Inf-norm of the step
      double dx_norminf = norm_inf(dx_);

      // Print header occasionally
      if (iter % 10 == 0) printIteration(userOut());

      // Printing information about the actual iterate
      printIteration(userOut(), iter, fk_, pr_inf, gLag_norminf, dx_norminf,
                     reg_, ls_iter, ls_success);

      if (gather_stats_) {
        Dict iterations = stats_["iterations"];
        std::vector<double> tmp=iterations["inf_pr"];
        tmp.push_back(pr_inf);
        iterations["inf_pr"] = tmp;

        tmp=iterations["inf_du"];
        tmp.push_back(gLag_norminf);
        iterations["inf_du"] = tmp;

        tmp=iterations["d_norm"];
        tmp.push_back(dx_norminf);
        iterations["d_norm"] = tmp;

        std::vector<int> tmp2=iterations["ls_trials"];
        tmp2.push_back(ls_iter);
        iterations["ls_trials"] = tmp2;

        tmp=iterations["obj"];
        tmp.push_back(fk_);
        iterations["obj"] = tmp;

        stats_["iterations"] = iterations;
      }

      // Call callback function if present
      if (!callback_.isNull()) {
        double time1 = clock();

        if (!output(NLP_SOLVER_F).isempty()) output(NLP_SOLVER_F).set(fk_);
        if (!output(NLP_SOLVER_X).isempty()) output(NLP_SOLVER_X).setNZ(x_);
        if (!output(NLP_SOLVER_LAM_G).isempty()) output(NLP_SOLVER_LAM_G).setNZ(mu_);
        if (!output(NLP_SOLVER_LAM_X).isempty()) output(NLP_SOLVER_LAM_X).setNZ(mu_x_);
        if (!output(NLP_SOLVER_G).isempty()) output(NLP_SOLVER_G).setNZ(gk_);

        Dict iteration;
        iteration["iter"] = iter;
        iteration["inf_pr"] = pr_inf;
        iteration["inf_du"] = gLag_norminf;
        iteration["d_norm"] = dx_norminf;
        iteration["ls_trials"] = ls_iter;
        iteration["obj"] = fk_;
        stats_["iteration"] = iteration;

        double time2 = clock();
        t_callback_prepare_ += (time2-time1)/CLOCKS_PER_SEC;
        time1 = clock();
        int ret = callback_(ref_, user_data_);
        time2 = clock();
        t_callback_fun_ += (time2-time1)/CLOCKS_PER_SEC;
        if (ret) {
          userOut() << endl;
          userOut() << "casadi::SQPMethod: aborted by callback..." << endl;
          stats_["return_status"] = "User_Requested_Stop";
          break;
        }
      }

      // Checking convergence criteria
      if (pr_inf < tol_pr_ && gLag_norminf < tol_du_) {
        userOut() << endl;
        userOut() << "casadi::SQPMethod: Convergence achieved after "
                  << iter << " iterations." << endl;
        stats_["return_status"] = "Solve_Succeeded";
        break;
      }

      if (iter >= max_iter_) {
        userOut() << endl;
        userOut() << "casadi::SQPMethod: Maximum number of iterations reached." << endl;
        stats_["return_status"] = "Maximum_Iterations_Exceeded";
        break;
      }

      if (iter > 0 && dx_norminf <= min_step_size_) {
        userOut() << endl;
        userOut() << "casadi::SQPMethod: Search direction becomes too small without "
            "convergence criteria being met." << endl;
        stats_["return_status"] = "Search_Direction_Becomes_Too_Small";
        break;
      }

      // Start a new iteration
      iter++;

      log("Formulating QP");
      // Formulate the QP
      transform(lbx.begin(), lbx.end(), x_.begin(), qp_LBX_.begin(), minus<double>());
      transform(ubx.begin(), ubx.end(), x_.begin(), qp_UBX_.begin(), minus<double>());
      transform(lbg.begin(), lbg.end(), gk_.begin(), qp_LBA_.begin(), minus<double>());
      transform(ubg.begin(), ubg.end(), gk_.begin(), qp_UBA_.begin(), minus<double>());

      // Solve the QP
      solve_QP(Bk_, gf_, qp_LBX_, qp_UBX_, Jk_, qp_LBA_, qp_UBA_, dx_, qp_DUAL_X_, qp_DUAL_A_);
      log("QP solved");

      // Detecting indefiniteness
      double gain = casadi_quad_form(Bk_.ptr(), Bk_.sparsity(), getPtr(dx_));
      if (gain < 0) {
        casadi_warning("Indefinite Hessian detected...");
      }

      // Calculate penalty parameter of merit function
      sigma_ = std::max(sigma_, 1.01*norm_inf(qp_DUAL_X_));
      sigma_ = std::max(sigma_, 1.01*norm_inf(qp_DUAL_A_));

      // Calculate L1-merit function in the actual iterate
      double l1_infeas = primalInfeasibility(x_, lbx, ubx, gk_, lbg, ubg);

      // Right-hand side of Armijo condition
      double F_sens = inner_prod(dx_, gf_);
      double L1dir = F_sens - sigma_ * l1_infeas;
      double L1merit = fk_ + sigma_ * l1_infeas;

      // Storing the actual merit function value in a list
      merit_mem_.push_back(L1merit);
      if (merit_mem_.size() > merit_memsize_) {
        merit_mem_.pop_front();
      }
      // Stepsize
      t = 1.0;
      double fk_cand;
      // Merit function value in candidate
      double L1merit_cand = 0;

      // Reset line-search counter, success marker
      ls_iter = 0;
      ls_success = true;

      // Line-search
      log("Starting line-search");
      if (max_iter_ls_>0) { // max_iter_ls_== 0 disables line-search

        // Line-search loop
        while (true) {
          for (int i=0; i<nx_; ++i) x_cand_[i] = x_[i] + t * dx_[i];

          try {
            // Evaluating objective and constraints
            eval_f(x_cand_, fk_cand);
            eval_g(x_cand_, gk_cand_);
          } catch(const CasadiException& ex) {
            // Silent ignore; line-search failed
            ls_iter++;
            // Backtracking
            t = beta_ * t;
            continue;
          }

          ls_iter++;

          // Calculating merit-function in candidate
          l1_infeas = primalInfeasibility(x_cand_, lbx, ubx, gk_cand_, lbg, ubg);

          L1merit_cand = fk_cand + sigma_ * l1_infeas;
          // Calculating maximal merit function value so far
          double meritmax = *max_element(merit_mem_.begin(), merit_mem_.end());
          if (L1merit_cand <= meritmax + t * c1_ * L1dir) {
            // Accepting candidate
            log("Line-search completed, candidate accepted");
            break;
          }

          // Line-search not successful, but we accept it.
          if (ls_iter == max_iter_ls_) {
            ls_success = false;
            log("Line-search completed, maximum number of iterations");
            break;
          }

          // Backtracking
          t = beta_ * t;
        }

        // Candidate accepted, update dual variables
        for (int i=0; i<ng_; ++i) mu_[i] = t * qp_DUAL_A_[i] + (1 - t) * mu_[i];
        for (int i=0; i<nx_; ++i) mu_x_[i] = t * qp_DUAL_X_[i] + (1 - t) * mu_x_[i];

        // Candidate accepted, update the primal variable
        copy(x_.begin(), x_.end(), x_old_.begin());
        copy(x_cand_.begin(), x_cand_.end(), x_.begin());

      } else {
        // Full step
        copy(qp_DUAL_A_.begin(), qp_DUAL_A_.end(), mu_.begin());
        copy(qp_DUAL_X_.begin(), qp_DUAL_X_.end(), mu_x_.begin());

        copy(x_.begin(), x_.end(), x_old_.begin());
        // x+=dx
        transform(x_.begin(), x_.end(), dx_.begin(), x_.begin(), plus<double>());
      }

      if (!exact_hessian_) {
        // Evaluate the gradient of the Lagrangian with the old x but new mu (for BFGS)
        copy(gf_.begin(), gf_.end(), gLag_old_.begin());
        if (ng_>0) casadi_mv_t(Jk_.ptr(), Jk_.sparsity(), getPtr(mu_), getPtr(gLag_old_));
        // gLag_old += mu_x_;
        transform(gLag_old_.begin(), gLag_old_.end(), mu_x_.begin(), gLag_old_.begin(),
                  plus<double>());
      }

      // Evaluate the constraint Jacobian
      log("Evaluating jac_g");
      eval_jac_g(x_, gk_, Jk_);

      // Evaluate the gradient of the objective function
      log("Evaluating grad_f");
      eval_grad_f(x_, fk_, gf_);

      // Evaluate the gradient of the Lagrangian with the new x and new mu
      copy(gf_.begin(), gf_.end(), gLag_.begin());
      if (ng_>0) casadi_mv_t(Jk_.ptr(), Jk_.sparsity(), getPtr(mu_), getPtr(gLag_));

      // gLag += mu_x_;
      transform(gLag_.begin(), gLag_.end(), mu_x_.begin(), gLag_.begin(), plus<double>());

      // Updating Lagrange Hessian
      if (!exact_hessian_) {
        log("Updating Hessian (BFGS)");
        // BFGS with careful updates and restarts
        if (iter % lbfgs_memory_ == 0) {
          // Reset Hessian approximation by dropping all off-diagonal entries
          const int* colind = Bk_.colind();      // Access sparsity (column offset)
          int ncol = Bk_.size2();
          const int* row = Bk_.row();            // Access sparsity (row)
          vector<double>& data = Bk_.data();             // Access nonzero elements
          for (int cc=0; cc<ncol; ++cc) {     // Loop over the columns of the Hessian
            for (int el=colind[cc]; el<colind[cc+1]; ++el) {
              // Loop over the nonzero elements of the column
              if (cc!=row[el]) data[el] = 0;               // Remove if off-diagonal entries
            }
          }
        }

        // Pass to BFGS update function
        bfgs_.setInput(Bk_, BFGS_BK);
        bfgs_.setInputNZ(x_, BFGS_X);
        bfgs_.setInputNZ(x_old_, BFGS_X_OLD);
        bfgs_.setInputNZ(gLag_, BFGS_GLAG);
        bfgs_.setInputNZ(gLag_old_, BFGS_GLAG_OLD);

        // Update the Hessian approximation
        bfgs_.evaluate();

        // Get the updated Hessian
        bfgs_.getOutput(Bk_);
        if (monitored("bfgs")) {
          userOut() << "x = " << x_ << endl;
          userOut() << "BFGS = "  << endl;
          Bk_.printSparse();
        }
      } else {
        // Exact Hessian
        log("Evaluating hessian");
        eval_h(x_, mu_, 1.0, Bk_);
      }
    }

    double time2 = clock();
    t_mainloop_ = (time2-time1)/CLOCKS_PER_SEC;

    // Save results to outputs
    output(NLP_SOLVER_F).set(fk_);
    output(NLP_SOLVER_X).setNZ(x_);
    output(NLP_SOLVER_LAM_G).setNZ(mu_);
    output(NLP_SOLVER_LAM_X).setNZ(mu_x_);
    output(NLP_SOLVER_G).setNZ(gk_);

    if (hasOption("print_time") && static_cast<bool>(getOption("print_time"))) {
      // Write timings
      userOut() << "time spent in eval_f: " << t_eval_f_ << " s.";
      if (n_eval_f_>0)
        userOut() << " (" << n_eval_f_ << " calls, " << (t_eval_f_/n_eval_f_)*1000
                  << " ms. average)";
      userOut() << endl;
      userOut() << "time spent in eval_grad_f: " << t_eval_grad_f_ << " s.";
      if (n_eval_grad_f_>0)
        userOut() << " (" << n_eval_grad_f_ << " calls, "
             << (t_eval_grad_f_/n_eval_grad_f_)*1000 << " ms. average)";
      userOut() << endl;
      userOut() << "time spent in eval_g: " << t_eval_g_ << " s.";
      if (n_eval_g_>0)
        userOut() << " (" << n_eval_g_ << " calls, " << (t_eval_g_/n_eval_g_)*1000
                  << " ms. average)";
      userOut() << endl;
      userOut() << "time spent in eval_jac_g: " << t_eval_jac_g_ << " s.";
      if (n_eval_jac_g_>0)
        userOut() << " (" << n_eval_jac_g_ << " calls, "
             << (t_eval_jac_g_/n_eval_jac_g_)*1000 << " ms. average)";
      userOut() << endl;
      userOut() << "time spent in eval_h: " << t_eval_h_ << " s.";
      if (n_eval_h_>0)
        userOut() << " (" << n_eval_h_ << " calls, " << (t_eval_h_/n_eval_h_)*1000
                  << " ms. average)";
      userOut() << endl;
      userOut() << "time spent in main loop: " << t_mainloop_ << " s." << endl;
      userOut() << "time spent in callback function: " << t_callback_fun_ << " s." << endl;
      userOut() << "time spent in callback preparation: " << t_callback_prepare_ << " s." << endl;
    }

    // Save statistics
    stats_["iter_count"] = iter;

    stats_["t_eval_f"] = t_eval_f_;
    stats_["t_eval_grad_f"] = t_eval_grad_f_;
    stats_["t_eval_g"] = t_eval_g_;
    stats_["t_eval_jac_g"] = t_eval_jac_g_;
    stats_["t_eval_h"] = t_eval_h_;
    stats_["t_mainloop"] = t_mainloop_;
    stats_["t_callback_fun"] = t_callback_fun_;
    stats_["t_callback_prepare"] = t_callback_prepare_;
    stats_["n_eval_f"] = n_eval_f_;
    stats_["n_eval_grad_f"] = n_eval_grad_f_;
    stats_["n_eval_g"] = n_eval_g_;
    stats_["n_eval_jac_g"] = n_eval_jac_g_;
    stats_["n_eval_h"] = n_eval_h_;
  }
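The backtracking loop above implements a non-monotone Armijo test on the L1 merit function. In symbols (notation mine, matching L1merit and L1dir in the code):

  \phi(x) = f(x) + \sigma \,\|\text{infeas}(x)\|_1
  D = \nabla f(x)^T d - \sigma \,\|\text{infeas}(x)\|_1

A step t is accepted when \phi(x + t\,d) \le \max_{k \in \text{memory}} \phi_k + t\, c_1\, D; otherwise t \leftarrow \beta\, t.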
Example #19
  void SXFunctionInternal::spAllocOpenCL() {
    // OpenCL return flag
    cl_int ret;

    // Generate the kernel source code
    stringstream ss;

    const char* fcn_name[2] = {"sp_evaluate_fwd", "sp_evaluate_adj"};
    for (int kernel=0; kernel<2; ++kernel) {
      bool use_fwd = kernel==0;
      ss << "__kernel void " << fcn_name[kernel] << "(";
      bool first=true;
      for (int i=0; i<nIn(); ++i) {
        if (first) first=false;
        else      ss << ", ";
        ss << "__global unsigned long *x" << i;
      }
      for (int i=0; i<nOut(); ++i) {
        if (first) first=false;
        else      ss << ", ";
        ss << "__global unsigned long *r" << i;
      }
      ss << ") { " << endl;

      if (use_fwd) {
        // Which variables have been declared
        vector<bool> declared(n_w_, false);

        // Propagate sparsity forward
        for (vector<AlgEl>::iterator it=algorithm_.begin(); it!=algorithm_.end(); ++it) {
          if (it->op==OP_OUTPUT) {
            ss << "if (r" << it->i0 << "!=0) r" << it->i0 << "[" << it->i2 << "]=" << "a" << it->i1;
          } else {
            // Declare result if not already declared
            if (!declared[it->i0]) {
              ss << "ulong ";
              declared[it->i0]=true;
            }

            // Where to store the result
            ss << "a" << it->i0 << "=";

            // What to store
            if (it->op==OP_CONST || it->op==OP_PARAMETER) {
              ss << "0";
            } else if (it->op==OP_INPUT) {
              ss << "x" << it->i1 << "[" << it->i2 << "]";
            } else {
              int ndep = casadi_math<double>::ndeps(it->op);
              for (int c=0; c<ndep; ++c) {
                if (c==0) {
                  ss << "a" << it->i1;
                } else {
                  ss << "|";
                  ss << "a" << it->i2;
                }
              }
            }
          }
          ss  << ";" << endl;
        }

      } else { // Backward propagation
        // Temporary variable
        ss << "ulong t;" << endl;

        // Declare and initialize work vector
        for (int i=0; i<n_w_; ++i) {
          ss << "ulong a" << i << "=0;"<< endl;
        }

        // Propagate sparsity backward
        for (vector<AlgEl>::reverse_iterator it=algorithm_.rbegin(); it!=algorithm_.rend(); ++it) {
          if (it->op==OP_OUTPUT) {
            ss << "if (r" << it->i0 << "!=0) a" << it->i1
               << "|=r" << it->i0 << "[" << it->i2 << "];" << endl;
          } else {
            if (it->op==OP_INPUT) {
              ss << "x" << it->i1 << "[" << it->i2 << "]=a" << it->i0 << "; ";
              ss << "a" << it->i0 << "=0;" << endl;
            } else if (it->op==OP_CONST || it->op==OP_PARAMETER) {
              ss << "a" << it->i0 << "=0;" << endl;
            } else {
              int ndep = casadi_math<double>::ndeps(it->op);
              ss << "t=a" << it->i0 << "; ";
              ss << "a" << it->i0 << "=0; ";
              ss << "a" << it->i1 << "|=" << "t" << "; ";
              if (ndep>1) {
                ss << "a" << it->i2 << "|=" << "t" << "; ";
              }
              ss << endl;
            }
          }
        }
      }
      ss << "}" << endl << endl;
    }

    // Form c-string
    std::string s = ss.str();
    if (verbose()) {
      userOut() << "Kernel source code for sparsity propagation:" << endl;
      userOut() << " ***** " << endl;
      userOut() << s;
      userOut() << " ***** " << endl;
    }
    const char* cstr = s.c_str();

    // Parse kernel source code
    sp_program_ = clCreateProgramWithSource(sparsity_propagation_kernel_.context,
                                            1, static_cast<const char **>(&cstr), 0, &ret);
    casadi_assert(ret == CL_SUCCESS);
    casadi_assert(sp_program_ != 0);

    // Build Kernel Program
    compileProgram(sp_program_);

    // Create OpenCL kernel for forward propagation
    sp_fwd_kernel_ = clCreateKernel(sp_program_, fcn_name[0], &ret);
    casadi_assert(ret == CL_SUCCESS);

    // Create OpenCL kernel for backward propagation
    sp_adj_kernel_ = clCreateKernel(sp_program_, fcn_name[1], &ret);
    casadi_assert(ret == CL_SUCCESS);

    // Memory buffer for each of the input arrays
    sp_input_memobj_.resize(nIn(), static_cast<cl_mem>(0));
    for (int i=0; i<sp_input_memobj_.size(); ++i) {
      sp_input_memobj_[i] = clCreateBuffer(sparsity_propagation_kernel_.context,
                                           CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR,
                                           inputNoCheck(i).size() * sizeof(cl_ulong),
                                           reinterpret_cast<void*>(inputNoCheck(i).ptr()), &ret);
      casadi_assert(ret == CL_SUCCESS);
    }

    // Memory buffer for each of the output arrays
    sp_output_memobj_.resize(nOut(), static_cast<cl_mem>(0));
    for (int i=0; i<sp_output_memobj_.size(); ++i) {
      sp_output_memobj_[i] = clCreateBuffer(sparsity_propagation_kernel_.context,
                                            CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR,
                                            outputNoCheck(i).size() * sizeof(cl_ulong),
                                            reinterpret_cast<void*>(outputNoCheck(i).ptr()), &ret);
      casadi_assert(ret == CL_SUCCESS);
    }
  }
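The generated kernels propagate sparsity as 64-bit dependency bitvectors: the forward pass ORs the operands' patterns into the result, and the adjoint pass ORs the result's pattern back into the operands. The core of both reduces to (a sketch, not the generated source):

  typedef unsigned long long bvec_t;  // 64 dependency flags per word

  // Forward: the result depends on everything either operand depends on.
  inline bvec_t sp_fwd(bvec_t a, bvec_t b) { return a | b; }

  // Adjoint: seeds flowing into the result flow back into both operands.
  inline void sp_adj(bvec_t& a, bvec_t& b, bvec_t& r) {
    a |= r; b |= r; r = 0;
  }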
Example #20
 /// Core of the class: the method that directs the messages
 virtual int print() {
   userOut() << messageBuffer_ << std::endl;
   return 0;
 }
Example #21
  void Newton::solve(void* mem) const {
    auto m = static_cast<NewtonMemory*>(mem);

    // Get the initial guess
    casadi_copy(m->iarg[iin_], n_, m->x);

    // Perform the Newton iterations
    m->iter=0;
    bool success = true;
    while (true) {
      // Break if maximum number of iterations already reached
      if (m->iter >= max_iter_) {
        log("eval", "Max. iterations reached.");
        m->return_status = "max_iteration_reached";
        success = false;
        break;
      }

      // Start a new iteration
      m->iter++;

      // Use x to evaluate J
      copy_n(m->iarg, n_in(), m->arg);
      m->arg[iin_] = m->x;
      m->res[0] = m->jac;
      copy_n(m->ires, n_out(), m->res+1);
      m->res[1+iout_] = m->f;
      calc_function(m, "jac_f_z");

      // Check convergence
      double abstol = 0;
      if (abstol_ != numeric_limits<double>::infinity()) {
        for (int i=0; i<n_; ++i) {
          abstol = max(abstol, fabs(m->f[i]));
        }
        if (abstol <= abstol_) {
          casadi_msg("Converged to acceptable tolerance - abstol: " << abstol_);
          break;
        }
      }

      // Factorize the linear solver with J
      linsol_.factorize(m->jac);
      linsol_.solve(m->f, 1, false);

      // Check convergence again
      double abstolStep=0;
      if (numeric_limits<double>::infinity() != abstolStep_) {
        for (int i=0; i<n_; ++i) {
          abstolStep = max(abstolStep, fabs(m->f[i]));
        }
        if (abstolStep <= abstolStep_) {
          casadi_msg("Converged to acceptable tolerance - abstolStep: " << abstolStep_);
          break;
        }
      }

      if (print_iteration_) {
        // Only print iteration header once in a while
        if (m->iter % 10==0) {
          printIteration(userOut());
        }

        // Print iteration information
        printIteration(userOut(), m->iter, abstol, abstolStep);
      }

      // Update Xk+1 = Xk - J^(-1) F
      casadi_axpy(n_, -1., m->f, m->x);
    }

    // Get the solution
    casadi_copy(m->x, n_, m->ires[iout_]);

    // Store the iteration count
    if (success) m->return_status = "success";

    casadi_msg("Newton::solveNonLinear():end after " << m->iter << " steps");
  }
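Since linsol_.solve() overwrites m->f with the solved step J^{-1} F, the casadi_axpy call at the end of each pass performs the classical Newton update (standard notation, not from the source):

  x_{k+1} = x_k - J(x_k)^{-1} F(x_k)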
Example #22
  void Newton::solveNonLinear() {
    casadi_msg("Newton::solveNonLinear:begin");

    // Set up timers for profiling
    double time_zero=0;
    double time_start=0;
    double time_stop=0;
    if (CasadiOptions::profiling && !CasadiOptions::profilingBinary) {
      time_zero = getRealTime();
      CasadiOptions::profilingLog  << "start " << this << ":" <<getOption("name") << std::endl;
    }

    // Pass the inputs to J
    for (int i=0; i<nIn(); ++i) {
      if (i!=iin_) jac_.setInput(input(i), i);
    }

    // Aliases
    DMatrix &u = output(iout_);
    DMatrix &J = jac_.output(0);
    DMatrix &F = jac_.output(1+iout_);

    // Perform the Newton iterations
    int iter=0;

    bool success = true;

    while (true) {
      // Break if maximum number of iterations already reached
      if (iter >= max_iter_) {
        log("evaluate", "Max. iterations reached.");
        stats_["return_status"] = "max_iteration_reached";
        success = false;
        break;
      }

      // Start a new iteration
      iter++;

      // Print progress
      if (monitored("step") || monitored("stepsize")) {
        userOut() << "Step " << iter << "." << std::endl;
      }

      if (monitored("step")) {
        userOut() << "  u = " << u << std::endl;
      }

      // Use u to evaluate J
      jac_.setInput(u, iin_);
      for (int i=0; i<nIn(); ++i)
        if (i!=iin_) jac_.setInput(input(i), i);

      if (CasadiOptions::profiling) {
        time_start = getRealTime(); // Start timer
      }

      jac_.evaluate();

      // Write out profiling information
      if (CasadiOptions::profiling && !CasadiOptions::profilingBinary) {
        time_stop = getRealTime(); // Stop timer
        CasadiOptions::profilingLog
            << (time_stop-time_start)*1e6 << " ns | "
            << (time_stop-time_zero)*1e3 << " ms | "
            << this << ":" << getOption("name") << ":0|" << jac_.get() << ":"
            << jac_.getOption("name") << "|evaluate jacobian" << std::endl;
      }

      if (monitored("F")) userOut() << "  F = " << F << std::endl;
      if (monitored("normF"))
        userOut() << "  F (min, max, 1-norm, 2-norm) = "
                  << (*std::min_element(F.data().begin(), F.data().end()))
                  << ", " << (*std::max_element(F.data().begin(), F.data().end()))
                  << ", " << norm_1(F) << ", " << norm_F(F) << std::endl;
      if (monitored("J")) userOut() << "  J = " << J << std::endl;

      double abstol = 0;
      if (numeric_limits<double>::infinity() != abstol_) {
        abstol = std::max((*std::max_element(F.data().begin(),
                                                  F.data().end())),
                               -(*std::min_element(F.data().begin(),
                                                   F.data().end())));
        if (abstol <= abstol_) {
          casadi_msg("Converged to acceptable tolerance - abstol: " << abstol_);
          break;
        }
      }

      // Prepare the linear solver with J
      linsol_.setInput(J, LINSOL_A);

      if (CasadiOptions::profiling) {
        time_start = getRealTime(); // Start timer
      }
      linsol_.prepare();
      // Write out profiling information
      if (CasadiOptions::profiling && !CasadiOptions::profilingBinary) {
        time_stop = getRealTime(); // Stop timer
        CasadiOptions::profilingLog
            << (time_stop-time_start)*1e6 << " ns | "
            << (time_stop-time_zero)*1e3 << " ms | "
            << this << ":" << getOption("name")
            << ":1||prepare linear system" << std::endl;
      }

      if (CasadiOptions::profiling) {
        time_start = getRealTime(); // Start timer
      }
      // Solve against F
      linsol_.solve(&F.front(), 1, false);
      if (CasadiOptions::profiling && !CasadiOptions::profilingBinary) {
        time_stop = getRealTime(); // Stop timer
        CasadiOptions::profilingLog
            << (time_stop-time_start)*1e6 << " ns | "
            << (time_stop-time_zero)*1e3 << " ms | "
            << this << ":" << getOption("name") << ":2||solve linear system" << std::endl;
      }

      if (monitored("step")) {
        userOut() << "  step = " << F << std::endl;
      }

      double abstolStep=0;
      if (numeric_limits<double>::infinity() != abstolStep_) {
        abstolStep = std::max((*std::max_element(F.data().begin(),
                                                  F.data().end())),
                               -(*std::min_element(F.data().begin(),
                                                   F.data().end())));
        if (monitored("stepsize")) {
          userOut() << "  stepsize = " << abstolStep << std::endl;
        }
        if (abstolStep <= abstolStep_) {
          casadi_msg("Converged to acceptable tolerance - abstolStep: " << abstolStep_);
          break;
        }
      }

      if (print_iteration_) {
        // Only print iteration header once in a while
        if (iter % 10==0) {
          printIteration(userOut());
        }

        // Print iteration information
        printIteration(userOut(), iter, abstol, abstolStep);
      }

      // Update Xk+1 = Xk - J^(-1) F
      std::transform(u.begin(), u.end(), F.begin(), u.begin(), std::minus<double>());

    }

    // Get auxiliary outputs
    for (int i=0; i<nOut(); ++i) {
      if (i!=iout_) jac_.getOutput(output(i), 1+i);
    }

    // Store the iteration count
    if (gather_stats_) stats_["iter"] = iter;

    if (success) stats_["return_status"] = "success";

    // Factorization up-to-date
    fact_up_to_date_ = true;

    casadi_msg("Newton::solveNonLinear():end after " << iter << " steps");
  }
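
The body of the loop above is a textbook Newton iteration: evaluate the residual F and the Jacobian J at the current guess u, factorize J, solve J*step = F in place (so F ends up holding the step), update u -= step, and stop when either the residual or the step falls below its tolerance in the infinity norm. The following is a self-contained sketch of the same scheme on a toy dense 2x2 system; the residual and all names are illustrative, not CasADi API:

#include <algorithm>
#include <array>
#include <cmath>
#include <iostream>

// Dense Newton iteration on a toy 2x2 system F(u) = 0.
// The residual, Jacobian and all names here are illustrative.
int main() {
  std::array<double, 2> u = {1.0, 1.0};
  const double abstol = 1e-10;      // residual tolerance
  const double abstolStep = 1e-12;  // step-size tolerance
  const int max_iter = 50;

  for (int iter = 0; iter < max_iter; ++iter) {
    // Evaluate F(u) and J(u) for F = (u0^2 + u1 - 3, u0 - u1)
    std::array<double, 2> F = {u[0]*u[0] + u[1] - 3.0, u[0] - u[1]};
    const double J[2][2] = {{2.0*u[0], 1.0}, {1.0, -1.0}};

    // Infinity-norm convergence test on the residual
    if (std::max(std::abs(F[0]), std::abs(F[1])) <= abstol) break;

    // Solve J*step = F; 2x2 Cramer's rule stands in for the linear solver
    const double det = J[0][0]*J[1][1] - J[0][1]*J[1][0];
    const std::array<double, 2> step = {
        (F[0]*J[1][1] - F[1]*J[0][1])/det,
        (J[0][0]*F[1] - J[1][0]*F[0])/det};

    // u <- u - step, as in the std::transform call above
    u[0] -= step[0];
    u[1] -= step[1];

    // Infinity-norm convergence test on the step
    if (std::max(std::abs(step[0]), std::abs(step[1])) <= abstolStep) break;
  }
  std::cout << "u = (" << u[0] << ", " << u[1] << ")" << std::endl;
  return 0;
}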
Example #23
  void Sqpmethod::init() {
    // Call the init method of the base class
    NlpSolverInternal::init();

    // Read options
    max_iter_ = getOption("max_iter");
    max_iter_ls_ = getOption("max_iter_ls");
    c1_ = getOption("c1");
    beta_ = getOption("beta");
    merit_memsize_ = getOption("merit_memory");
    lbfgs_memory_ = getOption("lbfgs_memory");
    tol_pr_ = getOption("tol_pr");
    tol_du_ = getOption("tol_du");
    regularize_ = getOption("regularize");
    exact_hessian_ = getOption("hessian_approximation")=="exact";
    min_step_size_ = getOption("min_step_size");

    // Get/generate required functions
    gradF();
    jacG();
    if (exact_hessian_) {
      hessLag();
    }

    // Allocate a QP solver
    Sparsity H_sparsity = exact_hessian_ ? hessLag().output().sparsity()
        : Sparsity::dense(nx_, nx_);
    H_sparsity = H_sparsity + Sparsity::diag(nx_);
    Sparsity A_sparsity = jacG().isNull() ? Sparsity(0, nx_)
        : jacG().output().sparsity();

    // QP solver options
    Dict qp_solver_options;
    if (hasSetOption("qp_solver_options")) {
      qp_solver_options = getOption("qp_solver_options");
    }

    // Allocate a QP solver
    qp_solver_ = QpSolver("qp_solver", getOption("qp_solver"),
                          make_map("h", H_sparsity, "a", A_sparsity),
                          qp_solver_options);

    // Lagrange multipliers of the NLP
    mu_.resize(ng_);
    mu_x_.resize(nx_);

    // Lagrange gradient in the next iterate
    gLag_.resize(nx_);
    gLag_old_.resize(nx_);

    // Current linearization point
    x_.resize(nx_);
    x_cand_.resize(nx_);
    x_old_.resize(nx_);

    // Constraint function value
    gk_.resize(ng_);
    gk_cand_.resize(ng_);

    // Hessian approximation
    Bk_ = DMatrix::zeros(H_sparsity);

    // Jacobian
    Jk_ = DMatrix::zeros(A_sparsity);

    // Bounds of the QP
    qp_LBA_.resize(ng_);
    qp_UBA_.resize(ng_);
    qp_LBX_.resize(nx_);
    qp_UBX_.resize(nx_);

    // QP solution
    dx_.resize(nx_);
    qp_DUAL_X_.resize(nx_);
    qp_DUAL_A_.resize(ng_);

    // Gradient of the objective
    gf_.resize(nx_);

    // Create Hessian update function
    if (!exact_hessian_) {
      // Create expressions corresponding to Bk, x, x_old, gLag and gLag_old
      SX Bk = SX::sym("Bk", H_sparsity);
      SX x = SX::sym("x", input(NLP_SOLVER_X0).sparsity());
      SX x_old = SX::sym("x", x.sparsity());
      SX gLag = SX::sym("gLag", x.sparsity());
      SX gLag_old = SX::sym("gLag_old", x.sparsity());

      SX sk = x - x_old;
      SX yk = gLag - gLag_old;
      SX qk = mul(Bk, sk);

      // Powell damping: blend yk towards qk if the curvature is too small
      SX skBksk = inner_prod(sk, qk);
      SX omega = if_else(inner_prod(yk, sk) < 0.2 * inner_prod(sk, qk),
                               0.8 * skBksk / (skBksk - inner_prod(sk, yk)),
                               1);
      yk = omega * yk + (1 - omega) * qk;
      SX theta = 1. / inner_prod(sk, yk);
      SX phi = 1. / inner_prod(qk, sk);
      SX Bk_new = Bk + theta * mul(yk, yk.T()) - phi * mul(qk, qk.T());

      // Inputs of the BFGS update function
      vector<SX> bfgs_in(BFGS_NUM_IN);
      bfgs_in[BFGS_BK] = Bk;
      bfgs_in[BFGS_X] = x;
      bfgs_in[BFGS_X_OLD] = x_old;
      bfgs_in[BFGS_GLAG] = gLag;
      bfgs_in[BFGS_GLAG_OLD] = gLag_old;
      bfgs_ = SXFunction("bfgs", bfgs_in, make_vector(Bk_new));

      // Initial Hessian approximation
      B_init_ = DMatrix::eye(nx_);
    }

    // Header
    if (static_cast<bool>(getOption("print_header"))) {
      userOut()
        << "-------------------------------------------" << endl
        << "This is casadi::SQPMethod." << endl;
      if (exact_hessian_) {
        userOut() << "Using exact Hessian" << endl;
      } else {
        userOut() << "Using limited memory BFGS Hessian approximation" << endl;
      }
      userOut()
        << endl
        << "Number of variables:                       " << setw(9) << nx_ << endl
        << "Number of constraints:                     " << setw(9) << ng_ << endl
        << "Number of nonzeros in constraint Jacobian: " << setw(9) << A_sparsity.nnz() << endl
        << "Number of nonzeros in Lagrangian Hessian:  " << setw(9) << H_sparsity.nnz() << endl
        << endl;
    }
  }
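
When no exact Hessian is available, the SX expressions built above encode Powell's damped BFGS formula: the difference of Lagrangian gradients yk is blended towards qk = Bk*sk whenever the curvature yk'sk falls below 0.2*sk'Bk*sk, which keeps the updated approximation positive definite. A plain numeric sketch of the same update follows; it is a minimal illustration under those formulas, not the CasADi symbolic version, and all names are illustrative:

#include <cstddef>
#include <numeric>
#include <vector>

// Powell-damped BFGS update sketch for a dense n x n approximation B,
// mirroring the symbolic expressions above.
typedef std::vector<double> Vec;
typedef std::vector<Vec> Mat;

double dot(const Vec& a, const Vec& b) {
  return std::inner_product(a.begin(), a.end(), b.begin(), 0.0);
}

void damped_bfgs_update(Mat& B, const Vec& sk, Vec yk) {
  const std::size_t n = sk.size();

  // qk = B*sk
  Vec qk(n, 0.0);
  for (std::size_t i=0; i<n; ++i)
    for (std::size_t j=0; j<n; ++j) qk[i] += B[i][j]*sk[j];

  // Powell damping: blend yk towards qk when sk'yk < 0.2*sk'Bk*sk,
  // which guarantees the damped curvature stays at least 0.2*sk'Bk*sk
  const double skBksk = dot(sk, qk);
  const double skyk = dot(sk, yk);
  const double omega = skyk < 0.2*skBksk
      ? 0.8*skBksk/(skBksk - skyk) : 1.0;
  for (std::size_t i=0; i<n; ++i) yk[i] = omega*yk[i] + (1-omega)*qk[i];

  // B <- B + yk*yk'/(sk'yk) - qk*qk'/(qk'sk)
  const double theta = 1.0/dot(sk, yk);
  const double phi = 1.0/dot(qk, sk);
  for (std::size_t i=0; i<n; ++i)
    for (std::size_t j=0; j<n; ++j)
      B[i][j] += theta*yk[i]*yk[j] - phi*qk[i]*qk[j];
}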
Example #24
  void SXFunctionInternal::evalAdj(const vector<vector<SX> >& aseed, vector<vector<SX> >& asens) {
    if (verbose()) userOut() << "SXFunctionInternal::evalAdj begin" << endl;

    // number of adjoint seeds
    int nadj = aseed.size();
    asens.resize(nadj);

    // Quick return if possible
    if (nadj==0) return;

    // Get the number of inputs and outputs
    int num_in = nIn();
    int num_out = nOut();

    // Make sure matching sparsity of aseed
    bool matching_sparsity = true;
    for (int d=0; d<nadj; ++d) {
      casadi_assert(aseed[d].size()==num_out);
      for (int i=0; matching_sparsity && i<num_out; ++i)
        matching_sparsity = aseed[d][i].sparsity()==output(i).sparsity();
    }

    // Correct sparsity if needed
    if (!matching_sparsity) {
      vector<vector<SX> > aseed2(aseed);
      for (int d=0; d<nadj; ++d)
        for (int i=0; i<num_out; ++i)
          if (aseed2[d][i].sparsity()!=output(i).sparsity())
            aseed2[d][i] = project(aseed2[d][i], output(i).sparsity());
      return evalAdj(aseed2, asens);
    }

    // Allocate results if needed
    for (int d=0; d<nadj; ++d) {
      asens[d].resize(num_in);
      for (int i=0; i<asens[d].size(); ++i) {
        if (asens[d][i].sparsity()!=input(i).sparsity()) {
          asens[d][i] = SX::zeros(input(i).sparsity());
        } else {
          fill(asens[d][i].begin(), asens[d][i].end(), 0);
        }
      }
    }

    // Iterator to the binary operations
    vector<SXElement>::const_iterator b_it=operations_.begin();

    // Tape
    vector<TapeEl<SXElement> > s_pdwork(operations_.size());
    vector<TapeEl<SXElement> >::iterator it1 = s_pdwork.begin();

    // Evaluate algorithm
    if (verbose()) userOut() << "SXFunctionInternal::evalFwd evaluating algorithm forward" << endl;
    for (vector<AlgEl>::const_iterator it = algorithm_.begin(); it!=algorithm_.end(); ++it) {
      switch (it->op) {
      case OP_INPUT:
      case OP_OUTPUT:
      case OP_CONST:
      case OP_PARAMETER:
        break;
      default:
        {
          const SXElement& f=*b_it++;
          switch (it->op) {
            CASADI_MATH_DER_BUILTIN(f->dep(0), f->dep(1), f, it1++->d)
          }
        }
      }
    }

    // Calculate adjoint sensitivities
    if (verbose()) userOut() << "SXFunctionInternal::evalAdj calculating adjoint derivatives"
                       << endl;
    fill(s_work_.begin(), s_work_.end(), 0);
    for (int dir=0; dir<nadj; ++dir) {
      vector<TapeEl<SXElement> >::const_reverse_iterator it2 = s_pdwork.rbegin();
      for (vector<AlgEl>::const_reverse_iterator it = algorithm_.rbegin();
           it!=algorithm_.rend(); ++it) {
        SXElement seed;
        switch (it->op) {
        case OP_INPUT:
          asens[dir][it->i1].data()[it->i2] = s_work_[it->i0];
          s_work_[it->i0] = 0;
          break;
        case OP_OUTPUT:
          s_work_[it->i1] += aseed[dir][it->i0].data()[it->i2];
          break;
        case OP_CONST:
        case OP_PARAMETER:
          s_work_[it->i0] = 0;
          break;
          CASADI_MATH_BINARY_BUILTIN // Binary operation
            seed = s_work_[it->i0];
          s_work_[it->i0] = 0;
          s_work_[it->i1] += it2->d[0] * seed;
          s_work_[it->i2] += it2->d[1] * seed;
          it2++;
          break;
        default: // Unary operation
          seed = s_work_[it->i0];
          s_work_[it->i0] = 0;
          s_work_[it->i1] += it2->d[0] * seed;
          it2++;
        }
      }
    }
    if (verbose()) userOut() << "SXFunctionInternal::evalAdj end" << endl;
  }
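
The reverse sweep above follows the standard tape-based adjoint pattern: the forward loop stores each operation's partial derivatives in s_pdwork, and the backward loop takes each node's accumulated seed from the work vector, zeroes it, and pushes it to the node's arguments weighted by those partials. The following hand-rolled sketch does the same for f(x, y) = x*y + x; the tape layout and names are illustrative:

#include <iostream>

// Hand-rolled reverse sweep for f(x, y) = x*y + x, mimicking the
// tape/work-vector scheme above.
int main() {
  const double x = 3.0, y = 2.0;

  // Forward pass: evaluate and record the partials of each operation
  const double v0 = x*y;       // partials of v0 w.r.t. (x, y): {y, x}
  const double v1 = v0 + x;    // partials of v1 w.r.t. (v0, x): {1, 1}
  const double d0[2] = {y, x};
  const double d1[2] = {1.0, 1.0};

  // Reverse sweep: seed the output, then distribute backwards
  double w_v1 = 1.0;           // adjoint seed of the output
  double w_v0 = 0.0, w_x = 0.0, w_y = 0.0;

  // v1 = v0 + x: the binary node distributes its seed to both arguments
  w_v0 += d1[0]*w_v1;
  w_x  += d1[1]*w_v1;
  w_v1 = 0.0;

  // v0 = x*y: likewise
  w_x += d0[0]*w_v0;
  w_y += d0[1]*w_v0;
  w_v0 = 0.0;

  // INPUT nodes harvest the sensitivities: df/dx = y+1, df/dy = x
  std::cout << "f = " << v1 << ", df/dx = " << w_x
            << ", df/dy = " << w_y << std::endl;
  return 0;
}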
Example #25
  void SqicInterface::generateNativeCode(std::ostream& file) const {
    // Dump the contents of resource_sqic, but filter out the C bind stuff
    std::string resource_sqic_input(resource_sqic);
    std::istringstream stream(resource_sqic_input);
    std::string line;
    while (std::getline(stream, line)) {
      size_t b_i = line.find("bind ( C, ");
      if (b_i!=std::string::npos) {
        file << line.substr(0, b_i) << std::endl;
      } else {
        file << line << std::endl;
      }
    }

    file.precision(std::numeric_limits<double>::digits10+2);
    // Scientific notation forces a decimal point in the Fortran literals;
    // it would be better if this could be avoided
    file << std::scientific;

    file << "program exported" << std::endl;
    file << "  use SQICModule" << std::endl;
    file << "  implicit none" << std::endl;
    file << "  integer(ip)               :: m, n, n_inf, nnH, nnzH, nnzA, nS" << std::endl;


    file << "  real(rp)                  :: Obj" << std::endl;

    file << "  real(rp), allocatable:: bl(:), bu(:), x(:), valA(:), valH(:) , pi(:), rc(:)"
         << std::endl;
    file << "  integer(ip), allocatable:: indA(:), locA(:), indH(:), locH(:), hEtype(:), hs(:)"
         << std::endl;

    int n = n_;
    int m = nc_+1;
    int nnzA = formatA_.size_out(0);
    int nnzH = input(CONIC_H).size();

    file << "  n = " << n << std::endl;
    file << "  m = " << m << std::endl;
    file << "  nnzA = " << nnzA << std::endl;
    file << "  nnzH = " << nnzH << std::endl;

    file << "  allocate ( bl(n+m), bu(n+m) )" << std::endl;
    file << "  allocate ( hEtype(n+m) )" << std::endl;
    file << "  allocate ( locA(n+1), valA(nnzA), indA(nnzA) )" << std::endl;
    file << "  allocate ( pi(m), rc(n+m), x(n+m) )" << std::endl;
    file << "  allocate ( hs(n+m) )" << std::endl;
    file << "  allocate ( valH(nnzH), locH(n+1), indH(nnzH) )" << std::endl;

    for (int i=0; i<indA_.size(); ++i) {
      file << "  indA(" << i+1 << ") = " << indA_[i] << std::endl;
    }
    for (int i=0; i<locA_.size(); ++i) {
      file << "  locA(" << i+1 << ") = " << locA_[i] << std::endl;
    }
    for (int i=0; i<formatA_.size_out(0); ++i) {
      file << "  valA(" << i+1 << ") = " << formatA_.output().at(i) << std::endl;
    }
    for (int i=0; i<bl_.size(); ++i) {
      file << "  bl(" << i+1 << ") = " << bl_[i] << std::endl;
      file << "  bu(" << i+1 << ") = " << bu_[i] << std::endl;
    }
    for (int i=0; i<hEtype_.size(); ++i) {
      file << "  hEtype(" << i+1 << ") = " << hEtype_[i] << std::endl;
    }
    for (int i=0; i<hs_.size(); ++i) {
      file << "  hs(" << i+1 << ") = " << hs_[i] << std::endl;
    }
    for (int i=0; i<indH_.size(); ++i) {
      file << "  indH(" << i+1 << ") = " << indH_[i] << std::endl;
    }
    for (int i=0; i<locH_.size(); ++i) {
      file << "  locH(" << i+1 << ") = " << locH_[i] << std::endl;
    }
    for (int i=0; i<input(CONIC_H).size(); ++i) {
      file << "  valH(" << i+1 << ") = " << input(CONIC_H).at(i) << std::endl;
    }
    for (int i=0; i<input(CONIC_X0).size(); ++i) {
      file << "  x(" << i+1 << ") = " << input(CONIC_X0).at(i) << std::endl;
    }
    for (int i=0; i<pi_.size(); ++i) {
      file << "  pi(" << i+1 << ") = " << 0 << std::endl;  // was: pi_[i]
    }
    // Debug output of the initial bound multipliers
    userOut() << "lam_x0:::" << input(CONIC_LAM_X0) << std::endl;
    for (int i=0; i<rc_.size(); ++i) {
      file << "  rc(" << i+1 << ") = "
           << ((i<input(CONIC_LAM_X0).size()) ? -input(CONIC_LAM_X0).at(i) : 0.0)
           << std::endl;
    }

    file << "  call wsqic (m, n, nnzA, indA, locA, valA, bl, bu, hEtype, "
         << "hs, x, pi, rc, nnzH, indH, locH, valH)" << std::endl;
    file << "  call sqicSolve(Obj)" << std::endl;
    file << "  deallocate ( bl, bu )" << std::endl;
    file << "  deallocate ( hEtype )" << std::endl;
    file << "  deallocate ( locA, valA, indA )" << std::endl;
    file << "  deallocate ( pi, rc, x )" << std::endl;
    file << "  deallocate ( valH, locH, indH )" << std::endl;
    file << "  call sqicDestroy()" << std::endl;
    file << "end program exported" << std::endl;


  }
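
Most of the generator above is one emission pattern repeated per array: write each element of a 0-based C++ container as a 1-based Fortran assignment. A stripped-down sketch of that pattern follows; the function name and sample data are illustrative:

#include <iostream>
#include <ostream>
#include <sstream>
#include <string>
#include <vector>

// Emit a 0-based C++ vector as 1-based Fortran assignments,
// as the loops above do for indA, locA, bl, bu, etc.
void emit_fortran_array(std::ostream& file, const std::string& name,
                        const std::vector<int>& v) {
  for (int i=0; i<v.size(); ++i)
    file << "  " << name << "(" << i+1 << ") = " << v[i] << std::endl;
}

int main() {
  const std::vector<int> indA = {1, 3, 2};  // row indices, already 1-based
  std::ostringstream code;
  emit_fortran_array(code, "indA", indA);
  std::cout << code.str();  // indA(1) = 1, indA(2) = 3, indA(3) = 2
  return 0;
}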
Example #26
  void Polynomial::repr(std::ostream &stream, bool trailing_newline) const {
    stream << "poly(" << p_ << ")";
    if (trailing_newline) stream << std::endl;
  }
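
With the fix above, repr writes to the stream it is handed instead of unconditionally going to userOut(), so it composes with files and string streams alike. A minimal stand-in showing the same repr(stream, trailing_newline) pattern in isolation follows; Toy is an illustrative class, not part of CasADi:

#include <cstddef>
#include <iostream>
#include <vector>

// Illustrative stand-in for the repr(stream, trailing_newline) pattern.
struct Toy {
  std::vector<double> p_;
  void repr(std::ostream& stream, bool trailing_newline) const {
    stream << "poly(";
    for (std::size_t i=0; i<p_.size(); ++i)
      stream << (i ? ", " : "") << p_[i];
    stream << ")";
    if (trailing_newline) stream << std::endl;
  }
};

int main() {
  Toy t;
  t.p_ = {1, 0, 2};
  t.repr(std::cout, true);  // prints: poly(1, 0, 2)
  return 0;
}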