void LinearCutsGenerator::generateCuts(const OsiSolverInterface &solver, OsiCuts &cs, const CglTreeInfo info) const { //const OsiTMINLPInterface * tmp = dynamic_cast<const OsiTMINLPInterface *>(&solver); OsiTMINLPInterface * nlp = dynamic_cast<OsiTMINLPInterface *>(solver.clone());//const_cast<OsiTMINLPInterface *>(tmp); assert(nlp); OuterApprox oa; //si.writeMps("toto"); int numberRows = nlp->getNumRows(); for(int i = 0 ; i < 5 ; i++){ nlp->resolve(); OsiClpSolverInterface si; oa(*nlp, &si, solver.getColSolution(), true); si.resolve(); OsiCuts cuts; for(std::list<Coin::SmartPtr<CuttingMethod> >::const_iterator i = methods_.begin() ; i != methods_.end() ; i++){ (*i)->cgl->generateCuts(si, cuts, info); } std::vector<OsiRowCut *> mycuts(cuts.sizeRowCuts()); for(int i = 0 ; i < cuts.sizeRowCuts() ; i++){ mycuts[i] = cuts.rowCutPtr(i); cs.insert(*mycuts[i]); } nlp->applyRowCuts(mycuts.size(), const_cast<const OsiRowCut **> (&mycuts[0])); } // Take off slack cuts std::vector<int> kept; int numberRowsNow = nlp->getNumRows(); int * del = new int [numberRowsNow-numberRows]; nlp->resolve(); const double * activity = nlp->getRowActivity(); const double * lb = nlp->getRowLower(); const double * ub = nlp->getRowUpper(); CoinRelFltEq eq(1e-06); //int nDelete=0; for (int i=numberRowsNow -1;i>=numberRows;i--) { if ( !(eq(activity[i], lb[i]) || eq(activity[i], ub[i])) ) cs.eraseRowCut(i - numberRows); } delete [] del; delete nlp; }
/** Load into `si` a linear inner approximation of the MINLP held by `nlp`:
 *  the Jacobian rows of the *linear* constraints (nonlinear ones are removed),
 *  variable bounds and integrality marks, optionally the objective, and a set
 *  of chord cuts produced by getMyInnerApproximation for each nonlinear
 *  constraint.
 *  @param nlp    interface to the MINLP being approximated.
 *  @param si     [out] solver receiving the approximation.
 *  @param x      point at which Jacobian / objective gradient are evaluated.
 *  @param getObj if true, install the objective: copied directly when linear,
 *                otherwise delegated to nlp.addObjectiveFunction.
 */
void HeuristicInnerApproximation::extractInnerApproximation(OsiTMINLPInterface & nlp, OsiSolverInterface &si,
   const double * x, bool getObj) {
   int n;
   int m;
   int nnz_jac_g;
   int nnz_h_lag;
   Ipopt::TNLP::IndexStyleEnum index_style;
   TMINLP2TNLP * problem = nlp.problem();
   //Get problem information
   problem->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);

   // First eval_jac_g call (x == NULL, new_x == 0) fetches only the sparsity
   // pattern; the values are filled in by a second call below.
   vector<int> jRow(nnz_jac_g);
   vector<int> jCol(nnz_jac_g);
   vector<double> jValues(nnz_jac_g);
   problem->eval_jac_g(n, NULL, 0, m, nnz_jac_g, jRow(), jCol(), NULL);
   if(index_style == Ipopt::TNLP::FORTRAN_STYLE)//put C-style
   {
     // Shift 1-based Fortran indices down to 0-based C indices.
     for(int i = 0 ; i < nnz_jac_g ; i++){
       jRow[i]--;
       jCol[i]--;
     }
   }

   //get Jacobian values at x (new_x == 1)
   problem->eval_jac_g(n, x, 1, m, nnz_jac_g, NULL, NULL, jValues());

   vector<double> g(m);
   problem->eval_g(n, x, 1, m, g());

   vector<int> nonLinear(m);
   //store non linear constraints (which are to be removed from IA)
   int numNonLinear = 0;
   const double * rowLower = nlp.getRowLower();
   const double * rowUpper = nlp.getRowUpper();
   const double * colLower = nlp.getColLower();
   const double * colUpper = nlp.getColUpper();
   assert(m == nlp.getNumRows());
   double infty = si.getInfinity();
   double nlp_infty = nlp.getInfinity();
   vector<Ipopt::TNLP::LinearityType> constTypes(m);
   problem->get_constraints_linearity(m, constTypes());
   // Record the indices of the nonlinear constraints; only the first
   // numNonLinear entries of nonLinear are meaningful.
   for (int i = 0; i < m; i++) {
     if (constTypes[i] == Ipopt::TNLP::NON_LINEAR) {
       nonLinear[numNonLinear++] = i;
     }
   }

   // Row bounds for the kept (linear) constraints, translating the NLP's
   // infinity into the LP solver's infinity.
   vector<double> rowLow(m - numNonLinear);
   vector<double> rowUp(m - numNonLinear);
   int ind = 0;
   for (int i = 0; i < m; i++) {
     if (constTypes[i] != Ipopt::TNLP::NON_LINEAR) {
       if (rowLower[i] > -nlp_infty) {
         //   printf("Lower %g ", rowLower[i]);
         rowLow[ind] = (rowLower[i]);
       } else
         rowLow[ind] = -infty;
       if (rowUpper[i] < nlp_infty) {
         //   printf("Upper %g ", rowUpper[i]);
         rowUp[ind] = (rowUpper[i]);
       } else
         rowUp[ind] = infty;
       ind++;
     }
   }

   // Build the full Jacobian as a column-ordered matrix, then drop the
   // nonlinear rows to keep only the linear part of the problem.
   CoinPackedMatrix mat(true, jRow(), jCol(), jValues(), nnz_jac_g);
   mat.setDimensions(m, n); // In case matrix was empty, this should be enough

   //remove non-linear constraints
   mat.deleteRows(numNonLinear, nonLinear());

   int numcols = nlp.getNumCols();
   // Load with an all-zero objective; the real objective is set below when
   // getObj is true.
   vector<double> obj(numcols);
   for (int i = 0; i < numcols; i++)
     obj[i] = 0.;

   si.loadProblem(mat, nlp.getColLower(), nlp.getColUpper(), obj(), rowLow(), rowUp());
   // Mark discrete variables so si sees the same integrality restrictions.
   const Bonmin::TMINLP::VariableType* variableType = problem->var_types();
   for (int i = 0; i < n; i++) {
     if ((variableType[i] == TMINLP::BINARY) || (variableType[i] == TMINLP::INTEGER))
       si.setInteger(i);
   }
   if (getObj) {
     bool addObjVar = false;
     if (problem->hasLinearObjective()) {
       // Linear objective: its gradient is constant, so copy it verbatim and
       // recover the constant term by evaluating f at x = 0.
       double zero;
       vector<double> x0(n, 0.);
       problem->eval_f(n, x0(), 1, zero);
       si.setDblParam(OsiObjOffset, -zero);
       //Copy the linear objective and don't create a dummy variable.
       problem->eval_grad_f(n, x, 1, obj());
       si.setObjective(obj());
     } else {
       addObjVar = true;
     }
     if (addObjVar) {
       // Nonlinear objective: represent it through an extra objective variable.
       nlp.addObjectiveFunction(si, x);
     }
   }

   // Hassan IA initial description
   int InnerDesc = 1;
   if (InnerDesc == 1) {
     OsiCuts cs;

     // p/pp are two moving points (start at the lower-bound corner, clamped to
     // 0 where the bound is huge); up is the upper-bound corner, also clamped.
     double * p = CoinCopyOfArray(colLower, n);
     double * pp = CoinCopyOfArray(colLower, n);
     double * up = CoinCopyOfArray(colUpper, n);

     const int& nbAp = nbAp_;
     std::vector<int> nbG(m, 0);// Number of generated points for each nonlinear constraint

     std::vector<double> step(n);

     for (int i = 0; i < n; i++) {
       if (colUpper[i] > 1e08) {
         up[i] = 0;
       }
       // No stepping along unbounded or discrete variables.
       if (colUpper[i] > 1e08 || colLower[i] < -1e08 || (variableType[i] == TMINLP::BINARY)
           || (variableType[i] == TMINLP::INTEGER)) {
         step[i] = 0;
       } else
         step[i] = (up[i] - colLower[i]) / (nbAp);

       if (colLower[i] < -1e08) {
         p[i] = 0;
         pp[i] = 0;
       }
     }
     vector<double> g_p(m);
     vector<double> g_pp(m);

     for (int j = 1; j <= nbAp; j++) {
       // Advance pp one step along every (bounded, continuous) coordinate.
       for (int i = 0; i < n; i++) {
         pp[i] += step[i];
       }
       problem->eval_g(n, p, 1, m, g_p());
       problem->eval_g(n, pp, 1, m, g_pp());
       double diff = 0;
       int varInd = 0;
       // NOTE(review): the loop condition stops at the first constraint that is
       // not NON_LINEAR — this assumes nonlinear constraints come first in the
       // row ordering; confirm against how the model is built.
       for (int i = 0; (i < m && constTypes[i] == Ipopt::TNLP::NON_LINEAR); i++) {
         // NOTE(review): varInd is reset when it reaches n - 1, so index n - 1
         // is never used as the moved coordinate — confirm this is intended.
         if (varInd == n - 1)
           varInd = 0;
         // diff is computed but not read afterwards (currently unused).
         diff = std::abs(g_p[i] - g_pp[i]);
         if (nbG[i] < nbAp - 1) {
           getMyInnerApproximation(nlp, cs, i, p, pp);// Generate a chord connecting the two points
           // Move p toward pp along one coordinate before the next chord.
           p[varInd] = pp[varInd];
           nbG[i]++;
         }
         varInd++;
       }
     }
     // Final chord from the last p to the (clamped) upper-bound corner, for
     // each leading nonlinear constraint.
     for(int i = 0; (i< m && constTypes[i] == Ipopt::TNLP::NON_LINEAR); i++) {
       //  getConstraintOuterApproximation(cs, i, colUpper, NULL, true);// Generate Tangents at current point
       getMyInnerApproximation(nlp, cs, i, p, up);// Generate a chord connecting the two points
     }

     delete [] p;
     delete [] pp;
     delete [] up;
     si.applyCuts(cs);
   }
}