Code example #1
  void 
  LinearCutsGenerator::generateCuts(const OsiSolverInterface &solver, OsiCuts &cs,
                     const CglTreeInfo info) const {

    //const OsiTMINLPInterface * tmp = dynamic_cast<const OsiTMINLPInterface *>(&solver);
    OsiTMINLPInterface * nlp = dynamic_cast<OsiTMINLPInterface *>(solver.clone());//const_cast<OsiTMINLPInterface *>(tmp);
    assert(nlp);
    OuterApprox oa;
    //si.writeMps("toto");
    int numberRows = nlp->getNumRows();
    for(int i = 0 ; i < 5 ; i++){
      nlp->resolve();
      OsiClpSolverInterface si;
      oa(*nlp, &si, solver.getColSolution(), true); 
      si.resolve();
      OsiCuts cuts;
      for(std::list<Coin::SmartPtr<CuttingMethod> >::const_iterator it = methods_.begin() ;
          it != methods_.end() ; it++){
         (*it)->cgl->generateCuts(si, cuts, info);
      }
      std::vector<OsiRowCut *> mycuts(cuts.sizeRowCuts());
      for(int k = 0 ; k < cuts.sizeRowCuts() ; k++){
        mycuts[k] = cuts.rowCutPtr(k);
        cs.insert(*mycuts[k]);
      }
      nlp->applyRowCuts(static_cast<int>(mycuts.size()), const_cast<const OsiRowCut **> (&mycuts[0]));
    }

    // Take off slack cuts
    int numberRowsNow = nlp->getNumRows();
    nlp->resolve();
    
    const double * activity = nlp->getRowActivity();
    const double * lb = nlp->getRowLower();
    const double * ub = nlp->getRowUpper();
    CoinRelFltEq eq(1e-06);
    //int nDelete=0;
    for (int i=numberRowsNow -1;i>=numberRows;i--) {
      if ( !(eq(activity[i], lb[i]) || eq(activity[i], ub[i])) )
        cs.eraseRowCut(i - numberRows);
    }
    delete nlp;
  }
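The loop at the end keeps only the cuts that are binding at the re-solved NLP point and erases the slack ones. Below is a minimal standalone sketch of that filtering rule; the RowInfo struct, the isTight() helper and the sample data are hypothetical stand-ins for the COIN-OR row activities and the CoinRelFltEq(1e-06) test, not part of Bonmin.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for a generated row cut: its activity and bounds at the NLP optimum.
struct RowInfo { double activity, lb, ub; };

// Relative-tolerance equality test, loosely mirroring the CoinRelFltEq(1e-06) check above
// (exact CoinRelFltEq semantics assumed, not copied).
static bool isTight(double activity, double bound, double tol = 1e-6) {
  double scale = std::max(1.0, std::max(std::fabs(activity), std::fabs(bound)));
  return std::fabs(activity - bound) <= tol * scale;
}

int main() {
  std::vector<RowInfo> cuts = { {5.0, 0.0, 5.0},    // tight at upper bound: kept
                                {2.5, 0.0, 5.0},    // slack: dropped
                                {0.0, 0.0, 5.0} };  // tight at lower bound: kept
  for (std::size_t i = 0; i < cuts.size(); ++i) {
    bool keep = isTight(cuts[i].activity, cuts[i].lb) || isTight(cuts[i].activity, cuts[i].ub);
    std::printf("cut %zu: %s\n", i, keep ? "kept" : "dropped (slack)");
  }
  return 0;
}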
Code example #2
  /** Runs heuristic*/
  int
  FixAndSolveHeuristic::solution(double & objectiveValue,
                                 double * newSolution){
    //if(model_->getNodeCount() || model_->getCurrentPassNumber() > 1) return 0;
    if(model_->getSolutionCount() > 0) return 0;
    if(model_->getNodeCount() > 1000) return 0;
    if(model_->getNodeCount() % 100 != 0) return 0;
    int numberObjects = model_->numberObjects();
    OsiObject ** objects = model_->objects();

    OsiTMINLPInterface * nlp = dynamic_cast<OsiTMINLPInterface *>
                               (model_->solver());
    if(nlp == NULL){
       nlp = dynamic_cast<OsiTMINLPInterface *>
                         (setup_->nonlinearSolver()->clone());
     }
     else {
       nlp = dynamic_cast<OsiTMINLPInterface *>(nlp->clone());
     }

    OsiBranchingInformation info = model_->usefulInformation();
    info.solution_ = model_->getColSolution();

    int dummy;
    int nFixed = 0;
    for(int i = 0 ; i < numberObjects; i++){
      if(objects[i]->infeasibility(&info, dummy) == 0.){
         objects[i]->feasibleRegion(nlp, &info);
         nFixed ++;
      }
    }
    if(nFixed < numberObjects / 3) return 0;
    double cutoff = info.cutoff_; 
    int r_val = doLocalSearch(nlp, newSolution, objectiveValue, cutoff);
    delete nlp;
    return r_val;
  }
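A small self-contained illustration of the gating rule used above: objects reporting zero infeasibility are counted as fixable, and the local search is only attempted when at least a third of the objects can be fixed. The infeasibility values below are invented for the example; the real heuristic calls objects[i]->infeasibility() and feasibleRegion().

#include <cstdio>
#include <vector>

int main() {
  // Made-up infeasibility values, one per branching object.
  std::vector<double> infeasibility = {0.0, 0.0, 0.3, 0.0, 0.0, 0.1};
  int numberObjects = static_cast<int>(infeasibility.size());
  int nFixed = 0;
  for (int i = 0; i < numberObjects; ++i)
    if (infeasibility[i] == 0.0)
      ++nFixed;                         // in the heuristic this also fixes the variable
  if (nFixed < numberObjects / 3)
    std::printf("too few fixable objects (%d/%d): skip local search\n", nFixed, numberObjects);
  else
    std::printf("%d/%d objects fixable: run local search\n", nFixed, numberObjects);
  return 0;
}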
Code example #3
File: BonCbcNlpStrategy.cpp  Project: Flymir/coin-all
  /* After a CbcModel::resolve this can return a status
     -1 no effect
     0 treat as optimal
     1 as 0 but do not do any more resolves (i.e. no more cuts)
     2 treat as infeasible
  */
  int
  CbcNlpStrategy::status(CbcModel * model, CbcNodeInfo * parent,int whereFrom)
  {

    OsiSolverInterface * solver = model->solver();//get solver
    int feasible = 1;
    bool solved = true;
    int returnStatus = -1;
    BonCbcPartialNodeInfo * bmNodeInfo = dynamic_cast<BonCbcPartialNodeInfo *>(parent);
    if (!bmNodeInfo) return -1;

    int seqOfInfeasiblesSize = bmNodeInfo->getSequenceOfInfeasiblesSize();
    int seqOfUnsolvedSize = bmNodeInfo->getSequenceOfUnsolvedSize();


    if (solver->isAbandoned()) {
      solved = false;
      seqOfUnsolvedSize++;
    }
    else if (solver->isProvenPrimalInfeasible()) {
      feasible = 0;
      seqOfInfeasiblesSize++;
    }

    if ((seqOfUnsolvedSize==0) || ((maxFailure_ == 0) && (maxInfeasible_== 0)) || (seqOfInfeasiblesSize==0)) {
      if (feasible && seqOfInfeasiblesSize > 1) {
        (*model->messageHandler())<<"Feasible node while father was infeasible."
        <<CoinMessageEol;
      }
    }

    if (solved && seqOfUnsolvedSize > 1) {
      (*model->messageHandler())<<"Solved node while father was unsolved."
      <<CoinMessageEol;
    }

    if (seqOfInfeasiblesSize < maxInfeasible_ &&
        solved && !feasible) {
      (*model->messageHandler())<<"Branching on infeasible node, sequence of infeasibles size "
      <<seqOfInfeasiblesSize<<CoinMessageEol;
      // Have to make sure that we will branch
      OsiTMINLPInterface * ipopt = dynamic_cast<OsiTMINLPInterface *>(solver);
      ipopt->forceBranchable();
      //change objective value
      returnStatus = 0;

    }

    if (!solved && parent != NULL &&
        seqOfUnsolvedSize <= maxFailure_) {
      (*model->messageHandler())<<"Branching on unsolved node, sequence of unsolved size "<<seqOfUnsolvedSize<<CoinMessageEol;
      // Have to make sure that we will branch
      OsiTMINLPInterface * osiMinlp = dynamic_cast<OsiTMINLPInterface *>(solver);
      osiMinlp->forceBranchable();     //      feasible=1;
      returnStatus = 0;
    }

    if (solver->isAbandoned() && parent != NULL &&
        seqOfUnsolvedSize > maxFailure_) {
      hasFailed_ = true;
      OsiTMINLPInterface * osiMinlp =
        dynamic_cast<OsiTMINLPInterface *>(solver);
      if (pretendFailIsInfeasible_) {
        //force infeasible
        osiMinlp->forceInfeasible();
        returnStatus = 2;
      }
      else {
        std::string probName;
        osiMinlp->getStrParam(OsiProbName,probName);
        throw osiMinlp->newUnsolvedError(0, osiMinlp->problem(), probName);
      }
    }
    return returnStatus;
  }
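The doc comment above defines a small protocol of return codes. The sketch below shows how a caller might interpret them; interpretStatus() is a hypothetical helper written for illustration, not a Cbc or Bonmin function.

#include <cstdio>

// Interprets the status codes documented above:
// -1 no effect, 0 treat as optimal, 1 as 0 but no further resolves, 2 treat as infeasible.
static const char * interpretStatus(int status) {
  switch (status) {
    case -1: return "no effect: use the solver's own status";
    case 0:  return "treat node as optimal (keep cutting/branching)";
    case 1:  return "treat as optimal but do no further resolves";
    case 2:  return "treat node as infeasible";
    default: return "unknown status";
  }
}

int main() {
  for (int s = -1; s <= 2; ++s)
    std::printf("status %2d -> %s\n", s, interpretStatus(s));
  return 0;
}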
Code example #4
File: CouenneBab.cpp  Project: coin-or/Couenne
/** Perform a branch-and-bound on given setup.*/
void CouenneBab::branchAndBound (Bonmin::BabSetupBase & s) {

  double remaining_time = s.getDoubleParameter(Bonmin::BabSetupBase::MaxTime) + CoinCpuTime();

  /* Put a link to this into solver.*/
  OsiBabSolver *  babInfo = dynamic_cast<OsiBabSolver *>(s.continuousSolver()->getAuxiliaryInfo());
  assert(babInfo);
  Bonmin::BabInfo *  bonBabInfoPtr = dynamic_cast<Bonmin::BabInfo*>(babInfo);

  if (bonBabInfoPtr == NULL) { //Replace with a Bonmin::babInfo
    bonBabInfoPtr = new Bonmin::BabInfo(*babInfo);
    s.continuousSolver()->setAuxiliaryInfo(bonBabInfoPtr);
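    // setAuxiliaryInfo is expected to store its own copy of the info, so the local
    // object is freed here and the stored one re-fetched below.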
    delete bonBabInfoPtr;
    bonBabInfoPtr = dynamic_cast<Bonmin::BabInfo*>(s.continuousSolver()->getAuxiliaryInfo());
  }

  bonBabInfoPtr->setBabPtr(this);

  s.nonlinearSolver()->solver()->setup_global_time_limit(s.getDoubleParameter(Bonmin::BabSetupBase::MaxTime));
  OsiSolverInterface * solver = s.continuousSolver()->clone();
  delete modelHandler_;
  modelHandler_ = s.continuousSolver()->messageHandler()->clone();
  model_.passInMessageHandler(modelHandler_);
  model_.assignSolver(solver, true);

  //  s.continuousSolver() = model_.solver();
  //   if(s.continuousSolver()->objects()!=NULL){
  //     model_.addObjects(s.continuousSolver()->numberObjects(),s.continuousSolver()->objects());
  //   }

  int specOpt = s.getIntParameter(Bonmin::BabSetupBase::SpecialOption);
  if (specOpt) {
    model_.setSpecialOptions(specOpt);
    if (specOpt==16) {
      Bonmin::CbcNlpStrategy strat(s.getIntParameter(Bonmin::BabSetupBase::MaxFailures), 
				   s.getIntParameter(Bonmin::BabSetupBase::MaxInfeasible), 
				   s.getIntParameter(Bonmin::BabSetupBase::FailureBehavior));
      model_.setStrategy(strat);
    }
  }

  model_.setMaximumCutPasses(s.getIntParameter(Bonmin::BabSetupBase::NumCutPasses));
  model_.setMaximumCutPassesAtRoot(s.getIntParameter(Bonmin::BabSetupBase::NumCutPassesAtRoot));

  //Setup cutting plane methods
  for (Bonmin::BabSetupBase::CuttingMethods::iterator i = s.cutGenerators().begin() ;
       i != s.cutGenerators().end() ; i++) {

    Bonmin::OaDecompositionBase * oa = dynamic_cast<Bonmin::OaDecompositionBase *>(i->cgl);
    if (oa && oa->reassignLpsolver())
      oa->assignLpInterface(model_.solver());
    model_.addCutGenerator(i->cgl,i->frequency,i->id.c_str(), i->normal,
			   i->atSolution);
    if(i->always){
      model_.cutGenerators()[model_.numberCutGenerators()-1]
	->setMustCallAgain(true);
    }
  }

  for (Bonmin::BabSetupBase::HeuristicMethods::iterator i = s.heuristics().begin() ;
       i != s.heuristics().end() ; i++) {
    CbcHeuristic * heu = i->heuristic;
    heu->setModel(&model_);
    model_.addHeuristic(heu, i->id.c_str());
  }

  //need to record solver logLevel here
  int logLevel = s.continuousSolver()->messageHandler()->logLevel();

  //Set true branch-and-bound parameters
  model_.setLogLevel(s.getIntParameter(Bonmin::BabSetupBase::BabLogLevel));

  // Put back solver logLevel
  model_.solver()->messageHandler()->setLogLevel(logLevel);

  model_.setPrintFrequency(s.getIntParameter(Bonmin::BabSetupBase::BabLogInterval));

  bool ChangedObject = false;
  //Pass over user set branching priorities to Cbc
  if (s.continuousSolver()->objects()==NULL) {
    //assert (s.branchingMethod() == NULL);
    const OsiTMINLPInterface * nlpSolver = s.nonlinearSolver();
    //set priorities, preferred directions...
    const int * priorities = nlpSolver->getPriorities();
    const double * upPsCosts = nlpSolver->getUpPsCosts();
    const double * downPsCosts = nlpSolver->getDownPsCosts();
    const int * directions = nlpSolver->getBranchingDirections();
    bool hasPseudo = (upPsCosts!=NULL);
    model_.findIntegers(true,hasPseudo);
    OsiObject ** simpleIntegerObjects = model_.objects();
    int numberObjects = model_.numberObjects();
    if (priorities != NULL || directions != NULL || hasPseudo) {
      ChangedObject = true;
      for (int i = 0 ; i < numberObjects ; i++) {
	CbcObject * object = dynamic_cast<CbcObject *>
	  (simpleIntegerObjects[i]);
	int iCol = object->columnNumber();
	if (priorities)
	  object->setPriority(priorities[iCol]);
	if (directions)
	  object->setPreferredWay(directions[iCol]);
	if (upPsCosts) {
	  CbcSimpleIntegerPseudoCost * pscObject =
	    dynamic_cast<CbcSimpleIntegerPseudoCost*> (object);
	  pscObject->setUpPseudoCost(upPsCosts[iCol]);
	  pscObject->setDownPseudoCost(downPsCosts[iCol]);
	}
      }
    }

#if 1
    // Now pass user set Sos constraints (code inspired from CoinSolve.cpp)
    const TMINLP::SosInfo * sos = s.nonlinearSolver()->model()->sosConstraints();

    if (!s.getIntParameter(Bonmin::BabSetupBase::DisableSos) && sos && sos->num > 0) {

      // we have some sos constraints

        const OsiTMINLPInterface * nlpSolver = s.nonlinearSolver();
        const int & numSos = sos->num;
	(*nlpSolver->messageHandler())<<"Adding "<<sos->num<<" sos constraints."
				      <<CoinMessageEol;

        CbcObject ** objects = new CbcObject*[numSos];
        const int * starts = sos->starts;
        const int * indices = sos->indices;
        const char * types = sos->types;
        const double * weights = sos->weights;
        //verify if model has user set priorities
        bool hasPriorities = false;
        const int * varPriorities = nlpSolver->getPriorities();
        int numberObjects = model_.numberObjects();
        if (varPriorities)
	  {
	    for (int i = 0 ; i < numberObjects ; i++) {
	      if (varPriorities[i]) {
		hasPriorities = true;
		break;
	      }
	    }
	  }
        const int * sosPriorities = sos->priorities;
        if (sosPriorities)
	  {
	    for (int i = 0 ; i < numSos ; i++) {
	      if (sosPriorities[i]) {
		hasPriorities = true;
		break;
	      }
	    }
	  }
        for (int i = 0 ; i < numSos ; i++)
	  {
	    int start = starts[i];
	    int length = starts[i + 1] - start;
#ifdef DO_IT_NWAY
	    printf("setting nway object\n"),
	      objects[i] = new CbcNWay(&model_, length, &indices[start],
				       i);
	    objects[i]->setPriority(1);
#else
	    objects[i] = new CbcSOS(&model_, length, &indices[start],
				    &weights[start], i, types[i]);
	    objects[i]->setPriority(10);
#endif
	    if (hasPriorities && sosPriorities && sosPriorities[i]) {
	      objects[i]->setPriority(sosPriorities[i]);
	    }
	  }
        model_.addObjects (numSos, objects);
        for (int i = 0 ; i < numSos ; i++)
          delete objects[i];
        delete [] objects;
      }
#endif
    //If Setup contains more objects add them to Cbc
    if (s.objects().size()) {
      CbcObject ** objects = new CbcObject *[s.objects().size()];
      for (unsigned int i = 0 ; i < s.objects().size() ; i++) {
	objects[i] = dynamic_cast<CbcObject *> (s.objects()[i]);
	assert(objects[i]);
	objects[i]->setModel(&model_);
      }
      model_.addObjects ((int) s.objects().size(), objects);
      delete [] objects;
    }

    replaceIntegers(model_.objects(), model_.numberObjects());

  } else { // Pass in objects to Cbc

    // Redundant definition of default branching (as Default == User)
    assert (s.branchingMethod() != NULL);

    // Add nonlinear and integer objects (need to add OsiSOS)
    model_.addObjects (s.continuousSolver () -> numberObjects (), s.continuousSolver () -> objects ());

    // Now model_ has only CouenneObjects and SOS objects

    // for (int i=0; i<nco; i++) 
    //   if (!(dynamic_cast <CbcSimpleInteger *> (s.continuousSolver () -> objects () [i])))
    // 	model_ . objects () [nRealObj++] = s.continuousSolver () -> objects () [i] -> clone ();

    CbcBranchDefaultDecision branch;
    s.branchingMethod()->setSolver(model_.solver());
    BonChooseVariable * strong2 = dynamic_cast<BonChooseVariable *>(s.branchingMethod());
    if (strong2)
      strong2->setCbcModel(&model_);
    branch.setChooseMethod(*s.branchingMethod());

    model_.setBranchingMethod(&branch);
    // prevent duplicating object when copying in CbcModel.cpp
    model_.solver()->deleteObjects();
  }

  model_.setDblParam(CbcModel::CbcCutoffIncrement, s.getDoubleParameter(Bonmin::BabSetupBase::CutoffDecr));

  model_.setCutoff(s.getDoubleParameter(Bonmin::BabSetupBase::Cutoff) + CUTOFF_TOL);

  model_.setDblParam(CbcModel::CbcAllowableGap, s.getDoubleParameter(Bonmin::BabSetupBase::AllowableGap));
  model_.setDblParam(CbcModel::CbcAllowableFractionGap, s.getDoubleParameter(Bonmin::BabSetupBase::AllowableFractionGap));

  // Definition of node selection strategy

  if (s.nodeComparisonMethod()==Bonmin::BabSetupBase::bestBound) {
    CbcCompareObjective compare;
    model_.setNodeComparison(compare);
  }
  else if (s.nodeComparisonMethod()==Bonmin::BabSetupBase::DFS) {
    CbcCompareDepth compare;
    model_.setNodeComparison(compare);
  }
  else if (s.nodeComparisonMethod()==Bonmin::BabSetupBase::BFS) {
    CbcCompareDefault compare;
    compare.setWeight(0.0);
    model_.setNodeComparison(compare);
  }
  else if (s.nodeComparisonMethod()==Bonmin::BabSetupBase::dynamic) {
    CbcCompareDefault compare;
    model_.setNodeComparison(compare);
  }
  else if (s.nodeComparisonMethod()==Bonmin::BabSetupBase::bestGuess) {
    // Right now, this is a mess.  We need a separation of the
    // pseudo costs from the ChooseVariable method
    CbcCompareEstimate compare;
    model_.setNodeComparison(compare);
    GuessHeuristic * guessHeu = new GuessHeuristic(model_);
    model_.addHeuristic(guessHeu);
    delete guessHeu;
  }

  if (s.treeTraversalMethod() == Bonmin::BabSetupBase::HeapOnly) {
    //Do nothing this is the default of Cbc.
  }
  else if (s.treeTraversalMethod() == Bonmin::BabSetupBase::DiveFromBest) {
    CbcDiver treeTraversal;
    treeTraversal.initialize(s);
    model_.passInTreeHandler(treeTraversal);
  }
  else if (s.treeTraversalMethod() == Bonmin::BabSetupBase::ProbedDive) {
    CbcProbedDiver treeTraversal;
    treeTraversal.initialize(s);
    model_.passInTreeHandler(treeTraversal);
  }
  else if (s.treeTraversalMethod() == Bonmin::BabSetupBase::DfsDiveFromBest) {
    CbcDfsDiver treeTraversal;
    treeTraversal.initialize(s);
    model_.passInTreeHandler(treeTraversal);
  }
  else if (s.treeTraversalMethod() == Bonmin::BabSetupBase::DfsDiveDynamic) {
    CbcDfsDiver treeTraversal;
    treeTraversal.initialize(s);
    model_.passInTreeHandler(treeTraversal);

    DiverCompare compare;
    compare.setComparisonDive(*model_.nodeComparison());
    compare.setComparisonBound(CbcCompareObjective());
    CbcDfsDiver * dfs = dynamic_cast<CbcDfsDiver *> (model_.tree());
    assert(dfs);
    compare.setDiver(dfs);
    model_.setNodeComparison(compare);
  }

  model_.setNumberStrong(s.getIntParameter(Bonmin::BabSetupBase::NumberStrong));
  model_.setNumberBeforeTrust(s.getIntParameter(Bonmin::BabSetupBase::MinReliability));
  model_.setNumberPenalties(8);

  model_.setDblParam(CbcModel::CbcMaximumSeconds, s.getDoubleParameter(Bonmin::BabSetupBase::MaxTime));

  model_.setMaximumNodes(s.getIntParameter(Bonmin::BabSetupBase::MaxNodes));

  model_.setMaximumNumberIterations(s.getIntParameter(Bonmin::BabSetupBase::MaxIterations));

  model_.setMaximumSolutions(s.getIntParameter(Bonmin::BabSetupBase::MaxSolutions));

  model_.setIntegerTolerance(s.getDoubleParameter(Bonmin::BabSetupBase::IntTol));

  //Get objects from model_ if it is not null means there are some sos constraints or non-integer branching object
  // pass them to cut generators.
  OsiObject ** objects = model_.objects();

  if (specOpt!=16 && objects) {

    int numberObjects = model_.numberObjects();
    if (objects_ != NULL) {
      for (int i = 0 ; i < nObjects_; i++)
  	delete objects_[i];
    }
    delete [] objects_;
    objects_ = new OsiObject*[numberObjects];
    nObjects_ = numberObjects;
    for (int i = 0 ; i < numberObjects; i++) {
      OsiObject * obj = objects[i];
      CbcSimpleInteger * intObj = dynamic_cast<CbcSimpleInteger *> (obj);
      if (intObj) {
  	objects_[i] = intObj->osiObject();
      }
      else {
  	CbcSOS * sosObj = dynamic_cast<CbcSOS *>(obj);
  	if (sosObj) objects_[i] = sosObj->osiObject(model_.solver());
  	else {//Maybe an unsupported CbcObject
  	  CbcObject * cbcObj = dynamic_cast<CbcObject *>(obj);
  	  if (cbcObj) {
  	    std::cerr<<"Unsupported CbcObject appears in the code"<<std::endl;
  	    throw UNSUPPORTED_CBC_OBJECT;
  	  }
  	  else {//It has to be an OsiObject.
  	    objects_[i]=obj->clone();
  	  }
  	}
      }
    }
    CbcCutGenerator ** gen = model_.cutGenerators();
    int numGen = model_.numberCutGenerators();
    for (int i = 0 ; i < numGen ; i++) {
      Bonmin::OaDecompositionBase * oa = dynamic_cast<Bonmin::OaDecompositionBase * >(gen[i]->generator());
      // if (oa)
      // 	printf ("\n\n\nat least one OADecompBase\n\n\n");
      if (oa) // pass objects
  	oa->setObjects(objects_,nObjects_);
    }
  }

  // if (objects_) {

  //   for (int i = 0 ; i < nObjects_; i++)
  //     delete objects_ [i];

  //   delete [] objects_;
  // }

  // OsiObject ** objects = model_.objects();
  // int numObjects = model_.numberObjects();

  // nObjects_ = 0;
  // objects_ = new OsiObject* [numObjects];

  // for (int i=0; i < numObjects; ++i)
  //   if (objects [i])
  //     objects_ [nObjects_++] = objects [i] -> clone ();

  try {

    //Get the time and start.

    {
      OsiTMINLPInterface * tmpOsi = NULL;
      if(s.nonlinearSolver() == s.continuousSolver()){
        tmpOsi = dynamic_cast<OsiTMINLPInterface *> (model_.solver());
        tmpOsi->forceSolverOutput(s.getIntParameter(Bonmin::BabSetupBase::RootLogLevel)); 
      }

      model_.initialSolve();

      if(tmpOsi != NULL){
        tmpOsi->setSolverOutputToDefault(); 
      }
    }

    int ival;

    s.options()->GetEnumValue("enable_dynamic_nlp", ival, "bonmin.");

    if(s.nonlinearSolver() == s.continuousSolver() && ival) {

	if(!model_.solver()->isProvenOptimal() ){//Something went wrong check if objective is linear and alternate model
	  // can be solved
	  OsiTMINLPInterface * tmpOsi = dynamic_cast<OsiTMINLPInterface *> (model_.solver());
	  TMINLPLinObj * tmp_tminlp = dynamic_cast<TMINLPLinObj *> (tmpOsi->model());
	  tmpOsi->setModel(tmp_tminlp->tminlp());
	  model_.initialSolve();
	} 
	else {
	  LinearCutsGenerator cgl;
	  cgl.initialize(s); 
	  OsiCuts cuts;
	  cgl.generateCuts(*model_.solver(), cuts);
	  std::vector<const OsiRowCut *> mycuts(cuts.sizeRowCuts());
	  for(int i = 0 ; i < cuts.sizeRowCuts() ; i++){
	    mycuts[i] = cuts.rowCutPtr(i);
	  }
	  model_. solver () -> applyRowCuts ((int) mycuts.size(), (const OsiRowCut **) &mycuts[0]);
	}

	//Added by Claudia
	OsiTMINLPInterface * nlpSolver = dynamic_cast<OsiTMINLPInterface *>(model_.solver());
	if(nlpSolver && nlpSolver->getNewCutoffDecr()!=COIN_DBL_MAX)
          model_.setDblParam(CbcModel::CbcCutoffIncrement, nlpSolver->getNewCutoffDecr());

	model_.solver()->resolve();
      }

    // for Couenne
    model_.passInSolverCharacteristics (bonBabInfoPtr);

    continuousRelaxation_ =model_.solver()->getObjValue();
    if (specOpt==16)//Set warm start point for Ipopt
      {
#if 1
	const double * colsol = model_.solver()->getColSolution();
	const double * duals = model_.solver()->getRowPrice();

	OsiTMINLPInterface * tnlpSolver = dynamic_cast<OsiTMINLPInterface *>(model_.solver());
	// Primal dual point is not copied if one (supposedly a better one) has already been put into the solver.
	if(tnlpSolver->problem()->has_x_init() != 2){
	  model_.solver()->setColSolution(colsol);
	  model_.solver()->setRowPrice(duals);
	}
#else
	OsiTMINLPInterface * tnlpSolver = dynamic_cast<OsiTMINLPInterface *>(model_.solver());
	CoinWarmStart * warm = tnlpSolver->solver()->getWarmStart(tnlpSolver->problem());
	tnlpSolver->solver()->setWarmStart(warm, tnlpSolver->problem());
	delete warm;
#endif

#if 0 // Sometimes primal dual point is problematic in the context of Cut-and-branch
	model_.solver()->resolve();
	if(!model_.solver()->isProvenOptimal())
	  model_.solver()->setColSolution(NULL);
#endif 
      }

#ifdef SIGNAL
    CoinSighandler_t saveSignal = SIG_DFL;
    // register signal handler
    saveSignal = signal (SIGINT,couenne_signal_handler);
    currentBranchModel = &model_;
#endif


    // to get node parent info in Cbc, pass parameter 3.
    //model_.branchAndBound(3);
    remaining_time -= CoinCpuTime();
    model_.setDblParam(CbcModel::CbcMaximumSeconds, remaining_time);
    if(remaining_time > 0.)
      model_.branchAndBound();
  }

  catch(TNLPSolver::UnsolvedError *E){
    s.nonlinearSolver()->model()->finalize_solution
      (TMINLP::MINLP_ERROR, 0, NULL, DBL_MAX);
    throw E;
  }

  numNodes_ = model_.getNodeCount();
  bestObj_ = model_.getObjValue();
  bestBound_ = model_.getBestPossibleObjValue();
  mipIterationCount_ = model_.getIterationCount();

  bool hasFailed = false;
  if (specOpt==16)//Did we continue branching on a failure
    {
      CbcNlpStrategy * nlpStrategy = dynamic_cast<CbcNlpStrategy *>(model_.strategy());
      if (nlpStrategy)
        hasFailed = nlpStrategy->hasFailed();
      else
        throw -1;
    }
  else
    hasFailed = s.nonlinearSolver()->hasContinuedOnAFailure();

  // Output summarizing cut generators (taken from CbcSolver.cpp)
  // ToDo put into proper print level

  int numberGenerators = model_.numberCutGenerators();
  for (int iGenerator=0;iGenerator<numberGenerators;iGenerator++) {
    CbcCutGenerator * generator = model_.cutGenerator(iGenerator);
    //CglStored * stored = dynamic_cast<CglStored*>(generator->generator());
    if (!(generator->numberCutsInTotal() || generator->numberColumnCuts()))
      continue;
    if(modelHandler_->logLevel() >= 1) {
      *modelHandler_ << generator->cutGeneratorName()
                     << " was tried " << generator->numberTimesEntered()
                     << " times and created " << generator->numberCutsInTotal()+generator->numberColumnCuts()
                     << " cuts of which " << generator->numberCutsActive()
                     << " were active after adding rounds of cuts" << CoinMessageEol;
      // if (generator->timing()) {
      // 	char timebuf[20];
      // 	sprintf(timebuf, "(%.3fs)", generator->timeInCutGenerator());
      // 	*modelHandler_ << timebuf << CoinMessageEol;
      // }
      // else {
      // 	*modelHandler_ << CoinMessageEol;
      // }
    }
  }

  TMINLP::SolverReturn status = TMINLP::MINLP_ERROR;

  if (model_.numberObjects()==0) {
    if (bestSolution_)
      delete [] bestSolution_;
    OsiSolverInterface * solver = 
      (s.nonlinearSolver() == s.continuousSolver())? 
      model_.solver() : s.nonlinearSolver();
    bestSolution_ = new double[solver->getNumCols()];
    CoinCopyN(solver->getColSolution(), solver->getNumCols(),
	      bestSolution_);
    bestObj_ = bestBound_ = solver->getObjValue();
  }

  if (bonBabInfoPtr->bestSolution2().size() > 0) {
    assert((int) bonBabInfoPtr->bestSolution2().size() == s.nonlinearSolver()->getNumCols());
    if (bestSolution_)
      delete [] bestSolution_;
    bestSolution_ = new double[s.nonlinearSolver()->getNumCols()];
    std::copy(bonBabInfoPtr->bestSolution2().begin(), bonBabInfoPtr->bestSolution2().end(),
	      bestSolution_);
    bestObj_ = (bonBabInfoPtr->bestObj2());
    (*s.nonlinearSolver()->messageHandler())<<"\nReal objective function: "
                                            <<bestObj_<<CoinMessageEol;
  }
  else if (model_.bestSolution()) {
    if (bestSolution_)
      delete [] bestSolution_;
    bestSolution_ = new double[s.nonlinearSolver()->getNumCols()];
    CoinCopyN(model_.bestSolution(), s.nonlinearSolver()->getNumCols(), bestSolution_);
  }
  if(remaining_time <= 0.){
    status = TMINLP::LIMIT_EXCEEDED;
    if (bestSolution_) {
      mipStatus_ = Feasible;
    }
    else {
      mipStatus_ = NoSolutionKnown;
    }
  }
  else if (model_.status() == 0) {
    if(model_.isContinuousUnbounded()){
      status = TMINLP::CONTINUOUS_UNBOUNDED;
      mipStatus_ = UnboundedOrInfeasible;
    }
    else
      if (bestSolution_) {
        status = TMINLP::SUCCESS;
        mipStatus_ = FeasibleOptimal;
      }
      else {
        status = TMINLP::INFEASIBLE;
        mipStatus_ = ProvenInfeasible;
      }
  }
  else if (model_.status() == 1 || model_.status() == 5) {
#if (BONMIN_VERSION_MAJOR > 1) || (BONMIN_VERSION_MINOR > 6)
    status = model_.status() == 1 ? TMINLP::LIMIT_EXCEEDED : TMINLP::USER_INTERRUPT;
#else
    status = TMINLP::LIMIT_EXCEEDED;
#endif
    if (bestSolution_) {
      mipStatus_ = Feasible;
    }
    else {
      mipStatus_ = NoSolutionKnown;
    }
  }
  else if (model_.status()==2) {
    status = TMINLP::MINLP_ERROR;
  }

  // Which solution should we use? false if RBS's, true if Cbc's
  bool use_RBS_Cbc = 
    !problem_ ||
    !(problem_ -> getRecordBestSol ()) ||
    !(problem_ -> getRecordBestSol () -> getHasSol()) ||
    (((fabs (bestObj_) < COUENNE_INFINITY / 1e4) && 
      (problem_ -> getRecordBestSol () -> getVal () > bestObj_)));

  /* if we do not pass the cbc solution and problem_ -> getRecordBestSol () -> getHasSol() is true, then there should be a solution vector in problem_ -> getRecordBestSol () */
  assert(use_RBS_Cbc || problem_ -> getRecordBestSol () -> getSol() != NULL);

  s.nonlinearSolver () -> model () -> finalize_solution 
    (status,
     s.nonlinearSolver () -> getNumCols (),
     use_RBS_Cbc ? bestSolution_ : problem_ -> getRecordBestSol () -> getSol (),
     use_RBS_Cbc ? bestObj_      : problem_ -> getRecordBestSol () -> getVal ());
}
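One small idiom from the function above is the time-budget bookkeeping: remaining_time is initialized to MaxTime plus the current CPU time, and the current time is subtracted again just before the search, leaving the time still available. A minimal sketch of the same pattern, with CoinCpuTime() replaced by a standard steady clock and an invented 10-second limit:

#include <chrono>
#include <cstdio>
#include <thread>

// Stand-in for CoinCpuTime(): seconds on a steady clock.
static double now_seconds() {
  using namespace std::chrono;
  return duration<double>(steady_clock::now().time_since_epoch()).count();
}

int main() {
  double max_time = 10.0;                      // stands in for the MaxTime parameter
  double remaining_time = max_time + now_seconds();

  std::this_thread::sleep_for(std::chrono::milliseconds(200));  // setup work happens here

  remaining_time -= now_seconds();             // = max_time - elapsed setup time
  if (remaining_time > 0.)
    std::printf("start search with %.3f s left\n", remaining_time);
  else
    std::printf("time limit already exhausted\n");
  return 0;
}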
Code example #5
/** Returns a feasible solution to the MINLP.
 * The heuristic constructs a MIP approximating all univariate functions appearing in nonlinear constraints.
 * The linear approximation is obtained by adding inner chords linking pairs of points until the range of each variable is covered. **/
int
HeuristicInnerApproximation::solution(double &solutionValue, double *betterSolution)
{
if(model_->getNodeCount() || model_->getCurrentPassNumber() > 1) return 0;
if ((model_->getNodeCount()%howOften_) != 0 || model_->getCurrentPassNumber() > 1)
  return 0;

int returnCode = 0; // 0 means it didn't find a feasible solution

OsiTMINLPInterface * nlp = NULL;
if(setup_->getAlgorithm() == B_BB)
nlp = dynamic_cast<OsiTMINLPInterface *>(model_->solver()->clone());
else
nlp = dynamic_cast<OsiTMINLPInterface *>(setup_->nonlinearSolver()->clone());

TMINLP2TNLP* minlp = nlp->problem();
// set tolerances
double integerTolerance = model_->getDblParam(CbcModel::CbcIntegerTolerance);

int numberColumns;
int numberRows;
int nnz_jac_g;
int nnz_h_lag;
Ipopt::TNLP::IndexStyleEnum index_style;
minlp->get_nlp_info(numberColumns, numberRows, nnz_jac_g,
    nnz_h_lag, index_style);

const Bonmin::TMINLP::VariableType* variableType = minlp->var_types();

const double* x_sol = minlp->x_sol();

double* newSolution = new double [numberColumns];
memcpy(newSolution,x_sol,numberColumns*sizeof(double));
double* new_g_sol = new double [numberRows];

bool feasible = true;
// load the problem to OSI
#ifdef DEBUG_BON_HEURISTIC
cout << "Loading the problem to OSI\n";
#endif
OsiSolverInterface *si = mip_->solver(); // the MIP solver

bool delete_si = false;
if(si == NULL) {
  si = new OsiClpSolverInterface;
  mip_->setLpSolver(si);
  delete_si = true;
}
CoinMessageHandler * handler = model_->messageHandler()->clone();
si->passInMessageHandler(handler);
si->messageHandler()->setLogLevel(2);
#ifdef DEBUG_BON_HEURISTIC
cout << "Loading problem into si\n";
#endif
extractInnerApproximation(*nlp, *si, newSolution, true); // Call the function constructing the inner approximation description
#ifdef DEBUG_BON_HEURISTIC
cout << "problem loaded\n";
cout << "**** Running optimization ****\n";
#endif
mip_->optimize(DBL_MAX, 2, 180); // Optimize the MIP
#ifdef DEBUG_BON_HEURISTIC
cout << "Optimization finished\n";
#endif
if(mip_->getLastSolution()) { // if the MIP solver returns a feasible solution
  const double* solution = mip_->getLastSolution();
  for (int iLCol=0;iLCol<numberColumns;iLCol++) {
    newSolution[iLCol] = solution[iLCol];
  }
}
else
feasible = false;

if(delete_si) {
  delete si;
}
delete handler;

const double* x_l = minlp->x_l();
const double* x_u = minlp->x_u();
const double* g_l = minlp->g_l();
const double* g_u = minlp->g_u();
double primalTolerance = 1.0e-6;
#if 1
if(feasible ) {

  std::vector<double> memLow(numberColumns);
  std::vector<double> memUpp(numberColumns);
  std::copy(minlp->x_l(), minlp->x_l() + numberColumns, memLow.begin());
  std::copy(minlp->x_u(), minlp->x_u() + numberColumns, memUpp.begin());
  // fix the integer variables and solve the NLP
  for (int iColumn=0;iColumn<numberColumns;iColumn++) {
        if (variableType[iColumn] != Bonmin::TMINLP::CONTINUOUS) {
          double value=floor(newSolution[iColumn]+0.5);
          minlp->SetVariableUpperBound(iColumn, value);
          minlp->SetVariableLowerBound(iColumn, value);
        }
  }
  if(feasible) {
    nlp->initialSolve();
    if(minlp->optimization_status() != Ipopt::SUCCESS) {
      feasible = false;
    }
    memcpy(newSolution,minlp->x_sol(),numberColumns*sizeof(double));
  }

 
  for (int iColumn=0;iColumn<numberColumns;iColumn++) {
    if (variableType[iColumn] != Bonmin::TMINLP::CONTINUOUS) {
        minlp->SetVariableUpperBound(iColumn, memUpp[iColumn]);
        minlp->SetVariableLowerBound(iColumn, memLow[iColumn]);
    }
  }
}
#endif

if(feasible) {
  double newSolutionValue;
  minlp->eval_f(numberColumns, newSolution, true, newSolutionValue);
  if(newSolutionValue < solutionValue) {
    memcpy(betterSolution,newSolution,numberColumns*sizeof(double));
    solutionValue = newSolutionValue;
    returnCode = 1;
  }
}

delete [] newSolution;
delete [] new_g_sol;

delete nlp;

#ifdef DEBUG_BON_HEURISTIC
std::cout<<"Inner approximation returnCode = "<<returnCode<<std::endl;
#endif
return returnCode;
}
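The core of the heuristic's final phase is to round the integer variables of the MIP solution, pin their bounds to the rounded values, re-solve the NLP, and then restore the original bounds. A minimal sketch of that bookkeeping, with invented data and the NLP solve left out:

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  std::vector<double> x  = {0.2, 1.7, 3.49};     // MIP values of the integer variables (made up)
  std::vector<double> lb = {0.0, 0.0, 0.0};
  std::vector<double> ub = {1.0, 5.0, 10.0};

  std::vector<double> memLow(lb), memUpp(ub);    // remember the original bounds
  for (std::size_t i = 0; i < x.size(); ++i) {
    double value = std::floor(x[i] + 0.5);       // round to the nearest integer
    lb[i] = ub[i] = value;                       // fix the variable for the NLP resolve
  }
  // ... here the NLP would be re-solved with the fixed bounds ...
  lb = memLow;                                   // restore the original bounds
  ub = memUpp;
  for (std::size_t i = 0; i < x.size(); ++i)
    std::printf("x[%zu] rounded to %g, bounds restored to [%g, %g]\n",
                i, std::floor(x[i] + 0.5), lb[i], ub[i]);
  return 0;
}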
Code example #6
void 
HeuristicInnerApproximation::extractInnerApproximation(OsiTMINLPInterface & nlp, OsiSolverInterface &si,
                                                       const double * x, bool getObj) {
   int n;
   int m;
   int nnz_jac_g;
   int nnz_h_lag;
   Ipopt::TNLP::IndexStyleEnum index_style;
   TMINLP2TNLP * problem = nlp.problem(); 
   //Get problem information
   problem->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);
   
   vector<int> jRow(nnz_jac_g);
   vector<int> jCol(nnz_jac_g);
   vector<double> jValues(nnz_jac_g);
   problem->eval_jac_g(n, NULL, 0, m, nnz_jac_g, jRow(), jCol(), NULL);
   if(index_style == Ipopt::TNLP::FORTRAN_STYLE)//put C-style
   {
     for(int i = 0 ; i < nnz_jac_g ; i++){
       jRow[i]--;
       jCol[i]--;
     }
   }
   
   //get Jacobian
   problem->eval_jac_g(n, x, 1, m, nnz_jac_g, NULL, NULL,
       jValues());
   
   vector<double> g(m);
   problem->eval_g(n, x, 1, m, g());
   
   vector<int> nonLinear(m);
   //store non linear constraints (which are to be removed from IA)
   int numNonLinear = 0;
   const double * rowLower = nlp.getRowLower();
   const double * rowUpper = nlp.getRowUpper();
   const double * colLower = nlp.getColLower();
   const double * colUpper = nlp.getColUpper();
   assert(m == nlp.getNumRows());
   double infty = si.getInfinity();
   double nlp_infty = nlp.getInfinity();
   vector<Ipopt::TNLP::LinearityType>  constTypes(m);
   problem->get_constraints_linearity(m, constTypes());
   for (int i = 0; i < m; i++) {
     if (constTypes[i] == Ipopt::TNLP::NON_LINEAR) {
       nonLinear[numNonLinear++] = i;
     }
   }
   vector<double> rowLow(m - numNonLinear);
   vector<double> rowUp(m - numNonLinear);
   int ind = 0;
   for (int i = 0; i < m; i++) {
     if (constTypes[i] != Ipopt::TNLP::NON_LINEAR) {
       if (rowLower[i] > -nlp_infty) {
         //   printf("Lower %g ", rowLower[i]);
         rowLow[ind] = (rowLower[i]);
       } else
         rowLow[ind] = -infty;
       if (rowUpper[i] < nlp_infty) {
         //   printf("Upper %g ", rowUpper[i]);
         rowUp[ind] = (rowUpper[i]);
       } else
         rowUp[ind] = infty;
       ind++;
     }
   
   }
   
   CoinPackedMatrix mat(true, jRow(), jCol(), jValues(), nnz_jac_g);
   mat.setDimensions(m, n); // In case matrix was empty, this should be enough
   
   //remove non-linear constraints
   mat.deleteRows(numNonLinear, nonLinear());
   
   int numcols = nlp.getNumCols();
   vector<double> obj(numcols);
   for (int i = 0; i < numcols; i++)
     obj[i] = 0.;
   
   si.loadProblem(mat, nlp.getColLower(), nlp.getColUpper(), 
                  obj(), rowLow(), rowUp());
   const Bonmin::TMINLP::VariableType* variableType = problem->var_types();
   for (int i = 0; i < n; i++) {
     if ((variableType[i] == TMINLP::BINARY) || (variableType[i]
         == TMINLP::INTEGER))
       si.setInteger(i);
   }
   if (getObj) {
     bool addObjVar = false;
     if (problem->hasLinearObjective()) {
       double zero;
       vector<double> x0(n, 0.);
       problem->eval_f(n, x0(), 1, zero);
       si.setDblParam(OsiObjOffset, -zero);
       //Copy the linear objective and don't create a dummy variable.
       problem->eval_grad_f(n, x, 1, obj());
       si.setObjective(obj());
     } else {
       addObjVar = true;
     }
   
     if (addObjVar) {
       nlp.addObjectiveFunction(si, x);
     }
   }
   
   // Hassan IA initial description
   int InnerDesc = 1;
   if (InnerDesc == 1) {
     OsiCuts cs;
   
     double * p = CoinCopyOfArray(colLower, n);
     double * pp = CoinCopyOfArray(colLower, n);
     double * up = CoinCopyOfArray(colUpper, n);
   
     const int& nbAp = nbAp_;
     std::vector<int> nbG(m, 0);// Number of generated points for each nonlinear constraint
   
     std::vector<double> step(n);
   
     for (int i = 0; i < n; i++) {
   
       if (colUpper[i] > 1e08) {
         up[i] = 0;
       }
   
       if (colUpper[i] > 1e08 || colLower[i] < -1e08 || (variableType[i]
           == TMINLP::BINARY) || (variableType[i] == TMINLP::INTEGER)) {
         step[i] = 0;
       } else
         step[i] = (up[i] - colLower[i]) / (nbAp);
   
       if (colLower[i] < -1e08) {
         p[i] = 0;
         pp[i] = 0;
       }
   
     }
     vector<double> g_p(m);
     vector<double> g_pp(m);
   
     for (int j = 1; j <= nbAp; j++) {
   
       for (int i = 0; i < n; i++) {
         pp[i] += step[i];
       }
   
       problem->eval_g(n, p, 1, m, g_p());
       problem->eval_g(n, pp, 1, m, g_pp());
       double diff = 0;
       int varInd = 0;
       for (int i = 0; (i < m && constTypes[i] == Ipopt::TNLP::NON_LINEAR); i++) {
         if (varInd == n - 1)
           varInd = 0;
         diff = std::abs(g_p[i] - g_pp[i]);
         if (nbG[i] < nbAp - 1) {
           getMyInnerApproximation(nlp, cs, i, p, pp);// Generate a chord connecting the two points
           p[varInd] = pp[varInd];
           nbG[i]++;
         }
         varInd++;
       }
     }
   
     for(int i = 0; (i< m && constTypes[i] == Ipopt::TNLP::NON_LINEAR); i++) {
      //  getConstraintOuterApproximation(cs, i, colUpper, NULL, true);// Generate Tangents at current point
         getMyInnerApproximation(nlp, cs, i, p, up);// Generate a chord connecting the two points
     }

     delete [] p;
     delete [] pp;
     delete [] up;
     si.applyCuts(cs);
   }
  }
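The inner-approximation loop above walks each bounded continuous variable from its lower to its upper bound in nbAp_ equal steps and generates one chord per consecutive pair of points. A standalone sketch of the breakpoint placement for a single variable, with x*x standing in for an arbitrary univariate nonlinear term:

#include <cstdio>

int main() {
  double lower = 0.0, upper = 4.0;
  int nbAp = 4;                                  // number of approximation points
  double step = (upper - lower) / nbAp;

  double p = lower, pp = lower;
  for (int j = 1; j <= nbAp; ++j) {
    pp += step;                                  // advance the second endpoint
    std::printf("chord %d: from (%.2f, %.2f) to (%.2f, %.2f)\n",
                j, p, p * p, pp, pp * pp);
    p = pp;                                      // next chord starts where this one ends
  }
  return 0;
}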
Code example #7
/** Get an inner-approximation constraint obtained by drawing a chord linking the two given points x and x2. 
 * This only applies to nonlinear constraints featuring univariate functions (f(x) <= y).**/
bool
HeuristicInnerApproximation::getMyInnerApproximation(OsiTMINLPInterface &si, OsiCuts &cs, int ind,
    const double * x, const double * x2) {

  int n, m, nnz_jac_g, nnz_h_lag;
  Ipopt::TNLP::IndexStyleEnum index_style;
  TMINLP2TNLP * problem = si.problem();
  problem->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);


  CoinPackedVector cut;
  double lb = 0;
  double ub = 0;

  double infty = si.getInfinity();

  lb = -infty; // we only compute <= constraints

  double g = 0;
  double g2 = 0;
  double diff = 0;
  double a = 0;
  problem->eval_gi(n, x, 1, ind, g);
  problem->eval_gi(n, x2, 1, ind, g2);
  vector<int> jCol(n);
  int nnz;
  problem->eval_grad_gi(n, x2, 0, ind, nnz, jCol(), NULL);
  vector<double> jValues(nnz);
  problem->eval_grad_gi(n, x2, 0, ind, nnz, NULL, jValues());
  bool add = false;
  for (int i = 0; i < nnz; i++) {
    const int &colIdx = jCol[i]; // reference: reflects the Fortran->C index shift below
    if(index_style == Ipopt::TNLP::FORTRAN_STYLE) jCol[i]--;
    diff = x[colIdx] - x2[colIdx];

    if (fabs(diff) >= 1e-8) {
      a = (g - g2) / diff;
      cut.insert(colIdx, a);
      ub = a * x[colIdx] - g;
      add = true;
    } else
      cut.insert(colIdx, jValues[i]);
  }

  if (add) {

    OsiRowCut newCut;
    newCut.setGloballyValidAsInteger(1);
    newCut.setLb(lb);
    
      //********* Perspective Extension ********//
    int* ids = problem->get_const_xtra_id(); // vector of indices corresponding to the binary variable activating the corresponding constraint
    int binary_id = (ids == NULL) ? -1 : ids[ind]; // Get the index of the corresponding indicator binary variable
    if(binary_id>0) {// If this hyperplane is a linearization of a disjunctive constraint, we link its righthand side to the corresponding indicator binary variable
        cut.insert(binary_id, -ub); // a*x <= ub  =>  a*x - ub*z <= 0
        newCut.setUb(0);
    }
    else
        newCut.setUb(ub);
    //********* Perspective Extension ********//

    newCut.setRow(cut);
    cs.insert(newCut);
    return true;
  }
  return false;
}
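The cut built above is the chord (secant) of a univariate term in a constraint of the form f(x) <= y. Assuming f is convex on the interval (the convexity assumption is mine; the code only requires univariate f), the line through (x1, f(x1)) and (x2, f(x2)) lies above f on [x1, x2], so requiring the chord value to stay below y is a valid inner restriction. A minimal sketch of the slope computation and the row it induces, with f(x) = x*x as an illustrative stand-in:

#include <cstdio>

// Illustrative univariate convex function; not part of Bonmin.
static double f(double x) { return x * x; }

int main() {
  double x1 = 1.0, x2 = 3.0;
  double g1 = f(x1), g2 = f(x2);
  double a = (g1 - g2) / (x1 - x2);              // secant slope, as in the heuristic
  // chord(x) = g1 + a*(x - x1); "chord(x) <= y" is the row  a*x - y <= a*x1 - g1.
  for (double x = x1; x <= x2 + 1e-9; x += 0.5)
    std::printf("x=%.1f  f(x)=%5.2f  chord(x)=%5.2f\n", x, f(x), g1 + a * (x - x1));
  return 0;
}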
Code example #8
File: BonMilpRounding.cpp  Project: coin-or/Bonmin
  int
  MilpRounding::solution(double &solutionValue, double *betterSolution)
  {
    if(model_->getCurrentPassNumber() > 1) return 0;
    if (model_->currentDepth() > 2 && (model_->getNodeCount()%howOften_)!=0)
      return 0;
 
    int returnCode = 0; // 0 means it didn't find a feasible solution

    OsiTMINLPInterface * nlp = NULL;
    if(setup_->getAlgorithm() == B_BB)
      nlp = dynamic_cast<OsiTMINLPInterface *>(model_->solver()->clone());
    else
      nlp = dynamic_cast<OsiTMINLPInterface *>(setup_->nonlinearSolver()->clone());

    TMINLP2TNLP* minlp = nlp->problem();
 
    // set tolerances
    double integerTolerance = model_->getDblParam(CbcModel::CbcIntegerTolerance);
    //double primalTolerance = 1.0e-6;

    int n;
    int m;
    int nnz_jac_g;
    int nnz_h_lag;
    Ipopt::TNLP::IndexStyleEnum index_style;
    minlp->get_nlp_info(n, m, nnz_jac_g,
			nnz_h_lag, index_style);

    const Bonmin::TMINLP::VariableType* variableType = minlp->var_types();
    const double* x_sol = minlp->x_sol();
    const double* g_l = minlp->g_l();
    const double* g_u = minlp->g_u();

    const double * colsol = model_->solver()->getColSolution();


    // Get information about the linear and nonlinear part of the instance
    TMINLP* tminlp = nlp->model();
    vector<Ipopt::TNLP::LinearityType> c_lin(m);
    tminlp->get_constraints_linearity(m, c_lin());
    vector<int> c_idx(m);
    int n_lin = 0;
    for (int i=0;i<m;i++) {
      if (c_lin[i]==Ipopt::TNLP::LINEAR)
	c_idx[i] = n_lin++;
      else
	c_idx[i] = -1;
    }


    // Get the structure of the jacobian
    vector<int> indexRow(nnz_jac_g);
    vector<int> indexCol(nnz_jac_g);
    minlp->eval_jac_g(n, x_sol, false,
		      m, nnz_jac_g,
		      indexRow(), indexCol(), 0);

    // get the jacobian values 
    vector<double> jac_g(nnz_jac_g);
    minlp->eval_jac_g(n, x_sol, false,
                      m, nnz_jac_g,
                      NULL, NULL, jac_g());

    // Sort the matrix to column ordered
    vector<int> sortedIndex(nnz_jac_g);
    CoinIotaN(sortedIndex(), nnz_jac_g, 0);
    MatComp c;
    c.iRow = indexRow();
    c.jCol = indexCol();
    std::sort(sortedIndex.begin(), sortedIndex.end(), c);

    vector<int> row (nnz_jac_g);
    vector<double> value (nnz_jac_g);
    vector<int> columnStart(n,0); 
    vector<int> columnLength(n,0);
    int indexCorrection = (index_style == Ipopt::TNLP::C_STYLE) ? 0 : 1;
    int iniCol = -1;
    int nnz = 0;
    for(int i=0; i<nnz_jac_g; i++) {
      int thisIndexCol = indexCol[sortedIndex[i]]-indexCorrection;
      int thisIndexRow = c_idx[indexRow[sortedIndex[i]] - indexCorrection];
      if(thisIndexCol != iniCol) {
	iniCol = thisIndexCol;
	columnStart[thisIndexCol] = nnz;
	columnLength[thisIndexCol] = 0;
      }
      if(thisIndexRow == -1) continue;
      columnLength[thisIndexCol]++;
      row[nnz] = thisIndexRow;
      value[nnz] = jac_g[i];
      nnz++;
    }

    // Build the row lower and upper bounds
    vector<double> newRowLower(n_lin);
    vector<double> newRowUpper(n_lin);
    for(int i = 0 ; i < m ; i++){
      if(c_idx[i] == -1) continue;
      newRowLower[c_idx[i]] = g_l[i];
      newRowUpper[c_idx[i]] = g_u[i];
    }

    // Get solution array for heuristic solution
    vector<double> newSolution(n);
    std::copy(x_sol, x_sol + n, newSolution.begin());

    // Define the constraint matrix for MILP
    CoinPackedMatrix matrix(true,n_lin,n, nnz, value(), row(), columnStart(), columnLength());

      // create objective function and columns lower and upper bounds for MILP
      // and create columns for matrix in MILP
      //double alpha = 0;
      double beta = 1;
      vector<double> objective(n);
      vector<int> idxIntegers;
      idxIntegers.reserve(n);
      for(int i = 0 ; i < n ; i++){
         if(variableType[i] != Bonmin::TMINLP::CONTINUOUS){
            idxIntegers.push_back(i);
            objective[i] = beta*(1 - 2*colsol[i]);
         }
      }

#if 0
      // Get dual multipliers and build gradient of the lagrangean
      const double * duals = nlp->getRowPrice() + 2 *n;
      vector<double> grad(n, 0); 
      vector<int> indices(n, 0);
      tminlp->eval_grad_f(n, x_sol, false, grad());
      for(int i = 0 ; i < m ; i++){
        if(c_lin[i] == Ipopt::TNLP::LINEAR) continue;
        int nnz;
        tminlp->eval_grad_gi(n, x_sol, false, i, nnz, indices(), NULL);  
        tminlp->eval_grad_gi(n, x_sol, false, i, nnz, NULL, grad());
        for(int k = 0 ; k < nnz ; k++){
          objective[indices[k]] += alpha *duals[i] * grad[k];
        } 
      }
      for(int i = 0 ; i < n ; i++){
         if(variableType[i] != Bonmin::TMINLP::CONTINUOUS)
         objective[i] += alpha * grad[i];
         //if(fabs(objective[i]) < 1e-4) objective[i] = 0;
         else objective[i] = 0;
      }
      std::copy(objective.begin(), objective.end(), std::ostream_iterator<double>(std::cout, " "));
      std::cout<<std::endl;
#endif

      // load the problem to OSI
      OsiSolverInterface *si = mip_->solver();
      assert(si != NULL);
      CoinMessageHandler * handler = model_->messageHandler()->clone();
      si->passInMessageHandler(handler);
      si->messageHandler()->setLogLevel(1);

      si->loadProblem(matrix, model_->solver()->getColLower(), model_->solver()->getColUpper(), objective(), 
                      newRowLower(), newRowUpper());
      si->setInteger(idxIntegers(), static_cast<int>(idxIntegers.size()));
      si->applyCuts(noGoods);

      bool hasFractionnal = true;
      while(hasFractionnal){
        mip_->optimize(DBL_MAX, 0, 60);
        hasFractionnal = false;
#if 0
        bool feasible = false;
        if(mip_->getLastSolution()) {
  	const double* solution = mip_->getLastSolution();
          std::copy(solution, solution + n, newSolution.begin());
  	feasible = true;
  
        }

    if(feasible) {
      // fix the integer variables and solve the NLP
      // also add no good cut
      CoinPackedVector v;
      double lb = 1;
      for (int iColumn=0;iColumn<n;iColumn++) {
	if (variableType[iColumn] != Bonmin::TMINLP::CONTINUOUS) {
	  double value=newSolution[iColumn];
	  if (fabs(floor(value+0.5)-value)>integerTolerance) {
#ifdef DEBUG_BON_HEURISTIC_DIVE_MIP
	    cout<<"It should be infeasible because: "<<endl;
	    cout<<"variable "<<iColumn<<" is not integer"<<endl;
#endif
	    feasible = false;
	    break;
	  }
	  else {
	    value=floor(newSolution[iColumn]+0.5);
            if(value > 0.5){
              v.insert(iColumn, -1);
              lb -= value;
            }
	    minlp->SetVariableUpperBound(iColumn, value);
	    minlp->SetVariableLowerBound(iColumn, value);
	  }
	}
      }
      }
#endif
      }
      bool feasible = false;
      if(mip_->getLastSolution()) {
        const double* solution = mip_->getLastSolution();
        std::copy(solution, solution + n, newSolution.begin());
        feasible = true;
      }
      delete handler;
      

    if(feasible) {
      // fix the integer variables and solve the NLP
      // also add no good cut
      CoinPackedVector v;
      double lb = 1;
      for (int iColumn=0;iColumn<n;iColumn++) {
	if (variableType[iColumn] != Bonmin::TMINLP::CONTINUOUS) {
	  double value=newSolution[iColumn];
	  if (fabs(floor(value+0.5)-value)>integerTolerance) {
	    feasible = false;
	    break;
	  }
	  else {
	    value=floor(newSolution[iColumn]+0.5);
            if(value > 0.5){
              v.insert(iColumn, -1);
              lb -= value;
            }
	    minlp->SetVariableUpperBound(iColumn, value);
	    minlp->SetVariableLowerBound(iColumn, value);
	  }
	}
      }
      OsiRowCut c;
      c.setRow(v);
      c.setLb(lb);
      c.setUb(DBL_MAX);
      noGoods.insert(c);
      if(feasible) {
	nlp->initialSolve();
	if(minlp->optimization_status() != Ipopt::SUCCESS) {
	  feasible = false;
	}
	std::copy(x_sol,x_sol+n, newSolution.begin());
      }
    }
    if(feasible) {
      double newSolutionValue;
      minlp->eval_f(n, newSolution(), true, newSolutionValue); 
      if(newSolutionValue < solutionValue) {
        std::copy(newSolution.begin(), newSolution.end(), betterSolution);
	solutionValue = newSolutionValue;
	returnCode = 1;
      }
    }

    delete nlp;

#ifdef DEBUG_BON_HEURISTIC_DIVE_MIP
    std::cout<<"DiveMIP returnCode = "<<returnCode<<std::endl;
#endif

    return returnCode;
  }
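After rounding, the heuristic records a no-good cut: every integer variable whose rounded value is one gets coefficient -1 and the lower bound is reduced by one, so the row forces at least one of those variables to move away from one in later MIP solves. A minimal standalone sketch of how that row is assembled, with an invented 0/1 point:

#include <cstdio>
#include <vector>

int main() {
  std::vector<double> xstar = {1.0, 0.0, 1.0, 1.0};   // rounded MIP solution (made up)
  std::vector<double> coeff(xstar.size(), 0.0);
  double lb = 1.0;
  for (std::size_t i = 0; i < xstar.size(); ++i) {
    if (xstar[i] > 0.5) {            // variable rounded to one
      coeff[i] = -1.0;
      lb -= 1.0;                     // lb ends up as 1 - (number of ones)
    }
  }
  std::printf("no-good cut: ");
  for (std::size_t i = 0; i < coeff.size(); ++i)
    std::printf("%+g*x%zu ", coeff[i], i);
  std::printf(">= %g\n", lb);
  return 0;
}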