Example #1
static PetscErrorCode TaoSolve_SSILS(Tao tao)
{
  TAO_SSLS                     *ssls = (TAO_SSLS *)tao->data;
  PetscReal                    psi, ndpsi, normd, innerd, t=0;
  PetscReal                    delta, rho;
  PetscInt                     iter=0,kspits;
  TaoConvergedReason           reason;
  TaoLineSearchConvergedReason ls_reason;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  /* Assume that Setup has been called!
     Set the structure for the Jacobian and create a linear solver. */
  delta = ssls->delta;
  rho = ssls->rho;

  ierr = TaoComputeVariableBounds(tao);CHKERRQ(ierr);
  ierr = VecMedian(tao->XL,tao->solution,tao->XU,tao->solution);CHKERRQ(ierr);
  ierr = TaoLineSearchSetObjectiveAndGradientRoutine(tao->linesearch,Tao_SSLS_FunctionGradient,tao);CHKERRQ(ierr);
  ierr = TaoLineSearchSetObjectiveRoutine(tao->linesearch,Tao_SSLS_Function,tao);CHKERRQ(ierr);

  /* Calculate the function value and the Fischer function value at the
     current iterate */
  ierr = TaoLineSearchComputeObjectiveAndGradient(tao->linesearch,tao->solution,&psi,ssls->dpsi);CHKERRQ(ierr);
  ierr = VecNorm(ssls->dpsi,NORM_2,&ndpsi);CHKERRQ(ierr);

  while (1) {
    ierr=PetscInfo3(tao, "iter: %D, merit: %g, ndpsi: %g\n",iter, (double)ssls->merit, (double)ndpsi);CHKERRQ(ierr);
    /* Check the termination criteria */
    ierr = TaoMonitor(tao,iter++,ssls->merit,ndpsi,0.0,t,&reason);CHKERRQ(ierr);
    if (reason!=TAO_CONTINUE_ITERATING) break;

    /* Calculate the direction.  (Really the negative of the Newton direction;
       the rest of the code therefore uses -d.) */
    ierr = KSPSetOperators(tao->ksp,tao->jacobian,tao->jacobian_pre);CHKERRQ(ierr);
    ierr = KSPSolve(tao->ksp,ssls->ff,tao->stepdirection);CHKERRQ(ierr);
    ierr = KSPGetIterationNumber(tao->ksp,&kspits);CHKERRQ(ierr);
    tao->ksp_its+=kspits;
    ierr = VecNorm(tao->stepdirection,NORM_2,&normd);CHKERRQ(ierr);
    ierr = VecDot(tao->stepdirection,ssls->dpsi,&innerd);CHKERRQ(ierr);

    /* Make sure that we have a descent direction */
    if (innerd <= delta*pow(normd, rho)) {
      ierr = PetscInfo(tao, "newton direction not descent\n");CHKERRQ(ierr);
      ierr = VecCopy(ssls->dpsi,tao->stepdirection);CHKERRQ(ierr);
      ierr = VecDot(tao->stepdirection,ssls->dpsi,&innerd);CHKERRQ(ierr);
    }

    ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);
    innerd = -innerd;

    ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
    ierr = TaoLineSearchApply(tao->linesearch,tao->solution,&psi,ssls->dpsi,tao->stepdirection,&t,&ls_reason);CHKERRQ(ierr);
    ierr = VecNorm(ssls->dpsi,NORM_2,&ndpsi);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
Example #2
int gather(TAO_SOLVER tao, TAO_APPS *appsPtr)
{
  double f;
  int local_nevaluations;
  int nevaluations=0;
  int info;
  int myid;
  double fnorm,cnorm,xdiff;
  TaoTerminateReason reason;


  TaoFunctionBegin;


  // The main process and the cache worker process do not have anything to contribute
  myid = GCI::mytid();
  if ((myid == 0) || ((myid == GCI::getNumProcs()-1) && (appsPtr->usecache)))
  {
    local_nevaluations = 0;
  }

  else
  {
    info = TaoGetIterationData(tao, &local_nevaluations, &f, &fnorm, &cnorm, &xdiff, &reason); CHKERRQ(info);
  }

  // Combine the local nevaluations into nevaluations
  info = MPI_Reduce(&local_nevaluations, &nevaluations, 1, MPI_INT, MPI_SUM, 0, tao->comm); CHKERRQ(info);

  if (myid==0)
  {
    // Now report this information to the monitor
    info = TaoMonitor(tao, nevaluations, appsPtr->fval, 1, 0, 1, &reason); CHKERRQ(info);
  }

  TaoFunctionReturn(0);
}
Example #3
static PetscErrorCode TaoSolve_OWLQN(Tao tao)
{
  TAO_OWLQN                    *lmP = (TAO_OWLQN *)tao->data;
  PetscReal                    f, fold, gdx, gnorm;
  PetscReal                    step = 1.0;
  PetscReal                    delta;
  PetscErrorCode               ierr;
  PetscInt                     stepType;
  PetscInt                     iter = 0;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;

  PetscFunctionBegin;
  if (tao->XL || tao->XU || tao->ops->computebounds) {
    ierr = PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by owlqn algorithm\n");CHKERRQ(ierr);
  }

  /* Check convergence criteria */
  ierr = TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);CHKERRQ(ierr);

  ierr = VecCopy(tao->gradient, lmP->GV);CHKERRQ(ierr);
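  /* GV will hold the pseudo-gradient of the L1-regularized objective f(x) + lambda*||x||_1,
     which OWL-QN uses in place of the ordinary gradient */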

  ierr = ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);CHKERRQ(ierr);

  ierr = VecNorm(lmP->GV,NORM_2,&gnorm);CHKERRQ(ierr);

  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");

  ierr = TaoMonitor(tao, iter, f, gnorm, 0.0, step, &reason);CHKERRQ(ierr);
  if (reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);

  /* Set initial scaling for the function */
  if (f != 0.0) {
    delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
  } else {
    delta = 2.0 / (gnorm*gnorm);
  }
  ierr = MatLMVMSetDelta(lmP->M,delta);CHKERRQ(ierr);

  /* Set counter for gradient/reset steps */
  lmP->bfgs = 0;
  lmP->sgrad = 0;
  lmP->grad = 0;

  /* Have not converged; continue with Newton method */
  while (reason == TAO_CONTINUE_ITERATING) {
    /* Compute direction */
    ierr = MatLMVMUpdate(lmP->M,tao->solution,tao->gradient);CHKERRQ(ierr);
    ierr = MatLMVMSolve(lmP->M, lmP->GV, lmP->D);CHKERRQ(ierr);

    ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

    ++lmP->bfgs;

    /* Check for success (descent direction) */
    ierr = VecDot(lmP->D, lmP->GV , &gdx);CHKERRQ(ierr);
    if ((gdx <= 0.0) || PetscIsInfOrNanReal(gdx)) {

      /* The step is not a descent direction or the direction produced Inf/NaN.
         We can assert bfgsUpdates > 1 in this case because the first solve
         produces the scaled gradient direction, which is guaranteed to be
         descent.

         Use the scaled steepest descent direction instead */
      ++lmP->grad;

      if (f != 0.0) {
        delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
      } else {
        delta = 2.0 / (gnorm*gnorm);
      }
      ierr = MatLMVMSetDelta(lmP->M, delta);CHKERRQ(ierr);
      ierr = MatLMVMReset(lmP->M);CHKERRQ(ierr);
      ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
      ierr = MatLMVMSolve(lmP->M,lmP->GV, lmP->D);CHKERRQ(ierr);

      ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

      lmP->bfgs = 1;
      ++lmP->sgrad;
      stepType = OWLQN_SCALED_GRADIENT;
    } else {
      if (1 == lmP->bfgs) {
        /* The first BFGS direction is always the scaled gradient */
        ++lmP->sgrad;
        stepType = OWLQN_SCALED_GRADIENT;
      } else {
        ++lmP->bfgs;
        stepType = OWLQN_BFGS;
      }
    }

    ierr = VecScale(lmP->D, -1.0);CHKERRQ(ierr);

    /* Perform the linesearch */
    fold = f;
    ierr = VecCopy(tao->solution, lmP->Xold);CHKERRQ(ierr);
    ierr = VecCopy(tao->gradient, lmP->Gold);CHKERRQ(ierr);

    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, lmP->GV, lmP->D, &step,&ls_status);CHKERRQ(ierr);
    ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

    while (((int)ls_status < 0) && (stepType != OWLQN_GRADIENT)) {

      /* Reset factors and use scaled gradient step */
      f = fold;
      ierr = VecCopy(lmP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(lmP->Gold, tao->gradient);CHKERRQ(ierr);
      ierr = VecCopy(tao->gradient, lmP->GV);CHKERRQ(ierr);

      ierr = ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);CHKERRQ(ierr);

      switch(stepType) {
      case OWLQN_BFGS:
        /* Failed to obtain acceptable iterate with BFGS step
           Attempt to use the scaled gradient direction */

        if (f != 0.0) {
          delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
        } else {
          delta = 2.0 / (gnorm*gnorm);
        }
        ierr = MatLMVMSetDelta(lmP->M, delta);CHKERRQ(ierr);
        ierr = MatLMVMReset(lmP->M);CHKERRQ(ierr);
        ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
        ierr = MatLMVMSolve(lmP->M, lmP->GV, lmP->D);CHKERRQ(ierr);

        ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

        lmP->bfgs = 1;
        ++lmP->sgrad;
        stepType = OWLQN_SCALED_GRADIENT;
        break;

      case OWLQN_SCALED_GRADIENT:
        /* The scaled gradient step did not produce a new iterate;
           attempt to use the gradient direction.
           Need to make sure we are not using a different diagonal scaling */
        ierr = MatLMVMSetDelta(lmP->M, 1.0);CHKERRQ(ierr);
        ierr = MatLMVMReset(lmP->M);CHKERRQ(ierr);
        ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
        ierr = MatLMVMSolve(lmP->M, lmP->GV, lmP->D);CHKERRQ(ierr);

        ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

        lmP->bfgs = 1;
        ++lmP->grad;
        stepType = OWLQN_GRADIENT;
        break;
      }
      ierr = VecScale(lmP->D, -1.0);CHKERRQ(ierr);


      /* Perform the linesearch */
      ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, lmP->GV, lmP->D, &step, &ls_status);CHKERRQ(ierr);
      ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);
    }

    if ((int)ls_status < 0) {
      /* Failed to find an improving point*/
      f = fold;
      ierr = VecCopy(lmP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(lmP->Gold, tao->gradient);CHKERRQ(ierr);
      ierr = VecCopy(tao->gradient, lmP->GV);CHKERRQ(ierr);
      step = 0.0;
    } else {
      /* A small hack: GV is reused here to store the gradient g */
      ierr = VecCopy(lmP->GV, tao->gradient);CHKERRQ(ierr);
    }

    ierr = ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);CHKERRQ(ierr);

    /* Check for termination */

    ierr = VecNorm(lmP->GV,NORM_2,&gnorm);CHKERRQ(ierr);

    iter++;
    ierr = TaoMonitor(tao,iter,f,gnorm,0.0,step,&reason);CHKERRQ(ierr);

    if ((int)ls_status < 0) break;
  }
  PetscFunctionReturn(0);
}
Example #4
static int TaoSolve_GPCG(TAO_SOLVER tao, void *solver)
{
  TAO_GPCG *gpcg = (TAO_GPCG *)solver ;
  int info;
  TaoInt lsflag,iter=0;
  TaoTruth optimal_face=TAO_FALSE,success;
  double actred,f,f_new,f_full,gnorm,gdx,stepsize;
  double c;
  TaoVec *XU, *XL;
  TaoVec *X,  *G=gpcg->G , *B=gpcg->B, *PG=gpcg->PG;
  TaoVec *R=gpcg->R, *DXFree=gpcg->DXFree;
  TaoVec *G_New=gpcg->G_New;
  TaoVec *DX=gpcg->DX, *Work=gpcg->Work;
  TaoMat *H, *Hsub=gpcg->Hsub;
  TaoIndexSet *Free_Local = gpcg->Free_Local, *TIS=gpcg->TT;
  TaoTerminateReason reason;

  TaoFunctionBegin;

  /* Check if upper bound greater than lower bound. */
  info = TaoGetSolution(tao,&X);CHKERRQ(info);
  info = TaoGetHessian(tao,&H);CHKERRQ(info);

  info = TaoGetVariableBounds(tao,&XL,&XU);CHKERRQ(info);
  info = TaoEvaluateVariableBounds(tao,XL,XU); CHKERRQ(info);
  info = X->Median(XL,X,XU); CHKERRQ(info);

  info = TaoComputeHessian(tao,X,H); CHKERRQ(info);
  info = TaoComputeFunctionGradient(tao,X,&f,B); CHKERRQ(info);

  /* Compute quadratic representation */
  info = H->Multiply(X,Work); CHKERRQ(info);
  info = X->Dot(Work,&c); CHKERRQ(info);
  info = B->Axpy(-1.0,Work); CHKERRQ(info);
  info = X->Dot(B,&stepsize); CHKERRQ(info);
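  /* Constant term of the quadratic model q(y) = 0.5*y'Hy + b'y + c, chosen so that q(x) equals f at the current point */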
  gpcg->c=f-c/2.0-stepsize;

  info = Free_Local->WhichBetween(XL,X,XU); CHKERRQ(info);
  
  info = TaoGPCGComputeFunctionGradient(tao, X, &gpcg->f, G); CHKERRQ(info);
  
  /* Project the gradient and calculate the norm */
  info = G_New->CopyFrom(G);CHKERRQ(info);
  info = PG->BoundGradientProjection(G,XL,X,XU);CHKERRQ(info);
  info = PG->Norm2(&gpcg->gnorm); CHKERRQ(info);
  gpcg->step=1.0;

  /* Check stopping condition */
  info=TaoMonitor(tao,iter++,gpcg->f,gpcg->gnorm,0,gpcg->step,&reason); CHKERRQ(info);

  while (reason == TAO_CONTINUE_ITERATING){

    info = TaoGradProjections(tao, gpcg); CHKERRQ(info);

    info = Free_Local->WhichBetween(XL,X,XU); CHKERRQ(info);
    info = Free_Local->GetSize(&gpcg->n_free); CHKERRQ(info);
    f=gpcg->f; gnorm=gpcg->gnorm; 

    if (gpcg->n_free > 0){
      
      /* Create a reduced linear system */
      info = R->SetReducedVec(G,Free_Local);CHKERRQ(info);
      info = R->Negate(); CHKERRQ(info);
      info = DXFree->SetReducedVec(DX,Free_Local);CHKERRQ(info);
      info = DXFree->SetToZero(); CHKERRQ(info);

      info = Hsub->SetReducedMatrix(H,Free_Local,Free_Local);CHKERRQ(info);

      info = TaoPreLinearSolve(tao,Hsub);CHKERRQ(info);

      /* Approximately solve the reduced linear system */
      info = TaoLinearSolve(tao,Hsub,R,DXFree,&success);CHKERRQ(info);
      
      info=DX->SetToZero(); CHKERRQ(info);
      info=DX->ReducedXPY(DXFree,Free_Local);CHKERRQ(info);
      
      info = G->Dot(DX,&gdx); CHKERRQ(info);
      
      stepsize=1.0; f_new=f;
      info = TaoLineSearchApply(tao,X,G,DX,Work,
				&f_new,&f_full,&stepsize,&lsflag);
      CHKERRQ(info);
      
      actred = f_new - f;
      
      /* Evaluate the function and gradient at the new point */      
      info = PG->BoundGradientProjection(G,XL,X,XU); CHKERRQ(info);
      info = PG->Norm2(&gnorm);  CHKERRQ(info);      
      f=f_new;
      
      info = GPCGCheckOptimalFace(X,XL,XU,PG,Work, Free_Local, TIS,
				  &optimal_face); CHKERRQ(info);
      
    } else {
      
      actred = 0; stepsize=1.0;
      /* if there were no free variables, no cg method */

    }

    info = TaoMonitor(tao,iter,f,gnorm,0.0,stepsize,&reason); CHKERRQ(info);
    gpcg->f=f;gpcg->gnorm=gnorm; gpcg->actred=actred;
    if (reason!=TAO_CONTINUE_ITERATING) break;
    iter++;


  }  /* END MAIN LOOP  */

  TaoFunctionReturn(0);
}
Example #5
static int TaoSolve_SSFLS(TAO_SOLVER tao, void *solver)
{
  TAO_SSLS *ssls = (TAO_SSLS *)solver;
  //  TaoLinearSolver *lsolver;
  TaoVec *x, *l, *u, *ff, *dpsi, *d, *w;
  TaoMat *J;
  double psi, psi_full, ndpsi, normd, innerd, t=0;
  double delta, rho;
  int iter=0, info;
  TaoTerminateReason reason;
  TaoTruth flag;

  TaoFunctionBegin;

  // Assume that Setup has been called!
  // Set the structure for the Jacobian and create a linear solver.
 
  delta = ssls->delta;
  rho = ssls->rho;

  info = TaoGetSolution(tao, &x); CHKERRQ(info);
  l=ssls->xl;
  u=ssls->xu;
  info = TaoEvaluateVariableBounds(tao,l,u); CHKERRQ(info);
  info = x->Median(l,x,u); CHKERRQ(info);
  info = TaoGetJacobian(tao, &J); CHKERRQ(info);

  ff = ssls->ff;
  dpsi = ssls->dpsi;
  d = ssls->d;
  w = ssls->w;

  info = x->PointwiseMaximum(x, l); CHKERRQ(info);
  info = x->PointwiseMinimum(x, u); CHKERRQ(info);
  info = TaoSetMeritFunction(tao, Tao_SSLS_Function, Tao_SSLS_FunctionGradient,
			     TAO_NULL, TAO_NULL, TAO_NULL, ssls); CHKERRQ(info);

  // Calculate the function value and the Fischer function value at the
  // current iterate
  info = TaoComputeMeritFunctionGradient(tao, x, &psi, dpsi); CHKERRQ(info);
  info = dpsi->Norm2(&ndpsi); CHKERRQ(info);

  while (1) {
    info=PetscInfo3(tao, "TaoSolve_SSFLS: %d: merit: %5.4e, ndpsi: %5.4e\n",
		       iter, ssls->merit, ndpsi);CHKERRQ(info);

    // Check the termination criteria
    info = TaoMonitor(tao,iter++,ssls->merit,ndpsi,0.0,t,&reason); CHKERRQ(info);
    if (reason!=TAO_CONTINUE_ITERATING) break;

    // Calculate the direction.  (Really the negative of the Newton direction;
    // the rest of the code therefore uses -d.)
    info = TaoPreLinearSolve(tao, J); CHKERRQ(info);
    info = TaoLinearSolve(tao, J, ff, d, &flag); CHKERRQ(info);
    
    info = w->CopyFrom(d); CHKERRQ(info);
    info = w->Negate(); CHKERRQ(info);
    info = w->BoundGradientProjection(w,l, x, u); CHKERRQ(info);

    info = w->Norm2(&normd); CHKERRQ(info);
    info = w->Dot(dpsi, &innerd); CHKERRQ(info);

    // Make sure that we have a descent direction
    if (innerd >= -delta*pow(normd, rho)) {
      info = PetscInfo1(tao, "TaoSolve_SSFLS: %d: newton direction not descent\n", iter); CHKERRQ(info);
      info = d->CopyFrom(dpsi); CHKERRQ(info);
      info = w->Dot(dpsi, &innerd); CHKERRQ(info);
    }
    info = d->Negate(); CHKERRQ(info);
    innerd = -innerd;

    t = 1;
    info = TaoLineSearchApply(tao, x, dpsi, d, w, 
                              &psi, &psi_full, &t, &tao->lsflag); CHKERRQ(info);
    info = dpsi->Norm2(&ndpsi); CHKERRQ(info);
  }
  TaoFunctionReturn(0);
}
Example #6
static PetscErrorCode TaoSolve_TRON(Tao tao)
{
  TAO_TRON                     *tron = (TAO_TRON *)tao->data;
  PetscErrorCode               ierr;
  PetscInt                     its;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_reason = TAOLINESEARCH_CONTINUE_ITERATING;
  PetscReal                    prered,actred,delta,f,f_new,rhok,gdx,xdiff,stepsize;

  PetscFunctionBegin;
  tron->pgstepsize=1.0;
  tao->trust = tao->trust0;
  /*   Project the current point onto the feasible set */
  ierr = TaoComputeVariableBounds(tao);CHKERRQ(ierr);
  ierr = VecMedian(tao->XL,tao->solution,tao->XU,tao->solution);CHKERRQ(ierr);
  ierr = TaoLineSearchSetVariableBounds(tao->linesearch,tao->XL,tao->XU);CHKERRQ(ierr);

  ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&tron->f,tao->gradient);CHKERRQ(ierr);
  ierr = ISDestroy(&tron->Free_Local);CHKERRQ(ierr);

  ierr = VecWhichBetween(tao->XL,tao->solution,tao->XU,&tron->Free_Local);CHKERRQ(ierr);

  /* Project the gradient and calculate the norm */
  ierr = VecBoundGradientProjection(tao->gradient,tao->solution, tao->XL, tao->XU, tao->gradient);CHKERRQ(ierr);
  ierr = VecNorm(tao->gradient,NORM_2,&tron->gnorm);CHKERRQ(ierr);

  if (PetscIsInfOrNanReal(tron->f) || PetscIsInfOrNanReal(tron->gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
  if (tao->trust <= 0) {
    tao->trust=PetscMax(tron->gnorm*tron->gnorm,1.0);
  }

  tron->stepsize=tao->trust;
  ierr = TaoMonitor(tao, tao->niter, tron->f, tron->gnorm, 0.0, tron->stepsize, &reason);CHKERRQ(ierr);
  while (reason==TAO_CONTINUE_ITERATING){
    tao->ksp_its=0;
    ierr = TronGradientProjections(tao,tron);CHKERRQ(ierr);
    f=tron->f; delta=tao->trust;
    tron->n_free_last = tron->n_free;
    ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);

    ierr = ISGetSize(tron->Free_Local, &tron->n_free);CHKERRQ(ierr);

    /* If no free variables */
    if (tron->n_free == 0) {
      actred=0;
      ierr = PetscInfo(tao,"No free variables in tron iteration.\n");CHKERRQ(ierr);
      ierr = VecNorm(tao->gradient,NORM_2,&tron->gnorm);CHKERRQ(ierr);
      ierr = TaoMonitor(tao, tao->niter, tron->f, tron->gnorm, 0.0, delta, &reason);CHKERRQ(ierr);
      if (!reason) {
        reason = TAO_CONVERGED_STEPTOL;
        ierr = TaoSetConvergedReason(tao,reason);CHKERRQ(ierr);
      }

      break;

    }
    /* use free_local to mask/submat gradient, hessian, stepdirection */
    ierr = TaoVecGetSubVec(tao->gradient,tron->Free_Local,tao->subset_type,0.0,&tron->R);CHKERRQ(ierr);
    ierr = TaoVecGetSubVec(tao->gradient,tron->Free_Local,tao->subset_type,0.0,&tron->DXFree);CHKERRQ(ierr);
    ierr = VecSet(tron->DXFree,0.0);CHKERRQ(ierr);
    ierr = VecScale(tron->R, -1.0);CHKERRQ(ierr);
    ierr = TaoMatGetSubMat(tao->hessian, tron->Free_Local, tron->diag, tao->subset_type, &tron->H_sub);CHKERRQ(ierr);
    if (tao->hessian == tao->hessian_pre) {
      ierr = MatDestroy(&tron->Hpre_sub);CHKERRQ(ierr);
      ierr = PetscObjectReference((PetscObject)(tron->H_sub));CHKERRQ(ierr);
      tron->Hpre_sub = tron->H_sub;
    } else {
      ierr = TaoMatGetSubMat(tao->hessian_pre, tron->Free_Local, tron->diag, tao->subset_type,&tron->Hpre_sub);CHKERRQ(ierr);
    }
    ierr = KSPReset(tao->ksp);CHKERRQ(ierr);
    ierr = KSPSetOperators(tao->ksp, tron->H_sub, tron->Hpre_sub);CHKERRQ(ierr);
    while (1) {

      /* Approximately solve the reduced linear system */
      ierr = KSPSTCGSetRadius(tao->ksp,delta);CHKERRQ(ierr);

      ierr = KSPSolve(tao->ksp, tron->R, tron->DXFree);CHKERRQ(ierr);
      ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
      tao->ksp_its+=its;
      tao->ksp_tot_its+=its;
      ierr = VecSet(tao->stepdirection,0.0);CHKERRQ(ierr);

      /* Add dxfree matrix to compute step direction vector */
      ierr = VecISAXPY(tao->stepdirection,tron->Free_Local,1.0,tron->DXFree);CHKERRQ(ierr);
      if (0) {
        PetscReal rhs,stepnorm;
        ierr = VecNorm(tron->R,NORM_2,&rhs);CHKERRQ(ierr);
        ierr = VecNorm(tron->DXFree,NORM_2,&stepnorm);CHKERRQ(ierr);
        ierr = PetscPrintf(PETSC_COMM_WORLD,"|rhs|=%g\t|s|=%g\n",(double)rhs,(double)stepnorm);CHKERRQ(ierr);
      }


      ierr = VecDot(tao->gradient, tao->stepdirection, &gdx);CHKERRQ(ierr);
      ierr = PetscInfo1(tao,"Expected decrease in function value: %14.12e\n",(double)gdx);CHKERRQ(ierr);

      ierr = VecCopy(tao->solution, tron->X_New);CHKERRQ(ierr);
      ierr = VecCopy(tao->gradient, tron->G_New);CHKERRQ(ierr);

      stepsize=1.0;f_new=f;

      ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
      ierr = TaoLineSearchApply(tao->linesearch, tron->X_New, &f_new, tron->G_New, tao->stepdirection,&stepsize,&ls_reason);CHKERRQ(ierr);
      ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

      ierr = MatMult(tao->hessian, tao->stepdirection, tron->Work);CHKERRQ(ierr);
      ierr = VecAYPX(tron->Work, 0.5, tao->gradient);CHKERRQ(ierr);
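      /* Work now holds g + 0.5*H*d, so the dot product below gives the model change prered = g'd + 0.5*d'Hd */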
      ierr = VecDot(tao->stepdirection, tron->Work, &prered);CHKERRQ(ierr);
      actred = f_new - f;
      if (actred<0) {
        rhok=PetscAbs(-actred/prered);
      } else {
        rhok=0.0;
      }

      /* Compare actual improvement to the quadratic model */
      if (rhok > tron->eta1) { /* Accept the point */
        /* d = x_new - x */
        ierr = VecCopy(tron->X_New, tao->stepdirection);CHKERRQ(ierr);
        ierr = VecAXPY(tao->stepdirection, -1.0, tao->solution);CHKERRQ(ierr);

        ierr = VecNorm(tao->stepdirection, NORM_2, &xdiff);CHKERRQ(ierr);
        xdiff *= stepsize;

        /* Adjust trust region size */
        if (rhok < tron->eta2 ){
          delta = PetscMin(xdiff,delta)*tron->sigma1;
        } else if (rhok > tron->eta4 ){
          delta= PetscMin(xdiff,delta)*tron->sigma3;
        } else if (rhok > tron->eta3 ){
          delta=PetscMin(xdiff,delta)*tron->sigma2;
        }
        ierr = VecBoundGradientProjection(tron->G_New,tron->X_New, tao->XL, tao->XU, tao->gradient);CHKERRQ(ierr);
        if (tron->Free_Local) {
          ierr = ISDestroy(&tron->Free_Local);CHKERRQ(ierr);
        }
        ierr = VecWhichBetween(tao->XL, tron->X_New, tao->XU, &tron->Free_Local);CHKERRQ(ierr);
        f=f_new;
        ierr = VecNorm(tao->gradient,NORM_2,&tron->gnorm);CHKERRQ(ierr);
        ierr = VecCopy(tron->X_New, tao->solution);CHKERRQ(ierr);
        ierr = VecCopy(tron->G_New, tao->gradient);CHKERRQ(ierr);
        break;
      }
      else if (delta <= 1e-30) {
        break;
      }
      else {
        delta /= 4.0;
      }
    } /* end linear solve loop */


    tron->f=f; tron->actred=actred; tao->trust=delta;
    tao->niter++;
    ierr = TaoMonitor(tao, tao->niter, tron->f, tron->gnorm, 0.0, delta, &reason);CHKERRQ(ierr);
  }  /* END MAIN LOOP  */

  PetscFunctionReturn(0);
}
Example #7
static PetscErrorCode TaoSolve_IPM(Tao tao)
{
  PetscErrorCode     ierr;
  TAO_IPM            *ipmP = (TAO_IPM*)tao->data;
  TaoConvergedReason reason = TAO_CONTINUE_ITERATING;
  PetscInt           its,i;
  PetscScalar        stepsize=1.0;
  PetscScalar        step_s,step_l,alpha,tau,sigma,phi_target;

  PetscFunctionBegin;
  /* Push initial point away from bounds */
  ierr = IPMInitializeBounds(tao);CHKERRQ(ierr);
  ierr = IPMPushInitialPoint(tao);CHKERRQ(ierr);
  ierr = VecCopy(tao->solution,ipmP->rhs_x);CHKERRQ(ierr);
  ierr = IPMEvaluate(tao);CHKERRQ(ierr);
  ierr = IPMComputeKKT(tao);CHKERRQ(ierr);
  ierr = TaoMonitor(tao,tao->niter++,ipmP->kkt_f,ipmP->phi,0.0,1.0,&reason);CHKERRQ(ierr);

  while (reason == TAO_CONTINUE_ITERATING) {
    tao->ksp_its=0;
    ierr = IPMUpdateK(tao);CHKERRQ(ierr);
    /*
       rhs.x    = -rd
       rhs.lame = -rpe
       rhs.lami = -rpi
       rhs.com  = -com
    */

    ierr = VecCopy(ipmP->rd,ipmP->rhs_x);CHKERRQ(ierr);
    if (ipmP->me > 0) {
      ierr = VecCopy(ipmP->rpe,ipmP->rhs_lamdae);CHKERRQ(ierr);
    }
    if (ipmP->nb > 0) {
      ierr = VecCopy(ipmP->rpi,ipmP->rhs_lamdai);CHKERRQ(ierr);
      ierr = VecCopy(ipmP->complementarity,ipmP->rhs_s);CHKERRQ(ierr);
    }
    ierr = IPMGatherRHS(tao,ipmP->bigrhs,ipmP->rhs_x,ipmP->rhs_lamdae,ipmP->rhs_lamdai,ipmP->rhs_s);CHKERRQ(ierr);
    ierr = VecScale(ipmP->bigrhs,-1.0);CHKERRQ(ierr);

    /* solve K * step = rhs */
    ierr = KSPSetOperators(tao->ksp,ipmP->K,ipmP->K);CHKERRQ(ierr);
    ierr = KSPSolve(tao->ksp,ipmP->bigrhs,ipmP->bigstep);CHKERRQ(ierr);

    ierr = IPMScatterStep(tao,ipmP->bigstep,tao->stepdirection,ipmP->ds,ipmP->dlamdae,ipmP->dlamdai);CHKERRQ(ierr);
    ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
    tao->ksp_its += its;
    tao->ksp_tot_its+=its;
     /* Find distance along step direction to closest bound */
    if (ipmP->nb > 0) {
      ierr = VecStepBoundInfo(ipmP->s,ipmP->ds,ipmP->Zero_nb,ipmP->Inf_nb,&step_s,NULL,NULL);CHKERRQ(ierr);
      ierr = VecStepBoundInfo(ipmP->lamdai,ipmP->dlamdai,ipmP->Zero_nb,ipmP->Inf_nb,&step_l,NULL,NULL);CHKERRQ(ierr);
      alpha = PetscMin(step_s,step_l);
      alpha = PetscMin(alpha,1.0);
      ipmP->alpha1 = alpha;
    } else {
      ipmP->alpha1 = alpha = 1.0;
    }

    /* x_aff = x + alpha*d */
    ierr = VecCopy(tao->solution,ipmP->save_x);CHKERRQ(ierr);
    if (ipmP->me > 0) {
      ierr = VecCopy(ipmP->lamdae,ipmP->save_lamdae);CHKERRQ(ierr);
    }
    if (ipmP->nb > 0) {
      ierr = VecCopy(ipmP->lamdai,ipmP->save_lamdai);CHKERRQ(ierr);
      ierr = VecCopy(ipmP->s,ipmP->save_s);CHKERRQ(ierr);
    }

    ierr = VecAXPY(tao->solution,alpha,tao->stepdirection);CHKERRQ(ierr);
    if (ipmP->me > 0) {
      ierr = VecAXPY(ipmP->lamdae,alpha,ipmP->dlamdae);CHKERRQ(ierr);
    }
    if (ipmP->nb > 0) {
      ierr = VecAXPY(ipmP->lamdai,alpha,ipmP->dlamdai);CHKERRQ(ierr);
      ierr = VecAXPY(ipmP->s,alpha,ipmP->ds);CHKERRQ(ierr);
    }

    /* Recompute kkt to find centering parameter sigma = (new_mu/old_mu)^3 */
    if (ipmP->mu == 0.0) {
      sigma = 0.0;
    } else {
      sigma = 1.0/ipmP->mu;
    }
    ierr = IPMComputeKKT(tao);CHKERRQ(ierr);
    sigma *= ipmP->mu;
    sigma*=sigma*sigma;

    /* revert kkt info */
    ierr = VecCopy(ipmP->save_x,tao->solution);CHKERRQ(ierr);
    if (ipmP->me > 0) {
      ierr = VecCopy(ipmP->save_lamdae,ipmP->lamdae);CHKERRQ(ierr);
    }
    if (ipmP->nb > 0) {
      ierr = VecCopy(ipmP->save_lamdai,ipmP->lamdai);CHKERRQ(ierr);
      ierr = VecCopy(ipmP->save_s,ipmP->s);CHKERRQ(ierr);
    }
    ierr = IPMComputeKKT(tao);CHKERRQ(ierr);

    /* update rhs with new complementarity vector */
    if (ipmP->nb > 0) {
      ierr = VecCopy(ipmP->complementarity,ipmP->rhs_s);CHKERRQ(ierr);
      ierr = VecScale(ipmP->rhs_s,-1.0);CHKERRQ(ierr);
      ierr = VecShift(ipmP->rhs_s,sigma*ipmP->mu);CHKERRQ(ierr);
    }
    ierr = IPMGatherRHS(tao,ipmP->bigrhs,NULL,NULL,NULL,ipmP->rhs_s);CHKERRQ(ierr);

    /* solve K * step = rhs */
    ierr = KSPSetOperators(tao->ksp,ipmP->K,ipmP->K);CHKERRQ(ierr);
    ierr = KSPSolve(tao->ksp,ipmP->bigrhs,ipmP->bigstep);CHKERRQ(ierr);

    ierr = IPMScatterStep(tao,ipmP->bigstep,tao->stepdirection,ipmP->ds,ipmP->dlamdae,ipmP->dlamdai);CHKERRQ(ierr);
    ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
    tao->ksp_its += its;
    tao->ksp_tot_its+=its;
    if (ipmP->nb > 0) {
      /* Get max step size and apply frac-to-boundary */
      tau = PetscMax(ipmP->taumin,1.0-ipmP->mu);
      tau = PetscMin(tau,1.0);
      if (tau != 1.0) {
        ierr = VecScale(ipmP->s,tau);CHKERRQ(ierr);
        ierr = VecScale(ipmP->lamdai,tau);CHKERRQ(ierr);
      }
      ierr = VecStepBoundInfo(ipmP->s,ipmP->ds,ipmP->Zero_nb,ipmP->Inf_nb,&step_s,NULL,NULL);CHKERRQ(ierr);
      ierr = VecStepBoundInfo(ipmP->lamdai,ipmP->dlamdai,ipmP->Zero_nb,ipmP->Inf_nb,&step_l,NULL,NULL);CHKERRQ(ierr);
      if (tau != 1.0) {
        ierr = VecCopy(ipmP->save_s,ipmP->s);CHKERRQ(ierr);
        ierr = VecCopy(ipmP->save_lamdai,ipmP->lamdai);CHKERRQ(ierr);
      }
      alpha = PetscMin(step_s,step_l);
      alpha = PetscMin(alpha,1.0);
    } else {
      alpha = 1.0;
    }
    ipmP->alpha2 = alpha;
    /* TODO make phi_target meaningful */
    phi_target = ipmP->dec * ipmP->phi;
    for (i=0; i<11;i++) {
      ierr = VecAXPY(tao->solution,alpha,tao->stepdirection);CHKERRQ(ierr);
      if (ipmP->nb > 0) {
        ierr = VecAXPY(ipmP->s,alpha,ipmP->ds);CHKERRQ(ierr);
        ierr = VecAXPY(ipmP->lamdai,alpha,ipmP->dlamdai);CHKERRQ(ierr);
      }
      if (ipmP->me > 0) {
        ierr = VecAXPY(ipmP->lamdae,alpha,ipmP->dlamdae);CHKERRQ(ierr);
      }

      /* update dual variables */
      if (ipmP->me > 0) {
        ierr = VecCopy(ipmP->lamdae,tao->DE);CHKERRQ(ierr);
      }

      ierr = IPMEvaluate(tao);CHKERRQ(ierr);
      ierr = IPMComputeKKT(tao);CHKERRQ(ierr);
      if (ipmP->phi <= phi_target) break;
      alpha /= 2.0;
    }

    ierr = TaoMonitor(tao,tao->niter,ipmP->kkt_f,ipmP->phi,0.0,stepsize,&reason);CHKERRQ(ierr);
    tao->niter++;
  }
  PetscFunctionReturn(0);
}
Example #8
static PetscErrorCode TaoSolve_NTR(Tao tao)
{
  TAO_NTR            *tr = (TAO_NTR *)tao->data;
  PC                 pc;
  KSPConvergedReason ksp_reason;
  TaoConvergedReason reason;
  PetscReal          fmin, ftrial, prered, actred, kappa, sigma, beta;
  PetscReal          tau, tau_1, tau_2, tau_max, tau_min, max_radius;
  PetscReal          f, gnorm;

  PetscReal          delta;
  PetscReal          norm_d;
  PetscErrorCode     ierr;
  PetscInt           iter = 0;
  PetscInt           bfgsUpdates = 0;
  PetscInt           needH;

  PetscInt           i_max = 5;
  PetscInt           j_max = 1;
  PetscInt           i, j, N, n, its;

  PetscFunctionBegin;
  if (tao->XL || tao->XU || tao->ops->computebounds) {
    ierr = PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by ntr algorithm\n");CHKERRQ(ierr);
  }

  tao->trust = tao->trust0;

  /* Modify the radius if it is too large or small */
  tao->trust = PetscMax(tao->trust, tr->min_radius);
  tao->trust = PetscMin(tao->trust, tr->max_radius);


  if (NTR_PC_BFGS == tr->pc_type && !tr->M) {
    ierr = VecGetLocalSize(tao->solution,&n);CHKERRQ(ierr);
    ierr = VecGetSize(tao->solution,&N);CHKERRQ(ierr);
    ierr = MatCreateLMVM(((PetscObject)tao)->comm,n,N,&tr->M);CHKERRQ(ierr);
    ierr = MatLMVMAllocateVectors(tr->M,tao->solution);CHKERRQ(ierr);
  }

  /* Check convergence criteria */
  ierr = TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);CHKERRQ(ierr);
  ierr = VecNorm(tao->gradient,NORM_2,&gnorm);CHKERRQ(ierr);
  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
  needH = 1;

  ierr = TaoMonitor(tao, iter, f, gnorm, 0.0, 1.0, &reason);CHKERRQ(ierr);
  if (reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);

  /* Create vectors for the limited memory preconditioner */
  if ((NTR_PC_BFGS == tr->pc_type) &&
      (BFGS_SCALE_BFGS != tr->bfgs_scale_type)) {
    if (!tr->Diag) {
        ierr = VecDuplicate(tao->solution, &tr->Diag);CHKERRQ(ierr);
    }
  }

  switch(tr->ksp_type) {
  case NTR_KSP_NASH:
    ierr = KSPSetType(tao->ksp, KSPNASH);CHKERRQ(ierr);
    if (tao->ksp->ops->setfromoptions) {
      (*tao->ksp->ops->setfromoptions)(tao->ksp);
    }
    break;

  case NTR_KSP_STCG:
    ierr = KSPSetType(tao->ksp, KSPSTCG);CHKERRQ(ierr);
    if (tao->ksp->ops->setfromoptions) {
      (*tao->ksp->ops->setfromoptions)(tao->ksp);
    }
    break;

  default:
    ierr = KSPSetType(tao->ksp, KSPGLTR);CHKERRQ(ierr);
    if (tao->ksp->ops->setfromoptions) {
      (*tao->ksp->ops->setfromoptions)(tao->ksp);
    }
    break;
  }

  /*  Modify the preconditioner to use the bfgs approximation */
  ierr = KSPGetPC(tao->ksp, &pc);CHKERRQ(ierr);
  switch(tr->pc_type) {
  case NTR_PC_NONE:
    ierr = PCSetType(pc, PCNONE);CHKERRQ(ierr);
    if (pc->ops->setfromoptions) {
      (*pc->ops->setfromoptions)(pc);
    }
    break;

  case NTR_PC_AHESS:
    ierr = PCSetType(pc, PCJACOBI);CHKERRQ(ierr);
    if (pc->ops->setfromoptions) {
      (*pc->ops->setfromoptions)(pc);
    }
    ierr = PCJacobiSetUseAbs(pc);CHKERRQ(ierr);
    break;

  case NTR_PC_BFGS:
    ierr = PCSetType(pc, PCSHELL);CHKERRQ(ierr);
    if (pc->ops->setfromoptions) {
      (*pc->ops->setfromoptions)(pc);
    }
    ierr = PCShellSetName(pc, "bfgs");CHKERRQ(ierr);
    ierr = PCShellSetContext(pc, tr->M);CHKERRQ(ierr);
    ierr = PCShellSetApply(pc, MatLMVMSolveShell);CHKERRQ(ierr);
    break;

  default:
    /*  Use the pc method set by pc_type */
    break;
  }

  /*  Initialize trust-region radius */
  switch(tr->init_type) {
  case NTR_INIT_CONSTANT:
    /*  Use the initial radius specified */
    break;

  case NTR_INIT_INTERPOLATION:
    /*  Use the initial radius specified */
    max_radius = 0.0;

    for (j = 0; j < j_max; ++j) {
      fmin = f;
      sigma = 0.0;

      if (needH) {
        ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);
        needH = 0;
      }

      for (i = 0; i < i_max; ++i) {

        ierr = VecCopy(tao->solution, tr->W);CHKERRQ(ierr);
        ierr = VecAXPY(tr->W, -tao->trust/gnorm, tao->gradient);CHKERRQ(ierr);
        ierr = TaoComputeObjective(tao, tr->W, &ftrial);CHKERRQ(ierr);

        if (PetscIsInfOrNanReal(ftrial)) {
          tau = tr->gamma1_i;
        }
        else {
          if (ftrial < fmin) {
            fmin = ftrial;
            sigma = -tao->trust / gnorm;
          }

          ierr = MatMult(tao->hessian, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
          ierr = VecDot(tao->gradient, tao->stepdirection, &prered);CHKERRQ(ierr);
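          /* prered now holds g'Hg; the next line converts it to the model reduction for the step -(trust/gnorm)*g */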

          prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
          actred = f - ftrial;
          if ((PetscAbsScalar(actred) <= tr->epsilon) &&
              (PetscAbsScalar(prered) <= tr->epsilon)) {
            kappa = 1.0;
          }
          else {
            kappa = actred / prered;
          }

          tau_1 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust + (1.0 - tr->theta_i) * prered - actred);
          tau_2 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust - (1.0 + tr->theta_i) * prered + actred);
          tau_min = PetscMin(tau_1, tau_2);
          tau_max = PetscMax(tau_1, tau_2);

          if (PetscAbsScalar(kappa - 1.0) <= tr->mu1_i) {
            /*  Great agreement */
            max_radius = PetscMax(max_radius, tao->trust);

            if (tau_max < 1.0) {
              tau = tr->gamma3_i;
            }
            else if (tau_max > tr->gamma4_i) {
              tau = tr->gamma4_i;
            }
            else {
              tau = tau_max;
            }
          }
          else if (PetscAbsScalar(kappa - 1.0) <= tr->mu2_i) {
            /*  Good agreement */
            max_radius = PetscMax(max_radius, tao->trust);

            if (tau_max < tr->gamma2_i) {
              tau = tr->gamma2_i;
            }
            else if (tau_max > tr->gamma3_i) {
              tau = tr->gamma3_i;
            }
            else {
              tau = tau_max;
            }
          }
          else {
            /*  Not good agreement */
            if (tau_min > 1.0) {
              tau = tr->gamma2_i;
            }
            else if (tau_max < tr->gamma1_i) {
              tau = tr->gamma1_i;
            }
            else if ((tau_min < tr->gamma1_i) && (tau_max >= 1.0)) {
              tau = tr->gamma1_i;
            }
            else if ((tau_1 >= tr->gamma1_i) && (tau_1 < 1.0) &&
                     ((tau_2 < tr->gamma1_i) || (tau_2 >= 1.0))) {
              tau = tau_1;
            }
            else if ((tau_2 >= tr->gamma1_i) && (tau_2 < 1.0) &&
                     ((tau_1 < tr->gamma1_i) || (tau_2 >= 1.0))) {
              tau = tau_2;
            }
            else {
              tau = tau_max;
            }
          }
        }
        tao->trust = tau * tao->trust;
      }

      if (fmin < f) {
        f = fmin;
        ierr = VecAXPY(tao->solution, sigma, tao->gradient);CHKERRQ(ierr);
        ierr = TaoComputeGradient(tao,tao->solution, tao->gradient);CHKERRQ(ierr);

        ierr = VecNorm(tao->gradient, NORM_2, &gnorm);CHKERRQ(ierr);

        if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
        needH = 1;

        ierr = TaoMonitor(tao, iter, f, gnorm, 0.0, 1.0, &reason);CHKERRQ(ierr);
        if (reason != TAO_CONTINUE_ITERATING) {
          PetscFunctionReturn(0);
        }
      }
    }
    tao->trust = PetscMax(tao->trust, max_radius);

    /*  Modify the radius if it is too large or small */
    tao->trust = PetscMax(tao->trust, tr->min_radius);
    tao->trust = PetscMin(tao->trust, tr->max_radius);
    break;

  default:
    /*  Norm of the first direction will initialize radius */
    tao->trust = 0.0;
    break;
  }

  /* Set initial scaling for the BFGS preconditioner
     This step is done after computing the initial trust-region radius
     since the function value may have decreased */
  if (NTR_PC_BFGS == tr->pc_type) {
    if (f != 0.0) {
      delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
    }
    else {
      delta = 2.0 / (gnorm*gnorm);
    }
    ierr = MatLMVMSetDelta(tr->M,delta);CHKERRQ(ierr);
  }

  /* Have not converged; continue with Newton method */
  while (reason == TAO_CONTINUE_ITERATING) {
    ++iter;
    tao->ksp_its=0;
    /* Compute the Hessian */
    if (needH) {
      ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);
      needH = 0;
    }

    if (NTR_PC_BFGS == tr->pc_type) {
      if (BFGS_SCALE_AHESS == tr->bfgs_scale_type) {
        /* Obtain diagonal for the bfgs preconditioner */
        ierr = MatGetDiagonal(tao->hessian, tr->Diag);CHKERRQ(ierr);
        ierr = VecAbs(tr->Diag);CHKERRQ(ierr);
        ierr = VecReciprocal(tr->Diag);CHKERRQ(ierr);
        ierr = MatLMVMSetScale(tr->M,tr->Diag);CHKERRQ(ierr);
      }

      /* Update the limited memory preconditioner */
      ierr = MatLMVMUpdate(tr->M, tao->solution, tao->gradient);CHKERRQ(ierr);
      ++bfgsUpdates;
    }

    while (reason == TAO_CONTINUE_ITERATING) {
      ierr = KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);CHKERRQ(ierr);

      /* Solve the trust region subproblem */
      if (NTR_KSP_NASH == tr->ksp_type) {
        ierr = KSPNASHSetRadius(tao->ksp,tao->trust);CHKERRQ(ierr);
        ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
        ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
        tao->ksp_its+=its;
        tao->ksp_tot_its+=its;
        ierr = KSPNASHGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
      } else if (NTR_KSP_STCG == tr->ksp_type) {
        ierr = KSPSTCGSetRadius(tao->ksp,tao->trust);CHKERRQ(ierr);
        ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
        ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
        tao->ksp_its+=its;
        tao->ksp_tot_its+=its;
        ierr = KSPSTCGGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
      } else { /* NTR_KSP_GLTR */
        ierr = KSPGLTRSetRadius(tao->ksp,tao->trust);CHKERRQ(ierr);
        ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
        ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
        tao->ksp_its+=its;
        tao->ksp_tot_its+=its;
        ierr = KSPGLTRGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
      }

      if (0.0 == tao->trust) {
        /* Radius was uninitialized; use the norm of the direction */
        if (norm_d > 0.0) {
          tao->trust = norm_d;

          /* Modify the radius if it is too large or small */
          tao->trust = PetscMax(tao->trust, tr->min_radius);
          tao->trust = PetscMin(tao->trust, tr->max_radius);
        }
        else {
          /* The direction was bad; set radius to default value and re-solve
             the trust-region subproblem to get a direction */
          tao->trust = tao->trust0;

          /* Modify the radius if it is too large or small */
          tao->trust = PetscMax(tao->trust, tr->min_radius);
          tao->trust = PetscMin(tao->trust, tr->max_radius);

          if (NTR_KSP_NASH == tr->ksp_type) {
            ierr = KSPNASHSetRadius(tao->ksp,tao->trust);CHKERRQ(ierr);
            ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
            ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
            tao->ksp_its+=its;
            tao->ksp_tot_its+=its;
            ierr = KSPNASHGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
          } else if (NTR_KSP_STCG == tr->ksp_type) {
            ierr = KSPSTCGSetRadius(tao->ksp,tao->trust);CHKERRQ(ierr);
            ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
            ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
            tao->ksp_its+=its;
            tao->ksp_tot_its+=its;
            ierr = KSPSTCGGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
          } else { /* NTR_KSP_GLTR */
            ierr = KSPGLTRSetRadius(tao->ksp,tao->trust);CHKERRQ(ierr);
            ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
            ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
            tao->ksp_its+=its;
            tao->ksp_tot_its+=its;
            ierr = KSPGLTRGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
          }

          if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
        }
      }
      ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);
      ierr = KSPGetConvergedReason(tao->ksp, &ksp_reason);CHKERRQ(ierr);
      if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) &&
          (NTR_PC_BFGS == tr->pc_type) && (bfgsUpdates > 1)) {
        /* Preconditioner is numerically indefinite; reset the
           approximate if using BFGS preconditioning. */

        if (f != 0.0) {
          delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
        }
        else {
          delta = 2.0 / (gnorm*gnorm);
        }
        ierr = MatLMVMSetDelta(tr->M, delta);CHKERRQ(ierr);
        ierr = MatLMVMReset(tr->M);CHKERRQ(ierr);
        ierr = MatLMVMUpdate(tr->M, tao->solution, tao->gradient);CHKERRQ(ierr);
        bfgsUpdates = 1;
      }

      if (NTR_UPDATE_REDUCTION == tr->update_type) {
        /* Get predicted reduction */
        if (NTR_KSP_NASH == tr->ksp_type) {
          ierr = KSPNASHGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
        } else if (NTR_KSP_STCG == tr->ksp_type) {
          ierr = KSPSTCGGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
        } else { /* gltr */
          ierr = KSPGLTRGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
        }

        if (prered >= 0.0) {
          /* The predicted reduction has the wrong sign.  This cannot
             happen in infinite precision arithmetic.  Step should
             be rejected! */
          tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
        }
        else {
          /* Compute trial step and function value */
          ierr = VecCopy(tao->solution,tr->W);CHKERRQ(ierr);
          ierr = VecAXPY(tr->W, 1.0, tao->stepdirection);CHKERRQ(ierr);
          ierr = TaoComputeObjective(tao, tr->W, &ftrial);CHKERRQ(ierr);

          if (PetscIsInfOrNanReal(ftrial)) {
            tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
          } else {
            /* Compute the actual reduction */
            actred = f - ftrial;
            prered = -prered;
            if ((PetscAbsScalar(actred) <= tr->epsilon) &&
                (PetscAbsScalar(prered) <= tr->epsilon)) {
              kappa = 1.0;
            }
            else {
              kappa = actred / prered;
            }

            /* Accept or reject the step and update radius */
            if (kappa < tr->eta1) {
              /* Reject the step */
              tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
            }
            else {
              /* Accept the step */
              if (kappa < tr->eta2) {
                /* Marginal bad step */
                tao->trust = tr->alpha2 * PetscMin(tao->trust, norm_d);
              }
              else if (kappa < tr->eta3) {
                /* Reasonable step */
                tao->trust = tr->alpha3 * tao->trust;
              }
              else if (kappa < tr->eta4) {
                /* Good step */
                tao->trust = PetscMax(tr->alpha4 * norm_d, tao->trust);
              }
              else {
                /* Very good step */
                tao->trust = PetscMax(tr->alpha5 * norm_d, tao->trust);
              }
              break;
            }
          }
        }
      }
      else {
        /* Get predicted reduction */
        if (NTR_KSP_NASH == tr->ksp_type) {
          ierr = KSPNASHGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
        } else if (NTR_KSP_STCG == tr->ksp_type) {
          ierr = KSPSTCGGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
        } else { /* gltr */
          ierr = KSPGLTRGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
        }

        if (prered >= 0.0) {
          /* The predicted reduction has the wrong sign.  This cannot
             happen in infinite precision arithmetic.  Step should
             be rejected! */
          tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
        }
        else {
          ierr = VecCopy(tao->solution, tr->W);CHKERRQ(ierr);
          ierr = VecAXPY(tr->W, 1.0, tao->stepdirection);CHKERRQ(ierr);
          ierr = TaoComputeObjective(tao, tr->W, &ftrial);CHKERRQ(ierr);
          if (PetscIsInfOrNanReal(ftrial)) {
            tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
          }
          else {
            ierr = VecDot(tao->gradient, tao->stepdirection, &beta);CHKERRQ(ierr);
            actred = f - ftrial;
            prered = -prered;
            if ((PetscAbsScalar(actred) <= tr->epsilon) &&
                (PetscAbsScalar(prered) <= tr->epsilon)) {
              kappa = 1.0;
            }
            else {
              kappa = actred / prered;
            }

            tau_1 = tr->theta * beta / (tr->theta * beta - (1.0 - tr->theta) * prered + actred);
            tau_2 = tr->theta * beta / (tr->theta * beta + (1.0 + tr->theta) * prered - actred);
            tau_min = PetscMin(tau_1, tau_2);
            tau_max = PetscMax(tau_1, tau_2);

            if (kappa >= 1.0 - tr->mu1) {
              /* Great agreement; accept step and update radius */
              if (tau_max < 1.0) {
                tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
              }
              else if (tau_max > tr->gamma4) {
                tao->trust = PetscMax(tao->trust, tr->gamma4 * norm_d);
              }
              else {
                tao->trust = PetscMax(tao->trust, tau_max * norm_d);
              }
              break;
            }
            else if (kappa >= 1.0 - tr->mu2) {
              /* Good agreement */

              if (tau_max < tr->gamma2) {
                tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
              }
              else if (tau_max > tr->gamma3) {
                tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
              }
              else if (tau_max < 1.0) {
                tao->trust = tau_max * PetscMin(tao->trust, norm_d);
              }
              else {
                tao->trust = PetscMax(tao->trust, tau_max * norm_d);
              }
              break;
            }
            else {
              /* Not good agreement */
              if (tau_min > 1.0) {
                tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
              }
              else if (tau_max < tr->gamma1) {
                tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
              }
              else if ((tau_min < tr->gamma1) && (tau_max >= 1.0)) {
                tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
              }
              else if ((tau_1 >= tr->gamma1) && (tau_1 < 1.0) &&
                       ((tau_2 < tr->gamma1) || (tau_2 >= 1.0))) {
                tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
              }
              else if ((tau_2 >= tr->gamma1) && (tau_2 < 1.0) &&
                       ((tau_1 < tr->gamma1) || (tau_2 >= 1.0))) {
                tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
              }
              else {
                tao->trust = tau_max * PetscMin(tao->trust, norm_d);
              }
            }
          }
        }
      }

      /* The step computed was not good and the radius was decreased.
         Monitor the radius to terminate. */
      ierr = TaoMonitor(tao, iter, f, gnorm, 0.0, tao->trust, &reason);CHKERRQ(ierr);
    }

    /* The radius may have been increased; modify if it is too large */
    tao->trust = PetscMin(tao->trust, tr->max_radius);

    if (reason == TAO_CONTINUE_ITERATING) {
      ierr = VecCopy(tr->W, tao->solution);CHKERRQ(ierr);
      f = ftrial;
      ierr = TaoComputeGradient(tao, tao->solution, tao->gradient);CHKERRQ(ierr);
      ierr = VecNorm(tao->gradient, NORM_2, &gnorm);CHKERRQ(ierr);
      if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
      needH = 1;
      ierr = TaoMonitor(tao, iter, f, gnorm, 0.0, tao->trust, &reason);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
Example #9
static PetscErrorCode TaoSolve_ASILS(Tao tao)
{
  TAO_SSLS                     *asls = (TAO_SSLS *)tao->data;
  PetscReal                    psi,ndpsi, normd, innerd, t=0;
  PetscInt                     iter=0, nf;
  PetscErrorCode               ierr;
  TaoConvergedReason           reason;
  TaoLineSearchConvergedReason ls_reason;

  PetscFunctionBegin;
  /* Assume that Setup has been called!
     Set the structure for the Jacobian and create a linear solver. */

  ierr = TaoComputeVariableBounds(tao);CHKERRQ(ierr);
  ierr = TaoLineSearchSetObjectiveAndGradientRoutine(tao->linesearch,Tao_ASLS_FunctionGradient,tao);CHKERRQ(ierr);
  ierr = TaoLineSearchSetObjectiveRoutine(tao->linesearch,Tao_SSLS_Function,tao);CHKERRQ(ierr);

  /* Calculate the function value and fischer function value at the
     current iterate */
  ierr = TaoLineSearchComputeObjectiveAndGradient(tao->linesearch,tao->solution,&psi,asls->dpsi);CHKERRQ(ierr);
  ierr = VecNorm(asls->dpsi,NORM_2,&ndpsi);CHKERRQ(ierr);

  while (1) {
    /* Check the termination criteria */
    ierr = PetscInfo3(tao,"iter %D, merit: %g, ||dpsi||: %g\n",iter, (double)asls->merit,  (double)ndpsi);CHKERRQ(ierr);
    ierr = TaoMonitor(tao, iter++, asls->merit, ndpsi, 0.0, t, &reason);CHKERRQ(ierr);
    if (TAO_CONTINUE_ITERATING != reason) break;

    /* We are going to solve a linear system of equations.  We need to
       set the tolerances for the solve so that we maintain an asymptotic
       rate of convergence that is superlinear.
       Note: these tolerances are for the reduced system.  We really need
       to make sure that the full system satisfies the full-space conditions.

       This rule gives superlinear asymptotic convergence
       asls->atol = min(0.5, asls->merit*sqrt(asls->merit));
       asls->rtol = 0.0;

       This rule gives quadratic asymptotic convergence
       asls->atol = min(0.5, asls->merit*asls->merit);
       asls->rtol = 0.0;

       Calculate a free and fixed set of variables.  The fixed set of
       variables are those for which d_b is approximately equal to zero.
       The definition of "approximately" changes as we approach the solution
       to the problem.

       No one rule is guaranteed to work in all cases.  The following
       definition is based on the norm of the Jacobian matrix.  If the
       norm is large, the tolerance becomes smaller. */
    ierr = MatNorm(tao->jacobian,NORM_1,&asls->identifier);CHKERRQ(ierr);
    asls->identifier = PetscMin(asls->merit, 1e-2) / (1 + asls->identifier);

    ierr = VecSet(asls->t1,-asls->identifier);CHKERRQ(ierr);
    ierr = VecSet(asls->t2, asls->identifier);CHKERRQ(ierr);

    ierr = ISDestroy(&asls->fixed);CHKERRQ(ierr);
    ierr = ISDestroy(&asls->free);CHKERRQ(ierr);
    ierr = VecWhichBetweenOrEqual(asls->t1, asls->db, asls->t2, &asls->fixed);CHKERRQ(ierr);
    ierr = ISComplementVec(asls->fixed,asls->t1, &asls->free);CHKERRQ(ierr);

    ierr = ISGetSize(asls->fixed,&nf);CHKERRQ(ierr);
    ierr = PetscInfo1(tao,"Number of fixed variables: %D\n", nf);CHKERRQ(ierr);

    /* We now have our partition.  Now calculate the direction in the
       fixed variable space. */
    ierr = TaoVecGetSubVec(asls->ff, asls->fixed, tao->subset_type, 0.0, &asls->r1);CHKERRQ(ierr);
    ierr = TaoVecGetSubVec(asls->da, asls->fixed, tao->subset_type, 1.0, &asls->r2);CHKERRQ(ierr);
    ierr = VecPointwiseDivide(asls->r1,asls->r1,asls->r2);CHKERRQ(ierr);
    ierr = VecSet(tao->stepdirection,0.0);CHKERRQ(ierr);
    ierr = VecISAXPY(tao->stepdirection, asls->fixed,1.0,asls->r1);CHKERRQ(ierr);

    /* Our direction in the Fixed Variable Set is fixed.  Calculate the
       information needed for the step in the Free Variable Set.  To
       do this, we need to know the diagonal perturbation and the
       right hand side. */

    ierr = TaoVecGetSubVec(asls->da, asls->free, tao->subset_type, 0.0, &asls->r1);CHKERRQ(ierr);
    ierr = TaoVecGetSubVec(asls->ff, asls->free, tao->subset_type, 0.0, &asls->r2);CHKERRQ(ierr);
    ierr = TaoVecGetSubVec(asls->db, asls->free, tao->subset_type, 1.0, &asls->r3);CHKERRQ(ierr);
    ierr = VecPointwiseDivide(asls->r1,asls->r1, asls->r3);CHKERRQ(ierr);
    ierr = VecPointwiseDivide(asls->r2,asls->r2, asls->r3);CHKERRQ(ierr);

    /* r1 is the diagonal perturbation
       r2 is the right hand side
       r3 is no longer needed

       Now need to modify r2 for our direction choice in the fixed
       variable set:  calculate t1 = J*d, take the reduced vector
       of t1 and modify r2. */

    ierr = MatMult(tao->jacobian, tao->stepdirection, asls->t1);CHKERRQ(ierr);
    ierr = TaoVecGetSubVec(asls->t1,asls->free,tao->subset_type,0.0,&asls->r3);CHKERRQ(ierr);
    ierr = VecAXPY(asls->r2, -1.0, asls->r3);CHKERRQ(ierr);

    /* Calculate the reduced problem matrix and the direction */
    if (!asls->w && (tao->subset_type == TAO_SUBSET_MASK || tao->subset_type == TAO_SUBSET_MATRIXFREE)) {
      ierr = VecDuplicate(tao->solution, &asls->w);CHKERRQ(ierr);
    }
    ierr = TaoMatGetSubMat(tao->jacobian, asls->free, asls->w, tao->subset_type,&asls->J_sub);CHKERRQ(ierr);
    if (tao->jacobian != tao->jacobian_pre) {
      ierr = TaoMatGetSubMat(tao->jacobian_pre, asls->free, asls->w, tao->subset_type, &asls->Jpre_sub);CHKERRQ(ierr);
    } else {
      ierr = MatDestroy(&asls->Jpre_sub);CHKERRQ(ierr);
      asls->Jpre_sub = asls->J_sub;
      ierr = PetscObjectReference((PetscObject)(asls->Jpre_sub));CHKERRQ(ierr);
    }
    ierr = MatDiagonalSet(asls->J_sub, asls->r1,ADD_VALUES);CHKERRQ(ierr);
    ierr = TaoVecGetSubVec(tao->stepdirection, asls->free, tao->subset_type, 0.0, &asls->dxfree);CHKERRQ(ierr);
    ierr = VecSet(asls->dxfree, 0.0);CHKERRQ(ierr);

    /* Calculate the reduced direction.  (Really negative of Newton
       direction.  Therefore, rest of the code uses -d.) */
    ierr = KSPReset(tao->ksp);CHKERRQ(ierr);
    ierr = KSPSetOperators(tao->ksp, asls->J_sub, asls->Jpre_sub);CHKERRQ(ierr);
    ierr = KSPSolve(tao->ksp, asls->r2, asls->dxfree);CHKERRQ(ierr);

    /* Add the direction in the free variables back into the real direction. */
    ierr = VecISAXPY(tao->stepdirection, asls->free, 1.0,asls->dxfree);CHKERRQ(ierr);

    /* Check the real direction for descent and if not, use the negative
       gradient direction. */
    ierr = VecNorm(tao->stepdirection, NORM_2, &normd);CHKERRQ(ierr);
    ierr = VecDot(tao->stepdirection, asls->dpsi, &innerd);CHKERRQ(ierr);

    if (innerd <= asls->delta*pow(normd, asls->rho)) {
      ierr = PetscInfo1(tao,"Gradient direction: %5.4e.\n", (double)innerd);CHKERRQ(ierr);
      ierr = PetscInfo1(tao, "Iteration %D: newton direction not descent\n", iter);CHKERRQ(ierr);
      ierr = VecCopy(asls->dpsi, tao->stepdirection);CHKERRQ(ierr);
      ierr = VecDot(asls->dpsi, tao->stepdirection, &innerd);CHKERRQ(ierr);
    }

    ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);
    innerd = -innerd;

    /* We now have a correct descent direction.  Apply a linesearch to
       find the new iterate. */
    ierr = TaoLineSearchSetInitialStepLength(tao->linesearch, 1.0);CHKERRQ(ierr);
    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &psi,asls->dpsi, tao->stepdirection, &t, &ls_reason);CHKERRQ(ierr);
    ierr = VecNorm(asls->dpsi, NORM_2, &ndpsi);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
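A standalone sketch of the descent test used above: the candidate direction returned by the KSP solve (stored as the negative of the Newton step) is kept only when its inner product with the merit gradient exceeds delta*||d||^rho; otherwise the solver falls back to the gradient. The helper name CheckDescentDirection is hypothetical and not part of TAO.

#include <petscvec.h>

static PetscErrorCode CheckDescentDirection(Vec d, Vec dpsi, PetscReal delta, PetscReal rho, PetscBool *ok)
{
  PetscReal      normd, innerd;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecNorm(d, NORM_2, &normd);CHKERRQ(ierr);
  ierr = VecDot(d, dpsi, &innerd);CHKERRQ(ierr);
  /* mirrors the test "innerd <= delta*pow(normd, rho)" above: failure means fall back to dpsi */
  *ok = (innerd > delta*PetscPowReal(normd, rho)) ? PETSC_TRUE : PETSC_FALSE;
  PetscFunctionReturn(0);
}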
Example #10
0
PetscErrorCode TaoSolve_BNTR(Tao tao)
{
  PetscErrorCode               ierr;
  TAO_BNK                      *bnk = (TAO_BNK *)tao->data;
  KSPConvergedReason           ksp_reason;

  PetscReal                    oldTrust, prered, actred, steplen, resnorm;
  PetscBool                    cgTerminate, needH = PETSC_TRUE, stepAccepted, shift = PETSC_FALSE;
  PetscInt                     stepType, nDiff;
  
  PetscFunctionBegin;
  /* Initialize the preconditioner, KSP solver and trust radius/line search */
  tao->reason = TAO_CONTINUE_ITERATING;
  ierr = TaoBNKInitialize(tao, bnk->init_type, &needH);CHKERRQ(ierr);
  if (tao->reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);

  /* Have not converged; continue with Newton method */
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    /* Call general purpose update function */
    if (tao->ops->update) {
      ierr = (*tao->ops->update)(tao, tao->niter, tao->user_update);CHKERRQ(ierr);
    }
    ++tao->niter;
    
    if (needH && bnk->inactive_idx) { 
      /* Take BNCG steps (if enabled) to trade-off Hessian evaluations for more gradient evaluations */
      ierr = TaoBNKTakeCGSteps(tao, &cgTerminate);CHKERRQ(ierr);
      if (cgTerminate) {
        tao->reason = bnk->bncg->reason;
        PetscFunctionReturn(0);
      }
      /* Compute the hessian and update the BFGS preconditioner at the new iterate */
      ierr = (*bnk->computehessian)(tao);CHKERRQ(ierr);
      needH = PETSC_FALSE;
    }
    
    /* Store current solution before it changes */
    bnk->fold = bnk->f;
    ierr = VecCopy(tao->solution, bnk->Xold);CHKERRQ(ierr);
    ierr = VecCopy(tao->gradient, bnk->Gold);CHKERRQ(ierr);
    ierr = VecCopy(bnk->unprojected_gradient, bnk->unprojected_gradient_old);CHKERRQ(ierr);
    
    /* Enter into trust region loops */
    stepAccepted = PETSC_FALSE;
    while (!stepAccepted && tao->reason == TAO_CONTINUE_ITERATING) {
      tao->ksp_its=0;
      
      /* Use the common BNK kernel to compute the Newton step (for inactive variables only) */
      ierr = (*bnk->computestep)(tao, shift, &ksp_reason, &stepType);CHKERRQ(ierr);

      /* Temporarily accept the step and project it into the bounds */
      ierr = VecAXPY(tao->solution, 1.0, tao->stepdirection);CHKERRQ(ierr);
      ierr = TaoBoundSolution(tao->solution, tao->XL,tao->XU, 0.0, &nDiff, tao->solution);CHKERRQ(ierr);

      /* Check if the projection changed the step direction */
      if (nDiff > 0) {
        /* Projection changed the step, so we have to recompute the step and 
           the predicted reduction. Leave the trust radius unchanged. */
        ierr = VecCopy(tao->solution, tao->stepdirection);CHKERRQ(ierr);
        ierr = VecAXPY(tao->stepdirection, -1.0, bnk->Xold);CHKERRQ(ierr);
        ierr = TaoBNKRecomputePred(tao, tao->stepdirection, &prered);CHKERRQ(ierr);
      } else {
        /* Step did not change, so we can just recover the pre-computed prediction */
        ierr = KSPCGGetObjFcn(tao->ksp, &prered);CHKERRQ(ierr);
      }
      prered = -prered;

      /* Compute the actual reduction and update the trust radius */
      ierr = TaoComputeObjective(tao, tao->solution, &bnk->f);CHKERRQ(ierr);
      if (PetscIsInfOrNanReal(bnk->f)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
      actred = bnk->fold - bnk->f;
      oldTrust = tao->trust;
      ierr = TaoBNKUpdateTrustRadius(tao, prered, actred, bnk->update_type, stepType, &stepAccepted);CHKERRQ(ierr);

      if (stepAccepted) {
        /* Step is good, evaluate the gradient and flip the need-Hessian switch */
        steplen = 1.0;
        needH = PETSC_TRUE;
        ++bnk->newt;
        ierr = TaoComputeGradient(tao, tao->solution, bnk->unprojected_gradient);CHKERRQ(ierr);
        ierr = TaoBNKEstimateActiveSet(tao, bnk->as_type);CHKERRQ(ierr);
        ierr = VecCopy(bnk->unprojected_gradient, tao->gradient);CHKERRQ(ierr);
        ierr = VecISSet(tao->gradient, bnk->active_idx, 0.0);CHKERRQ(ierr);
        ierr = TaoGradientNorm(tao, tao->gradient, NORM_2, &bnk->gnorm);CHKERRQ(ierr);
      } else {
        /* Step is bad, revert old solution and re-solve with new radius */
        steplen = 0.0;
        needH = PETSC_FALSE;
        bnk->f = bnk->fold;
        ierr = VecCopy(bnk->Xold, tao->solution);CHKERRQ(ierr);
        ierr = VecCopy(bnk->Gold, tao->gradient);CHKERRQ(ierr);
        ierr = VecCopy(bnk->unprojected_gradient_old, bnk->unprojected_gradient);CHKERRQ(ierr);
        if (oldTrust == tao->trust) {
          /* Can't change the radius anymore so just terminate */
          tao->reason = TAO_DIVERGED_TR_REDUCTION;
        }
      }

      /*  Check for termination */
      ierr = VecFischer(tao->solution, bnk->unprojected_gradient, tao->XL, tao->XU, bnk->W);CHKERRQ(ierr);
      ierr = VecNorm(bnk->W, NORM_2, &resnorm);CHKERRQ(ierr);
      if (PetscIsInfOrNanReal(resnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
      ierr = TaoLogConvergenceHistory(tao, bnk->f, resnorm, 0.0, tao->ksp_its);CHKERRQ(ierr);
      ierr = TaoMonitor(tao, tao->niter, bnk->f, resnorm, 0.0, steplen);CHKERRQ(ierr);
      ierr = (*tao->ops->convergencetest)(tao, tao->cnvP);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
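The heart of the trust-region loop above is the comparison of actual versus predicted reduction. A hedged sketch of the generic accept/shrink/expand logic follows; the thresholds (0.25, 0.75) and the scaling factors are textbook values chosen for illustration, not the defaults used by TaoBNKUpdateTrustRadius.

#include <petscsys.h>

static void BasicTrustRadiusUpdate(PetscReal actred, PetscReal prered, PetscReal *trust, PetscBool *accepted)
{
  PetscReal ratio = (prered > 0.0) ? actred/prered : -1.0;

  if (ratio < 0.25) {
    *trust *= 0.5;              /* poor agreement between model and function: shrink the radius */
  } else if (ratio > 0.75) {
    *trust *= 2.0;              /* very good agreement: allow a larger step next time */
  }
  *accepted = (ratio > 0.0 && actred > 0.0) ? PETSC_TRUE : PETSC_FALSE;
}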
Example #11
0
static PetscErrorCode TaoSolve_GPCG(Tao tao)
{
  TAO_GPCG                     *gpcg = (TAO_GPCG *)tao->data;
  PetscErrorCode               ierr;
  PetscInt                     its;
  PetscReal                    actred,f,f_new,gnorm,gdx,stepsize,xtb;
  PetscReal                    xtHx;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;

  PetscFunctionBegin;

  ierr = TaoComputeVariableBounds(tao);CHKERRQ(ierr);
  ierr = VecMedian(tao->XL,tao->solution,tao->XU,tao->solution);CHKERRQ(ierr);
  ierr = TaoLineSearchSetVariableBounds(tao->linesearch,tao->XL,tao->XU);CHKERRQ(ierr);

  /* Using f = .5*x'Hx + x'b + c and g=Hx + b,  compute b,c */
  ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);
  ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&f,tao->gradient);CHKERRQ(ierr);
  ierr = VecCopy(tao->gradient, gpcg->B);CHKERRQ(ierr);
  ierr = MatMult(tao->hessian,tao->solution,gpcg->Work);CHKERRQ(ierr);
  ierr = VecDot(gpcg->Work, tao->solution, &xtHx);CHKERRQ(ierr);
  ierr = VecAXPY(gpcg->B,-1.0,gpcg->Work);CHKERRQ(ierr);
  ierr = VecDot(gpcg->B,tao->solution,&xtb);CHKERRQ(ierr);
  gpcg->c=f-xtHx/2.0-xtb;
  if (gpcg->Free_Local) {
      ierr = ISDestroy(&gpcg->Free_Local);CHKERRQ(ierr);
  }
  ierr = VecWhichBetween(tao->XL,tao->solution,tao->XU,&gpcg->Free_Local);CHKERRQ(ierr);

  /* Project the gradient and calculate the norm */
  ierr = VecCopy(tao->gradient,gpcg->G_New);CHKERRQ(ierr);
  ierr = VecBoundGradientProjection(tao->gradient,tao->solution,tao->XL,tao->XU,gpcg->PG);CHKERRQ(ierr);
  ierr = VecNorm(gpcg->PG,NORM_2,&gpcg->gnorm);CHKERRQ(ierr);
  tao->step=1.0;
  gpcg->f = f;

    /* Check Stopping Condition      */
  ierr=TaoMonitor(tao,tao->niter,f,gpcg->gnorm,0.0,tao->step,&reason);CHKERRQ(ierr);

  while (reason == TAO_CONTINUE_ITERATING){
    tao->ksp_its=0;

    ierr = GPCGGradProjections(tao);CHKERRQ(ierr);
    ierr = ISGetSize(gpcg->Free_Local,&gpcg->n_free);CHKERRQ(ierr);

    f=gpcg->f; gnorm=gpcg->gnorm;

    ierr = KSPReset(tao->ksp);CHKERRQ(ierr);

    if (gpcg->n_free > 0){
      /* Create a reduced linear system */
      ierr = VecDestroy(&gpcg->R);CHKERRQ(ierr);
      ierr = VecDestroy(&gpcg->DXFree);CHKERRQ(ierr);
      ierr = TaoVecGetSubVec(tao->gradient,gpcg->Free_Local, tao->subset_type, 0.0, &gpcg->R);CHKERRQ(ierr);
      ierr = VecScale(gpcg->R, -1.0);CHKERRQ(ierr);
      ierr = TaoVecGetSubVec(tao->stepdirection,gpcg->Free_Local,tao->subset_type, 0.0, &gpcg->DXFree);CHKERRQ(ierr);
      ierr = VecSet(gpcg->DXFree,0.0);CHKERRQ(ierr);

      ierr = TaoMatGetSubMat(tao->hessian, gpcg->Free_Local, gpcg->Work, tao->subset_type, &gpcg->Hsub);CHKERRQ(ierr);

      if (tao->hessian_pre == tao->hessian) {
        ierr = MatDestroy(&gpcg->Hsub_pre);CHKERRQ(ierr);
        ierr = PetscObjectReference((PetscObject)gpcg->Hsub);CHKERRQ(ierr);
        gpcg->Hsub_pre = gpcg->Hsub;
      }  else {
        ierr = TaoMatGetSubMat(tao->hessian, gpcg->Free_Local, gpcg->Work, tao->subset_type, &gpcg->Hsub_pre);CHKERRQ(ierr);
      }

      ierr = KSPReset(tao->ksp);CHKERRQ(ierr);
      ierr = KSPSetOperators(tao->ksp,gpcg->Hsub,gpcg->Hsub_pre);CHKERRQ(ierr);

      ierr = KSPSolve(tao->ksp,gpcg->R,gpcg->DXFree);CHKERRQ(ierr);
      ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
      tao->ksp_its+=its;
      tao->ksp_tot_its+=its;
      ierr = VecSet(tao->stepdirection,0.0);CHKERRQ(ierr);
      ierr = VecISAXPY(tao->stepdirection,gpcg->Free_Local,1.0,gpcg->DXFree);CHKERRQ(ierr);

      ierr = VecDot(tao->stepdirection,tao->gradient,&gdx);CHKERRQ(ierr);
      ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
      f_new=f;
      ierr = TaoLineSearchApply(tao->linesearch,tao->solution,&f_new,tao->gradient,tao->stepdirection,&stepsize,&ls_status);CHKERRQ(ierr);

      actred = f_new - f;

      /* Evaluate the function and gradient at the new point */
      ierr = VecBoundGradientProjection(tao->gradient,tao->solution,tao->XL,tao->XU, gpcg->PG);CHKERRQ(ierr);
      ierr = VecNorm(gpcg->PG, NORM_2, &gnorm);CHKERRQ(ierr);
      f=f_new;
      ierr = ISDestroy(&gpcg->Free_Local);CHKERRQ(ierr);
      ierr = VecWhichBetween(tao->XL,tao->solution,tao->XU,&gpcg->Free_Local);CHKERRQ(ierr);
    } else {
      actred = 0; gpcg->step=1.0;
      /* if there were no free variables, no cg method */
    }

    tao->niter++;
    ierr = TaoMonitor(tao,tao->niter,f,gnorm,0.0,gpcg->step,&reason);CHKERRQ(ierr);
    gpcg->f=f;gpcg->gnorm=gnorm; gpcg->actred=actred;
    if (reason!=TAO_CONTINUE_ITERATING) break;
  }  /* END MAIN LOOP  */

  PetscFunctionReturn(0);
}
Example #12
0
static PetscErrorCode TaoSolve_BQPIP(Tao tao)
{
  TAO_BQPIP          *qp = (TAO_BQPIP*)tao->data;
  PetscErrorCode     ierr;
  PetscInt           iter=0,its;
  PetscReal          d1,d2,ksptol,sigma;
  PetscReal          sigmamu;
  PetscReal          dstep,pstep,step=0;
  PetscReal          gap[4];
  TaoConvergedReason reason;

  PetscFunctionBegin;
  qp->dobj           = 0.0;
  qp->pobj           = 1.0;
  qp->gap            = 10.0;
  qp->rgap           = 1.0;
  qp->mu             = 1.0;
  qp->sigma          = 1.0;
  qp->dinfeas        = 1.0;
  qp->psteplength    = 0.0;
  qp->dsteplength    = 0.0;

  /* Tighten infinite bounds, things break when we don't do this
    -- see test_bqpip.c
  */
  ierr = VecSet(qp->XU,1.0e20);CHKERRQ(ierr);
  ierr = VecSet(qp->XL,-1.0e20);CHKERRQ(ierr);
  ierr = VecPointwiseMax(qp->XL,qp->XL,tao->XL);CHKERRQ(ierr);
  ierr = VecPointwiseMin(qp->XU,qp->XU,tao->XU);CHKERRQ(ierr);

  ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&qp->c,qp->C0);CHKERRQ(ierr);
  ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);
  ierr = MatMult(tao->hessian, tao->solution, qp->Work);CHKERRQ(ierr);
  ierr = VecDot(tao->solution, qp->Work, &d1);CHKERRQ(ierr);
  ierr = VecAXPY(qp->C0, -1.0, qp->Work);CHKERRQ(ierr);
  ierr = VecDot(qp->C0, tao->solution, &d2);CHKERRQ(ierr);
  qp->c -= (d1/2.0+d2);
  ierr = MatGetDiagonal(tao->hessian, qp->HDiag);CHKERRQ(ierr);

  ierr = QPIPSetInitialPoint(qp,tao);CHKERRQ(ierr);
  ierr = QPIPComputeResidual(qp,tao);CHKERRQ(ierr);

  /* Enter main loop */
  while (1){

    /* Check Stopping Condition      */
    ierr = TaoMonitor(tao,iter++,qp->pobj,PetscSqrtScalar(qp->gap + qp->dinfeas),
                            qp->pinfeas, step, &reason);CHKERRQ(ierr);
    if (reason != TAO_CONTINUE_ITERATING) break;

    /*
       Dual Infeasibility Direction should already be in the right
       hand side from computing the residuals
    */

    ierr = QPIPComputeNormFromCentralPath(qp,&d1);CHKERRQ(ierr);

    if (iter > 0 && (qp->rnorm>5*qp->mu || d1*d1>qp->m*qp->mu*qp->mu) ) {
      /* note: the next assignment pair is immediately overwritten, so sigma and sigmamu end up 0 in both branches */
      sigma=1.0;sigmamu=qp->mu;
      sigma=0.0;sigmamu=0;
    } else {
      sigma=0.0;sigmamu=0;
    }
    ierr = VecSet(qp->DZ, sigmamu);CHKERRQ(ierr);
    ierr = VecSet(qp->DS, sigmamu);CHKERRQ(ierr);

    if (sigmamu !=0){
      ierr = VecPointwiseDivide(qp->DZ, qp->DZ, qp->G);CHKERRQ(ierr);
      ierr = VecPointwiseDivide(qp->DS, qp->DS, qp->T);CHKERRQ(ierr);
      ierr = VecCopy(qp->DZ,qp->RHS2);CHKERRQ(ierr);
      ierr = VecAXPY(qp->RHS2, 1.0, qp->DS);CHKERRQ(ierr);
    } else {
      ierr = VecZeroEntries(qp->RHS2);CHKERRQ(ierr);
    }


    /*
       Compute the Primal Infeasibility RHS and the
       Diagonal Matrix to be added to H and store in Work
    */
    ierr = VecPointwiseDivide(qp->DiagAxpy, qp->Z, qp->G);CHKERRQ(ierr);
    ierr = VecPointwiseMult(qp->GZwork, qp->DiagAxpy, qp->R3);CHKERRQ(ierr);
    ierr = VecAXPY(qp->RHS, -1.0, qp->GZwork);CHKERRQ(ierr);

    ierr = VecPointwiseDivide(qp->TSwork, qp->S, qp->T);CHKERRQ(ierr);
    ierr = VecAXPY(qp->DiagAxpy, 1.0, qp->TSwork);CHKERRQ(ierr);
    ierr = VecPointwiseMult(qp->TSwork, qp->TSwork, qp->R5);CHKERRQ(ierr);
    ierr = VecAXPY(qp->RHS, -1.0, qp->TSwork);CHKERRQ(ierr);
    ierr = VecAXPY(qp->RHS2, 1.0, qp->RHS);CHKERRQ(ierr);

    /*  Determine the solving tolerance */
    ksptol = qp->mu/10.0;
    ksptol = PetscMin(ksptol,0.001);

    ierr = MatDiagonalSet(tao->hessian, qp->DiagAxpy, ADD_VALUES);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

    ierr = KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);CHKERRQ(ierr);
    ierr = KSPSolve(tao->ksp, qp->RHS, tao->stepdirection);CHKERRQ(ierr);
    ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
    tao->ksp_its+=its;

    ierr = VecScale(qp->DiagAxpy, -1.0);CHKERRQ(ierr);
    ierr = MatDiagonalSet(tao->hessian, qp->DiagAxpy, ADD_VALUES);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = VecScale(qp->DiagAxpy, -1.0);CHKERRQ(ierr);
    ierr = QPComputeStepDirection(qp,tao);CHKERRQ(ierr);
    ierr = QPStepLength(qp); CHKERRQ(ierr);

    /* Calculate New Residual R1 in Work vector */
    ierr = MatMult(tao->hessian, tao->stepdirection, qp->RHS2);CHKERRQ(ierr);
    ierr = VecAXPY(qp->RHS2, 1.0, qp->DS);CHKERRQ(ierr);
    ierr = VecAXPY(qp->RHS2, -1.0, qp->DZ);CHKERRQ(ierr);
    ierr = VecAYPX(qp->RHS2, qp->dsteplength, tao->gradient);CHKERRQ(ierr);

    ierr = VecNorm(qp->RHS2, NORM_2, &qp->dinfeas);CHKERRQ(ierr);
    ierr = VecDot(qp->DZ, qp->DG, gap);CHKERRQ(ierr);
    ierr = VecDot(qp->DS, qp->DT, gap+1);CHKERRQ(ierr);

    qp->rnorm=(qp->dinfeas+qp->psteplength*qp->pinfeas)/(qp->m+qp->n);
    pstep = qp->psteplength; dstep = qp->dsteplength;
    step = PetscMin(qp->psteplength,qp->dsteplength);
    sigmamu= ( pstep*pstep*(gap[0]+gap[1]) +
               (1 - pstep + pstep*sigma)*qp->gap  )/qp->m;

    if (qp->predcorr && step < 0.9){
      if (sigmamu < qp->mu){
        sigmamu=sigmamu/qp->mu;
        sigmamu=sigmamu*sigmamu*sigmamu;
      } else {sigmamu = 1.0;}
      sigmamu = sigmamu*qp->mu;

      /* Compute Corrector Step */
      ierr = VecPointwiseMult(qp->DZ, qp->DG, qp->DZ);CHKERRQ(ierr);
      ierr = VecScale(qp->DZ, -1.0);CHKERRQ(ierr);
      ierr = VecShift(qp->DZ, sigmamu);CHKERRQ(ierr);
      ierr = VecPointwiseDivide(qp->DZ, qp->DZ, qp->G);CHKERRQ(ierr);

      ierr = VecPointwiseMult(qp->DS, qp->DS, qp->DT);CHKERRQ(ierr);
      ierr = VecScale(qp->DS, -1.0);CHKERRQ(ierr);
      ierr = VecShift(qp->DS, sigmamu);CHKERRQ(ierr);
      ierr = VecPointwiseDivide(qp->DS, qp->DS, qp->T);CHKERRQ(ierr);

      ierr = VecCopy(qp->DZ, qp->RHS2);CHKERRQ(ierr);
      ierr = VecAXPY(qp->RHS2, -1.0, qp->DS);CHKERRQ(ierr);
      ierr = VecAXPY(qp->RHS2, 1.0, qp->RHS);CHKERRQ(ierr);

      /* Approximately solve the linear system */
      ierr = MatDiagonalSet(tao->hessian, qp->DiagAxpy, ADD_VALUES);CHKERRQ(ierr);
      ierr = MatAssemblyBegin(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = MatAssemblyEnd(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = KSPSolve(tao->ksp, qp->RHS2, tao->stepdirection);CHKERRQ(ierr);
      ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
      tao->ksp_its+=its;

      ierr = MatDiagonalSet(tao->hessian, qp->HDiag, INSERT_VALUES);CHKERRQ(ierr);
      ierr = MatAssemblyBegin(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = MatAssemblyEnd(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = QPComputeStepDirection(qp,tao);CHKERRQ(ierr);
      ierr = QPStepLength(qp);CHKERRQ(ierr);

    }  /* End Corrector step */


    /* Take the step */
    pstep = qp->psteplength; dstep = qp->dsteplength;

    ierr = VecAXPY(qp->Z, dstep, qp->DZ);CHKERRQ(ierr);
    ierr = VecAXPY(qp->S, dstep, qp->DS);CHKERRQ(ierr);
    ierr = VecAXPY(tao->solution, dstep, tao->stepdirection);CHKERRQ(ierr);
    ierr = VecAXPY(qp->G, dstep, qp->DG);CHKERRQ(ierr);
    ierr = VecAXPY(qp->T, dstep, qp->DT);CHKERRQ(ierr);

    /* Compute Residuals */
    ierr = QPIPComputeResidual(qp,tao);CHKERRQ(ierr);

    /* Evaluate quadratic function */
    ierr = MatMult(tao->hessian, tao->solution, qp->Work);CHKERRQ(ierr);

    ierr = VecDot(tao->solution, qp->Work, &d1);CHKERRQ(ierr);
    ierr = VecDot(tao->solution, qp->C0, &d2);CHKERRQ(ierr);
    ierr = VecDot(qp->G, qp->Z, gap);CHKERRQ(ierr);
    ierr = VecDot(qp->T, qp->S, gap+1);CHKERRQ(ierr);

    qp->pobj=d1/2.0 + d2+qp->c;
    /* Compute the duality gap */
    qp->gap = (gap[0]+gap[1]);
    qp->dobj = qp->pobj - qp->gap;
    if (qp->m>0) qp->mu=qp->gap/(qp->m);
    qp->rgap=qp->gap/( PetscAbsReal(qp->dobj) + PetscAbsReal(qp->pobj) + 1.0 );
  }  /* END MAIN LOOP  */

  PetscFunctionReturn(0);
}
Example #13
0
static int TaoSolve_BCG(TAO_SOLVER tao, void *cgptr)
{
  TAO_BCG *cg = (TAO_BCG *) cgptr;
  TaoVec*    X,*G; /* solution vector,  gradient vector */
  TaoVec*    Gprev=cg->Gprev, *GP=cg->GP;
  TaoVec*    DX=cg->DX, *Work=cg->Work;
  TaoVec  *XL,*XU;
  int    iter=0,lsflag=0,info;
  double gnorm2Prev,gdotgprev,gdx;
  double zero=0.0, minus_one = -1.0;
  double f_old,f,gnorm2,step=0;
  TaoTerminateReason reason;

  TaoFunctionBegin;
  info=TaoGetSolution(tao,&X);CHKERRQ(info);
  info=TaoGetGradient(tao,&G);CHKERRQ(info);

  info = TaoGetVariableBounds(tao,&XL,&XU); CHKERRQ(info);
  info = X->median(XL,X,XU); CHKERRQ(info);

  info = TaoComputeFunctionGradient(tao,X,&f,G);CHKERRQ(info);
  info = GP->boundGradientProjection(G,XL,X,XU); CHKERRQ(info);
  info = GP->norm2squared(&gnorm2);  CHKERRQ(info);

  info = DX->setToZero(); CHKERRQ(info); 
  info = Gprev->copyFrom(GP); CHKERRQ(info);

  cg->restarts=0;
  gnorm2Prev = gnorm2;

  /* Enter loop */
  while (1){

    /* Test for convergence */
    info = TaoMonitor(tao,iter++,f,gnorm2,0.0,step,&reason);CHKERRQ(info);
    if (reason!=TAO_CONTINUE_ITERATING) break;

    /* Determine beta, depending on method */
    info = GP->dot(Gprev,&gdotgprev); CHKERRQ(info);
    if (cg->type==TAO_CG_FletcherReeves){
      cg->beta=(gnorm2)/(gnorm2Prev);
    } else if (cg->type==TAO_CG_PolakRibiere){
      cg->beta=( (gnorm2)-gdotgprev )/(gnorm2Prev);
    } else {
      cg->beta=( (gnorm2)-gdotgprev )/(gnorm2Prev);
      if (cg->beta<0.0){
	cg->beta=0.0;
      }
    }

    /* Employ occasional restarts when successive gradients not orthogonal */
    if ( fabs(gdotgprev)/(gnorm2) > cg->eta || iter==0){ 
      printf("RESTART Beta: %4.2e\n",cg->beta);
      cg->beta=0.0;
    }

    if (cg->beta==0){
      cg->restarts++;
      PLogInfo(tao,"TaoCG: Restart CG at iterate %d with gradient direction.\n",tao->iter);
    }
    info = DX->scale(cg->beta); CHKERRQ(info);

    info = DX->negate(); CHKERRQ(info);
    info = DX->boundGradientProjection(DX,XL,X,XU); CHKERRQ(info);
    info = DX->negate(); CHKERRQ(info);

    info = DX->Axpy(minus_one,G); CHKERRQ(info);

    info = Gprev->copyFrom(GP); CHKERRQ(info);
    gnorm2Prev = gnorm2;

    info = Work->copyFrom(DX); CHKERRQ(info);
    info = Work->negate(); CHKERRQ(info);
    info = Work->boundGradientProjection(Work,XL,X,XU); CHKERRQ(info);
    info = Work->negate(); CHKERRQ(info);
    info = Work->dot(G,&gdx); CHKERRQ(info);
    if (cg->beta!=0 && gdx>=0){

      info = DX->copyFrom(GP); CHKERRQ(info);
      info = DX->negate(); CHKERRQ(info);
      cg->restarts++;
    } else {

    }
    info = DX->dot(G,&gdx); CHKERRQ(info);

    
    /* Line Search (note: the two scalings below are immediately overridden; the search always starts from step=1.0) */
    step=1.5*step;
    step=TaoMax(1.5*step,0.1);
    step=1.0;

    info = TaoLineSearchApply(tao,X,G,DX,Work,&f,&step,&gdx,&lsflag); CHKERRQ(info);

    info = GP->boundGradientProjection(G,XL,X,XU); CHKERRQ(info);
    info = GP->norm2squared(&gnorm2); CHKERRQ(info);
    
  }
  
  TaoFunctionReturn(0);
}
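For reference, the three beta choices selected above (Fletcher-Reeves, Polak-Ribiere, and the PR+ truncation) can be written as one small standalone helper. This is an illustrative sketch, independent of the TAO_BCG structures; gnorm2 = ||g_k||^2, gnorm2_prev = ||g_{k-1}||^2, and gdotgprev = g_k . g_{k-1}.

static double CGBeta(int use_fletcher_reeves, int use_pr_plus, double gnorm2, double gnorm2_prev, double gdotgprev)
{
  double beta;
  if (use_fletcher_reeves) {
    beta = gnorm2/gnorm2_prev;                     /* Fletcher-Reeves */
  } else {
    beta = (gnorm2 - gdotgprev)/gnorm2_prev;       /* Polak-Ribiere */
    if (use_pr_plus && beta < 0.0) beta = 0.0;     /* PR+: never allow a negative beta */
  }
  return beta;
}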
Example #14
0
static PetscErrorCode TaoSolve_BLMVM(Tao tao)
{
  PetscErrorCode               ierr;
  TAO_BLMVM                    *blmP = (TAO_BLMVM *)tao->data;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;
  PetscReal                    f, fold, gdx, gnorm;
  PetscReal                    stepsize = 1.0,delta;

  PetscFunctionBegin;
  /*  Project initial point onto bounds */
  ierr = TaoComputeVariableBounds(tao);CHKERRQ(ierr);
  ierr = VecMedian(tao->XL,tao->solution,tao->XU,tao->solution);CHKERRQ(ierr);
  ierr = TaoLineSearchSetVariableBounds(tao->linesearch,tao->XL,tao->XU);CHKERRQ(ierr);


  /* Check convergence criteria */
  ierr = TaoComputeObjectiveAndGradient(tao, tao->solution,&f,blmP->unprojected_gradient);CHKERRQ(ierr);
  ierr = VecBoundGradientProjection(blmP->unprojected_gradient,tao->solution, tao->XL,tao->XU,tao->gradient);CHKERRQ(ierr);

  ierr = TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);CHKERRQ(ierr);
  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");

  ierr = TaoMonitor(tao, tao->niter, f, gnorm, 0.0, stepsize, &reason);CHKERRQ(ierr);
  if (reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);

  /* Set initial scaling for the function */
  if (f != 0.0) {
    delta = 2.0*PetscAbsScalar(f) / (gnorm*gnorm);
  } else {
    delta = 2.0 / (gnorm*gnorm);
  }
  ierr = MatLMVMSetDelta(blmP->M,delta);CHKERRQ(ierr);

  /* Set counter for gradient/reset steps */
  blmP->grad = 0;
  blmP->reset = 0;

  /* Have not converged; continue with Newton method */
  while (reason == TAO_CONTINUE_ITERATING) {
    /* Compute direction */
    ierr = MatLMVMUpdate(blmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
    ierr = MatLMVMSolve(blmP->M, blmP->unprojected_gradient, tao->stepdirection);CHKERRQ(ierr);
    ierr = VecBoundGradientProjection(tao->stepdirection,tao->solution,tao->XL,tao->XU,tao->gradient);CHKERRQ(ierr);

    /* Check for success (descent direction) */
    ierr = VecDot(blmP->unprojected_gradient, tao->gradient, &gdx);CHKERRQ(ierr);
    if (gdx <= 0) {
      /* Step is not descent or solve was not successful
         Use steepest descent direction (scaled) */
      ++blmP->grad;

      if (f != 0.0) {
        delta = 2.0*PetscAbsScalar(f) / (gnorm*gnorm);
      } else {
        delta = 2.0 / (gnorm*gnorm);
      }
      ierr = MatLMVMSetDelta(blmP->M,delta);CHKERRQ(ierr);
      ierr = MatLMVMReset(blmP->M);CHKERRQ(ierr);
      ierr = MatLMVMUpdate(blmP->M, tao->solution, blmP->unprojected_gradient);CHKERRQ(ierr);
      ierr = MatLMVMSolve(blmP->M,blmP->unprojected_gradient, tao->stepdirection);CHKERRQ(ierr);
    }
    ierr = VecScale(tao->stepdirection,-1.0);CHKERRQ(ierr);

    /* Perform the linesearch */
    fold = f;
    ierr = VecCopy(tao->solution, blmP->Xold);CHKERRQ(ierr);
    ierr = VecCopy(blmP->unprojected_gradient, blmP->Gold);CHKERRQ(ierr);
    ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, blmP->unprojected_gradient, tao->stepdirection, &stepsize, &ls_status);CHKERRQ(ierr);
    ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

    if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
      /* Linesearch failed
         Reset factors and use scaled (projected) gradient step */
      ++blmP->reset;

      f = fold;
      ierr = VecCopy(blmP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(blmP->Gold, blmP->unprojected_gradient);CHKERRQ(ierr);

      if (f != 0.0) {
        delta = 2.0* PetscAbsScalar(f) / (gnorm*gnorm);
      } else {
        delta = 2.0/ (gnorm*gnorm);
      }
      ierr = MatLMVMSetDelta(blmP->M,delta);CHKERRQ(ierr);
      ierr = MatLMVMReset(blmP->M);CHKERRQ(ierr);
      ierr = MatLMVMUpdate(blmP->M, tao->solution, blmP->unprojected_gradient);CHKERRQ(ierr);
      ierr = MatLMVMSolve(blmP->M, blmP->unprojected_gradient, tao->stepdirection);CHKERRQ(ierr);
      ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);

      /* This may be incorrect; linesearch has values for stepmax and stepmin
         that should be reset. */
      ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
      ierr = TaoLineSearchApply(tao->linesearch,tao->solution,&f, blmP->unprojected_gradient, tao->stepdirection,  &stepsize, &ls_status);CHKERRQ(ierr);
      ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

      if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
        tao->reason = TAO_DIVERGED_LS_FAILURE;
        break;
      }
    }

    /* Check for converged */
    ierr = VecBoundGradientProjection(blmP->unprojected_gradient, tao->solution, tao->XL, tao->XU, tao->gradient);CHKERRQ(ierr);
    ierr = TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm);CHKERRQ(ierr);


    if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Not-a-Number");
    tao->niter++;
    ierr = TaoMonitor(tao, tao->niter, f, gnorm, 0.0, stepsize, &reason);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
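Unlike the Newton-type solvers, BLMVM needs only an objective/gradient callback (no Hessian). Below is a hedged, minimal driver sketch, not one of the scraped examples: it is written against the same PETSc-era Tao API used above, and the problem (minimize 0.5*||x - 1||^2 subject to 0 <= x <= 2), the vector size, and the callback name are made up for illustration.

#include <petsctao.h>

static PetscErrorCode FormFunctionGradient(Tao tao, Vec X, PetscReal *f, Vec G, void *ctx)
{
  PetscScalar    gg;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecCopy(X, G);CHKERRQ(ierr);
  ierr = VecShift(G, -1.0);CHKERRQ(ierr);        /* G = X - 1 */
  ierr = VecDot(G, G, &gg);CHKERRQ(ierr);
  *f = 0.5*PetscRealPart(gg);                    /* f = 0.5*||X - 1||^2 */
  PetscFunctionReturn(0);
}

int main(int argc, char **argv)
{
  Tao            tao;
  Vec            x, xl, xu;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL);if (ierr) return ierr;
  ierr = VecCreateSeq(PETSC_COMM_SELF, 10, &x);CHKERRQ(ierr);
  ierr = VecSet(x, 0.5);CHKERRQ(ierr);           /* feasible starting point */
  ierr = VecDuplicate(x, &xl);CHKERRQ(ierr);
  ierr = VecDuplicate(x, &xu);CHKERRQ(ierr);
  ierr = VecSet(xl, 0.0);CHKERRQ(ierr);
  ierr = VecSet(xu, 2.0);CHKERRQ(ierr);

  ierr = TaoCreate(PETSC_COMM_SELF, &tao);CHKERRQ(ierr);
  ierr = TaoSetType(tao, TAOBLMVM);CHKERRQ(ierr);
  ierr = TaoSetInitialVector(tao, x);CHKERRQ(ierr);
  ierr = TaoSetVariableBounds(tao, xl, xu);CHKERRQ(ierr);
  ierr = TaoSetObjectiveAndGradientRoutine(tao, FormFunctionGradient, NULL);CHKERRQ(ierr);
  ierr = TaoSetFromOptions(tao);CHKERRQ(ierr);
  ierr = TaoSolve(tao);CHKERRQ(ierr);

  ierr = TaoDestroy(&tao);CHKERRQ(ierr);
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = VecDestroy(&xl);CHKERRQ(ierr);
  ierr = VecDestroy(&xu);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}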
Example #15
0
static PetscErrorCode TaoSolve_BMRM(Tao tao)
{
  PetscErrorCode     ierr;
  TAO_DF             df;
  TAO_BMRM           *bmrm = (TAO_BMRM*)tao->data;

  /* Values and pointers to parts of the optimization problem */
  PetscReal          f = 0.0;
  Vec                W = tao->solution;
  Vec                G = tao->gradient;
  PetscReal          lambda;
  PetscReal          bt;
  Vec_Chain          grad_list, *tail_glist, *pgrad;
  PetscInt           i;
  PetscMPIInt        rank;

  /* Used in converged criteria check */
  PetscReal          reg;
  PetscReal          jtwt = 0.0, max_jtwt, pre_epsilon, epsilon, jw, min_jw;
  PetscReal          innerSolverTol;
  MPI_Comm           comm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)tao,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  lambda = bmrm->lambda;

  /* Check Stopping Condition */
  tao->step = 1.0;
  max_jtwt = -BMRM_INFTY;
  min_jw = BMRM_INFTY;
  innerSolverTol = 1.0;
  epsilon = 0.0;

  if (!rank) {
    ierr = init_df_solver(&df);CHKERRQ(ierr);
    grad_list.next = NULL;
    tail_glist = &grad_list;
  }

  df.tol = 1e-6;
  tao->reason = TAO_CONTINUE_ITERATING;

  /*-----------------Algorithm Begins------------------------*/
  /* make the scatter */
  ierr = VecScatterCreateToZero(W, &bmrm->scatter, &bmrm->local_w);CHKERRQ(ierr);
  ierr = VecAssemblyBegin(bmrm->local_w);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(bmrm->local_w);CHKERRQ(ierr);

  /* NOTE: In application pass the sub-gradient of Remp(W) */
  ierr = TaoComputeObjectiveAndGradient(tao, W, &f, G);CHKERRQ(ierr);
  ierr = TaoLogConvergenceHistory(tao,f,1.0,0.0,tao->ksp_its);CHKERRQ(ierr);
  ierr = TaoMonitor(tao,tao->niter,f,1.0,0.0,tao->step);CHKERRQ(ierr);
  ierr = (*tao->ops->convergencetest)(tao,tao->cnvP);CHKERRQ(ierr);
  
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    /* Call general purpose update function */
    if (tao->ops->update) {
      ierr = (*tao->ops->update)(tao, tao->niter);CHKERRQ(ierr);
    }
    
    /* compute bt = Remp(Wt-1) - <Wt-1, At> */
    ierr = VecDot(W, G, &bt);CHKERRQ(ierr);
    bt = f - bt;

    /* First gather the gradient to the master node */
    ierr = VecScatterBegin(bmrm->scatter, G, bmrm->local_w, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(bmrm->scatter, G, bmrm->local_w, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);

    /* Bring up the inner solver */
    if (!rank) {
      ierr = ensure_df_space(tao->niter+1, &df);CHKERRQ(ierr);
      ierr = make_grad_node(bmrm->local_w, &pgrad);CHKERRQ(ierr);
      tail_glist->next = pgrad;
      tail_glist = pgrad;

      df.a[tao->niter] = 1.0;
      df.f[tao->niter] = -bt;
      df.u[tao->niter] = 1.0;
      df.l[tao->niter] = 0.0;

      /* set up the Q */
      pgrad = grad_list.next;
      for (i=0; i<=tao->niter; i++) {
        if (!pgrad) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Assert that there are at least tao->niter+1 pgrad available");
        ierr = VecDot(pgrad->V, bmrm->local_w, &reg);CHKERRQ(ierr);
        df.Q[i][tao->niter] = df.Q[tao->niter][i] = reg / lambda;
        pgrad = pgrad->next;
      }

      if (tao->niter > 0) {
        df.x[tao->niter] = 0.0;
        ierr = solve(&df);CHKERRQ(ierr);
      } else
        df.x[0] = 1.0;

      /* now computing Jt*(alpha_t) which should be = Jt(wt) to check convergence */
      jtwt = 0.0;
      ierr = VecSet(bmrm->local_w, 0.0);CHKERRQ(ierr);
      pgrad = grad_list.next;
      for (i=0; i<=tao->niter; i++) {
        jtwt -= df.x[i] * df.f[i];
        ierr = VecAXPY(bmrm->local_w, -df.x[i] / lambda, pgrad->V);CHKERRQ(ierr);
        pgrad = pgrad->next;
      }

      ierr = VecNorm(bmrm->local_w, NORM_2, &reg);CHKERRQ(ierr);
      reg = 0.5*lambda*reg*reg;
      jtwt -= reg;
    } /* end if rank == 0 */

    /* scatter the new W to all nodes */
    ierr = VecScatterBegin(bmrm->scatter,bmrm->local_w,W,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd(bmrm->scatter,bmrm->local_w,W,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);

    ierr = TaoComputeObjectiveAndGradient(tao, W, &f, G);CHKERRQ(ierr);

    ierr = MPI_Bcast(&jtwt,1,MPIU_REAL,0,comm);CHKERRQ(ierr);
    ierr = MPI_Bcast(&reg,1,MPIU_REAL,0,comm);CHKERRQ(ierr);

    jw = reg + f;                                       /* J(w) = regularizer + Remp(w) */
    if (jw < min_jw) min_jw = jw;
    if (jtwt > max_jtwt) max_jtwt = jtwt;

    pre_epsilon = epsilon;
    epsilon = min_jw - jtwt;

    if (!rank) {
      if (innerSolverTol > epsilon) innerSolverTol = epsilon;
      else if (innerSolverTol < 1e-7) innerSolverTol = 1e-7;

      /* if the annealing doesn't work well, lower the inner solver tolerance */
      if(pre_epsilon < epsilon) innerSolverTol *= 0.2;

      df.tol = innerSolverTol*0.5;
    }

    tao->niter++;
    ierr = TaoLogConvergenceHistory(tao,min_jw,epsilon,0.0,tao->ksp_its);CHKERRQ(ierr);
    ierr = TaoMonitor(tao,tao->niter,min_jw,epsilon,0.0,tao->step);CHKERRQ(ierr);
    ierr = (*tao->ops->convergencetest)(tao,tao->cnvP);CHKERRQ(ierr);
  }

  /* free all the memory */
  if (!rank) {
    ierr = destroy_grad_list(&grad_list);CHKERRQ(ierr);
    ierr = destroy_df_solver(&df);CHKERRQ(ierr);
  }

  ierr = VecDestroy(&bmrm->local_w);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&bmrm->scatter);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Example #16
0
static int TaoSolve_BNLS(TAO_SOLVER tao, void*solver){

  TAO_BNLS *bnls = (TAO_BNLS *)solver;
  int info;
  TaoInt lsflag,iter=0;
  TaoTerminateReason reason=TAO_CONTINUE_ITERATING;
  double f,f_full,gnorm,gdx,stepsize=1.0;
  TaoTruth success;
  TaoVec *XU, *XL;
  TaoVec *X,  *G=bnls->G, *PG=bnls->PG;
  TaoVec *R=bnls->R, *DXFree=bnls->DXFree;
  TaoVec *DX=bnls->DX, *Work=bnls->Work;
  TaoMat *H, *Hsub=bnls->Hsub;
  TaoIndexSet *FreeVariables = bnls->FreeVariables;

  TaoFunctionBegin;

  /* Check if upper bound greater than lower bound. */
  info = TaoGetSolution(tao,&X);CHKERRQ(info); bnls->X=X;
  info = TaoGetVariableBounds(tao,&XL,&XU);CHKERRQ(info);
  info = TaoEvaluateVariableBounds(tao,XL,XU); CHKERRQ(info);
  info = TaoGetHessian(tao,&H);CHKERRQ(info); bnls->H=H;

  /*   Project the current point onto the feasible set */
  info = X->Median(XL,X,XU); CHKERRQ(info);
  
  TaoLinearSolver *tls;
  // Modify the linear solver to a conjugate gradient method
  info = TaoGetLinearSolver(tao, &tls); CHKERRQ(info);
  TaoLinearSolverPetsc *pls;
  pls  = dynamic_cast <TaoLinearSolverPetsc *> (tls);
  // set trust radius to zero 
  // PETSc ignores this case and should return the negative curvature direction
  // at its current default length
  pls->SetTrustRadius(0.0);

  if(!bnls->M) bnls->M = new TaoLMVMMat(X);
  TaoLMVMMat *M = bnls->M;
  KSP pksp = pls->GetKSP();
  // we will want to provide an initial guess in case neg curvature on the first iteration
  info = KSPSetInitialGuessNonzero(pksp,PETSC_TRUE); CHKERRQ(info);
  PC ppc;
  // Modify the preconditioner to use the bfgs approximation
  info = KSPGetPC(pksp, &ppc); CHKERRQ(info);
  PetscTruth  BFGSPreconditioner=PETSC_FALSE;// debug flag
  info = PetscOptionsGetTruth(PETSC_NULL,"-bnls_pc_bfgs",
                              &BFGSPreconditioner,PETSC_NULL); CHKERRQ(info);
  if( BFGSPreconditioner) 
    { 
     info=PetscInfo(tao,"TaoSolve_BNLS:  using bfgs preconditioner\n");
     info = KSPSetNormType(pksp, KSP_NORM_PRECONDITIONED); CHKERRQ(info);
     info = PCSetType(ppc, PCSHELL); CHKERRQ(info);
     info = PCShellSetName(ppc, "bfgs"); CHKERRQ(info);
     info = PCShellSetContext(ppc, M); CHKERRQ(info);
     info = PCShellSetApply(ppc, bfgs_apply); CHKERRQ(info);
    }
  else
    {// default to none
     info=PetscInfo(tao,"TaoSolve_BNLS:  using no preconditioner\n");
     info = PCSetType(ppc, PCNONE); CHKERRQ(info);
    }

  info = TaoComputeMeritFunctionGradient(tao,X,&f,G);CHKERRQ(info);
  info = PG->BoundGradientProjection(G,XL,X,XU);CHKERRQ(info);
  info = PG->Norm2(&gnorm); CHKERRQ(info);
  
  // Set initial scaling for the function
  if (f != 0.0) {
    info = M->SetDelta(2.0 * TaoAbsDouble(f) / (gnorm*gnorm)); CHKERRQ(info);
  }
  else {
    info = M->SetDelta(2.0 / (gnorm*gnorm)); CHKERRQ(info);
  }
  
  while (reason==TAO_CONTINUE_ITERATING){
    
    /* Project the gradient and calculate the norm */
    info = PG->BoundGradientProjection(G,XL,X,XU);CHKERRQ(info);
    info = PG->Norm2(&gnorm); CHKERRQ(info);
    
    info = M->Update(X, PG); CHKERRQ(info);

    PetscScalar ewAtol  = PetscMin(0.5,gnorm)*gnorm;
    info = KSPSetTolerances(pksp,PETSC_DEFAULT,ewAtol,
                            PETSC_DEFAULT, PETSC_DEFAULT); CHKERRQ(info);
    info=PetscInfo1(tao,"TaoSolve_BNLS: gnorm =%g\n",gnorm);
    pksp->printreason = PETSC_TRUE;
    info = KSPView(pksp,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(info);
    M->View();

    info = TaoMonitor(tao,iter++,f,gnorm,0.0,stepsize,&reason);
    CHKERRQ(info);
    if (reason!=TAO_CONTINUE_ITERATING) break;

    info = FreeVariables->WhichEqual(PG,G); CHKERRQ(info);

    info = TaoComputeHessian(tao,X,H);CHKERRQ(info);
    
    /* Create a reduced linear system */

    info = R->SetReducedVec(G,FreeVariables);CHKERRQ(info);
    info = R->Negate();CHKERRQ(info);

    /* Use gradient as initial guess */
    PetscTruth  UseGradientIG=PETSC_FALSE;// debug flag
    info = PetscOptionsGetTruth(PETSC_NULL,"-bnls_use_gradient_ig",
                                &UseGradientIG,PETSC_NULL); CHKERRQ(info);
    if(UseGradientIG)
      info = DX->CopyFrom(G);
    else
     {
      info=PetscInfo(tao,"TaoSolve_BNLS: use bfgs init guess \n");
      info = M->Solve(G, DX, &success);
     }
    CHKERRQ(info);
    info = DXFree->SetReducedVec(DX,FreeVariables);CHKERRQ(info);
    info = DXFree->Negate(); CHKERRQ(info);
    
    info = Hsub->SetReducedMatrix(H,FreeVariables,FreeVariables);CHKERRQ(info);

    bnls->gamma_factor /= 2;
    success = TAO_FALSE;

    while (success==TAO_FALSE) {
      
      /* Approximately solve the reduced linear system */
      info = TaoPreLinearSolve(tao,Hsub);CHKERRQ(info);
      info = TaoLinearSolve(tao,Hsub,R,DXFree,&success);CHKERRQ(info);

      info = DX->SetToZero(); CHKERRQ(info);
      info = DX->ReducedXPY(DXFree,FreeVariables);CHKERRQ(info);
      info = DX->Dot(G,&gdx); CHKERRQ(info);

      if (gdx>=0 || success==TAO_FALSE) { /* use bfgs direction */
        info = M->Solve(G, DX, &success); CHKERRQ(info);
        info = DX->BoundGradientProjection(DX,XL,X,XU); CHKERRQ(info);
        info = DX->Negate(); CHKERRQ(info);
        // Check for success (descent direction)
        info = DX->Dot(G,&gdx); CHKERRQ(info);
        if (gdx >= 0) {
          // Step is not descent or solve was not successful
          // Use steepest descent direction (scaled)
          if (f != 0.0) {
            info = M->SetDelta(2.0 * TaoAbsDouble(f) / (gnorm*gnorm)); CHKERRQ(info);
          }
          else {
            info = M->SetDelta(2.0 / (gnorm*gnorm)); CHKERRQ(info);
          }
          info = M->Reset(); CHKERRQ(info);
          info = M->Update(X, G); CHKERRQ(info);
          info = DX->CopyFrom(G);
          info = DX->Negate(); CHKERRQ(info);
          info = DX->Dot(G,&gdx); CHKERRQ(info);
          info=PetscInfo1(tao,"LMVM Solve Fail use steepest descent, gdx %22.12e \n",gdx);
        } 
        else {
          info=PetscInfo1(tao,"Newton Solve Fail use BFGS direction, gdx %22.12e \n",gdx);
        } 
	success = TAO_TRUE;
//        bnls->gamma_factor *= 2; 
//        bnls->gamma = bnls->gamma_factor*(gnorm); 
//#if !defined(PETSC_USE_COMPLEX)
//        info=PetscInfo2(tao,"TaoSolve_NLS:  modify diagonal (assume same nonzero structure), gamma_factor=%g, gamma=%g\n",bnls->gamma_factor,bnls->gamma);
//	CHKERRQ(info);
//#else
//        info=PetscInfo3(tao,"TaoSolve_NLS:  modify diagonal (asuume same nonzero structure), gamma_factor=%g, gamma=%g, gdx %22.12e \n",
//	     bnls->gamma_factor,PetscReal(bnls->gamma),gdx);CHKERRQ(info);
//#endif
//        info = Hsub->ShiftDiagonal(bnls->gamma);CHKERRQ(info);
//        if (f != 0.0) {
//          info = M->SetDelta(2.0 * TaoAbsDouble(f) / (gnorm*gnorm)); CHKERRQ(info);
//        }
//        else {
//          info = M->SetDelta(2.0 / (gnorm*gnorm)); CHKERRQ(info);
//        }
//        info = M->Reset(); CHKERRQ(info);
//        info = M->Update(X, G); CHKERRQ(info);
//        success = TAO_FALSE;
      } else {
        info=PetscInfo1(tao,"Newton Solve is descent direction, gdx %22.12e \n",gdx);
	success = TAO_TRUE;
      }

    }
    
    stepsize=1.0;	
    info = TaoLineSearchApply(tao,X,G,DX,Work,
			      &f,&f_full,&stepsize,&lsflag);
    CHKERRQ(info);

    
  }  /* END MAIN LOOP  */

  TaoFunctionReturn(0);
}
Example #17
0
static PetscErrorCode TaoSolve_NM(Tao tao)
{
  PetscErrorCode     ierr;
  TAO_NelderMead     *nm = (TAO_NelderMead*)tao->data;
  TaoConvergedReason reason;
  PetscReal          *x;
  PetscInt           i;
  Vec                Xmur=nm->Xmur, Xmue=nm->Xmue, Xmuc=nm->Xmuc, Xbar=nm->Xbar;
  PetscReal          fr,fe,fc;
  PetscInt           shrink;
  PetscInt           low,high;

  PetscFunctionBegin;
  nm->nshrink =      0;
  nm->nreflect =     0;
  nm->nincontract =  0;
  nm->noutcontract = 0;
  nm->nexpand =      0;

  if (tao->XL || tao->XU || tao->ops->computebounds) {
    ierr = PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by NelderMead algorithm\n");CHKERRQ(ierr);
  }

  ierr = VecCopy(tao->solution,nm->simplex[0]);CHKERRQ(ierr);
  ierr = TaoComputeObjective(tao,nm->simplex[0],&nm->f_values[0]);CHKERRQ(ierr);
  nm->indices[0]=0;
  for (i=1;i<nm->N+1;i++){
    ierr = VecCopy(tao->solution,nm->simplex[i]);CHKERRQ(ierr);
    ierr = VecGetOwnershipRange(nm->simplex[i],&low,&high);CHKERRQ(ierr);
    if (i-1 >= low && i-1 < high) {
      ierr = VecGetArray(nm->simplex[i],&x);CHKERRQ(ierr);
      x[i-1-low] += nm->lamda;
      ierr = VecRestoreArray(nm->simplex[i],&x);CHKERRQ(ierr);
    }

    ierr = TaoComputeObjective(tao,nm->simplex[i],&nm->f_values[i]);CHKERRQ(ierr);
    nm->indices[i] = i;
  }

  /*  Xbar  = (Sum of all simplex vectors - worst vector)/N */
  ierr = NelderMeadSort(nm);CHKERRQ(ierr);
  ierr = VecSet(Xbar,0.0);CHKERRQ(ierr);
  for (i=0;i<nm->N;i++) {
    ierr = VecAXPY(Xbar,1.0,nm->simplex[nm->indices[i]]);CHKERRQ(ierr);
  }
  ierr = VecScale(Xbar,nm->oneOverN);CHKERRQ(ierr);
  reason = TAO_CONTINUE_ITERATING;
  while (1) {
    shrink = 0;
    ierr = VecCopy(nm->simplex[nm->indices[0]],tao->solution);CHKERRQ(ierr);
    ierr = TaoMonitor(tao,tao->niter++,nm->f_values[nm->indices[0]],nm->f_values[nm->indices[nm->N]]-nm->f_values[nm->indices[0]],0.0,1.0,&reason);CHKERRQ(ierr);
    if (reason != TAO_CONTINUE_ITERATING) break;

    /* x(mu) = (1 + mu)Xbar - mu*X_N+1 */
    ierr = VecAXPBYPCZ(Xmur,1+nm->mu_r,-nm->mu_r,0,Xbar,nm->simplex[nm->indices[nm->N]]);CHKERRQ(ierr);
    ierr = TaoComputeObjective(tao,Xmur,&fr);CHKERRQ(ierr);

    if (nm->f_values[nm->indices[0]] <= fr && fr < nm->f_values[nm->indices[nm->N-1]]) {
      /*  reflect */
      nm->nreflect++;
      ierr = PetscInfo(0,"Reflect\n");CHKERRQ(ierr);
      ierr = NelderMeadReplace(nm,nm->indices[nm->N],Xmur,fr);CHKERRQ(ierr);
    } else if (fr < nm->f_values[nm->indices[0]]) {
      /*  expand */
      nm->nexpand++;
      ierr = PetscInfo(0,"Expand\n");CHKERRQ(ierr);
      ierr = VecAXPBYPCZ(Xmue,1+nm->mu_e,-nm->mu_e,0,Xbar,nm->simplex[nm->indices[nm->N]]);CHKERRQ(ierr);
      ierr = TaoComputeObjective(tao,Xmue,&fe);CHKERRQ(ierr);
      if (fe < fr) {
        ierr = NelderMeadReplace(nm,nm->indices[nm->N],Xmue,fe);CHKERRQ(ierr);
      } else {
        ierr = NelderMeadReplace(nm,nm->indices[nm->N],Xmur,fr);CHKERRQ(ierr);
      }
    } else if (nm->f_values[nm->indices[nm->N-1]] <= fr && fr < nm->f_values[nm->indices[nm->N]]) {
      /* outside contraction */
      nm->noutcontract++;
      ierr = PetscInfo(0,"Outside Contraction\n");CHKERRQ(ierr);
      ierr = VecAXPBYPCZ(Xmuc,1+nm->mu_oc,-nm->mu_oc,0,Xbar,nm->simplex[nm->indices[nm->N]]);CHKERRQ(ierr);

      ierr = TaoComputeObjective(tao,Xmuc,&fc);CHKERRQ(ierr);
      if (fc <= fr) {
        ierr = NelderMeadReplace(nm,nm->indices[nm->N],Xmuc,fc);CHKERRQ(ierr);
      } else shrink=1;
    } else {
      /* inside contraction */
      nm->nincontract++;
      ierr = PetscInfo(0,"Inside Contraction\n");CHKERRQ(ierr);
      ierr = VecAXPBYPCZ(Xmuc,1+nm->mu_ic,-nm->mu_ic,0,Xbar,nm->simplex[nm->indices[nm->N]]);CHKERRQ(ierr);
      ierr = TaoComputeObjective(tao,Xmuc,&fc);CHKERRQ(ierr);
      if (fc < nm->f_values[nm->indices[nm->N]]) {
        ierr = NelderMeadReplace(nm,nm->indices[nm->N],Xmuc,fc);CHKERRQ(ierr);
      } else shrink = 1;
    }

    if (shrink) {
      nm->nshrink++;
      ierr = PetscInfo(0,"Shrink\n");CHKERRQ(ierr);

      for (i=1;i<nm->N+1;i++) {
        ierr = VecAXPBY(nm->simplex[nm->indices[i]],1.5,-0.5,nm->simplex[nm->indices[0]]);CHKERRQ(ierr);
        ierr = TaoComputeObjective(tao,nm->simplex[nm->indices[i]], &nm->f_values[nm->indices[i]]);CHKERRQ(ierr);
      }
      ierr = VecAXPBY(Xbar,1.5*nm->oneOverN,-0.5,nm->simplex[nm->indices[0]]);CHKERRQ(ierr);

      /*  Add last vector's fraction of average */
      ierr = VecAXPY(Xbar,nm->oneOverN,nm->simplex[nm->indices[nm->N]]);CHKERRQ(ierr);
      ierr = NelderMeadSort(nm);CHKERRQ(ierr);
      /*  Subtract new last vector from average */
      ierr = VecAXPY(Xbar,-nm->oneOverN,nm->simplex[nm->indices[nm->N]]);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
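All four Nelder-Mead trial points computed above are instances of the same affine formula x(mu) = (1+mu)*Xbar - mu*X_worst, with mu equal to mu_r (reflection), mu_e (expansion), mu_oc (outside contraction), or mu_ic (inside contraction). A minimal sketch with a hypothetical helper name, not part of TAO:

#include <petscvec.h>

static PetscErrorCode NMTrialPoint(Vec xbar, Vec xworst, PetscReal mu, Vec xtrial)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* xtrial = (1+mu)*xbar - mu*xworst, matching the VecAXPBYPCZ calls above */
  ierr = VecAXPBYPCZ(xtrial, 1.0+mu, -mu, 0.0, xbar, xworst);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}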
Example #18
0
static PetscErrorCode TaoSolve_SQPCON(Tao tao)
{
  TAO_SQPCON                   *sqpconP = (TAO_SQPCON*)tao->data;
  PetscInt                     iter=0;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_reason = TAOLINESEARCH_CONTINUE_ITERATING;
  PetscReal                    step=1.0,f,fm, fold;
  PetscReal                    cnorm, mnorm;
  PetscBool                    use_update=PETSC_TRUE; /*  don't update Q if line search failed */
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  /* Scatter to U,V */
  ierr = VecScatterBegin(sqpconP->state_scatter, tao->solution, sqpconP->U, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(sqpconP->state_scatter, tao->solution, sqpconP->U, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterBegin(sqpconP->design_scatter, tao->solution, sqpconP->V, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(sqpconP->design_scatter, tao->solution, sqpconP->V, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);

  /* Evaluate Function, Gradient, Constraints, and Jacobian */
  ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&f,tao->gradient);CHKERRQ(ierr);
  ierr = TaoComputeConstraints(tao,tao->solution, tao->constraints);CHKERRQ(ierr);
  ierr = TaoComputeJacobianState(tao,tao->solution, &tao->jacobian_state, &tao->jacobian_state_pre, &tao->jacobian_state_inv, &sqpconP->statematflag);CHKERRQ(ierr);
  ierr = TaoComputeJacobianDesign(tao,tao->solution, &tao->jacobian_design, &tao->jacobian_design_pre, &sqpconP->statematflag);CHKERRQ(ierr);

  /* Scatter gradient to GU,GV */
  ierr = VecScatterBegin(sqpconP->state_scatter, tao->gradient, sqpconP->GU, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(sqpconP->state_scatter, tao->gradient, sqpconP->GU, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterBegin(sqpconP->design_scatter, tao->gradient, sqpconP->GV, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(sqpconP->design_scatter, tao->gradient, sqpconP->GV, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecNorm(tao->gradient, NORM_2, &mnorm);CHKERRQ(ierr);

  /* Evaluate constraint norm */
  ierr = VecNorm(tao->constraints, NORM_2, &cnorm);CHKERRQ(ierr);

  /* Monitor convergence */
  ierr = TaoMonitor(tao, iter,f,mnorm,cnorm,step,&reason);CHKERRQ(ierr);

  while (reason == TAO_CONTINUE_ITERATING) {
    /* Solve tbar = -A\t (t is constraints vector) */
    ierr = MatMult(tao->jacobian_state_inv, tao->constraints, sqpconP->Tbar);CHKERRQ(ierr);
    ierr = VecScale(sqpconP->Tbar, -1.0);CHKERRQ(ierr);

    /* aqwac =  A'\(Q*Tbar + c) */
    if (iter > 0) {
      ierr = MatMult(sqpconP->Q,sqpconP->Tbar,sqpconP->WV);CHKERRQ(ierr);
    } else {
      ierr = VecCopy(sqpconP->Tbar, sqpconP->WV);CHKERRQ(ierr);
    }
    ierr = VecAXPY(sqpconP->WV,1.0,sqpconP->GU);CHKERRQ(ierr);

    ierr = MatMultTranspose(tao->jacobian_state_inv, sqpconP->WV, sqpconP->aqwac);CHKERRQ(ierr);

    /* Reduced Gradient dbar = d -  B^t * aqwac */
    ierr = MatMultTranspose(tao->jacobian_design,sqpconP->aqwac, sqpconP->dbar);CHKERRQ(ierr);
    ierr = VecScale(sqpconP->dbar, -1.0);CHKERRQ(ierr);
    ierr = VecAXPY(sqpconP->dbar,1.0,sqpconP->GV);CHKERRQ(ierr);

    /* update reduced hessian */
    ierr = MatLMVMUpdate(sqpconP->R, sqpconP->V, sqpconP->dbar);CHKERRQ(ierr);

    /* Solve R*dv = -dbar using approx. hessian */
    ierr = MatLMVMSolve(sqpconP->R, sqpconP->dbar, sqpconP->DV);CHKERRQ(ierr);
    ierr = VecScale(sqpconP->DV, -1.0);CHKERRQ(ierr);

    /* Backsolve for u =  A\(g - B*dv)  = tbar - A\(B*dv)*/
    ierr = MatMult(tao->jacobian_design, sqpconP->DV, sqpconP->WL);CHKERRQ(ierr);
    ierr = MatMult(tao->jacobian_state_inv, sqpconP->WL, sqpconP->DU);CHKERRQ(ierr);
    ierr = VecScale(sqpconP->DU, -1.0);CHKERRQ(ierr);
    ierr = VecAXPY(sqpconP->DU, 1.0, sqpconP->Tbar);CHKERRQ(ierr);

    /* Assemble Big D */
    ierr = VecScatterBegin(sqpconP->state_scatter, sqpconP->DU, tao->stepdirection, INSERT_VALUES, SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->state_scatter, sqpconP->DU, tao->stepdirection, INSERT_VALUES, SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterBegin(sqpconP->design_scatter, sqpconP->DV, tao->stepdirection, INSERT_VALUES, SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->design_scatter, sqpconP->DV, tao->stepdirection, INSERT_VALUES, SCATTER_REVERSE);CHKERRQ(ierr);

    /* Perform Line Search */
    ierr = VecCopy(tao->solution, sqpconP->Xold);CHKERRQ(ierr);
    ierr = VecCopy(tao->gradient, sqpconP->Gold);CHKERRQ(ierr);
    fold = f;
    ierr = TaoLineSearchComputeObjectiveAndGradient(tao->linesearch,tao->solution,&fm,sqpconP->GL);CHKERRQ(ierr);
    ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &fm, sqpconP->GL, tao->stepdirection,&step, &ls_reason);CHKERRQ(ierr);
    ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);
    if (ls_reason < 0) {
      ierr = VecCopy(sqpconP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(sqpconP->Gold, tao->gradient);CHKERRQ(ierr);
      f = fold;
      ierr = VecAXPY(tao->solution, 1.0, tao->stepdirection);CHKERRQ(ierr);
      ierr = PetscInfo(tao,"Line Search Failed, using full step.\n");CHKERRQ(ierr);
      use_update=PETSC_FALSE;
    } else {
      use_update = PETSC_TRUE;
    }

    /* Scatter X to U,V */
    ierr = VecScatterBegin(sqpconP->state_scatter, tao->solution, sqpconP->U, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->state_scatter, tao->solution, sqpconP->U, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterBegin(sqpconP->design_scatter, tao->solution, sqpconP->V, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->design_scatter, tao->solution, sqpconP->V, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);


    /* Evaluate Function, Gradient, Constraints, and Jacobian */
    ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&f,tao->gradient);CHKERRQ(ierr);
    ierr = TaoComputeConstraints(tao,tao->solution, tao->constraints);CHKERRQ(ierr);
    ierr = TaoComputeJacobianState(tao,tao->solution, &tao->jacobian_state, &tao->jacobian_state_pre, &tao->jacobian_state_inv, &sqpconP->statematflag);CHKERRQ(ierr);
    ierr = TaoComputeJacobianDesign(tao,tao->solution, &tao->jacobian_design, &tao->jacobian_design_pre, &sqpconP->designmatflag);CHKERRQ(ierr);

    /* Scatter gradient to GU,GV */
    ierr = VecScatterBegin(sqpconP->state_scatter, tao->gradient, sqpconP->GU, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->state_scatter, tao->gradient, sqpconP->GU, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterBegin(sqpconP->design_scatter, tao->gradient, sqpconP->GV, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->design_scatter, tao->gradient, sqpconP->GV, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);

    /* Update approx to hessian of the Lagrangian wrt state (Q)
          with u_k+1, gu_k+1 */
    if (use_update) {
      ierr = MatApproxUpdate(sqpconP->Q,sqpconP->U,sqpconP->GU);CHKERRQ(ierr);
    }
    ierr = VecNorm(sqpconP->GL, NORM_2, &mnorm);CHKERRQ(ierr);

    /* Evaluate constraint norm */
    ierr = VecNorm(tao->constraints, NORM_2, &cnorm);CHKERRQ(ierr);

    /* Monitor convergence */
    iter++;
    ierr = TaoMonitor(tao, iter,f,mnorm,cnorm,step,&reason);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}