Example #1
/*@
  TaoLineSearchComputeObjectiveAndGradient - Computes the objective function value and its gradient at a given point

  Collective on TaoLineSearch

  Input Parameters:
+ ls - the TaoLineSearch context
- x - input vector

  Output Parameter:
+ f - Objective value at X
- g - Gradient vector at X

  Notes: TaoLineSearchComputeObjectiveAndGradient() is typically used within line searches
  so most users would not generally call this routine themselves.

  Level: developer

.seealso: TaoLineSearchComputeGradient(), TaoLineSearchComputeObjective(), TaoLineSearchSetObjectiveRoutine()
@*/
PetscErrorCode TaoLineSearchComputeObjectiveAndGradient(TaoLineSearch ls, Vec x, PetscReal *f, Vec g)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(ls,TAOLINESEARCH_CLASSID,1);
  PetscValidHeaderSpecific(x,VEC_CLASSID,2);
  PetscValidPointer(f,3);
  PetscValidHeaderSpecific(g,VEC_CLASSID,4);
  PetscCheckSameComm(ls,1,x,2);
  PetscCheckSameComm(ls,1,g,4);
  if (ls->usetaoroutines) {
    ierr = TaoComputeObjectiveAndGradient(ls->tao,x,f,g);CHKERRQ(ierr);
  } else {
    ierr = PetscLogEventBegin(TaoLineSearch_EvalEvent,ls,0,0,0);CHKERRQ(ierr);
    if (!ls->ops->computeobjective && !ls->ops->computeobjectiveandgradient) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Line Search does not have objective function set");
    if (!ls->ops->computegradient && !ls->ops->computeobjectiveandgradient) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Line Search does not have gradient function set");

    PetscStackPush("TaoLineSearch user objective/gradient routine");
    if (ls->ops->computeobjectiveandgradient) {
      ierr = (*ls->ops->computeobjectiveandgradient)(ls,x,f,g,ls->userctx_funcgrad);CHKERRQ(ierr);
    } else {
      ierr = (*ls->ops->computeobjective)(ls,x,f,ls->userctx_func);CHKERRQ(ierr);
      ierr = (*ls->ops->computegradient)(ls,x,g,ls->userctx_grad);CHKERRQ(ierr);
    }
    PetscStackPop;
    ierr = PetscLogEventEnd(TaoLineSearch_EvalEvent,ls,0,0,0);CHKERRQ(ierr);
    ierr = PetscInfo1(ls,"TaoLineSearch Function evaluation: %14.12e\n",(double)(*f));CHKERRQ(ierr);
    ls->nfgeval++;
  }
  PetscFunctionReturn(0);
}
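A minimal usage sketch, not taken from the PETSc sources: it assumes ls is a TaoLineSearch already wired to an optimizer tao (for example through TaoLineSearchUseTaoRoutines(ls,tao)), and the trial vectors xtrial/gtrial are hypothetical names. The call then evaluates the objective and gradient at the trial point in one pass, the same way the line-search implementations use it internally.

  /* Hedged sketch: one combined objective/gradient evaluation at a trial point */
  Vec            xtrial, gtrial;   /* hypothetical work vectors */
  PetscReal      ftrial;
  PetscErrorCode ierr;

  ierr = VecDuplicate(tao->solution, &xtrial);CHKERRQ(ierr);
  ierr = VecDuplicate(tao->solution, &gtrial);CHKERRQ(ierr);
  ierr = VecCopy(tao->solution, xtrial);CHKERRQ(ierr);
  ierr = TaoLineSearchComputeObjectiveAndGradient(ls, xtrial, &ftrial, gtrial);CHKERRQ(ierr);
  ierr = VecDestroy(&xtrial);CHKERRQ(ierr);
  ierr = VecDestroy(&gtrial);CHKERRQ(ierr);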
Example #2
/* Evaluate the user-provided objective, gradient, constraints, and derivatives at the current point */
PetscErrorCode IPMEvaluate(Tao tao)
{
  TAO_IPM        *ipmP = (TAO_IPM *)tao->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&ipmP->kkt_f,tao->gradient);CHKERRQ(ierr);
  ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);
  if (ipmP->me > 0) {
    ierr = TaoComputeEqualityConstraints(tao,tao->solution,tao->constraints_equality);CHKERRQ(ierr);
    ierr = TaoComputeJacobianEquality(tao,tao->solution,tao->jacobian_equality,tao->jacobian_equality_pre);CHKERRQ(ierr);
  }
  if (ipmP->mi > 0) {
    ierr = TaoComputeInequalityConstraints(tao,tao->solution,tao->constraints_inequality);CHKERRQ(ierr);
    ierr = TaoComputeJacobianInequality(tao,tao->solution,tao->jacobian_inequality,tao->jacobian_inequality_pre);CHKERRQ(ierr);
  }
  if (ipmP->nb > 0) {
    /* Ai' =   jac_ineq | I (w/lb) | -I (w/ub)  */
    ierr = IPMUpdateAi(tao);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
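IPMEvaluate only dispatches to whatever callbacks the user registered on the Tao object. A hedged sketch of the registration side is below; the MyForm* callbacks, the user context, and the ce/ci/Je/Ji objects are hypothetical, and the setter names follow the TAO interface of this PETSc vintage (check the current manual pages for exact signatures).

  ierr = TaoSetObjectiveAndGradientRoutine(tao, MyFormFunctionGradient, &user);CHKERRQ(ierr);
  ierr = TaoSetHessianRoutine(tao, H, H, MyFormHessian, &user);CHKERRQ(ierr);
  ierr = TaoSetEqualityConstraintsRoutine(tao, ce, MyFormEqualityConstraints, &user);CHKERRQ(ierr);
  ierr = TaoSetJacobianEqualityRoutine(tao, Je, Je, MyFormJacobianEquality, &user);CHKERRQ(ierr);
  ierr = TaoSetInequalityConstraintsRoutine(tao, ci, MyFormInequalityConstraints, &user);CHKERRQ(ierr);
  ierr = TaoSetJacobianInequalityRoutine(tao, Ji, Ji, MyFormJacobianInequality, &user);CHKERRQ(ierr);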
Example #3
static PetscErrorCode TaoSolve_OWLQN(Tao tao)
{
  TAO_OWLQN                    *lmP = (TAO_OWLQN *)tao->data;
  PetscReal                    f, fold, gdx, gnorm;
  PetscReal                    step = 1.0;
  PetscReal                    delta;
  PetscErrorCode               ierr;
  PetscInt                     stepType;
  PetscInt                     iter = 0;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;

  PetscFunctionBegin;
  if (tao->XL || tao->XU || tao->ops->computebounds) {
    ierr = PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by owlqn algorithm\n");CHKERRQ(ierr);
  }

  /* Check convergence criteria */
  ierr = TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);CHKERRQ(ierr);

  ierr = VecCopy(tao->gradient, lmP->GV);CHKERRQ(ierr);

  ierr = ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);CHKERRQ(ierr);

  ierr = VecNorm(lmP->GV,NORM_2,&gnorm);CHKERRQ(ierr);

  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");

  ierr = TaoMonitor(tao, iter, f, gnorm, 0.0, step, &reason);CHKERRQ(ierr);
  if (reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);

  /* Set initial scaling for the function */
  if (f != 0.0) {
    delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
  } else {
    delta = 2.0 / (gnorm*gnorm);
  }
  ierr = MatLMVMSetDelta(lmP->M,delta);CHKERRQ(ierr);

  /* Set counter for gradient/reset steps */
  lmP->bfgs = 0;
  lmP->sgrad = 0;
  lmP->grad = 0;

  /* Have not converged; continue with Newton method */
  while (reason == TAO_CONTINUE_ITERATING) {
    /* Compute direction */
    ierr = MatLMVMUpdate(lmP->M,tao->solution,tao->gradient);CHKERRQ(ierr);
    ierr = MatLMVMSolve(lmP->M, lmP->GV, lmP->D);CHKERRQ(ierr);

    ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

    ++lmP->bfgs;

    /* Check for success (descent direction) */
    ierr = VecDot(lmP->D, lmP->GV , &gdx);CHKERRQ(ierr);
    if ((gdx <= 0.0) || PetscIsInfOrNanReal(gdx)) {

      /* The step is not a descent direction, or the direction produced a NaN.
         We can assert bfgsUpdates > 1 in this case because
         the first solve produces the scaled gradient direction,
         which is guaranteed to be a descent direction.

         Use the scaled steepest descent direction instead */
      ++lmP->grad;

      if (f != 0.0) {
        delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
      } else {
        delta = 2.0 / (gnorm*gnorm);
      }
      ierr = MatLMVMSetDelta(lmP->M, delta);CHKERRQ(ierr);
      ierr = MatLMVMReset(lmP->M);CHKERRQ(ierr);
      ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
      ierr = MatLMVMSolve(lmP->M,lmP->GV, lmP->D);CHKERRQ(ierr);

      ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

      lmP->bfgs = 1;
      ++lmP->sgrad;
      stepType = OWLQN_SCALED_GRADIENT;
    } else {
      if (1 == lmP->bfgs) {
        /* The first BFGS direction is always the scaled gradient */
        ++lmP->sgrad;
        stepType = OWLQN_SCALED_GRADIENT;
      } else {
        ++lmP->bfgs;
        stepType = OWLQN_BFGS;
      }
    }

    ierr = VecScale(lmP->D, -1.0);CHKERRQ(ierr);

    /* Perform the linesearch */
    fold = f;
    ierr = VecCopy(tao->solution, lmP->Xold);CHKERRQ(ierr);
    ierr = VecCopy(tao->gradient, lmP->Gold);CHKERRQ(ierr);

    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, lmP->GV, lmP->D, &step,&ls_status);CHKERRQ(ierr);
    ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

    while (((int)ls_status < 0) && (stepType != OWLQN_GRADIENT)) {

      /* Reset factors and use scaled gradient step */
      f = fold;
      ierr = VecCopy(lmP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(lmP->Gold, tao->gradient);CHKERRQ(ierr);
      ierr = VecCopy(tao->gradient, lmP->GV);CHKERRQ(ierr);

      ierr = ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);CHKERRQ(ierr);

      switch(stepType) {
      case OWLQN_BFGS:
        /* Failed to obtain acceptable iterate with BFGS step
           Attempt to use the scaled gradient direction */

        if (f != 0.0) {
          delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
        } else {
          delta = 2.0 / (gnorm*gnorm);
        }
        ierr = MatLMVMSetDelta(lmP->M, delta);CHKERRQ(ierr);
        ierr = MatLMVMReset(lmP->M);CHKERRQ(ierr);
        ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
        ierr = MatLMVMSolve(lmP->M, lmP->GV, lmP->D);CHKERRQ(ierr);

        ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

        lmP->bfgs = 1;
        ++lmP->sgrad;
        stepType = OWLQN_SCALED_GRADIENT;
        break;

      case OWLQN_SCALED_GRADIENT:
        /* The scaled gradient step did not produce a new iterate;
           attempt to use the gradient direction.
           Need to make sure we are not using a different diagonal scaling */
        ierr = MatLMVMSetDelta(lmP->M, 1.0);CHKERRQ(ierr);
        ierr = MatLMVMReset(lmP->M);CHKERRQ(ierr);
        ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
        ierr = MatLMVMSolve(lmP->M, lmP->GV, lmP->D);CHKERRQ(ierr);

        ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

        lmP->bfgs = 1;
        ++lmP->grad;
        stepType = OWLQN_GRADIENT;
        break;
      }
      ierr = VecScale(lmP->D, -1.0);CHKERRQ(ierr);


      /* Perform the linesearch */
      ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, lmP->GV, lmP->D, &step, &ls_status);CHKERRQ(ierr);
      ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);
    }

    if ((int)ls_status < 0) {
      /* Failed to find an improving point*/
      f = fold;
      ierr = VecCopy(lmP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(lmP->Gold, tao->gradient);CHKERRQ(ierr);
      ierr = VecCopy(tao->gradient, lmP->GV);CHKERRQ(ierr);
      step = 0.0;
    } else {
      /* a small hack: GV is reused here to hold the true gradient g */
      ierr = VecCopy(lmP->GV, tao->gradient);CHKERRQ(ierr);
    }

    ierr = ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);CHKERRQ(ierr);

    /* Check for termination */

    ierr = VecNorm(lmP->GV,NORM_2,&gnorm);CHKERRQ(ierr);

    iter++;
    ierr = TaoMonitor(tao,iter,f,gnorm,0.0,step,&reason);CHKERRQ(ierr);

    if ((int)ls_status < 0) break;
  }
  PetscFunctionReturn(0);
}
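ComputePseudoGrad_OWLQN(x, GV, lambda) converts the smooth gradient held in GV into the pseudo-gradient of f(x) + lambda*||x||_1, which is what the LMVM solve and the line search above operate on. A self-contained sketch of that mapping follows; it is not the PETSc implementation and assumes a real-scalar build.

static PetscErrorCode PseudoGradSketch(Vec X, Vec GV, PetscReal lambda)
{
  PetscErrorCode    ierr;
  PetscInt          i, n;
  PetscScalar       *g;
  const PetscScalar *x;

  PetscFunctionBegin;
  ierr = VecGetLocalSize(X, &n);CHKERRQ(ierr);
  ierr = VecGetArrayRead(X, &x);CHKERRQ(ierr);
  ierr = VecGetArray(GV, &g);CHKERRQ(ierr);
  for (i = 0; i < n; ++i) {
    if (x[i] > 0.0)               g[i] += lambda;  /* differentiable region, x > 0 */
    else if (x[i] < 0.0)          g[i] -= lambda;  /* differentiable region, x < 0 */
    else if (g[i] + lambda < 0.0) g[i] += lambda;  /* right derivative is negative */
    else if (g[i] - lambda > 0.0) g[i] -= lambda;  /* left derivative is positive  */
    else                          g[i]  = 0.0;     /* 0 is in the subdifferential  */
  }
  ierr = VecRestoreArray(GV, &g);CHKERRQ(ierr);
  ierr = VecRestoreArrayRead(X, &x);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}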
Example #4
File: tron.c Project: plguhur/petsc
static PetscErrorCode TaoSolve_TRON(Tao tao)
{
  TAO_TRON                     *tron = (TAO_TRON *)tao->data;
  PetscErrorCode               ierr;
  PetscInt                     its;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_reason = TAOLINESEARCH_CONTINUE_ITERATING;
  PetscReal                    prered,actred,delta,f,f_new,rhok,gdx,xdiff,stepsize;

  PetscFunctionBegin;
  tron->pgstepsize=1.0;
  tao->trust = tao->trust0;
  /*   Project the current point onto the feasible set */
  ierr = TaoComputeVariableBounds(tao);CHKERRQ(ierr);
  ierr = VecMedian(tao->XL,tao->solution,tao->XU,tao->solution);CHKERRQ(ierr);
  ierr = TaoLineSearchSetVariableBounds(tao->linesearch,tao->XL,tao->XU);CHKERRQ(ierr);

  ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&tron->f,tao->gradient);CHKERRQ(ierr);
  ierr = ISDestroy(&tron->Free_Local);CHKERRQ(ierr);

  ierr = VecWhichBetween(tao->XL,tao->solution,tao->XU,&tron->Free_Local);CHKERRQ(ierr);

  /* Project the gradient and calculate the norm */
  ierr = VecBoundGradientProjection(tao->gradient,tao->solution, tao->XL, tao->XU, tao->gradient);CHKERRQ(ierr);
  ierr = VecNorm(tao->gradient,NORM_2,&tron->gnorm);CHKERRQ(ierr);

  if (PetscIsInfOrNanReal(tron->f) || PetscIsInfOrNanReal(tron->gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
  if (tao->trust <= 0) {
    tao->trust=PetscMax(tron->gnorm*tron->gnorm,1.0);
  }

  tron->stepsize=tao->trust;
  ierr = TaoMonitor(tao, tao->niter, tron->f, tron->gnorm, 0.0, tron->stepsize, &reason);CHKERRQ(ierr);
  while (reason==TAO_CONTINUE_ITERATING){
    tao->ksp_its=0;
    ierr = TronGradientProjections(tao,tron);CHKERRQ(ierr);
    f=tron->f; delta=tao->trust;
    tron->n_free_last = tron->n_free;
    ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);

    ierr = ISGetSize(tron->Free_Local, &tron->n_free);CHKERRQ(ierr);

    /* If no free variables */
    if (tron->n_free == 0) {
      actred=0;
      ierr = PetscInfo(tao,"No free variables in tron iteration.\n");CHKERRQ(ierr);
      ierr = VecNorm(tao->gradient,NORM_2,&tron->gnorm);CHKERRQ(ierr);
      ierr = TaoMonitor(tao, tao->niter, tron->f, tron->gnorm, 0.0, delta, &reason);CHKERRQ(ierr);
      if (!reason) {
        reason = TAO_CONVERGED_STEPTOL;
        ierr = TaoSetConvergedReason(tao,reason);CHKERRQ(ierr);
      }

      break;

    }
    /* use free_local to mask/submat gradient, hessian, stepdirection */
    ierr = TaoVecGetSubVec(tao->gradient,tron->Free_Local,tao->subset_type,0.0,&tron->R);CHKERRQ(ierr);
    ierr = TaoVecGetSubVec(tao->gradient,tron->Free_Local,tao->subset_type,0.0,&tron->DXFree);CHKERRQ(ierr);
    ierr = VecSet(tron->DXFree,0.0);CHKERRQ(ierr);
    ierr = VecScale(tron->R, -1.0);CHKERRQ(ierr);
    ierr = TaoMatGetSubMat(tao->hessian, tron->Free_Local, tron->diag, tao->subset_type, &tron->H_sub);CHKERRQ(ierr);
    if (tao->hessian == tao->hessian_pre) {
      ierr = MatDestroy(&tron->Hpre_sub);CHKERRQ(ierr);
      ierr = PetscObjectReference((PetscObject)(tron->H_sub));CHKERRQ(ierr);
      tron->Hpre_sub = tron->H_sub;
    } else {
      ierr = TaoMatGetSubMat(tao->hessian_pre, tron->Free_Local, tron->diag, tao->subset_type,&tron->Hpre_sub);CHKERRQ(ierr);
    }
    ierr = KSPReset(tao->ksp);CHKERRQ(ierr);
    ierr = KSPSetOperators(tao->ksp, tron->H_sub, tron->Hpre_sub);CHKERRQ(ierr);
    while (1) {

      /* Approximately solve the reduced linear system */
      ierr = KSPSTCGSetRadius(tao->ksp,delta);CHKERRQ(ierr);

      ierr = KSPSolve(tao->ksp, tron->R, tron->DXFree);CHKERRQ(ierr);
      ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
      tao->ksp_its+=its;
      tao->ksp_tot_its+=its;
      ierr = VecSet(tao->stepdirection,0.0);CHKERRQ(ierr);

      /* Add dxfree matrix to compute step direction vector */
      ierr = VecISAXPY(tao->stepdirection,tron->Free_Local,1.0,tron->DXFree);CHKERRQ(ierr);
      if (0) {
        PetscReal rhs,stepnorm;
        ierr = VecNorm(tron->R,NORM_2,&rhs);CHKERRQ(ierr);
        ierr = VecNorm(tron->DXFree,NORM_2,&stepnorm);CHKERRQ(ierr);
        ierr = PetscPrintf(PETSC_COMM_WORLD,"|rhs|=%g\t|s|=%g\n",(double)rhs,(double)stepnorm);CHKERRQ(ierr);
      }


      ierr = VecDot(tao->gradient, tao->stepdirection, &gdx);CHKERRQ(ierr);
      ierr = PetscInfo1(tao,"Expected decrease in function value: %14.12e\n",(double)gdx);CHKERRQ(ierr);

      ierr = VecCopy(tao->solution, tron->X_New);CHKERRQ(ierr);
      ierr = VecCopy(tao->gradient, tron->G_New);CHKERRQ(ierr);

      stepsize=1.0;f_new=f;

      ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
      ierr = TaoLineSearchApply(tao->linesearch, tron->X_New, &f_new, tron->G_New, tao->stepdirection,&stepsize,&ls_reason);CHKERRQ(ierr);
      ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

      ierr = MatMult(tao->hessian, tao->stepdirection, tron->Work);CHKERRQ(ierr);
      ierr = VecAYPX(tron->Work, 0.5, tao->gradient);CHKERRQ(ierr);
      ierr = VecDot(tao->stepdirection, tron->Work, &prered);CHKERRQ(ierr);
      actred = f_new - f;
      if (actred<0) {
        rhok=PetscAbs(-actred/prered);
      } else {
        rhok=0.0;
      }

      /* Compare actual improvement to the quadratic model */
      if (rhok > tron->eta1) { /* Accept the point */
        /* d = x_new - x */
        ierr = VecCopy(tron->X_New, tao->stepdirection);CHKERRQ(ierr);
        ierr = VecAXPY(tao->stepdirection, -1.0, tao->solution);CHKERRQ(ierr);

        ierr = VecNorm(tao->stepdirection, NORM_2, &xdiff);CHKERRQ(ierr);
        xdiff *= stepsize;

        /* Adjust trust region size */
        if (rhok < tron->eta2 ){
          delta = PetscMin(xdiff,delta)*tron->sigma1;
        } else if (rhok > tron->eta4 ){
          delta= PetscMin(xdiff,delta)*tron->sigma3;
        } else if (rhok > tron->eta3 ){
          delta=PetscMin(xdiff,delta)*tron->sigma2;
        }
        ierr = VecBoundGradientProjection(tron->G_New,tron->X_New, tao->XL, tao->XU, tao->gradient);CHKERRQ(ierr);
        if (tron->Free_Local) {
          ierr = ISDestroy(&tron->Free_Local);CHKERRQ(ierr);
        }
        ierr = VecWhichBetween(tao->XL, tron->X_New, tao->XU, &tron->Free_Local);CHKERRQ(ierr);
        f=f_new;
        ierr = VecNorm(tao->gradient,NORM_2,&tron->gnorm);CHKERRQ(ierr);
        ierr = VecCopy(tron->X_New, tao->solution);CHKERRQ(ierr);
        ierr = VecCopy(tron->G_New, tao->gradient);CHKERRQ(ierr);
        break;
      }
      else if (delta <= 1e-30) {
        break;
      }
      else {
        delta /= 4.0;
      }
    } /* end linear solve loop */


    tron->f=f; tron->actred=actred; tao->trust=delta;
    tao->niter++;
    ierr = TaoMonitor(tao, tao->niter, tron->f, tron->gnorm, 0.0, delta, &reason);CHKERRQ(ierr);
  }  /* END MAIN LOOP  */

  PetscFunctionReturn(0);
}
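The acceptance test in the loop above compares the actual reduction f_new - f with the decrease predicted by the local quadratic model, prered = d'(g + 0.5*H*d), computed via MatMult/VecAYPX/VecDot. A hypothetical stand-alone helper expressing the same three calls (a sketch, not part of tron.c):

static PetscErrorCode QuadraticModelDecrease(Mat H, Vec g, Vec d, Vec work, PetscReal *prered)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMult(H, d, work);CHKERRQ(ierr);      /* work = H*d             */
  ierr = VecAYPX(work, 0.5, g);CHKERRQ(ierr);    /* work = g + 0.5*H*d     */
  ierr = VecDot(d, work, prered);CHKERRQ(ierr);  /* prered = d'(g + H*d/2) */
  PetscFunctionReturn(0);
}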
Example #5
File: ntr.c Project: PeiLiu90/petsc
static PetscErrorCode TaoSolve_NTR(Tao tao)
{
  TAO_NTR            *tr = (TAO_NTR *)tao->data;
  PC                 pc;
  KSPConvergedReason ksp_reason;
  TaoConvergedReason reason;
  PetscReal          fmin, ftrial, prered, actred, kappa, sigma, beta;
  PetscReal          tau, tau_1, tau_2, tau_max, tau_min, max_radius;
  PetscReal          f, gnorm;

  PetscReal          delta;
  PetscReal          norm_d;
  PetscErrorCode     ierr;
  PetscInt           iter = 0;
  PetscInt           bfgsUpdates = 0;
  PetscInt           needH;

  PetscInt           i_max = 5;
  PetscInt           j_max = 1;
  PetscInt           i, j, N, n, its;

  PetscFunctionBegin;
  if (tao->XL || tao->XU || tao->ops->computebounds) {
    ierr = PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by ntr algorithm\n");CHKERRQ(ierr);
  }

  tao->trust = tao->trust0;

  /* Modify the radius if it is too large or small */
  tao->trust = PetscMax(tao->trust, tr->min_radius);
  tao->trust = PetscMin(tao->trust, tr->max_radius);


  if (NTR_PC_BFGS == tr->pc_type && !tr->M) {
    ierr = VecGetLocalSize(tao->solution,&n);CHKERRQ(ierr);
    ierr = VecGetSize(tao->solution,&N);CHKERRQ(ierr);
    ierr = MatCreateLMVM(((PetscObject)tao)->comm,n,N,&tr->M);CHKERRQ(ierr);
    ierr = MatLMVMAllocateVectors(tr->M,tao->solution);CHKERRQ(ierr);
  }

  /* Check convergence criteria */
  ierr = TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);CHKERRQ(ierr);
  ierr = VecNorm(tao->gradient,NORM_2,&gnorm);CHKERRQ(ierr);
  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
  needH = 1;

  ierr = TaoMonitor(tao, iter, f, gnorm, 0.0, 1.0, &reason);CHKERRQ(ierr);
  if (reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);

  /* Create vectors for the limited memory preconditioner */
  if ((NTR_PC_BFGS == tr->pc_type) &&
      (BFGS_SCALE_BFGS != tr->bfgs_scale_type)) {
    if (!tr->Diag) {
        ierr = VecDuplicate(tao->solution, &tr->Diag);CHKERRQ(ierr);
    }
  }

  switch(tr->ksp_type) {
  case NTR_KSP_NASH:
    ierr = KSPSetType(tao->ksp, KSPNASH);CHKERRQ(ierr);
    if (tao->ksp->ops->setfromoptions) {
      (*tao->ksp->ops->setfromoptions)(tao->ksp);
    }
    break;

  case NTR_KSP_STCG:
    ierr = KSPSetType(tao->ksp, KSPSTCG);CHKERRQ(ierr);
    if (tao->ksp->ops->setfromoptions) {
      (*tao->ksp->ops->setfromoptions)(tao->ksp);
    }
    break;

  default:
    ierr = KSPSetType(tao->ksp, KSPGLTR);CHKERRQ(ierr);
    if (tao->ksp->ops->setfromoptions) {
      (*tao->ksp->ops->setfromoptions)(tao->ksp);
    }
    break;
  }

  /*  Modify the preconditioner to use the bfgs approximation */
  ierr = KSPGetPC(tao->ksp, &pc);CHKERRQ(ierr);
  switch(tr->pc_type) {
  case NTR_PC_NONE:
    ierr = PCSetType(pc, PCNONE);CHKERRQ(ierr);
    if (pc->ops->setfromoptions) {
      (*pc->ops->setfromoptions)(pc);
    }
    break;

  case NTR_PC_AHESS:
    ierr = PCSetType(pc, PCJACOBI);CHKERRQ(ierr);
    if (pc->ops->setfromoptions) {
      (*pc->ops->setfromoptions)(pc);
    }
    ierr = PCJacobiSetUseAbs(pc);CHKERRQ(ierr);
    break;

  case NTR_PC_BFGS:
    ierr = PCSetType(pc, PCSHELL);CHKERRQ(ierr);
    if (pc->ops->setfromoptions) {
      (*pc->ops->setfromoptions)(pc);
    }
    ierr = PCShellSetName(pc, "bfgs");CHKERRQ(ierr);
    ierr = PCShellSetContext(pc, tr->M);CHKERRQ(ierr);
    ierr = PCShellSetApply(pc, MatLMVMSolveShell);CHKERRQ(ierr);
    break;

  default:
    /*  Use the pc method set by pc_type */
    break;
  }

  /*  Initialize trust-region radius */
  switch(tr->init_type) {
  case NTR_INIT_CONSTANT:
    /*  Use the initial radius specified */
    break;

  case NTR_INIT_INTERPOLATION:
    /*  Use interpolation to determine the initial radius */
    max_radius = 0.0;

    for (j = 0; j < j_max; ++j) {
      fmin = f;
      sigma = 0.0;

      if (needH) {
        ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);
        needH = 0;
      }

      for (i = 0; i < i_max; ++i) {

        ierr = VecCopy(tao->solution, tr->W);CHKERRQ(ierr);
        ierr = VecAXPY(tr->W, -tao->trust/gnorm, tao->gradient);CHKERRQ(ierr);
        ierr = TaoComputeObjective(tao, tr->W, &ftrial);CHKERRQ(ierr);

        if (PetscIsInfOrNanReal(ftrial)) {
          tau = tr->gamma1_i;
        }
        else {
          if (ftrial < fmin) {
            fmin = ftrial;
            sigma = -tao->trust / gnorm;
          }

          ierr = MatMult(tao->hessian, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
          ierr = VecDot(tao->gradient, tao->stepdirection, &prered);CHKERRQ(ierr);

          prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
          actred = f - ftrial;
          if ((PetscAbsScalar(actred) <= tr->epsilon) &&
              (PetscAbsScalar(prered) <= tr->epsilon)) {
            kappa = 1.0;
          }
          else {
            kappa = actred / prered;
          }

          tau_1 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust + (1.0 - tr->theta_i) * prered - actred);
          tau_2 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust - (1.0 + tr->theta_i) * prered + actred);
          tau_min = PetscMin(tau_1, tau_2);
          tau_max = PetscMax(tau_1, tau_2);

          if (PetscAbsScalar(kappa - 1.0) <= tr->mu1_i) {
            /*  Great agreement */
            max_radius = PetscMax(max_radius, tao->trust);

            if (tau_max < 1.0) {
              tau = tr->gamma3_i;
            }
            else if (tau_max > tr->gamma4_i) {
              tau = tr->gamma4_i;
            }
            else {
              tau = tau_max;
            }
          }
          else if (PetscAbsScalar(kappa - 1.0) <= tr->mu2_i) {
            /*  Good agreement */
            max_radius = PetscMax(max_radius, tao->trust);

            if (tau_max < tr->gamma2_i) {
              tau = tr->gamma2_i;
            }
            else if (tau_max > tr->gamma3_i) {
              tau = tr->gamma3_i;
            }
            else {
              tau = tau_max;
            }
          }
          else {
            /*  Not good agreement */
            if (tau_min > 1.0) {
              tau = tr->gamma2_i;
            }
            else if (tau_max < tr->gamma1_i) {
              tau = tr->gamma1_i;
            }
            else if ((tau_min < tr->gamma1_i) && (tau_max >= 1.0)) {
              tau = tr->gamma1_i;
            }
            else if ((tau_1 >= tr->gamma1_i) && (tau_1 < 1.0) &&
                     ((tau_2 < tr->gamma1_i) || (tau_2 >= 1.0))) {
              tau = tau_1;
            }
            else if ((tau_2 >= tr->gamma1_i) && (tau_2 < 1.0) &&
                     ((tau_1 < tr->gamma1_i) || (tau_1 >= 1.0))) {
              tau = tau_2;
            }
            else {
              tau = tau_max;
            }
          }
        }
        tao->trust = tau * tao->trust;
      }

      if (fmin < f) {
        f = fmin;
        ierr = VecAXPY(tao->solution, sigma, tao->gradient);CHKERRQ(ierr);
        ierr = TaoComputeGradient(tao,tao->solution, tao->gradient);CHKERRQ(ierr);

        ierr = VecNorm(tao->gradient, NORM_2, &gnorm);CHKERRQ(ierr);

        if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
        needH = 1;

        ierr = TaoMonitor(tao, iter, f, gnorm, 0.0, 1.0, &reason);CHKERRQ(ierr);
        if (reason != TAO_CONTINUE_ITERATING) {
          PetscFunctionReturn(0);
        }
      }
    }
    tao->trust = PetscMax(tao->trust, max_radius);

    /*  Modify the radius if it is too large or small */
    tao->trust = PetscMax(tao->trust, tr->min_radius);
    tao->trust = PetscMin(tao->trust, tr->max_radius);
    break;

  default:
    /*  Norm of the first direction will initialize radius */
    tao->trust = 0.0;
    break;
  }

  /* Set initial scaling for the BFGS preconditioner
     This step is done after computing the initial trust-region radius
     since the function value may have decreased */
  if (NTR_PC_BFGS == tr->pc_type) {
    if (f != 0.0) {
      delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
    }
    else {
      delta = 2.0 / (gnorm*gnorm);
    }
    ierr = MatLMVMSetDelta(tr->M,delta);CHKERRQ(ierr);
  }

  /* Have not converged; continue with Newton method */
  while (reason == TAO_CONTINUE_ITERATING) {
    ++iter;
    tao->ksp_its=0;
    /* Compute the Hessian */
    if (needH) {
      ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);
      needH = 0;
    }

    if (NTR_PC_BFGS == tr->pc_type) {
      if (BFGS_SCALE_AHESS == tr->bfgs_scale_type) {
        /* Obtain diagonal for the bfgs preconditioner */
        ierr = MatGetDiagonal(tao->hessian, tr->Diag);CHKERRQ(ierr);
        ierr = VecAbs(tr->Diag);CHKERRQ(ierr);
        ierr = VecReciprocal(tr->Diag);CHKERRQ(ierr);
        ierr = MatLMVMSetScale(tr->M,tr->Diag);CHKERRQ(ierr);
      }

      /* Update the limited memory preconditioner */
      ierr = MatLMVMUpdate(tr->M, tao->solution, tao->gradient);CHKERRQ(ierr);
      ++bfgsUpdates;
    }

    while (reason == TAO_CONTINUE_ITERATING) {
      ierr = KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);CHKERRQ(ierr);

      /* Solve the trust region subproblem */
      if (NTR_KSP_NASH == tr->ksp_type) {
        ierr = KSPNASHSetRadius(tao->ksp,tao->trust);CHKERRQ(ierr);
        ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
        ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
        tao->ksp_its+=its;
        tao->ksp_tot_its+=its;
        ierr = KSPNASHGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
      } else if (NTR_KSP_STCG == tr->ksp_type) {
        ierr = KSPSTCGSetRadius(tao->ksp,tao->trust);CHKERRQ(ierr);
        ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
        ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
        tao->ksp_its+=its;
        tao->ksp_tot_its+=its;
        ierr = KSPSTCGGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
      } else { /* NTR_KSP_GLTR */
        ierr = KSPGLTRSetRadius(tao->ksp,tao->trust);CHKERRQ(ierr);
        ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
        ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
        tao->ksp_its+=its;
        tao->ksp_tot_its+=its;
        ierr = KSPGLTRGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
      }

      if (0.0 == tao->trust) {
        /* Radius was uninitialized; use the norm of the direction */
        if (norm_d > 0.0) {
          tao->trust = norm_d;

          /* Modify the radius if it is too large or small */
          tao->trust = PetscMax(tao->trust, tr->min_radius);
          tao->trust = PetscMin(tao->trust, tr->max_radius);
        }
        else {
          /* The direction was bad; set radius to default value and re-solve
             the trust-region subproblem to get a direction */
          tao->trust = tao->trust0;

          /* Modify the radius if it is too large or small */
          tao->trust = PetscMax(tao->trust, tr->min_radius);
          tao->trust = PetscMin(tao->trust, tr->max_radius);

          if (NTR_KSP_NASH == tr->ksp_type) {
            ierr = KSPNASHSetRadius(tao->ksp,tao->trust);CHKERRQ(ierr);
            ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
            ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
            tao->ksp_its+=its;
            tao->ksp_tot_its+=its;
            ierr = KSPNASHGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
          } else if (NTR_KSP_STCG == tr->ksp_type) {
            ierr = KSPSTCGSetRadius(tao->ksp,tao->trust);CHKERRQ(ierr);
            ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
            ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
            tao->ksp_its+=its;
            tao->ksp_tot_its+=its;
            ierr = KSPSTCGGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
          } else { /* NTR_KSP_GLTR */
            ierr = KSPGLTRSetRadius(tao->ksp,tao->trust);CHKERRQ(ierr);
            ierr = KSPSolve(tao->ksp, tao->gradient, tao->stepdirection);CHKERRQ(ierr);
            ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
            tao->ksp_its+=its;
            tao->ksp_tot_its+=its;
            ierr = KSPGLTRGetNormD(tao->ksp, &norm_d);CHKERRQ(ierr);
          }

          if (norm_d == 0.0) SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
        }
      }
      ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);
      ierr = KSPGetConvergedReason(tao->ksp, &ksp_reason);CHKERRQ(ierr);
      if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) &&
          (NTR_PC_BFGS == tr->pc_type) && (bfgsUpdates > 1)) {
        /* Preconditioner is numerically indefinite; reset the
           approximate if using BFGS preconditioning. */

        if (f != 0.0) {
          delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
        }
        else {
          delta = 2.0 / (gnorm*gnorm);
        }
        ierr = MatLMVMSetDelta(tr->M, delta);CHKERRQ(ierr);
        ierr = MatLMVMReset(tr->M);CHKERRQ(ierr);
        ierr = MatLMVMUpdate(tr->M, tao->solution, tao->gradient);CHKERRQ(ierr);
        bfgsUpdates = 1;
      }

      if (NTR_UPDATE_REDUCTION == tr->update_type) {
        /* Get predicted reduction */
        if (NTR_KSP_NASH == tr->ksp_type) {
          ierr = KSPNASHGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
        } else if (NTR_KSP_STCG == tr->ksp_type) {
          ierr = KSPSTCGGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
        } else { /* gltr */
          ierr = KSPGLTRGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
        }

        if (prered >= 0.0) {
          /* The predicted reduction has the wrong sign.  This cannot
             happen in infinite precision arithmetic.  Step should
             be rejected! */
          tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
        }
        else {
          /* Compute trial step and function value */
          ierr = VecCopy(tao->solution,tr->W);CHKERRQ(ierr);
          ierr = VecAXPY(tr->W, 1.0, tao->stepdirection);CHKERRQ(ierr);
          ierr = TaoComputeObjective(tao, tr->W, &ftrial);CHKERRQ(ierr);

          if (PetscIsInfOrNanReal(ftrial)) {
            tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
          } else {
            /* Compute the actual reduction */
            actred = f - ftrial;
            prered = -prered;
            if ((PetscAbsScalar(actred) <= tr->epsilon) &&
                (PetscAbsScalar(prered) <= tr->epsilon)) {
              kappa = 1.0;
            }
            else {
              kappa = actred / prered;
            }

            /* Accept or reject the step and update radius */
            if (kappa < tr->eta1) {
              /* Reject the step */
              tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
            }
            else {
              /* Accept the step */
              if (kappa < tr->eta2) {
                /* Marginal bad step */
                tao->trust = tr->alpha2 * PetscMin(tao->trust, norm_d);
              }
              else if (kappa < tr->eta3) {
                /* Reasonable step */
                tao->trust = tr->alpha3 * tao->trust;
              }
              else if (kappa < tr->eta4) {
                /* Good step */
                tao->trust = PetscMax(tr->alpha4 * norm_d, tao->trust);
              }
              else {
                /* Very good step */
                tao->trust = PetscMax(tr->alpha5 * norm_d, tao->trust);
              }
              break;
            }
          }
        }
      }
      else {
        /* Get predicted reduction */
        if (NTR_KSP_NASH == tr->ksp_type) {
          ierr = KSPNASHGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
        } else if (NTR_KSP_STCG == tr->ksp_type) {
          ierr = KSPSTCGGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
        } else { /* gltr */
          ierr = KSPGLTRGetObjFcn(tao->ksp,&prered);CHKERRQ(ierr);
        }

        if (prered >= 0.0) {
          /* The predicted reduction has the wrong sign.  This cannot
             happen in infinite precision arithmetic.  Step should
             be rejected! */
          tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
        }
        else {
          ierr = VecCopy(tao->solution, tr->W);CHKERRQ(ierr);
          ierr = VecAXPY(tr->W, 1.0, tao->stepdirection);CHKERRQ(ierr);
          ierr = TaoComputeObjective(tao, tr->W, &ftrial);CHKERRQ(ierr);
          if (PetscIsInfOrNanReal(ftrial)) {
            tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
          }
          else {
            ierr = VecDot(tao->gradient, tao->stepdirection, &beta);CHKERRQ(ierr);
            actred = f - ftrial;
            prered = -prered;
            if ((PetscAbsScalar(actred) <= tr->epsilon) &&
                (PetscAbsScalar(prered) <= tr->epsilon)) {
              kappa = 1.0;
            }
            else {
              kappa = actred / prered;
            }

            tau_1 = tr->theta * beta / (tr->theta * beta - (1.0 - tr->theta) * prered + actred);
            tau_2 = tr->theta * beta / (tr->theta * beta + (1.0 + tr->theta) * prered - actred);
            tau_min = PetscMin(tau_1, tau_2);
            tau_max = PetscMax(tau_1, tau_2);

            if (kappa >= 1.0 - tr->mu1) {
              /* Great agreement; accept step and update radius */
              if (tau_max < 1.0) {
                tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
              }
              else if (tau_max > tr->gamma4) {
                tao->trust = PetscMax(tao->trust, tr->gamma4 * norm_d);
              }
              else {
                tao->trust = PetscMax(tao->trust, tau_max * norm_d);
              }
              break;
            }
            else if (kappa >= 1.0 - tr->mu2) {
              /* Good agreement */

              if (tau_max < tr->gamma2) {
                tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
              }
              else if (tau_max > tr->gamma3) {
                tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
              }
              else if (tau_max < 1.0) {
                tao->trust = tau_max * PetscMin(tao->trust, norm_d);
              }
              else {
                tao->trust = PetscMax(tao->trust, tau_max * norm_d);
              }
              break;
            }
            else {
              /* Not good agreement */
              if (tau_min > 1.0) {
                tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
              }
              else if (tau_max < tr->gamma1) {
                tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
              }
              else if ((tau_min < tr->gamma1) && (tau_max >= 1.0)) {
                tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
              }
              else if ((tau_1 >= tr->gamma1) && (tau_1 < 1.0) &&
                       ((tau_2 < tr->gamma1) || (tau_2 >= 1.0))) {
                tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
              }
              else if ((tau_2 >= tr->gamma1) && (tau_2 < 1.0) &&
                       ((tau_1 < tr->gamma1) || (tau_1 >= 1.0))) {
                tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
              }
              else {
                tao->trust = tau_max * PetscMin(tao->trust, norm_d);
              }
            }
          }
        }
      }

      /* The step computed was not good and the radius was decreased.
         Monitor the radius to terminate. */
      ierr = TaoMonitor(tao, iter, f, gnorm, 0.0, tao->trust, &reason);CHKERRQ(ierr);
    }

    /* The radius may have been increased; modify if it is too large */
    tao->trust = PetscMin(tao->trust, tr->max_radius);

    if (reason == TAO_CONTINUE_ITERATING) {
      ierr = VecCopy(tr->W, tao->solution);CHKERRQ(ierr);
      f = ftrial;
      ierr = TaoComputeGradient(tao, tao->solution, tao->gradient);CHKERRQ(ierr);
      ierr = VecNorm(tao->gradient, NORM_2, &gnorm);CHKERRQ(ierr);
      if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
      needH = 1;
      ierr = TaoMonitor(tao, iter, f, gnorm, 0.0, tao->trust, &reason);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
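OWLQN, NTR, and BLMVM all seed the LMVM matrix with the same initial scaling: delta = 2|f|/||g||^2 when f is nonzero, and 2/||g||^2 otherwise. A hedged helper capturing that repeated pattern (hypothetical, not part of the PETSc sources):

static PetscErrorCode InitialLMVMDelta(Mat M, PetscReal f, PetscReal gnorm, PetscReal *delta)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (f != 0.0) *delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
  else          *delta = 2.0 / (gnorm*gnorm);
  ierr = MatLMVMSetDelta(M, *delta);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}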
Example #6
static PetscErrorCode TaoSolve_SQPCON(Tao tao)
{
  TAO_SQPCON                   *sqpconP = (TAO_SQPCON*)tao->data;
  PetscInt                     iter=0;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_reason = TAOLINESEARCH_CONTINUE_ITERATING;
  PetscReal                    step=1.0,f,fm, fold;
  PetscReal                    cnorm, mnorm;
  PetscBool                    use_update=PETSC_TRUE; /*  don't update Q if line search failed */
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  /* Scatter to U,V */
  ierr = VecScatterBegin(sqpconP->state_scatter, tao->solution, sqpconP->U, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(sqpconP->state_scatter, tao->solution, sqpconP->U, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterBegin(sqpconP->design_scatter, tao->solution, sqpconP->V, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(sqpconP->design_scatter, tao->solution, sqpconP->V, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);

  /* Evaluate Function, Gradient, Constraints, and Jacobian */
  ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&f,tao->gradient);CHKERRQ(ierr);
  ierr = TaoComputeConstraints(tao,tao->solution, tao->constraints);CHKERRQ(ierr);
  ierr = TaoComputeJacobianState(tao,tao->solution, &tao->jacobian_state, &tao->jacobian_state_pre, &tao->jacobian_state_inv, &sqpconP->statematflag);CHKERRQ(ierr);
  ierr = TaoComputeJacobianDesign(tao,tao->solution, &tao->jacobian_design, &tao->jacobian_design_pre, &sqpconP->statematflag);CHKERRQ(ierr);

  /* Scatter gradient to GU,GV */
  ierr = VecScatterBegin(sqpconP->state_scatter, tao->gradient, sqpconP->GU, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(sqpconP->state_scatter, tao->gradient, sqpconP->GU, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterBegin(sqpconP->design_scatter, tao->gradient, sqpconP->GV, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(sqpconP->design_scatter, tao->gradient, sqpconP->GV, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecNorm(tao->gradient, NORM_2, &mnorm);CHKERRQ(ierr);

  /* Evaluate constraint norm */
  ierr = VecNorm(tao->constraints, NORM_2, &cnorm);CHKERRQ(ierr);

  /* Monitor convergence */
  ierr = TaoMonitor(tao, iter,f,mnorm,cnorm,step,&reason);CHKERRQ(ierr);

  while (reason == TAO_CONTINUE_ITERATING) {
    /* Solve tbar = -A\t (t is constraints vector) */
    ierr = MatMult(tao->jacobian_state_inv, tao->constraints, sqpconP->Tbar);CHKERRQ(ierr);
    ierr = VecScale(sqpconP->Tbar, -1.0);CHKERRQ(ierr);

    /* aqwac =  A'\(Q*Tbar + c) */
    if (iter > 0) {
      ierr = MatMult(sqpconP->Q,sqpconP->Tbar,sqpconP->WV);CHKERRQ(ierr);
    } else {
      ierr = VecCopy(sqpconP->Tbar, sqpconP->WV);CHKERRQ(ierr);
    }
    ierr = VecAXPY(sqpconP->WV,1.0,sqpconP->GU);CHKERRQ(ierr);

    ierr = MatMultTranspose(tao->jacobian_state_inv, sqpconP->WV, sqpconP->aqwac);CHKERRQ(ierr);

    /* Reduced Gradient dbar = d -  B^t * aqwac */
    ierr = MatMultTranspose(tao->jacobian_design,sqpconP->aqwac, sqpconP->dbar);CHKERRQ(ierr);
    ierr = VecScale(sqpconP->dbar, -1.0);CHKERRQ(ierr);
    ierr = VecAXPY(sqpconP->dbar,1.0,sqpconP->GV);CHKERRQ(ierr);

    /* update reduced hessian */
    ierr = MatLMVMUpdate(sqpconP->R, sqpconP->V, sqpconP->dbar);CHKERRQ(ierr);

    /* Solve R*dv = -dbar using approx. hessian */
    ierr = MatLMVMSolve(sqpconP->R, sqpconP->dbar, sqpconP->DV);CHKERRQ(ierr);
    ierr = VecScale(sqpconP->DV, -1.0);CHKERRQ(ierr);

    /* Backsolve for u =  A\(g - B*dv)  = tbar - A\(B*dv)*/
    ierr = MatMult(tao->jacobian_design, sqpconP->DV, sqpconP->WL);CHKERRQ(ierr);
    ierr = MatMult(tao->jacobian_state_inv, sqpconP->WL, sqpconP->DU);CHKERRQ(ierr);
    ierr = VecScale(sqpconP->DU, -1.0);CHKERRQ(ierr);
    ierr = VecAXPY(sqpconP->DU, 1.0, sqpconP->Tbar);CHKERRQ(ierr);

    /* Assemble Big D */
    ierr = VecScatterBegin(sqpconP->state_scatter, sqpconP->DU, tao->stepdirection, INSERT_VALUES, SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->state_scatter, sqpconP->DU, tao->stepdirection, INSERT_VALUES, SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterBegin(sqpconP->design_scatter, sqpconP->DV, tao->stepdirection, INSERT_VALUES, SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->design_scatter, sqpconP->DV, tao->stepdirection, INSERT_VALUES, SCATTER_REVERSE);CHKERRQ(ierr);

    /* Perform Line Search */
    ierr = VecCopy(tao->solution, sqpconP->Xold);CHKERRQ(ierr);
    ierr = VecCopy(tao->gradient, sqpconP->Gold);CHKERRQ(ierr);
    fold = f;
    ierr = TaoLineSearchComputeObjectiveAndGradient(tao->linesearch,tao->solution,&fm,sqpconP->GL);CHKERRQ(ierr);
    ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &fm, sqpconP->GL, tao->stepdirection,&step, &ls_reason);CHKERRQ(ierr);
    ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);
    if (ls_reason < 0) {
      ierr = VecCopy(sqpconP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(sqpconP->Gold, tao->gradient);CHKERRQ(ierr);
      f = fold;
      ierr = VecAXPY(tao->solution, 1.0, tao->stepdirection);CHKERRQ(ierr);
      ierr = PetscInfo(tao,"Line Search Failed, using full step.\n");CHKERRQ(ierr);
      use_update=PETSC_FALSE;
    } else {
      use_update = PETSC_TRUE;
    }

    /* Scatter X to U,V */
    ierr = VecScatterBegin(sqpconP->state_scatter, tao->solution, sqpconP->U, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->state_scatter, tao->solution, sqpconP->U, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterBegin(sqpconP->design_scatter, tao->solution, sqpconP->V, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->design_scatter, tao->solution, sqpconP->V, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);


    /* Evaluate Function, Gradient, Constraints, and Jacobian */
    ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&f,tao->gradient);CHKERRQ(ierr);
    ierr = TaoComputeConstraints(tao,tao->solution, tao->constraints);CHKERRQ(ierr);
    ierr = TaoComputeJacobianState(tao,tao->solution, &tao->jacobian_state, &tao->jacobian_state_pre, &tao->jacobian_state_inv, &sqpconP->statematflag);CHKERRQ(ierr);
    ierr = TaoComputeJacobianDesign(tao,tao->solution, &tao->jacobian_design, &tao->jacobian_design_pre, &sqpconP->designmatflag);CHKERRQ(ierr);

    /* Scatter gradient to GU,GV */
    ierr = VecScatterBegin(sqpconP->state_scatter, tao->gradient, sqpconP->GU, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->state_scatter, tao->gradient, sqpconP->GU, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterBegin(sqpconP->design_scatter, tao->gradient, sqpconP->GV, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->design_scatter, tao->gradient, sqpconP->GV, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);

    /* Update approx to hessian of the Lagrangian wrt state (Q)
          with u_k+1, gu_k+1 */
    if (use_update) {
      ierr = MatApproxUpdate(sqpconP->Q,sqpconP->U,sqpconP->GU);CHKERRQ(ierr);
    }
    ierr = VecNorm(sqpconP->GL, NORM_2, &mnorm);CHKERRQ(ierr);

    /* Evaluate constraint norm */
    ierr = VecNorm(tao->constraints, NORM_2, &cnorm);CHKERRQ(ierr);

    /* Monitor convergence */
    iter++;
    ierr = TaoMonitor(tao, iter,f,mnorm,cnorm,step,&reason);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
Example #7
static PetscErrorCode TaoSolve_GPCG(Tao tao)
{
  TAO_GPCG                     *gpcg = (TAO_GPCG *)tao->data;
  PetscErrorCode               ierr;
  PetscInt                     its;
  PetscReal                    actred,f,f_new,gnorm,gdx,stepsize,xtb;
  PetscReal                    xtHx;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;

  PetscFunctionBegin;

  ierr = TaoComputeVariableBounds(tao);CHKERRQ(ierr);
  ierr = VecMedian(tao->XL,tao->solution,tao->XU,tao->solution);CHKERRQ(ierr);
  ierr = TaoLineSearchSetVariableBounds(tao->linesearch,tao->XL,tao->XU);CHKERRQ(ierr);

  /* Using f = .5*x'Hx + x'b + c and g=Hx + b,  compute b,c */
  ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);
  ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&f,tao->gradient);CHKERRQ(ierr);
  ierr = VecCopy(tao->gradient, gpcg->B);CHKERRQ(ierr);
  ierr = MatMult(tao->hessian,tao->solution,gpcg->Work);CHKERRQ(ierr);
  ierr = VecDot(gpcg->Work, tao->solution, &xtHx);CHKERRQ(ierr);
  ierr = VecAXPY(gpcg->B,-1.0,gpcg->Work);CHKERRQ(ierr);
  ierr = VecDot(gpcg->B,tao->solution,&xtb);CHKERRQ(ierr);
  gpcg->c=f-xtHx/2.0-xtb;
  if (gpcg->Free_Local) {
      ierr = ISDestroy(&gpcg->Free_Local);CHKERRQ(ierr);
  }
  ierr = VecWhichBetween(tao->XL,tao->solution,tao->XU,&gpcg->Free_Local);CHKERRQ(ierr);

  /* Project the gradient and calculate the norm */
  ierr = VecCopy(tao->gradient,gpcg->G_New);CHKERRQ(ierr);
  ierr = VecBoundGradientProjection(tao->gradient,tao->solution,tao->XL,tao->XU,gpcg->PG);CHKERRQ(ierr);
  ierr = VecNorm(gpcg->PG,NORM_2,&gpcg->gnorm);CHKERRQ(ierr);
  tao->step=1.0;
  gpcg->f = f;

  /* Check Stopping Condition */
  ierr = TaoMonitor(tao,tao->niter,f,gpcg->gnorm,0.0,tao->step,&reason);CHKERRQ(ierr);

  while (reason == TAO_CONTINUE_ITERATING){
    tao->ksp_its=0;

    ierr = GPCGGradProjections(tao);CHKERRQ(ierr);
    ierr = ISGetSize(gpcg->Free_Local,&gpcg->n_free);CHKERRQ(ierr);

    f=gpcg->f; gnorm=gpcg->gnorm;

    ierr = KSPReset(tao->ksp);CHKERRQ(ierr);

    if (gpcg->n_free > 0){
      /* Create a reduced linear system */
      ierr = VecDestroy(&gpcg->R);CHKERRQ(ierr);
      ierr = VecDestroy(&gpcg->DXFree);CHKERRQ(ierr);
      ierr = TaoVecGetSubVec(tao->gradient,gpcg->Free_Local, tao->subset_type, 0.0, &gpcg->R);CHKERRQ(ierr);
      ierr = VecScale(gpcg->R, -1.0);CHKERRQ(ierr);
      ierr = TaoVecGetSubVec(tao->stepdirection,gpcg->Free_Local,tao->subset_type, 0.0, &gpcg->DXFree);CHKERRQ(ierr);
      ierr = VecSet(gpcg->DXFree,0.0);CHKERRQ(ierr);

      ierr = TaoMatGetSubMat(tao->hessian, gpcg->Free_Local, gpcg->Work, tao->subset_type, &gpcg->Hsub);CHKERRQ(ierr);

      if (tao->hessian_pre == tao->hessian) {
        ierr = MatDestroy(&gpcg->Hsub_pre);CHKERRQ(ierr);
        ierr = PetscObjectReference((PetscObject)gpcg->Hsub);CHKERRQ(ierr);
        gpcg->Hsub_pre = gpcg->Hsub;
      }  else {
        ierr = TaoMatGetSubMat(tao->hessian, gpcg->Free_Local, gpcg->Work, tao->subset_type, &gpcg->Hsub_pre);CHKERRQ(ierr);
      }

      ierr = KSPReset(tao->ksp);CHKERRQ(ierr);
      ierr = KSPSetOperators(tao->ksp,gpcg->Hsub,gpcg->Hsub_pre);CHKERRQ(ierr);

      ierr = KSPSolve(tao->ksp,gpcg->R,gpcg->DXFree);CHKERRQ(ierr);
      ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
      tao->ksp_its+=its;
      tao->ksp_tot_its+=its;
      ierr = VecSet(tao->stepdirection,0.0);CHKERRQ(ierr);
      ierr = VecISAXPY(tao->stepdirection,gpcg->Free_Local,1.0,gpcg->DXFree);CHKERRQ(ierr);

      ierr = VecDot(tao->stepdirection,tao->gradient,&gdx);CHKERRQ(ierr);
      ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
      f_new=f;
      ierr = TaoLineSearchApply(tao->linesearch,tao->solution,&f_new,tao->gradient,tao->stepdirection,&stepsize,&ls_status);CHKERRQ(ierr);

      actred = f_new - f;

      /* Evaluate the function and gradient at the new point */
      ierr = VecBoundGradientProjection(tao->gradient,tao->solution,tao->XL,tao->XU, gpcg->PG);CHKERRQ(ierr);
      ierr = VecNorm(gpcg->PG, NORM_2, &gnorm);CHKERRQ(ierr);
      f=f_new;
      ierr = ISDestroy(&gpcg->Free_Local);CHKERRQ(ierr);
      ierr = VecWhichBetween(tao->XL,tao->solution,tao->XU,&gpcg->Free_Local);CHKERRQ(ierr);
    } else {
      actred = 0; gpcg->step=1.0;
      /* if there were no free variables, no cg method */
    }

    tao->niter++;
    ierr = TaoMonitor(tao,tao->niter,f,gnorm,0.0,gpcg->step,&reason);CHKERRQ(ierr);
    gpcg->f=f;gpcg->gnorm=gnorm; gpcg->actred=actred;
    if (reason!=TAO_CONTINUE_ITERATING) break;
  }  /* END MAIN LOOP  */

  PetscFunctionReturn(0);
}
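GPCG recovers the quadratic data b and c from a single objective/gradient/Hessian evaluation: with f = 0.5*x'Hx + b'x + c and g = Hx + b, it follows that b = g - Hx and c = f - 0.5*x'Hx - b'x. The fragment below restates that algebra on its own; H, x, g, b, work, and f are assumed to already exist, and the names are hypothetical.

  /* Sketch: recover b and c of f(x) = 0.5*x'Hx + b'x + c from one evaluation at x */
  PetscReal      xtHx, xtb, c;
  PetscErrorCode ierr;

  ierr = MatMult(H, x, work);CHKERRQ(ierr);     /* work = H*x    */
  ierr = VecDot(work, x, &xtHx);CHKERRQ(ierr);  /* xtHx = x'Hx   */
  ierr = VecCopy(g, b);CHKERRQ(ierr);           /* b = g         */
  ierr = VecAXPY(b, -1.0, work);CHKERRQ(ierr);  /* b = g - H*x   */
  ierr = VecDot(b, x, &xtb);CHKERRQ(ierr);      /* xtb = b'x     */
  c = f - 0.5*xtHx - xtb;                       /* constant term */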
Example #8
static PetscErrorCode TaoSolve_BQPIP(Tao tao)
{
  TAO_BQPIP          *qp = (TAO_BQPIP*)tao->data;
  PetscErrorCode     ierr;
  PetscInt           iter=0,its;
  PetscReal          d1,d2,ksptol,sigma;
  PetscReal          sigmamu;
  PetscReal          dstep,pstep,step=0;
  PetscReal          gap[4];
  TaoConvergedReason reason;

  PetscFunctionBegin;
  qp->dobj           = 0.0;
  qp->pobj           = 1.0;
  qp->gap            = 10.0;
  qp->rgap           = 1.0;
  qp->mu             = 1.0;
  qp->sigma          = 1.0;
  qp->dinfeas        = 1.0;
  qp->psteplength    = 0.0;
  qp->dsteplength    = 0.0;

  /* Tighten infinite bounds, things break when we don't do this
    -- see test_bqpip.c
  */
  ierr = VecSet(qp->XU,1.0e20);CHKERRQ(ierr);
  ierr = VecSet(qp->XL,-1.0e20);CHKERRQ(ierr);
  ierr = VecPointwiseMax(qp->XL,qp->XL,tao->XL);CHKERRQ(ierr);
  ierr = VecPointwiseMin(qp->XU,qp->XU,tao->XU);CHKERRQ(ierr);

  ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&qp->c,qp->C0);CHKERRQ(ierr);
  ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);
  ierr = MatMult(tao->hessian, tao->solution, qp->Work);CHKERRQ(ierr);
  ierr = VecDot(tao->solution, qp->Work, &d1);CHKERRQ(ierr);
  ierr = VecAXPY(qp->C0, -1.0, qp->Work);CHKERRQ(ierr);
  ierr = VecDot(qp->C0, tao->solution, &d2);CHKERRQ(ierr);
  qp->c -= (d1/2.0+d2);
  ierr = MatGetDiagonal(tao->hessian, qp->HDiag);CHKERRQ(ierr);

  ierr = QPIPSetInitialPoint(qp,tao);CHKERRQ(ierr);
  ierr = QPIPComputeResidual(qp,tao);CHKERRQ(ierr);

  /* Enter main loop */
  while (1){

    /* Check Stopping Condition      */
    ierr = TaoMonitor(tao,iter++,qp->pobj,PetscSqrtScalar(qp->gap + qp->dinfeas),
                            qp->pinfeas, step, &reason);CHKERRQ(ierr);
    if (reason != TAO_CONTINUE_ITERATING) break;

    /*
       Dual Infeasibility Direction should already be in the right
       hand side from computing the residuals
    */

    ierr = QPIPComputeNormFromCentralPath(qp,&d1);CHKERRQ(ierr);

    if (iter > 0 && (qp->rnorm>5*qp->mu || d1*d1>qp->m*qp->mu*qp->mu) ) {
      sigma=1.0;sigmamu=qp->mu;
      sigma=0.0;sigmamu=0;
    } else {
      sigma=0.0;sigmamu=0;
    }
    ierr = VecSet(qp->DZ, sigmamu);CHKERRQ(ierr);
    ierr = VecSet(qp->DS, sigmamu);CHKERRQ(ierr);

    if (sigmamu !=0){
      ierr = VecPointwiseDivide(qp->DZ, qp->DZ, qp->G);CHKERRQ(ierr);
      ierr = VecPointwiseDivide(qp->DS, qp->DS, qp->T);CHKERRQ(ierr);
      ierr = VecCopy(qp->DZ,qp->RHS2);CHKERRQ(ierr);
      ierr = VecAXPY(qp->RHS2, 1.0, qp->DS);CHKERRQ(ierr);
    } else {
      ierr = VecZeroEntries(qp->RHS2);CHKERRQ(ierr);
    }


    /*
       Compute the Primal Infeasibility RHS and the
       Diagonal Matrix to be added to H and store in Work
    */
    ierr = VecPointwiseDivide(qp->DiagAxpy, qp->Z, qp->G);CHKERRQ(ierr);
    ierr = VecPointwiseMult(qp->GZwork, qp->DiagAxpy, qp->R3);CHKERRQ(ierr);
    ierr = VecAXPY(qp->RHS, -1.0, qp->GZwork);CHKERRQ(ierr);

    ierr = VecPointwiseDivide(qp->TSwork, qp->S, qp->T);CHKERRQ(ierr);
    ierr = VecAXPY(qp->DiagAxpy, 1.0, qp->TSwork);CHKERRQ(ierr);
    ierr = VecPointwiseMult(qp->TSwork, qp->TSwork, qp->R5);CHKERRQ(ierr);
    ierr = VecAXPY(qp->RHS, -1.0, qp->TSwork);CHKERRQ(ierr);
    ierr = VecAXPY(qp->RHS2, 1.0, qp->RHS);CHKERRQ(ierr);

    /*  Determine the solving tolerance */
    ksptol = qp->mu/10.0;
    ksptol = PetscMin(ksptol,0.001);

    ierr = MatDiagonalSet(tao->hessian, qp->DiagAxpy, ADD_VALUES);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

    ierr = KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);CHKERRQ(ierr);
    ierr = KSPSolve(tao->ksp, qp->RHS, tao->stepdirection);CHKERRQ(ierr);
    ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
    tao->ksp_its+=its;

    ierr = VecScale(qp->DiagAxpy, -1.0);CHKERRQ(ierr);
    ierr = MatDiagonalSet(tao->hessian, qp->DiagAxpy, ADD_VALUES);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = VecScale(qp->DiagAxpy, -1.0);CHKERRQ(ierr);
    ierr = QPComputeStepDirection(qp,tao);CHKERRQ(ierr);
    ierr = QPStepLength(qp); CHKERRQ(ierr);

    /* Calculate the new residual R1, stored in RHS2 */
    ierr = MatMult(tao->hessian, tao->stepdirection, qp->RHS2);CHKERRQ(ierr);
    ierr = VecAXPY(qp->RHS2, 1.0, qp->DS);CHKERRQ(ierr);
    ierr = VecAXPY(qp->RHS2, -1.0, qp->DZ);CHKERRQ(ierr);
    ierr = VecAYPX(qp->RHS2, qp->dsteplength, tao->gradient);CHKERRQ(ierr);

    ierr = VecNorm(qp->RHS2, NORM_2, &qp->dinfeas);CHKERRQ(ierr);
    ierr = VecDot(qp->DZ, qp->DG, gap);CHKERRQ(ierr);
    ierr = VecDot(qp->DS, qp->DT, gap+1);CHKERRQ(ierr);

    qp->rnorm=(qp->dinfeas+qp->psteplength*qp->pinfeas)/(qp->m+qp->n);
    pstep = qp->psteplength; dstep = qp->dsteplength;
    step = PetscMin(qp->psteplength,qp->dsteplength);
    sigmamu= ( pstep*pstep*(gap[0]+gap[1]) +
               (1 - pstep + pstep*sigma)*qp->gap  )/qp->m;

    if (qp->predcorr && step < 0.9){
      if (sigmamu < qp->mu){
        sigmamu=sigmamu/qp->mu;
        sigmamu=sigmamu*sigmamu*sigmamu;
      } else {sigmamu = 1.0;}
      sigmamu = sigmamu*qp->mu;

      /* Compute Corrector Step */
      ierr = VecPointwiseMult(qp->DZ, qp->DG, qp->DZ);CHKERRQ(ierr);
      ierr = VecScale(qp->DZ, -1.0);CHKERRQ(ierr);
      ierr = VecShift(qp->DZ, sigmamu);CHKERRQ(ierr);
      ierr = VecPointwiseDivide(qp->DZ, qp->DZ, qp->G);CHKERRQ(ierr);

      ierr = VecPointwiseMult(qp->DS, qp->DS, qp->DT);CHKERRQ(ierr);
      ierr = VecScale(qp->DS, -1.0);CHKERRQ(ierr);
      ierr = VecShift(qp->DS, sigmamu);CHKERRQ(ierr);
      ierr = VecPointwiseDivide(qp->DS, qp->DS, qp->T);CHKERRQ(ierr);

      ierr = VecCopy(qp->DZ, qp->RHS2);CHKERRQ(ierr);
      ierr = VecAXPY(qp->RHS2, -1.0, qp->DS);CHKERRQ(ierr);
      ierr = VecAXPY(qp->RHS2, 1.0, qp->RHS);CHKERRQ(ierr);

      /* Approximately solve the linear system */
      ierr = MatDiagonalSet(tao->hessian, qp->DiagAxpy, ADD_VALUES);CHKERRQ(ierr);
      ierr = MatAssemblyBegin(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = MatAssemblyEnd(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = KSPSolve(tao->ksp, qp->RHS2, tao->stepdirection);CHKERRQ(ierr);
      ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
      tao->ksp_its+=its;

      ierr = MatDiagonalSet(tao->hessian, qp->HDiag, INSERT_VALUES);CHKERRQ(ierr);
      ierr = MatAssemblyBegin(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = MatAssemblyEnd(tao->hessian,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = QPComputeStepDirection(qp,tao);CHKERRQ(ierr);
      ierr = QPStepLength(qp);CHKERRQ(ierr);

    }  /* End Corrector step */


    /* Take the step */
    pstep = qp->psteplength; dstep = qp->dsteplength;

    ierr = VecAXPY(qp->Z, dstep, qp->DZ);CHKERRQ(ierr);
    ierr = VecAXPY(qp->S, dstep, qp->DS);CHKERRQ(ierr);
    ierr = VecAXPY(tao->solution, dstep, tao->stepdirection);CHKERRQ(ierr);
    ierr = VecAXPY(qp->G, dstep, qp->DG);CHKERRQ(ierr);
    ierr = VecAXPY(qp->T, dstep, qp->DT);CHKERRQ(ierr);

    /* Compute Residuals */
    ierr = QPIPComputeResidual(qp,tao);CHKERRQ(ierr);

    /* Evaluate quadratic function */
    ierr = MatMult(tao->hessian, tao->solution, qp->Work);CHKERRQ(ierr);

    ierr = VecDot(tao->solution, qp->Work, &d1);CHKERRQ(ierr);
    ierr = VecDot(tao->solution, qp->C0, &d2);CHKERRQ(ierr);
    ierr = VecDot(qp->G, qp->Z, gap);CHKERRQ(ierr);
    ierr = VecDot(qp->T, qp->S, gap+1);CHKERRQ(ierr);

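    /* Assemble the merit quantities: primal objective x'Hx/2 + c0'x + c, the
       complementarity gap g'z + t's, the implied dual objective, the average
       complementarity mu, and the relative gap used for termination */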
    qp->pobj=d1/2.0 + d2+qp->c;
    /* Compute the duality gap */
    qp->gap = (gap[0]+gap[1]);
    qp->dobj = qp->pobj - qp->gap;
    if (qp->m>0) qp->mu=qp->gap/(qp->m);
    qp->rgap=qp->gap/( PetscAbsReal(qp->dobj) + PetscAbsReal(qp->pobj) + 1.0 );
  }  /* END MAIN LOOP  */

  PetscFunctionReturn(0);
}
Example #9
0
static PetscErrorCode TaoSolve_BLMVM(Tao tao)
{
  PetscErrorCode               ierr;
  TAO_BLMVM                    *blmP = (TAO_BLMVM *)tao->data;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;
  PetscReal                    f, fold, gdx, gnorm;
  PetscReal                    stepsize = 1.0,delta;

  PetscFunctionBegin;
  /*  Project initial point onto bounds */
  ierr = TaoComputeVariableBounds(tao);CHKERRQ(ierr);
  ierr = VecMedian(tao->XL,tao->solution,tao->XU,tao->solution);CHKERRQ(ierr);
  ierr = TaoLineSearchSetVariableBounds(tao->linesearch,tao->XL,tao->XU);CHKERRQ(ierr);


  /* Check convergence criteria */
  ierr = TaoComputeObjectiveAndGradient(tao, tao->solution,&f,blmP->unprojected_gradient);CHKERRQ(ierr);
  ierr = VecBoundGradientProjection(blmP->unprojected_gradient,tao->solution, tao->XL,tao->XU,tao->gradient);CHKERRQ(ierr);

  ierr = TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);CHKERRQ(ierr);
  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");

  ierr = TaoMonitor(tao, tao->niter, f, gnorm, 0.0, stepsize, &reason);CHKERRQ(ierr);
  if (reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);

  /* Set initial scaling for the function */
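  /* (delta = 2|f|/||g||^2 seeds the diagonal of the LMVM approximation; the
     else branch avoids a zero scaling when f == 0) */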
  if (f != 0.0) {
    delta = 2.0*PetscAbsScalar(f) / (gnorm*gnorm);
  } else {
    delta = 2.0 / (gnorm*gnorm);
  }
  ierr = MatLMVMSetDelta(blmP->M,delta);CHKERRQ(ierr);

  /* Set counter for gradient/reset steps */
  blmP->grad = 0;
  blmP->reset = 0;

  /* Have not converged; continue with the limited-memory quasi-Newton method */
  while (reason == TAO_CONTINUE_ITERATING) {
    /* Compute direction */
    ierr = MatLMVMUpdate(blmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
    ierr = MatLMVMSolve(blmP->M, blmP->unprojected_gradient, tao->stepdirection);CHKERRQ(ierr);
    ierr = VecBoundGradientProjection(tao->stepdirection,tao->solution,tao->XL,tao->XU,tao->gradient);CHKERRQ(ierr);
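    /* The projection of the quasi-Newton direction lands in tao->gradient; its
       inner product with the unprojected gradient below is the descent test */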

    /* Check for success (descent direction) */
    ierr = VecDot(blmP->unprojected_gradient, tao->gradient, &gdx);CHKERRQ(ierr);
    if (gdx <= 0) {
      /* Step is not descent or solve was not successful
         Use steepest descent direction (scaled) */
      ++blmP->grad;

      if (f != 0.0) {
        delta = 2.0*PetscAbsScalar(f) / (gnorm*gnorm);
      } else {
        delta = 2.0 / (gnorm*gnorm);
      }
      ierr = MatLMVMSetDelta(blmP->M,delta);CHKERRQ(ierr);
      ierr = MatLMVMReset(blmP->M);CHKERRQ(ierr);
      ierr = MatLMVMUpdate(blmP->M, tao->solution, blmP->unprojected_gradient);CHKERRQ(ierr);
      ierr = MatLMVMSolve(blmP->M,blmP->unprojected_gradient, tao->stepdirection);CHKERRQ(ierr);
    }
    ierr = VecScale(tao->stepdirection,-1.0);CHKERRQ(ierr);

    /* Perform the linesearch */
    fold = f;
    ierr = VecCopy(tao->solution, blmP->Xold);CHKERRQ(ierr);
    ierr = VecCopy(blmP->unprojected_gradient, blmP->Gold);CHKERRQ(ierr);
    ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, blmP->unprojected_gradient, tao->stepdirection, &stepsize, &ls_status);CHKERRQ(ierr);
    ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

    if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
      /* Linesearch failed
         Reset factors and use scaled (projected) gradient step */
      ++blmP->reset;

      f = fold;
      ierr = VecCopy(blmP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(blmP->Gold, blmP->unprojected_gradient);CHKERRQ(ierr);

      if (f != 0.0) {
        delta = 2.0* PetscAbsScalar(f) / (gnorm*gnorm);
      } else {
        delta = 2.0/ (gnorm*gnorm);
      }
      ierr = MatLMVMSetDelta(blmP->M,delta);CHKERRQ(ierr);
      ierr = MatLMVMReset(blmP->M);CHKERRQ(ierr);
      ierr = MatLMVMUpdate(blmP->M, tao->solution, blmP->unprojected_gradient);CHKERRQ(ierr);
      ierr = MatLMVMSolve(blmP->M, blmP->unprojected_gradient, tao->stepdirection);CHKERRQ(ierr);
      ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);

      /* This may be incorrect; the linesearch has values of stepmax and stepmin
         that should be reset. */
      ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
      ierr = TaoLineSearchApply(tao->linesearch,tao->solution,&f, blmP->unprojected_gradient, tao->stepdirection,  &stepsize, &ls_status);CHKERRQ(ierr);
      ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

      if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
        tao->reason = TAO_DIVERGED_LS_FAILURE;
        break;
      }
    }

    /* Check for convergence */
    ierr = VecBoundGradientProjection(blmP->unprojected_gradient, tao->solution, tao->XL, tao->XU, tao->gradient);CHKERRQ(ierr);
    ierr = TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm);CHKERRQ(ierr);


    if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
    tao->niter++;
    ierr = TaoMonitor(tao, tao->niter, f, gnorm, 0.0, stepsize, &reason);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
Example #10
0
static PetscErrorCode TaoSolve_BMRM(Tao tao)
{
  PetscErrorCode     ierr;
  TAO_DF             df;
  TAO_BMRM           *bmrm = (TAO_BMRM*)tao->data;

  /* Values and pointers to parts of the optimization problem */
  PetscReal          f = 0.0;
  Vec                W = tao->solution;
  Vec                G = tao->gradient;
  PetscReal          lambda;
  PetscReal          bt;
  Vec_Chain          grad_list, *tail_glist, *pgrad;
  PetscInt           i;
  PetscMPIInt        rank;

  /* Used in converged criteria check */
  PetscReal          reg;
  PetscReal          jtwt = 0.0, max_jtwt, pre_epsilon, epsilon, jw, min_jw;
  PetscReal          innerSolverTol;
  MPI_Comm           comm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)tao,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  lambda = bmrm->lambda;

  /* Initialize quantities used in the stopping test */
  tao->step = 1.0;
  max_jtwt = -BMRM_INFTY;
  min_jw = BMRM_INFTY;
  innerSolverTol = 1.0;
  epsilon = 0.0;

  if (!rank) {
    ierr = init_df_solver(&df);CHKERRQ(ierr);
    grad_list.next = NULL;
    tail_glist = &grad_list;
  }

  df.tol = 1e-6;
  tao->reason = TAO_CONTINUE_ITERATING;

  /*-----------------Algorithm Begins------------------------*/
  /* make the scatter */
  ierr = VecScatterCreateToZero(W, &bmrm->scatter, &bmrm->local_w);CHKERRQ(ierr);
  ierr = VecAssemblyBegin(bmrm->local_w);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(bmrm->local_w);CHKERRQ(ierr);

  /* NOTE: the application's gradient routine should return a sub-gradient of Remp(W) */
  ierr = TaoComputeObjectiveAndGradient(tao, W, &f, G);CHKERRQ(ierr);
  ierr = TaoLogConvergenceHistory(tao,f,1.0,0.0,tao->ksp_its);CHKERRQ(ierr);
  ierr = TaoMonitor(tao,tao->niter,f,1.0,0.0,tao->step);CHKERRQ(ierr);
  ierr = (*tao->ops->convergencetest)(tao,tao->cnvP);CHKERRQ(ierr);
  
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    /* Call general purpose update function */
    if (tao->ops->update) {
      ierr = (*tao->ops->update)(tao, tao->niter);CHKERRQ(ierr);
    }
    
    /* compute bt = Remp(Wt-1) - <Wt-1, At> */
    ierr = VecDot(W, G, &bt);CHKERRQ(ierr);
    bt = f - bt;
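    /* The pair (At, bt) defines the new cutting plane <At,w> + bt, a lower
       bound on Remp(w) that is appended to the bundle on the master rank */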

    /* First gather the gradient to the master node */
    ierr = VecScatterBegin(bmrm->scatter, G, bmrm->local_w, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(bmrm->scatter, G, bmrm->local_w, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);

    /* Bring up the inner solver */
    if (!rank) {
      ierr = ensure_df_space(tao->niter+1, &df);CHKERRQ(ierr);
      ierr = make_grad_node(bmrm->local_w, &pgrad);CHKERRQ(ierr);
      tail_glist->next = pgrad;
      tail_glist = pgrad;

      df.a[tao->niter] = 1.0;
      df.f[tao->niter] = -bt;
      df.u[tao->niter] = 1.0;
      df.l[tao->niter] = 0.0;

      /* set up the Q */
      pgrad = grad_list.next;
      for (i=0; i<=tao->niter; i++) {
        if (!pgrad) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Assert that there are at least tao->niter+1 pgrad available");
        ierr = VecDot(pgrad->V, bmrm->local_w, &reg);CHKERRQ(ierr);
        df.Q[i][tao->niter] = df.Q[tao->niter][i] = reg / lambda;
        pgrad = pgrad->next;
      }

      if (tao->niter > 0) {
        df.x[tao->niter] = 0.0;
        ierr = solve(&df);CHKERRQ(ierr);
      } else
        df.x[0] = 1.0;

      /* compute the dual objective Jt*(alpha_t), which equals Jt(wt) at the
         subproblem optimum; it is used in the convergence check below */
      jtwt = 0.0;
      ierr = VecSet(bmrm->local_w, 0.0);CHKERRQ(ierr);
      pgrad = grad_list.next;
      for (i=0; i<=tao->niter; i++) {
        jtwt -= df.x[i] * df.f[i];
        ierr = VecAXPY(bmrm->local_w, -df.x[i] / lambda, pgrad->V);CHKERRQ(ierr);
        pgrad = pgrad->next;
      }
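      /* bmrm->local_w now holds the new primal iterate w_t = -(1/lambda) * sum_i alpha_i a_i
         recovered from the dual solution of the bundle subproblem */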

      ierr = VecNorm(bmrm->local_w, NORM_2, &reg);CHKERRQ(ierr);
      reg = 0.5*lambda*reg*reg;
      jtwt -= reg;
    } /* end if rank == 0 */

    /* scatter the new W to all nodes */
    ierr = VecScatterBegin(bmrm->scatter,bmrm->local_w,W,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd(bmrm->scatter,bmrm->local_w,W,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);

    ierr = TaoComputeObjectiveAndGradient(tao, W, &f, G);CHKERRQ(ierr);

    ierr = MPI_Bcast(&jtwt,1,MPIU_REAL,0,comm);CHKERRQ(ierr);
    ierr = MPI_Bcast(&reg,1,MPIU_REAL,0,comm);CHKERRQ(ierr);

    jw = reg + f;                                       /* J(w) = regularizer + Remp(w) */
    if (jw < min_jw) min_jw = jw;
    if (jtwt > max_jtwt) max_jtwt = jtwt;

    pre_epsilon = epsilon;
    epsilon = min_jw - jtwt;

    if (!rank) {
      if (innerSolverTol > epsilon) innerSolverTol = epsilon;
      else if (innerSolverTol < 1e-7) innerSolverTol = 1e-7;

      /* if the annealing doesn't work well, lower the inner solver tolerance */
      if(pre_epsilon < epsilon) innerSolverTol *= 0.2;

      df.tol = innerSolverTol*0.5;
    }

    tao->niter++;
    ierr = TaoLogConvergenceHistory(tao,min_jw,epsilon,0.0,tao->ksp_its);CHKERRQ(ierr);
    ierr = TaoMonitor(tao,tao->niter,min_jw,epsilon,0.0,tao->step);CHKERRQ(ierr);
    ierr = (*tao->ops->convergencetest)(tao,tao->cnvP);CHKERRQ(ierr);
  }

  /* free all the memory */
  if (!rank) {
    ierr = destroy_grad_list(&grad_list);CHKERRQ(ierr);
    ierr = destroy_df_solver(&df);CHKERRQ(ierr);
  }

  ierr = VecDestroy(&bmrm->local_w);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&bmrm->scatter);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
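The loop above implements the BMRM bundle scheme: with regularizer lambda/2 ||w||^2 and empirical risk Remp, the tracked quantities are, as a sketch of what the code computes (a_i are the stored sub-gradients and b_i the offsets, stored in df.f with flipped sign):

  J(w) = \frac{\lambda}{2}\|w\|^{2} + R_{\mathrm{emp}}(w), \qquad
  J_t(w) = \frac{\lambda}{2}\|w\|^{2} + \max_{1\le i\le t}\left(\langle a_i, w\rangle + b_i\right),

  \varepsilon_t = \min_{s\le t} J(w_s) - J_t(w_t),

which correspond to jw, jtwt, and epsilon in the code; the iteration stops once the convergence test on epsilon is satisfied.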