Code Example #1
File: gpcg.c Project: fuentesdt/tao-1.10.1-p3
static int TaoGradProjections(TAO_SOLVER tao, TAO_GPCG *gpcg)
{
  int info;
  TaoInt lsflag=0,i;
  TaoTruth optimal_face=TAO_FALSE;
  double actred=-1.0,actred_max=0.0, gAg,gtg=gpcg->gnorm,alpha;
  double f_new,f_full,gdx;
  TaoMat *H;
  TaoVec *DX=gpcg->DX,*XL=gpcg->XL,*XU=gpcg->XU,*Work=gpcg->Work;
  TaoVec *X=gpcg->X,*G=gpcg->G;
  /*
     The gradient and function value passed into and out of this
     routine should be current and correct.
     
     The free, active, and binding variables should be already identified
  */
  
  TaoFunctionBegin;
  
  info = TaoGetSolution(tao,&X);CHKERRQ(info);
  info = TaoGetHessian(tao,&H);CHKERRQ(info);
  info = TaoGetVariableBounds(tao,&XL,&XU);CHKERRQ(info);

  for (i=0;i<gpcg->maxgpits;i++){

    if ( -actred <= (gpcg->pg_ftol)*actred_max) break;
 
    info = DX->BoundGradientProjection(G,XL,X,XU); CHKERRQ(info);
    info = DX->Negate(); CHKERRQ(info);
    info = DX->Dot(G,&gdx); CHKERRQ(info);

    info= H->Multiply(DX,Work); CHKERRQ(info);
    info= DX->Dot(Work,&gAg); CHKERRQ(info);
 
    gpcg->gp_iterates++; gpcg->total_gp_its++;    
  
    gtg=-gdx;
    alpha = TaoAbsDouble(gtg/gAg);
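    /* gtg = -g'dx and gAg = dx'H dx, so alpha = gtg/gAg is the exact minimizer of the
       quadratic model along DX; it seeds the step size for the line search below. */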
    gpcg->stepsize = alpha; f_new=gpcg->f;

    info = TaoLineSearchApply(tao,X,G,DX,Work,
			      &f_new,&f_full,&gpcg->stepsize,&lsflag);
    CHKERRQ(info);

    /* Update the iterate */
    actred = f_new - gpcg->f;
    actred_max = TaoMax(actred_max,-(f_new - gpcg->f));
    gpcg->f = f_new;
    info = GPCGCheckOptimalFace(X,XL,XU,G,Work,gpcg->Free_Local,gpcg->TT,
				&optimal_face); CHKERRQ(info);

    if ( optimal_face == TAO_TRUE ) break;

  }
  
  gpcg->gnorm=gtg;
  TaoFunctionReturn(0);

} /* End gradient projections */
Code Example #2
File: ssils.c Project: OpenCMISS-Dependencies/petsc
static PetscErrorCode TaoSolve_SSILS(Tao tao)
{
  TAO_SSLS                     *ssls = (TAO_SSLS *)tao->data;
  PetscReal                    psi, ndpsi, normd, innerd, t=0;
  PetscReal                    delta, rho;
  PetscInt                     iter=0,kspits;
  TaoConvergedReason           reason;
  TaoLineSearchConvergedReason ls_reason;
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  /* Assume that Setup has been called!
     Set the structure for the Jacobian and create a linear solver. */
  delta = ssls->delta;
  rho = ssls->rho;

  ierr = TaoComputeVariableBounds(tao);CHKERRQ(ierr);
  ierr = VecMedian(tao->XL,tao->solution,tao->XU,tao->solution);CHKERRQ(ierr);
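  /* The componentwise median of (XL, x, XU) projects the starting point onto the box constraints. */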
  ierr = TaoLineSearchSetObjectiveAndGradientRoutine(tao->linesearch,Tao_SSLS_FunctionGradient,tao);CHKERRQ(ierr);
  ierr = TaoLineSearchSetObjectiveRoutine(tao->linesearch,Tao_SSLS_Function,tao);CHKERRQ(ierr);

  /* Calculate the function value and fischer function value at the
     current iterate */
  ierr = TaoLineSearchComputeObjectiveAndGradient(tao->linesearch,tao->solution,&psi,ssls->dpsi);CHKERRQ(ierr);
  ierr = VecNorm(ssls->dpsi,NORM_2,&ndpsi);CHKERRQ(ierr);

  while (1) {
    ierr=PetscInfo3(tao, "iter: %D, merit: %g, ndpsi: %g\n",iter, (double)ssls->merit, (double)ndpsi);CHKERRQ(ierr);
    /* Check the termination criteria */
    ierr = TaoMonitor(tao,iter++,ssls->merit,ndpsi,0.0,t,&reason);CHKERRQ(ierr);
    if (reason!=TAO_CONTINUE_ITERATING) break;

    /* Calculate direction.  (Really negative of newton direction.  Therefore,
       rest of the code uses -d.) */
    ierr = KSPSetOperators(tao->ksp,tao->jacobian,tao->jacobian_pre);CHKERRQ(ierr);
    ierr = KSPSolve(tao->ksp,ssls->ff,tao->stepdirection);CHKERRQ(ierr);
    ierr = KSPGetIterationNumber(tao->ksp,&kspits);CHKERRQ(ierr);
    tao->ksp_its+=kspits;
    ierr = VecNorm(tao->stepdirection,NORM_2,&normd);CHKERRQ(ierr);
    ierr = VecDot(tao->stepdirection,ssls->dpsi,&innerd);CHKERRQ(ierr);

    /* Make sure that we have a descent direction */
    if (innerd <= delta*pow(normd, rho)) {
      ierr = PetscInfo(tao, "newton direction not descent\n");CHKERRQ(ierr);
      ierr = VecCopy(ssls->dpsi,tao->stepdirection);CHKERRQ(ierr);
      ierr = VecDot(tao->stepdirection,ssls->dpsi,&innerd);CHKERRQ(ierr);
    }

    ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);
    innerd = -innerd;

    ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
    ierr = TaoLineSearchApply(tao->linesearch,tao->solution,&psi,ssls->dpsi,tao->stepdirection,&t,&ls_reason);CHKERRQ(ierr);
    ierr = VecNorm(ssls->dpsi,NORM_2,&ndpsi);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
Code Example #3
File: tron.c Project: plguhur/petsc
static PetscErrorCode TronGradientProjections(Tao tao,TAO_TRON *tron)
{
  PetscErrorCode                 ierr;
  PetscInt                       i;
  TaoLineSearchConvergedReason ls_reason;
  PetscReal                      actred=-1.0,actred_max=0.0;
  PetscReal                      f_new;
  /*
     The gradient and function value passed into and out of this
     routine should be current and correct.

     The free, active, and binding variables should be already identified
  */
  PetscFunctionBegin;
  if (tron->Free_Local) {
    ierr = ISDestroy(&tron->Free_Local);CHKERRQ(ierr);
  }
  ierr = VecWhichBetween(tao->XL,tao->solution,tao->XU,&tron->Free_Local);CHKERRQ(ierr);

  for (i=0;i<tron->maxgpits;i++){
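    /* Stop the projected-gradient sweeps once the latest reduction (-actred) is only a
       small fraction (pg_ftol) of the largest reduction obtained so far. */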

    if ( -actred <= (tron->pg_ftol)*actred_max) break;

    tron->gp_iterates++; tron->total_gp_its++;
    f_new=tron->f;

    ierr = VecCopy(tao->gradient, tao->stepdirection);CHKERRQ(ierr);
    ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);
    ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,tron->pgstepsize);CHKERRQ(ierr);
    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f_new, tao->gradient, tao->stepdirection,
                              &tron->pgstepsize, &ls_reason);CHKERRQ(ierr);
    ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);


    /* Update the iterate */
    actred = f_new - tron->f;
    actred_max = PetscMax(actred_max,-(f_new - tron->f));
    tron->f = f_new;
    if (tron->Free_Local) {
      ierr = ISDestroy(&tron->Free_Local);CHKERRQ(ierr);
    }
    ierr = VecWhichBetween(tao->XL,tao->solution,tao->XU,&tron->Free_Local);CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}
Code Example #4
File: gpcg.c Project: pombredanne/petsc
static PetscErrorCode GPCGGradProjections(Tao tao)
{
  PetscErrorCode                 ierr;
  TAO_GPCG                       *gpcg = (TAO_GPCG *)tao->data;
  PetscInt                       i;
  PetscReal                      actred=-1.0,actred_max=0.0, gAg,gtg=gpcg->gnorm,alpha;
  PetscReal                      f_new,gdx,stepsize;
  Vec                            DX=tao->stepdirection,XL=tao->XL,XU=tao->XU,Work=gpcg->Work;
  Vec                            X=tao->solution,G=tao->gradient;
  TaoLineSearchConvergedReason lsflag=TAOLINESEARCH_CONTINUE_ITERATING;

  /*
     The free, active, and binding variables should be already identified
  */
  PetscFunctionBegin;
  for (i=0;i<gpcg->maxgpits;i++){
    if ( -actred <= (gpcg->pg_ftol)*actred_max) break;
    ierr = VecBoundGradientProjection(G,X,XL,XU,DX);CHKERRQ(ierr);
    ierr = VecScale(DX,-1.0);CHKERRQ(ierr);
    ierr = VecDot(DX,G,&gdx);CHKERRQ(ierr);

    ierr = MatMult(tao->hessian,DX,Work);CHKERRQ(ierr);
    ierr = VecDot(DX,Work,&gAg);CHKERRQ(ierr);

    gpcg->gp_iterates++;
    gpcg->total_gp_its++;

    gtg=-gdx;
    alpha = PetscAbsReal(gtg/gAg);
    ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,alpha);CHKERRQ(ierr);
    f_new=gpcg->f;
    ierr = TaoLineSearchApply(tao->linesearch,X,&f_new,G,DX,&stepsize,&lsflag);CHKERRQ(ierr);

    /* Update the iterate */
    actred = f_new - gpcg->f;
    actred_max = PetscMax(actred_max,-(f_new - gpcg->f));
    gpcg->f = f_new;
    ierr = ISDestroy(&gpcg->Free_Local);CHKERRQ(ierr);
    ierr = VecWhichBetween(XL,X,XU,&gpcg->Free_Local);CHKERRQ(ierr);
  }

  gpcg->gnorm=gtg;
  PetscFunctionReturn(0);
} /* End gradient projections */
Code Example #5
File: bnls.c Project: fuentesdt/tao-1.10.1-p3
static int TaoSolve_BNLS(TAO_SOLVER tao, void*solver){

  TAO_BNLS *bnls = (TAO_BNLS *)solver;
  int info;
  TaoInt lsflag,iter=0;
  TaoTerminateReason reason=TAO_CONTINUE_ITERATING;
  double f,f_full,gnorm,gdx,stepsize=1.0;
  TaoTruth success;
  TaoVec *XU, *XL;
  TaoVec *X,  *G=bnls->G, *PG=bnls->PG;
  TaoVec *R=bnls->R, *DXFree=bnls->DXFree;
  TaoVec *DX=bnls->DX, *Work=bnls->Work;
  TaoMat *H, *Hsub=bnls->Hsub;
  TaoIndexSet *FreeVariables = bnls->FreeVariables;

  TaoFunctionBegin;

  /* Check if upper bound greater than lower bound. */
  info = TaoGetSolution(tao,&X);CHKERRQ(info); bnls->X=X;
  info = TaoGetVariableBounds(tao,&XL,&XU);CHKERRQ(info);
  info = TaoEvaluateVariableBounds(tao,XL,XU); CHKERRQ(info);
  info = TaoGetHessian(tao,&H);CHKERRQ(info); bnls->H=H;

  /*   Project the current point onto the feasible set */
  info = X->Median(XL,X,XU); CHKERRQ(info);
  
  TaoLinearSolver *tls;
  // Modify the linear solver to a conjugate gradient method
  info = TaoGetLinearSolver(tao, &tls); CHKERRQ(info);
  TaoLinearSolverPetsc *pls;
  pls  = dynamic_cast <TaoLinearSolverPetsc *> (tls);
  // set trust radius to zero 
  // PETSc ignores this case and should return the negative curvature direction
  // at its current default length
  pls->SetTrustRadius(0.0);

  if(!bnls->M) bnls->M = new TaoLMVMMat(X);
  TaoLMVMMat *M = bnls->M;
  KSP pksp = pls->GetKSP();
  // we will want to provide an initial guess in case neg curvature on the first iteration
  info = KSPSetInitialGuessNonzero(pksp,PETSC_TRUE); CHKERRQ(info);
  PC ppc;
  // Modify the preconditioner to use the bfgs approximation
  info = KSPGetPC(pksp, &ppc); CHKERRQ(info);
  PetscTruth  BFGSPreconditioner=PETSC_FALSE;// debug flag
  info = PetscOptionsGetTruth(PETSC_NULL,"-bnls_pc_bfgs",
                              &BFGSPreconditioner,PETSC_NULL); CHKERRQ(info);
  if( BFGSPreconditioner) 
    { 
     info=PetscInfo(tao,"TaoSolve_BNLS:  using bfgs preconditioner\n");
     info = KSPSetNormType(pksp, KSP_NORM_PRECONDITIONED); CHKERRQ(info);
     info = PCSetType(ppc, PCSHELL); CHKERRQ(info);
     info = PCShellSetName(ppc, "bfgs"); CHKERRQ(info);
     info = PCShellSetContext(ppc, M); CHKERRQ(info);
     info = PCShellSetApply(ppc, bfgs_apply); CHKERRQ(info);
    }
  else
    {// default to none
     info=PetscInfo(tao,"TaoSolve_BNLS:  using no preconditioner\n");
     info = PCSetType(ppc, PCNONE); CHKERRQ(info);
    }

  info = TaoComputeMeritFunctionGradient(tao,X,&f,G);CHKERRQ(info);
  info = PG->BoundGradientProjection(G,XL,X,XU);CHKERRQ(info);
  info = PG->Norm2(&gnorm); CHKERRQ(info);
  
  // Set initial scaling for the function
  if (f != 0.0) {
    info = M->SetDelta(2.0 * TaoAbsDouble(f) / (gnorm*gnorm)); CHKERRQ(info);
  }
  else {
    info = M->SetDelta(2.0 / (gnorm*gnorm)); CHKERRQ(info);
  }
  
  while (reason==TAO_CONTINUE_ITERATING){
    
    /* Project the gradient and calculate the norm */
    info = PG->BoundGradientProjection(G,XL,X,XU);CHKERRQ(info);
    info = PG->Norm2(&gnorm); CHKERRQ(info);
    
    info = M->Update(X, PG); CHKERRQ(info);

    PetscScalar ewAtol  = PetscMin(0.5,gnorm)*gnorm;
    info = KSPSetTolerances(pksp,PETSC_DEFAULT,ewAtol,
                            PETSC_DEFAULT, PETSC_DEFAULT); CHKERRQ(info);
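    /* The absolute tolerance min(0.5,||g||)*||g|| tightens the inner Krylov solve as the
       projected gradient norm shrinks, in the spirit of an Eisenstat-Walker forcing term. */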
    info=PetscInfo1(tao,"TaoSolve_BNLS: gnorm =%g\n",gnorm);
    pksp->printreason = PETSC_TRUE;
    info = KSPView(pksp,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(info);
    M->View();

    info = TaoMonitor(tao,iter++,f,gnorm,0.0,stepsize,&reason);
    CHKERRQ(info);
    if (reason!=TAO_CONTINUE_ITERATING) break;

    info = FreeVariables->WhichEqual(PG,G); CHKERRQ(info);

    info = TaoComputeHessian(tao,X,H);CHKERRQ(info);
    
    /* Create a reduced linear system */

    info = R->SetReducedVec(G,FreeVariables);CHKERRQ(info);
    info = R->Negate();CHKERRQ(info);

    /* Use gradient as initial guess */
    PetscTruth  UseGradientIG=PETSC_FALSE;// debug flag
    info = PetscOptionsGetTruth(PETSC_NULL,"-bnls_use_gradient_ig",
                                &UseGradientIG,PETSC_NULL); CHKERRQ(info);
    if(UseGradientIG)
      info = DX->CopyFrom(G);
    else
     {
      info=PetscInfo(tao,"TaoSolve_BNLS: use bfgs init guess \n");
      info = M->Solve(G, DX, &success);
     }
    CHKERRQ(info);
    info = DXFree->SetReducedVec(DX,FreeVariables);CHKERRQ(info);
    info = DXFree->Negate(); CHKERRQ(info);
    
    info = Hsub->SetReducedMatrix(H,FreeVariables,FreeVariables);CHKERRQ(info);

    bnls->gamma_factor /= 2;
    success = TAO_FALSE;

    while (success==TAO_FALSE) {
      
      /* Approximately solve the reduced linear system */
      info = TaoPreLinearSolve(tao,Hsub);CHKERRQ(info);
      info = TaoLinearSolve(tao,Hsub,R,DXFree,&success);CHKERRQ(info);

      info = DX->SetToZero(); CHKERRQ(info);
      info = DX->ReducedXPY(DXFree,FreeVariables);CHKERRQ(info);
      info = DX->Dot(G,&gdx); CHKERRQ(info);

      if (gdx>=0 || success==TAO_FALSE) { /* use bfgs direction */
        info = M->Solve(G, DX, &success); CHKERRQ(info);
        info = DX->BoundGradientProjection(DX,XL,X,XU); CHKERRQ(info);
        info = DX->Negate(); CHKERRQ(info);
        // Check for success (descent direction)
        info = DX->Dot(G,&gdx); CHKERRQ(info);
        if (gdx >= 0) {
          // Step is not descent or solve was not successful
          // Use steepest descent direction (scaled)
          if (f != 0.0) {
            info = M->SetDelta(2.0 * TaoAbsDouble(f) / (gnorm*gnorm)); CHKERRQ(info);
          }
          else {
            info = M->SetDelta(2.0 / (gnorm*gnorm)); CHKERRQ(info);
          }
          info = M->Reset(); CHKERRQ(info);
          info = M->Update(X, G); CHKERRQ(info);
          info = DX->CopyFrom(G);
          info = DX->Negate(); CHKERRQ(info);
          info = DX->Dot(G,&gdx); CHKERRQ(info);
          info=PetscInfo1(tao,"LMVM Solve Fail use steepest descent, gdx %22.12e \n",gdx);
        } 
        else {
          info=PetscInfo1(tao,"Newton Solve Fail use BFGS direction, gdx %22.12e \n",gdx);
        } 
	success = TAO_TRUE;
//        bnls->gamma_factor *= 2; 
//        bnls->gamma = bnls->gamma_factor*(gnorm); 
//#if !defined(PETSC_USE_COMPLEX)
//        info=PetscInfo2(tao,"TaoSolve_NLS:  modify diagonal (assume same nonzero structure), gamma_factor=%g, gamma=%g\n",bnls->gamma_factor,bnls->gamma);
//	CHKERRQ(info);
//#else
//        info=PetscInfo3(tao,"TaoSolve_NLS:  modify diagonal (asuume same nonzero structure), gamma_factor=%g, gamma=%g, gdx %22.12e \n",
//	     bnls->gamma_factor,PetscReal(bnls->gamma),gdx);CHKERRQ(info);
//#endif
//        info = Hsub->ShiftDiagonal(bnls->gamma);CHKERRQ(info);
//        if (f != 0.0) {
//          info = M->SetDelta(2.0 * TaoAbsDouble(f) / (gnorm*gnorm)); CHKERRQ(info);
//        }
//        else {
//          info = M->SetDelta(2.0 / (gnorm*gnorm)); CHKERRQ(info);
//        }
//        info = M->Reset(); CHKERRQ(info);
//        info = M->Update(X, G); CHKERRQ(info);
//        success = TAO_FALSE;
      } else {
        info=PetscInfo1(tao,"Newton Solve is descent direction, gdx %22.12e \n",gdx);
	success = TAO_TRUE;
      }

    }
    
    stepsize=1.0;	
    info = TaoLineSearchApply(tao,X,G,DX,Work,
			      &f,&f_full,&stepsize,&lsflag);
    CHKERRQ(info);

    
  }  /* END MAIN LOOP  */

  TaoFunctionReturn(0);
}
Code Example #6
File: owlqn.c Project: OpenCMISS-Dependencies/petsc
static PetscErrorCode TaoSolve_OWLQN(Tao tao)
{
  TAO_OWLQN                    *lmP = (TAO_OWLQN *)tao->data;
  PetscReal                    f, fold, gdx, gnorm;
  PetscReal                    step = 1.0;
  PetscReal                    delta;
  PetscErrorCode               ierr;
  PetscInt                     stepType;
  PetscInt                     iter = 0;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;

  PetscFunctionBegin;
  if (tao->XL || tao->XU || tao->ops->computebounds) {
    ierr = PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by owlqn algorithm\n");CHKERRQ(ierr);
  }

  /* Check convergence criteria */
  ierr = TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);CHKERRQ(ierr);

  ierr = VecCopy(tao->gradient, lmP->GV);CHKERRQ(ierr);

  ierr = ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);CHKERRQ(ierr);

  ierr = VecNorm(lmP->GV,NORM_2,&gnorm);CHKERRQ(ierr);

  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");

  ierr = TaoMonitor(tao, iter, f, gnorm, 0.0, step, &reason);CHKERRQ(ierr);
  if (reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);

  /* Set initial scaling for the function */
  if (f != 0.0) {
    delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
  } else {
    delta = 2.0 / (gnorm*gnorm);
  }
  ierr = MatLMVMSetDelta(lmP->M,delta);CHKERRQ(ierr);
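  /* delta scales the initial Hessian approximation inside the limited-memory matrix;
     the same 2|f|/||g||^2 heuristic is used by the other LMVM-based solvers in this collection. */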

  /* Set counter for gradient/reset steps */
  lmP->bfgs = 0;
  lmP->sgrad = 0;
  lmP->grad = 0;

  /* Have not converged; continue with Newton method */
  while (reason == TAO_CONTINUE_ITERATING) {
    /* Compute direction */
    ierr = MatLMVMUpdate(lmP->M,tao->solution,tao->gradient);CHKERRQ(ierr);
    ierr = MatLMVMSolve(lmP->M, lmP->GV, lmP->D);CHKERRQ(ierr);

    ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

    ++lmP->bfgs;

    /* Check for success (descent direction) */
    ierr = VecDot(lmP->D, lmP->GV , &gdx);CHKERRQ(ierr);
    if ((gdx <= 0.0) || PetscIsInfOrNanReal(gdx)) {

      /* The step is not a descent direction or the direction produced Inf/NaN.
         More than one BFGS update must have been applied in this case, because
         the first solve after a reset produces the scaled gradient direction,
         which is guaranteed to be descent.

         Use steepest descent direction (scaled) */
      ++lmP->grad;

      if (f != 0.0) {
        delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
      } else {
        delta = 2.0 / (gnorm*gnorm);
      }
      ierr = MatLMVMSetDelta(lmP->M, delta);CHKERRQ(ierr);
      ierr = MatLMVMReset(lmP->M);CHKERRQ(ierr);
      ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
      ierr = MatLMVMSolve(lmP->M,lmP->GV, lmP->D);CHKERRQ(ierr);

      ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

      lmP->bfgs = 1;
      ++lmP->sgrad;
      stepType = OWLQN_SCALED_GRADIENT;
    } else {
      if (1 == lmP->bfgs) {
        /* The first BFGS direction is always the scaled gradient */
        ++lmP->sgrad;
        stepType = OWLQN_SCALED_GRADIENT;
      } else {
        ++lmP->bfgs;
        stepType = OWLQN_BFGS;
      }
    }

    ierr = VecScale(lmP->D, -1.0);CHKERRQ(ierr);

    /* Perform the linesearch */
    fold = f;
    ierr = VecCopy(tao->solution, lmP->Xold);CHKERRQ(ierr);
    ierr = VecCopy(tao->gradient, lmP->Gold);CHKERRQ(ierr);

    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, lmP->GV, lmP->D, &step,&ls_status);CHKERRQ(ierr);
    ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

    while (((int)ls_status < 0) && (stepType != OWLQN_GRADIENT)) {

      /* Reset factors and use scaled gradient step */
      f = fold;
      ierr = VecCopy(lmP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(lmP->Gold, tao->gradient);CHKERRQ(ierr);
      ierr = VecCopy(tao->gradient, lmP->GV);CHKERRQ(ierr);

      ierr = ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);CHKERRQ(ierr);

      switch(stepType) {
      case OWLQN_BFGS:
        /* Failed to obtain acceptable iterate with BFGS step
           Attempt to use the scaled gradient direction */

        if (f != 0.0) {
          delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
        } else {
          delta = 2.0 / (gnorm*gnorm);
        }
        ierr = MatLMVMSetDelta(lmP->M, delta);CHKERRQ(ierr);
        ierr = MatLMVMReset(lmP->M);CHKERRQ(ierr);
        ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
        ierr = MatLMVMSolve(lmP->M, lmP->GV, lmP->D);CHKERRQ(ierr);

        ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

        lmP->bfgs = 1;
        ++lmP->sgrad;
        stepType = OWLQN_SCALED_GRADIENT;
        break;

      case OWLQN_SCALED_GRADIENT:
        /* The scaled gradient step did not produce a new iterate;
           attempt to use the gradient direction.
           Need to make sure we are not using a different diagonal scaling */
        ierr = MatLMVMSetDelta(lmP->M, 1.0);CHKERRQ(ierr);
        ierr = MatLMVMReset(lmP->M);CHKERRQ(ierr);
        ierr = MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
        ierr = MatLMVMSolve(lmP->M, lmP->GV, lmP->D);CHKERRQ(ierr);

        ierr = ProjDirect_OWLQN(lmP->D,lmP->GV);CHKERRQ(ierr);

        lmP->bfgs = 1;
        ++lmP->grad;
        stepType = OWLQN_GRADIENT;
        break;
      }
      ierr = VecScale(lmP->D, -1.0);CHKERRQ(ierr);


      /* Perform the linesearch */
      ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, lmP->GV, lmP->D, &step, &ls_status);CHKERRQ(ierr);
      ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);
    }

    if ((int)ls_status < 0) {
      /* Failed to find an improving point*/
      f = fold;
      ierr = VecCopy(lmP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(lmP->Gold, tao->gradient);CHKERRQ(ierr);
      ierr = VecCopy(tao->gradient, lmP->GV);CHKERRQ(ierr);
      step = 0.0;
    } else {
      /* GV was used to store the gradient g during the line search, so copy it back */
      ierr = VecCopy(lmP->GV, tao->gradient);CHKERRQ(ierr);
    }

    ierr = ComputePseudoGrad_OWLQN(tao->solution,lmP->GV,lmP->lambda);CHKERRQ(ierr);

    /* Check for termination */

    ierr = VecNorm(lmP->GV,NORM_2,&gnorm);CHKERRQ(ierr);

    iter++;
    ierr = TaoMonitor(tao,iter,f,gnorm,0.0,step,&reason);CHKERRQ(ierr);

    if ((int)ls_status < 0) break;
  }
  PetscFunctionReturn(0);
}
Code Example #7
File: gpcg.c Project: fuentesdt/tao-1.10.1-p3
static int TaoSolve_GPCG(TAO_SOLVER tao, void *solver)
{
  TAO_GPCG *gpcg = (TAO_GPCG *)solver ;
  int info;
  TaoInt lsflag,iter=0;
  TaoTruth optimal_face=TAO_FALSE,success;
  double actred,f,f_new,f_full,gnorm,gdx,stepsize;
  double c;
  TaoVec *XU, *XL;
  TaoVec *X,  *G=gpcg->G , *B=gpcg->B, *PG=gpcg->PG;
  TaoVec *R=gpcg->R, *DXFree=gpcg->DXFree;
  TaoVec *G_New=gpcg->G_New;
  TaoVec *DX=gpcg->DX, *Work=gpcg->Work;
  TaoMat *H, *Hsub=gpcg->Hsub;
  TaoIndexSet *Free_Local = gpcg->Free_Local, *TIS=gpcg->TT;
  TaoTerminateReason reason;

  TaoFunctionBegin;

  /* Check if upper bound greater than lower bound. */
  info = TaoGetSolution(tao,&X);CHKERRQ(info);
  info = TaoGetHessian(tao,&H);CHKERRQ(info);

  info = TaoGetVariableBounds(tao,&XL,&XU);CHKERRQ(info);
  info = TaoEvaluateVariableBounds(tao,XL,XU); CHKERRQ(info);
  info = X->Median(XL,X,XU); CHKERRQ(info);

  info = TaoComputeHessian(tao,X,H); CHKERRQ(info);
  info = TaoComputeFunctionGradient(tao,X,&f,B);
  CHKERRQ(info);

  /* Compute quadratic representation */
  info = H->Multiply(X,Work); CHKERRQ(info);
  info = X->Dot(Work,&c); CHKERRQ(info);
  info = B->Axpy(-1.0,Work); CHKERRQ(info);
  info = X->Dot(B,&stepsize); CHKERRQ(info);
  gpcg->c=f-c/2.0-stepsize;
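  /* With f(x) = (1/2) x'Hx + b'x + c and g = Hx + b, the code above recovers
     b = g - Hx (stored in B) and c = f - (1/2) x'Hx - b'x. */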

  info = Free_Local->WhichBetween(XL,X,XU); CHKERRQ(info);
  
  info = TaoGPCGComputeFunctionGradient(tao, X, &gpcg->f, G); CHKERRQ(info);
  
  /* Project the gradient and calculate the norm */
  info = G_New->CopyFrom(G);CHKERRQ(info);
  info = PG->BoundGradientProjection(G,XL,X,XU);CHKERRQ(info);
  info = PG->Norm2(&gpcg->gnorm); CHKERRQ(info);
  gpcg->step=1.0;

    /* Check Stopping Condition      */
  info=TaoMonitor(tao,iter++,gpcg->f,gpcg->gnorm,0,gpcg->step,&reason); CHKERRQ(info);

  while (reason == TAO_CONTINUE_ITERATING){

    info = TaoGradProjections(tao, gpcg); CHKERRQ(info);

    info = Free_Local->WhichBetween(XL,X,XU); CHKERRQ(info);
    info = Free_Local->GetSize(&gpcg->n_free); CHKERRQ(info);
    f=gpcg->f; gnorm=gpcg->gnorm; 

    if (gpcg->n_free > 0){
      
      /* Create a reduced linear system */
      info = R->SetReducedVec(G,Free_Local);CHKERRQ(info);
      info = R->Negate(); CHKERRQ(info);
      info = DXFree->SetReducedVec(DX,Free_Local);CHKERRQ(info);
      info = DXFree->SetToZero(); CHKERRQ(info);

      info = Hsub->SetReducedMatrix(H,Free_Local,Free_Local);CHKERRQ(info);

      info = TaoPreLinearSolve(tao,Hsub);CHKERRQ(info);

      /* Approximately solve the reduced linear system */
      info = TaoLinearSolve(tao,Hsub,R,DXFree,&success);CHKERRQ(info);
      
      info=DX->SetToZero(); CHKERRQ(info);
      info=DX->ReducedXPY(DXFree,Free_Local);CHKERRQ(info);
      
      info = G->Dot(DX,&gdx); CHKERRQ(info);
      
      stepsize=1.0; f_new=f;
      info = TaoLineSearchApply(tao,X,G,DX,Work,
				&f_new,&f_full,&stepsize,&lsflag);
      CHKERRQ(info);
      
      actred = f_new - f;
      
      /* Evaluate the function and gradient at the new point */      
      info =  PG->BoundGradientProjection(G,XL,X,XU);
      CHKERRQ(info);
      info = PG->Norm2(&gnorm);  CHKERRQ(info);      
      f=f_new;
      
      info = GPCGCheckOptimalFace(X,XL,XU,PG,Work, Free_Local, TIS,
				  &optimal_face); CHKERRQ(info);
      
    } else {
      
      actred = 0; stepsize=1.0;
      /* if there were no free variables, no cg method */

    }

    info = TaoMonitor(tao,iter,f,gnorm,0.0,stepsize,&reason); CHKERRQ(info);
    gpcg->f=f;gpcg->gnorm=gnorm; gpcg->actred=actred;
    if (reason!=TAO_CONTINUE_ITERATING) break;
    iter++;


  }  /* END MAIN LOOP  */

  TaoFunctionReturn(0);
}
Code Example #8
File: ssfls.c Project: fuentesdt/tao-1.10.1-p3
static int TaoSolve_SSFLS(TAO_SOLVER tao, void *solver)
{
  TAO_SSLS *ssls = (TAO_SSLS *)solver;
  //  TaoLinearSolver *lsolver;
  TaoVec *x, *l, *u, *ff, *dpsi, *d, *w;
  TaoMat *J;
  double psi, psi_full, ndpsi, normd, innerd, t=0;
  double delta, rho;
  int iter=0, info;
  TaoTerminateReason reason;
  TaoTruth flag;

  TaoFunctionBegin;

  // Assume that Setup has been called!
  // Set the structure for the Jacobian and create a linear solver.
 
  delta = ssls->delta;
  rho = ssls->rho;

  info = TaoGetSolution(tao, &x); CHKERRQ(info);
  l=ssls->xl;
  u=ssls->xu;
  info = TaoEvaluateVariableBounds(tao,l,u); CHKERRQ(info);
  info = x->Median(l,x,u); CHKERRQ(info);
  info = TaoGetJacobian(tao, &J); CHKERRQ(info);

  ff = ssls->ff;
  dpsi = ssls->dpsi;
  d = ssls->d;
  w = ssls->w;

  info = x->PointwiseMaximum(x, l); CHKERRQ(info);
  info = x->PointwiseMinimum(x, u); CHKERRQ(info);
  info = TaoSetMeritFunction(tao, Tao_SSLS_Function, Tao_SSLS_FunctionGradient,
			     TAO_NULL, TAO_NULL, TAO_NULL, ssls); CHKERRQ(info);

  // Calculate the function value and fischer function value at the 
  // current iterate
  info = TaoComputeMeritFunctionGradient(tao, x, &psi, dpsi); CHKERRQ(info);
  info = dpsi->Norm2(&ndpsi);

  while (1) {
    info=PetscInfo3(tao, "TaoSolve_SSFLS: %d: merit: %5.4e, ndpsi: %5.4e\n",
		       iter, ssls->merit, ndpsi);CHKERRQ(info);

    // Check the termination criteria
    info = TaoMonitor(tao,iter++,ssls->merit,ndpsi,0.0,t,&reason); 
           CHKERRQ(info);
    if (reason!=TAO_CONTINUE_ITERATING) break;

    // Calculate direction.  (Really negative of newton direction.  Therefore,
    // rest of the code uses -d.)
    info = TaoPreLinearSolve(tao, J); CHKERRQ(info);
    info = TaoLinearSolve(tao, J, ff, d, &flag); CHKERRQ(info);
    
    info = w->CopyFrom(d); CHKERRQ(info);
    info = w->Negate(); CHKERRQ(info);
    info = w->BoundGradientProjection(w,l, x, u);
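    /* w now holds the projection of -d onto the feasible directions at x; the descent
       test below uses this projected step rather than the raw Newton direction. */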

    info = w->Norm2(&normd); CHKERRQ(info);
    info = w->Dot(dpsi, &innerd); CHKERRQ(info);

    // Make sure that we have a descent direction
    if (innerd >= -delta*pow(normd, rho)) {
      info = PetscInfo1(tao, "TaoSolve_SSFLS: %d: newton direction not descent\n", iter); CHKERRQ(info);
      info = d->CopyFrom(dpsi); CHKERRQ(info);
      info = w->Dot(dpsi, &innerd); CHKERRQ(info);
    }
    info = d->Negate(); CHKERRQ(info);
    innerd = -innerd;

    t = 1;
    info = TaoLineSearchApply(tao, x, dpsi, d, w, 
                              &psi, &psi_full, &t, &tao->lsflag); CHKERRQ(info);
    info = dpsi->Norm2(&ndpsi);
  }
  TaoFunctionReturn(0);
}
Code Example #9
File: tron.c Project: plguhur/petsc
static PetscErrorCode TaoSolve_TRON(Tao tao)
{
  TAO_TRON                     *tron = (TAO_TRON *)tao->data;
  PetscErrorCode               ierr;
  PetscInt                     its;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_reason = TAOLINESEARCH_CONTINUE_ITERATING;
  PetscReal                    prered,actred,delta,f,f_new,rhok,gdx,xdiff,stepsize;

  PetscFunctionBegin;
  tron->pgstepsize=1.0;
  tao->trust = tao->trust0;
  /*   Project the current point onto the feasible set */
  ierr = TaoComputeVariableBounds(tao);CHKERRQ(ierr);
  ierr = VecMedian(tao->XL,tao->solution,tao->XU,tao->solution);CHKERRQ(ierr);
  ierr = TaoLineSearchSetVariableBounds(tao->linesearch,tao->XL,tao->XU);CHKERRQ(ierr);

  ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&tron->f,tao->gradient);CHKERRQ(ierr);
  ierr = ISDestroy(&tron->Free_Local);CHKERRQ(ierr);

  ierr = VecWhichBetween(tao->XL,tao->solution,tao->XU,&tron->Free_Local);CHKERRQ(ierr);

  /* Project the gradient and calculate the norm */
  ierr = VecBoundGradientProjection(tao->gradient,tao->solution, tao->XL, tao->XU, tao->gradient);CHKERRQ(ierr);
  ierr = VecNorm(tao->gradient,NORM_2,&tron->gnorm);CHKERRQ(ierr);

  if (PetscIsInfOrNanReal(tron->f) || PetscIsInfOrNanReal(tron->gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
  if (tao->trust <= 0) {
    tao->trust=PetscMax(tron->gnorm*tron->gnorm,1.0);
  }

  tron->stepsize=tao->trust;
  ierr = TaoMonitor(tao, tao->niter, tron->f, tron->gnorm, 0.0, tron->stepsize, &reason);CHKERRQ(ierr);
  while (reason==TAO_CONTINUE_ITERATING){
    tao->ksp_its=0;
    ierr = TronGradientProjections(tao,tron);CHKERRQ(ierr);
    f=tron->f; delta=tao->trust;
    tron->n_free_last = tron->n_free;
    ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);

    ierr = ISGetSize(tron->Free_Local, &tron->n_free);CHKERRQ(ierr);

    /* If no free variables */
    if (tron->n_free == 0) {
      actred=0;
      ierr = PetscInfo(tao,"No free variables in tron iteration.\n");CHKERRQ(ierr);
      ierr = VecNorm(tao->gradient,NORM_2,&tron->gnorm);CHKERRQ(ierr);
      ierr = TaoMonitor(tao, tao->niter, tron->f, tron->gnorm, 0.0, delta, &reason);CHKERRQ(ierr);
      if (!reason) {
        reason = TAO_CONVERGED_STEPTOL;
        ierr = TaoSetConvergedReason(tao,reason);CHKERRQ(ierr);
      }

      break;

    }
    /* use free_local to mask/submat gradient, hessian, stepdirection */
    ierr = TaoVecGetSubVec(tao->gradient,tron->Free_Local,tao->subset_type,0.0,&tron->R);CHKERRQ(ierr);
    ierr = TaoVecGetSubVec(tao->gradient,tron->Free_Local,tao->subset_type,0.0,&tron->DXFree);CHKERRQ(ierr);
    ierr = VecSet(tron->DXFree,0.0);CHKERRQ(ierr);
    ierr = VecScale(tron->R, -1.0);CHKERRQ(ierr);
    ierr = TaoMatGetSubMat(tao->hessian, tron->Free_Local, tron->diag, tao->subset_type, &tron->H_sub);CHKERRQ(ierr);
    if (tao->hessian == tao->hessian_pre) {
      ierr = MatDestroy(&tron->Hpre_sub);CHKERRQ(ierr);
      ierr = PetscObjectReference((PetscObject)(tron->H_sub));CHKERRQ(ierr);
      tron->Hpre_sub = tron->H_sub;
    } else {
      ierr = TaoMatGetSubMat(tao->hessian_pre, tron->Free_Local, tron->diag, tao->subset_type,&tron->Hpre_sub);CHKERRQ(ierr);
    }
    ierr = KSPReset(tao->ksp);CHKERRQ(ierr);
    ierr = KSPSetOperators(tao->ksp, tron->H_sub, tron->Hpre_sub);CHKERRQ(ierr);
    while (1) {

      /* Approximately solve the reduced linear system */
      ierr = KSPSTCGSetRadius(tao->ksp,delta);CHKERRQ(ierr);

      ierr = KSPSolve(tao->ksp, tron->R, tron->DXFree);CHKERRQ(ierr);
      ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
      tao->ksp_its+=its;
      tao->ksp_tot_its+=its;
      ierr = VecSet(tao->stepdirection,0.0);CHKERRQ(ierr);

      /* Add dxfree matrix to compute step direction vector */
      ierr = VecISAXPY(tao->stepdirection,tron->Free_Local,1.0,tron->DXFree);CHKERRQ(ierr);
      if (0) {
        PetscReal rhs,stepnorm;
        ierr = VecNorm(tron->R,NORM_2,&rhs);CHKERRQ(ierr);
        ierr = VecNorm(tron->DXFree,NORM_2,&stepnorm);CHKERRQ(ierr);
        ierr = PetscPrintf(PETSC_COMM_WORLD,"|rhs|=%g\t|s|=%g\n",(double)rhs,(double)stepnorm);CHKERRQ(ierr);
      }


      ierr = VecDot(tao->gradient, tao->stepdirection, &gdx);CHKERRQ(ierr);
      ierr = PetscInfo1(tao,"Expected decrease in function value: %14.12e\n",(double)gdx);CHKERRQ(ierr);

      ierr = VecCopy(tao->solution, tron->X_New);CHKERRQ(ierr);
      ierr = VecCopy(tao->gradient, tron->G_New);CHKERRQ(ierr);

      stepsize=1.0;f_new=f;

      ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
      ierr = TaoLineSearchApply(tao->linesearch, tron->X_New, &f_new, tron->G_New, tao->stepdirection,&stepsize,&ls_reason);CHKERRQ(ierr);
      ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

      ierr = MatMult(tao->hessian, tao->stepdirection, tron->Work);CHKERRQ(ierr);
      ierr = VecAYPX(tron->Work, 0.5, tao->gradient);CHKERRQ(ierr);
      ierr = VecDot(tao->stepdirection, tron->Work, &prered);CHKERRQ(ierr);
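      /* Work = g + (1/2) H d, so prered = g'd + (1/2) d'Hd is the change predicted by the
         quadratic model; rhok below measures how well the actual reduction matches it. */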
      actred = f_new - f;
      if (actred<0) {
        rhok=PetscAbs(-actred/prered);
      } else {
        rhok=0.0;
      }

      /* Compare actual improvement to the quadratic model */
      if (rhok > tron->eta1) { /* Accept the point */
        /* d = x_new - x */
        ierr = VecCopy(tron->X_New, tao->stepdirection);CHKERRQ(ierr);
        ierr = VecAXPY(tao->stepdirection, -1.0, tao->solution);CHKERRQ(ierr);

        ierr = VecNorm(tao->stepdirection, NORM_2, &xdiff);CHKERRQ(ierr);
        xdiff *= stepsize;

        /* Adjust trust region size */
        if (rhok < tron->eta2 ){
          delta = PetscMin(xdiff,delta)*tron->sigma1;
        } else if (rhok > tron->eta4 ){
          delta= PetscMin(xdiff,delta)*tron->sigma3;
        } else if (rhok > tron->eta3 ){
          delta=PetscMin(xdiff,delta)*tron->sigma2;
        }
        ierr = VecBoundGradientProjection(tron->G_New,tron->X_New, tao->XL, tao->XU, tao->gradient);CHKERRQ(ierr);
        if (tron->Free_Local) {
          ierr = ISDestroy(&tron->Free_Local);CHKERRQ(ierr);
        }
        ierr = VecWhichBetween(tao->XL, tron->X_New, tao->XU, &tron->Free_Local);CHKERRQ(ierr);
        f=f_new;
        ierr = VecNorm(tao->gradient,NORM_2,&tron->gnorm);CHKERRQ(ierr);
        ierr = VecCopy(tron->X_New, tao->solution);CHKERRQ(ierr);
        ierr = VecCopy(tron->G_New, tao->gradient);CHKERRQ(ierr);
        break;
      }
      else if (delta <= 1e-30) {
        break;
      }
      else {
        delta /= 4.0;
      }
    } /* end linear solve loop */


    tron->f=f; tron->actred=actred; tao->trust=delta;
    tao->niter++;
    ierr = TaoMonitor(tao, tao->niter, tron->f, tron->gnorm, 0.0, delta, &reason);CHKERRQ(ierr);
  }  /* END MAIN LOOP  */

  PetscFunctionReturn(0);
}
Code Example #10
File: sqpcon.c Project: 00liujj/petsc
static PetscErrorCode TaoSolve_SQPCON(Tao tao)
{
  TAO_SQPCON                   *sqpconP = (TAO_SQPCON*)tao->data;
  PetscInt                     iter=0;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_reason = TAOLINESEARCH_CONTINUE_ITERATING;
  PetscReal                    step=1.0,f,fm, fold;
  PetscReal                    cnorm, mnorm;
  PetscBool                    use_update=PETSC_TRUE; /*  don't update Q if line search failed */
  PetscErrorCode               ierr;

  PetscFunctionBegin;
  /* Scatter to U,V */
  ierr = VecScatterBegin(sqpconP->state_scatter, tao->solution, sqpconP->U, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(sqpconP->state_scatter, tao->solution, sqpconP->U, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterBegin(sqpconP->design_scatter, tao->solution, sqpconP->V, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(sqpconP->design_scatter, tao->solution, sqpconP->V, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);

  /* Evaluate Function, Gradient, Constraints, and Jacobian */
  ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&f,tao->gradient);CHKERRQ(ierr);
  ierr = TaoComputeConstraints(tao,tao->solution, tao->constraints);CHKERRQ(ierr);
  ierr = TaoComputeJacobianState(tao,tao->solution, &tao->jacobian_state, &tao->jacobian_state_pre, &tao->jacobian_state_inv, &sqpconP->statematflag);CHKERRQ(ierr);
  ierr = TaoComputeJacobianDesign(tao,tao->solution, &tao->jacobian_design, &tao->jacobian_design_pre, &sqpconP->statematflag);CHKERRQ(ierr);

  /* Scatter gradient to GU,GV */
  ierr = VecScatterBegin(sqpconP->state_scatter, tao->gradient, sqpconP->GU, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(sqpconP->state_scatter, tao->gradient, sqpconP->GU, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterBegin(sqpconP->design_scatter, tao->gradient, sqpconP->GV, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(sqpconP->design_scatter, tao->gradient, sqpconP->GV, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecNorm(tao->gradient, NORM_2, &mnorm);CHKERRQ(ierr);

  /* Evaluate constraint norm */
  ierr = VecNorm(tao->constraints, NORM_2, &cnorm);CHKERRQ(ierr);

  /* Monitor convergence */
  ierr = TaoMonitor(tao, iter,f,mnorm,cnorm,step,&reason);CHKERRQ(ierr);

  while (reason == TAO_CONTINUE_ITERATING) {
    /* Solve tbar = -A\t (t is constraints vector) */
    ierr = MatMult(tao->jacobian_state_inv, tao->constraints, sqpconP->Tbar);CHKERRQ(ierr);
    ierr = VecScale(sqpconP->Tbar, -1.0);CHKERRQ(ierr);

    /* aqwac =  A'\(Q*Tbar + c) */
    if (iter > 0) {
      ierr = MatMult(sqpconP->Q,sqpconP->Tbar,sqpconP->WV);CHKERRQ(ierr);
    } else {
      ierr = VecCopy(sqpconP->Tbar, sqpconP->WV);CHKERRQ(ierr);
    }
    ierr = VecAXPY(sqpconP->WV,1.0,sqpconP->GU);CHKERRQ(ierr);

    ierr = MatMultTranspose(tao->jacobian_state_inv, sqpconP->WV, sqpconP->aqwac);CHKERRQ(ierr);

    /* Reduced Gradient dbar = d -  B^t * aqwac */
    ierr = MatMultTranspose(tao->jacobian_design,sqpconP->aqwac, sqpconP->dbar);CHKERRQ(ierr);
    ierr = VecScale(sqpconP->dbar, -1.0);CHKERRQ(ierr);
    ierr = VecAXPY(sqpconP->dbar,1.0,sqpconP->GV);CHKERRQ(ierr);

    /* update reduced hessian */
    ierr = MatLMVMUpdate(sqpconP->R, sqpconP->V, sqpconP->dbar);CHKERRQ(ierr);

    /* Solve R*dv = -dbar using approx. hessian */
    ierr = MatLMVMSolve(sqpconP->R, sqpconP->dbar, sqpconP->DV);CHKERRQ(ierr);
    ierr = VecScale(sqpconP->DV, -1.0);CHKERRQ(ierr);

    /* Backsolve for u =  A\(g - B*dv)  = tbar - A\(B*dv)*/
    ierr = MatMult(tao->jacobian_design, sqpconP->DV, sqpconP->WL);CHKERRQ(ierr);
    ierr = MatMult(tao->jacobian_state_inv, sqpconP->WL, sqpconP->DU);CHKERRQ(ierr);
    ierr = VecScale(sqpconP->DU, -1.0);CHKERRQ(ierr);
    ierr = VecAXPY(sqpconP->DU, 1.0, sqpconP->Tbar);CHKERRQ(ierr);

    /* Assemble Big D */
    ierr = VecScatterBegin(sqpconP->state_scatter, sqpconP->DU, tao->stepdirection, INSERT_VALUES, SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->state_scatter, sqpconP->DU, tao->stepdirection, INSERT_VALUES, SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterBegin(sqpconP->design_scatter, sqpconP->DV, tao->stepdirection, INSERT_VALUES, SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->design_scatter, sqpconP->DV, tao->stepdirection, INSERT_VALUES, SCATTER_REVERSE);CHKERRQ(ierr);

    /* Perform Line Search */
    ierr = VecCopy(tao->solution, sqpconP->Xold);CHKERRQ(ierr);
    ierr = VecCopy(tao->gradient, sqpconP->Gold);CHKERRQ(ierr);
    fold = f;
    ierr = TaoLineSearchComputeObjectiveAndGradient(tao->linesearch,tao->solution,&fm,sqpconP->GL);CHKERRQ(ierr);
    ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &fm, sqpconP->GL, tao->stepdirection,&step, &ls_reason);CHKERRQ(ierr);
    ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);
    if (ls_reason < 0) {
      ierr = VecCopy(sqpconP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(sqpconP->Gold, tao->gradient);CHKERRQ(ierr);
      f = fold;
      ierr = VecAXPY(tao->solution, 1.0, tao->stepdirection);CHKERRQ(ierr);
      ierr = PetscInfo(tao,"Line Search Failed, using full step.");CHKERRQ(ierr);
      use_update=PETSC_FALSE;
    } else {
      use_update = PETSC_TRUE;
    }

    /* Scatter X to U,V */
    ierr = VecScatterBegin(sqpconP->state_scatter, tao->solution, sqpconP->U, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->state_scatter, tao->solution, sqpconP->U, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterBegin(sqpconP->design_scatter, tao->solution, sqpconP->V, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->design_scatter, tao->solution, sqpconP->V, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);


    /* Evaluate Function, Gradient, Constraints, and Jacobian */
    ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&f,tao->gradient);CHKERRQ(ierr);
    ierr = TaoComputeConstraints(tao,tao->solution, tao->constraints);CHKERRQ(ierr);
    ierr = TaoComputeJacobianState(tao,tao->solution, &tao->jacobian_state, &tao->jacobian_state_pre, &tao->jacobian_state_inv, &sqpconP->statematflag);CHKERRQ(ierr);
    ierr = TaoComputeJacobianDesign(tao,tao->solution, &tao->jacobian_design, &tao->jacobian_design_pre, &sqpconP->designmatflag);CHKERRQ(ierr);

    /* Scatter gradient to GU,GV */
    ierr = VecScatterBegin(sqpconP->state_scatter, tao->gradient, sqpconP->GU, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->state_scatter, tao->gradient, sqpconP->GU, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterBegin(sqpconP->design_scatter, tao->gradient, sqpconP->GV, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(sqpconP->design_scatter, tao->gradient, sqpconP->GV, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);

    /* Update approx to hessian of the Lagrangian wrt state (Q)
          with u_k+1, gu_k+1 */
    if (use_update) {
      ierr = MatApproxUpdate(sqpconP->Q,sqpconP->U,sqpconP->GU);CHKERRQ(ierr);
    }
    ierr = VecNorm(sqpconP->GL, NORM_2, &mnorm);CHKERRQ(ierr);

    /* Evaluate constraint norm */
    ierr = VecNorm(tao->constraints, NORM_2, &cnorm);CHKERRQ(ierr);

    /* Monitor convergence */
    iter++;
    ierr = TaoMonitor(tao, iter,f,mnorm,cnorm,step,&reason);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
Code Example #11
File: gpcg.c Project: pombredanne/petsc
static PetscErrorCode TaoSolve_GPCG(Tao tao)
{
  TAO_GPCG                     *gpcg = (TAO_GPCG *)tao->data;
  PetscErrorCode               ierr;
  PetscInt                     its;
  PetscReal                    actred,f,f_new,gnorm,gdx,stepsize,xtb;
  PetscReal                    xtHx;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;

  PetscFunctionBegin;

  ierr = TaoComputeVariableBounds(tao);CHKERRQ(ierr);
  ierr = VecMedian(tao->XL,tao->solution,tao->XU,tao->solution);CHKERRQ(ierr);
  ierr = TaoLineSearchSetVariableBounds(tao->linesearch,tao->XL,tao->XU);CHKERRQ(ierr);

  /* Using f = .5*x'Hx + x'b + c and g=Hx + b,  compute b,c */
  ierr = TaoComputeHessian(tao,tao->solution,tao->hessian,tao->hessian_pre);CHKERRQ(ierr);
  ierr = TaoComputeObjectiveAndGradient(tao,tao->solution,&f,tao->gradient);CHKERRQ(ierr);
  ierr = VecCopy(tao->gradient, gpcg->B);CHKERRQ(ierr);
  ierr = MatMult(tao->hessian,tao->solution,gpcg->Work);CHKERRQ(ierr);
  ierr = VecDot(gpcg->Work, tao->solution, &xtHx);CHKERRQ(ierr);
  ierr = VecAXPY(gpcg->B,-1.0,gpcg->Work);CHKERRQ(ierr);
  ierr = VecDot(gpcg->B,tao->solution,&xtb);CHKERRQ(ierr);
  gpcg->c=f-xtHx/2.0-xtb;
  if (gpcg->Free_Local) {
      ierr = ISDestroy(&gpcg->Free_Local);CHKERRQ(ierr);
  }
  ierr = VecWhichBetween(tao->XL,tao->solution,tao->XU,&gpcg->Free_Local);CHKERRQ(ierr);

  /* Project the gradient and calculate the norm */
  ierr = VecCopy(tao->gradient,gpcg->G_New);CHKERRQ(ierr);
  ierr = VecBoundGradientProjection(tao->gradient,tao->solution,tao->XL,tao->XU,gpcg->PG);CHKERRQ(ierr);
  ierr = VecNorm(gpcg->PG,NORM_2,&gpcg->gnorm);CHKERRQ(ierr);
  tao->step=1.0;
  gpcg->f = f;

    /* Check Stopping Condition      */
  ierr=TaoMonitor(tao,tao->niter,f,gpcg->gnorm,0.0,tao->step,&reason);CHKERRQ(ierr);

  while (reason == TAO_CONTINUE_ITERATING){
    tao->ksp_its=0;

    ierr = GPCGGradProjections(tao);CHKERRQ(ierr);
    ierr = ISGetSize(gpcg->Free_Local,&gpcg->n_free);CHKERRQ(ierr);

    f=gpcg->f; gnorm=gpcg->gnorm;

    ierr = KSPReset(tao->ksp);CHKERRQ(ierr);

    if (gpcg->n_free > 0){
      /* Create a reduced linear system */
      ierr = VecDestroy(&gpcg->R);CHKERRQ(ierr);
      ierr = VecDestroy(&gpcg->DXFree);CHKERRQ(ierr);
      ierr = TaoVecGetSubVec(tao->gradient,gpcg->Free_Local, tao->subset_type, 0.0, &gpcg->R);CHKERRQ(ierr);
      ierr = VecScale(gpcg->R, -1.0);CHKERRQ(ierr);
      ierr = TaoVecGetSubVec(tao->stepdirection,gpcg->Free_Local,tao->subset_type, 0.0, &gpcg->DXFree);CHKERRQ(ierr);
      ierr = VecSet(gpcg->DXFree,0.0);CHKERRQ(ierr);

      ierr = TaoMatGetSubMat(tao->hessian, gpcg->Free_Local, gpcg->Work, tao->subset_type, &gpcg->Hsub);CHKERRQ(ierr);

      if (tao->hessian_pre == tao->hessian) {
        ierr = MatDestroy(&gpcg->Hsub_pre);CHKERRQ(ierr);
        ierr = PetscObjectReference((PetscObject)gpcg->Hsub);CHKERRQ(ierr);
        gpcg->Hsub_pre = gpcg->Hsub;
      }  else {
        ierr = TaoMatGetSubMat(tao->hessian, gpcg->Free_Local, gpcg->Work, tao->subset_type, &gpcg->Hsub_pre);CHKERRQ(ierr);
      }

      ierr = KSPReset(tao->ksp);CHKERRQ(ierr);
      ierr = KSPSetOperators(tao->ksp,gpcg->Hsub,gpcg->Hsub_pre);CHKERRQ(ierr);

      ierr = KSPSolve(tao->ksp,gpcg->R,gpcg->DXFree);CHKERRQ(ierr);
      ierr = KSPGetIterationNumber(tao->ksp,&its);CHKERRQ(ierr);
      tao->ksp_its+=its;
      tao->ksp_tot_its+=its;
      ierr = VecSet(tao->stepdirection,0.0);CHKERRQ(ierr);
      ierr = VecISAXPY(tao->stepdirection,gpcg->Free_Local,1.0,gpcg->DXFree);CHKERRQ(ierr);

      ierr = VecDot(tao->stepdirection,tao->gradient,&gdx);CHKERRQ(ierr);
      ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
      f_new=f;
      ierr = TaoLineSearchApply(tao->linesearch,tao->solution,&f_new,tao->gradient,tao->stepdirection,&stepsize,&ls_status);CHKERRQ(ierr);

      actred = f_new - f;

      /* Evaluate the function and gradient at the new point */
      ierr = VecBoundGradientProjection(tao->gradient,tao->solution,tao->XL,tao->XU, gpcg->PG);CHKERRQ(ierr);
      ierr = VecNorm(gpcg->PG, NORM_2, &gnorm);CHKERRQ(ierr);
      f=f_new;
      ierr = ISDestroy(&gpcg->Free_Local);CHKERRQ(ierr);
      ierr = VecWhichBetween(tao->XL,tao->solution,tao->XU,&gpcg->Free_Local);CHKERRQ(ierr);
    } else {
      actred = 0; gpcg->step=1.0;
      /* if there were no free variables, no cg method */
    }

    tao->niter++;
    ierr = TaoMonitor(tao,tao->niter,f,gnorm,0.0,gpcg->step,&reason);CHKERRQ(ierr);
    gpcg->f=f;gpcg->gnorm=gnorm; gpcg->actred=actred;
    if (reason!=TAO_CONTINUE_ITERATING) break;
  }  /* END MAIN LOOP  */

  PetscFunctionReturn(0);
}
Code Example #12
File: bcg.c Project: fuentesdt/tao-1.10.1-p3
static int TaoSolve_BCG(TAO_SOLVER tao, void *cgptr)
{
  TAO_BCG *cg = (TAO_BCG *) cgptr;
  TaoVec*    X,*G; /* solution vector,  gradient vector */
  TaoVec*    Gprev=cg->Gprev, *GP=cg->GP;
  TaoVec*    DX=cg->DX, *Work=cg->Work;
  TaoVec  *XL,*XU;
  int    iter=0,lsflag=0,info;
  double gnorm2Prev,gdotgprev,gdx;
  double zero=0.0, minus_one = -1.0;
  double f_old,f,gnorm2,step=0;
  TaoTerminateReason reason;

  TaoFunctionBegin;
  info=TaoGetSolution(tao,&X);CHKERRQ(info);
  info=TaoGetGradient(tao,&G);CHKERRQ(info);

  info = TaoGetVariableBounds(tao,&XL,&XU); CHKERRQ(info);
  info = X->median(XL,X,XU); CHKERRQ(info);

  info = TaoComputeFunctionGradient(tao,X,&f,G);CHKERRQ(info);
  info = GP->boundGradientProjection(G,XL,X,XU); CHKERRQ(info);
  info = GP->norm2squared(&gnorm2);  CHKERRQ(info);

  info = DX->setToZero(); CHKERRQ(info); 
  info = Gprev->copyFrom(GP); CHKERRQ(info);

  cg->restarts=0;
  gnorm2Prev = gnorm2;

  /* Enter loop */
  while (1){

    /* Test for convergence */
    info = TaoMonitor(tao,iter++,f,gnorm2,0.0,step,&reason);CHKERRQ(info);
    if (reason!=TAO_CONTINUE_ITERATING) break;

    /* Determine beta, depending on method */
    info = GP->dot(Gprev,&gdotgprev); CHKERRQ(info);
    if (cg->type==TAO_CG_FletcherReeves){
      cg->beta=(gnorm2)/(gnorm2Prev);
    } else if (cg->type==TAO_CG_PolakRibiere){
      cg->beta=( (gnorm2)-gdotgprev )/(gnorm2Prev);
    } else {
      cg->beta=( (gnorm2)-gdotgprev )/(gnorm2Prev);
      if (cg->beta<0.0){
	cg->beta=0.0;
      }
    }
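    /* Using the projected gradients: Fletcher-Reeves takes beta = ||g||^2/||g_prev||^2,
       Polak-Ribiere takes beta = (||g||^2 - g'g_prev)/||g_prev||^2, and the default
       branch clips the Polak-Ribiere value at zero. */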

    /* Employ occasional restarts when successive gradients not orthogonal */
    if ( fabs(gdotgprev)/(gnorm2) > cg->eta || iter==0){ 
      printf("RESTART Beta: %4.2e\n",cg->beta);
      cg->beta=0.0;
    }

    if (cg->beta==0){
      cg->restarts++;
      PLogInfo(tao,"TaoCG: Restart CG at iterate %d with gradient direction.\n",tao->iter);
    }
    info = DX->scale(cg->beta); CHKERRQ(info);

    info = DX->negate(); CHKERRQ(info);
    info = DX->boundGradientProjection(DX,XL,X,XU); CHKERRQ(info);
    info = DX->negate(); CHKERRQ(info);

    info = DX->Axpy(minus_one,G); CHKERRQ(info);

    info = Gprev->copyFrom(GP); CHKERRQ(info);
    gnorm2Prev = gnorm2;

    info = Work->copyFrom(DX); CHKERRQ(info);
    info = Work->negate(); CHKERRQ(info);
    info = Work->boundGradientProjection(Work,XL,X,XU); CHKERRQ(info);
    info = Work->negate(); CHKERRQ(info);
    info = Work->dot(G,&gdx); CHKERRQ(info);
    if (cg->beta!=0 && gdx>=0){

      info = DX->copyFrom(GP); CHKERRQ(info);
      info = DX->negate(); CHKERRQ(info);
      cg->restarts++;
    } else {

    }
    info = DX->dot(G,&gdx); CHKERRQ(info);

    
    /* Line Search */
    step=1.5*step;
    step=TaoMax(1.5*step,0.1);
    step=1.0;

    info = TaoLineSearchApply(tao,X,G,DX,Work,&f,&step,&gdx,&lsflag);

    info = GP->boundGradientProjection(G,XL,X,XU); CHKERRQ(info);
    info = GP->norm2squared(&gnorm2); CHKERRQ(info);
    
  }
  
  TaoFunctionReturn(0);
}
Code Example #13
File: blmvm.c Project: masa-ito/PETScToPoisson
static PetscErrorCode TaoSolve_BLMVM(Tao tao)
{
  PetscErrorCode               ierr;
  TAO_BLMVM                    *blmP = (TAO_BLMVM *)tao->data;
  TaoConvergedReason           reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchConvergedReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;
  PetscReal                    f, fold, gdx, gnorm;
  PetscReal                    stepsize = 1.0,delta;

  PetscFunctionBegin;
  /*  Project initial point onto bounds */
  ierr = TaoComputeVariableBounds(tao);CHKERRQ(ierr);
  ierr = VecMedian(tao->XL,tao->solution,tao->XU,tao->solution);CHKERRQ(ierr);
  ierr = TaoLineSearchSetVariableBounds(tao->linesearch,tao->XL,tao->XU);CHKERRQ(ierr);


  /* Check convergence criteria */
  ierr = TaoComputeObjectiveAndGradient(tao, tao->solution,&f,blmP->unprojected_gradient);CHKERRQ(ierr);
  ierr = VecBoundGradientProjection(blmP->unprojected_gradient,tao->solution, tao->XL,tao->XU,tao->gradient);CHKERRQ(ierr);
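  /* unprojected_gradient keeps the raw gradient; tao->gradient holds its projection onto
     the feasible directions and is what the convergence test below measures. */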

  ierr = TaoGradientNorm(tao, tao->gradient,NORM_2,&gnorm);CHKERRQ(ierr);
  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");

  ierr = TaoMonitor(tao, tao->niter, f, gnorm, 0.0, stepsize, &reason);CHKERRQ(ierr);
  if (reason != TAO_CONTINUE_ITERATING) PetscFunctionReturn(0);

  /* Set initial scaling for the function */
  if (f != 0.0) {
    delta = 2.0*PetscAbsScalar(f) / (gnorm*gnorm);
  } else {
    delta = 2.0 / (gnorm*gnorm);
  }
  ierr = MatLMVMSetDelta(blmP->M,delta);CHKERRQ(ierr);

  /* Set counter for gradient/reset steps */
  blmP->grad = 0;
  blmP->reset = 0;

  /* Have not converged; continue with Newton method */
  while (reason == TAO_CONTINUE_ITERATING) {
    /* Compute direction */
    ierr = MatLMVMUpdate(blmP->M, tao->solution, tao->gradient);CHKERRQ(ierr);
    ierr = MatLMVMSolve(blmP->M, blmP->unprojected_gradient, tao->stepdirection);CHKERRQ(ierr);
    ierr = VecBoundGradientProjection(tao->stepdirection,tao->solution,tao->XL,tao->XU,tao->gradient);CHKERRQ(ierr);

    /* Check for success (descent direction) */
    ierr = VecDot(blmP->unprojected_gradient, tao->gradient, &gdx);CHKERRQ(ierr);
    if (gdx <= 0) {
      /* Step is not descent or solve was not successful
         Use steepest descent direction (scaled) */
      ++blmP->grad;

      if (f != 0.0) {
        delta = 2.0*PetscAbsScalar(f) / (gnorm*gnorm);
      } else {
        delta = 2.0 / (gnorm*gnorm);
      }
      ierr = MatLMVMSetDelta(blmP->M,delta);CHKERRQ(ierr);
      ierr = MatLMVMReset(blmP->M);CHKERRQ(ierr);
      ierr = MatLMVMUpdate(blmP->M, tao->solution, blmP->unprojected_gradient);CHKERRQ(ierr);
      ierr = MatLMVMSolve(blmP->M,blmP->unprojected_gradient, tao->stepdirection);CHKERRQ(ierr);
    }
    ierr = VecScale(tao->stepdirection,-1.0);CHKERRQ(ierr);

    /* Perform the linesearch */
    fold = f;
    ierr = VecCopy(tao->solution, blmP->Xold);CHKERRQ(ierr);
    ierr = VecCopy(blmP->unprojected_gradient, blmP->Gold);CHKERRQ(ierr);
    ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &f, blmP->unprojected_gradient, tao->stepdirection, &stepsize, &ls_status);CHKERRQ(ierr);
    ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

    if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
      /* Linesearch failed
         Reset factors and use scaled (projected) gradient step */
      ++blmP->reset;

      f = fold;
      ierr = VecCopy(blmP->Xold, tao->solution);CHKERRQ(ierr);
      ierr = VecCopy(blmP->Gold, blmP->unprojected_gradient);CHKERRQ(ierr);

      if (f != 0.0) {
        delta = 2.0* PetscAbsScalar(f) / (gnorm*gnorm);
      } else {
        delta = 2.0/ (gnorm*gnorm);
      }
      ierr = MatLMVMSetDelta(blmP->M,delta);CHKERRQ(ierr);
      ierr = MatLMVMReset(blmP->M);CHKERRQ(ierr);
      ierr = MatLMVMUpdate(blmP->M, tao->solution, blmP->unprojected_gradient);CHKERRQ(ierr);
      ierr = MatLMVMSolve(blmP->M, blmP->unprojected_gradient, tao->stepdirection);CHKERRQ(ierr);
      ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);

      /* This may be incorrect; the linesearch has values for stepmax and stepmin
         that should be reset. */
      ierr = TaoLineSearchSetInitialStepLength(tao->linesearch,1.0);CHKERRQ(ierr);
      ierr = TaoLineSearchApply(tao->linesearch,tao->solution,&f, blmP->unprojected_gradient, tao->stepdirection,  &stepsize, &ls_status);CHKERRQ(ierr);
      ierr = TaoAddLineSearchCounts(tao);CHKERRQ(ierr);

      if (ls_status != TAOLINESEARCH_SUCCESS && ls_status != TAOLINESEARCH_SUCCESS_USER) {
        tao->reason = TAO_DIVERGED_LS_FAILURE;
        break;
      }
    }

    /* Check for converged */
    ierr = VecBoundGradientProjection(blmP->unprojected_gradient, tao->solution, tao->XL, tao->XU, tao->gradient);CHKERRQ(ierr);
    ierr = TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm);CHKERRQ(ierr);


    if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Not-a-Number");
    tao->niter++;
    ierr = TaoMonitor(tao, tao->niter, f, gnorm, 0.0, stepsize, &reason);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
コード例 #14
0
ファイル: asils.c プロジェクト: 00liujj/petsc
static PetscErrorCode TaoSolve_ASILS(Tao tao)
{
  TAO_SSLS                     *asls = (TAO_SSLS *)tao->data;
  PetscReal                    psi,ndpsi, normd, innerd, t=0;
  PetscInt                     iter=0, nf;
  PetscErrorCode               ierr;
  TaoConvergedReason           reason;
  TaoLineSearchConvergedReason ls_reason;

  PetscFunctionBegin;
  /* Assume that Setup has been called!
     Set the structure for the Jacobian and create a linear solver. */

  ierr = TaoComputeVariableBounds(tao);CHKERRQ(ierr);
  ierr = TaoLineSearchSetObjectiveAndGradientRoutine(tao->linesearch,Tao_ASLS_FunctionGradient,tao);CHKERRQ(ierr);
  ierr = TaoLineSearchSetObjectiveRoutine(tao->linesearch,Tao_SSLS_Function,tao);CHKERRQ(ierr);

  /* Calculate the function value and fischer function value at the
     current iterate */
  ierr = TaoLineSearchComputeObjectiveAndGradient(tao->linesearch,tao->solution,&psi,asls->dpsi);CHKERRQ(ierr);
  ierr = VecNorm(asls->dpsi,NORM_2,&ndpsi);CHKERRQ(ierr);

  while (1) {
    /* Check the termination criteria */
    ierr = PetscInfo3(tao,"iter %D, merit: %g, ||dpsi||: %g\n",iter, (double)asls->merit,  (double)ndpsi);CHKERRQ(ierr);
    ierr = TaoMonitor(tao, iter++, asls->merit, ndpsi, 0.0, t, &reason);CHKERRQ(ierr);
    if (TAO_CONTINUE_ITERATING != reason) break;

    /* We are going to solve a linear system of equations.  We need to
       set the tolerances for the solve so that we maintain an asymptotic
       rate of convergence that is superlinear.
       Note: these tolerances are for the reduced system.  We really need
       to make sure that the full system satisfies the full-space conditions.

       This rule gives superlinear asymptotic convergence
       asls->atol = min(0.5, asls->merit*sqrt(asls->merit));
       asls->rtol = 0.0;

       This rule gives quadratic asymptotic convergence
       asls->atol = min(0.5, asls->merit*asls->merit);
       asls->rtol = 0.0;

       Calculate a free and fixed set of variables.  The fixed set of
       variables are those for which d_b is approximately equal to zero.
       The definition of approximately changes as we approach the solution
       to the problem.

       No one rule is guaranteed to work in all cases.  The following
       definition is based on the norm of the Jacobian matrix.  If the
       norm is large, the tolerance becomes smaller. */
    ierr = MatNorm(tao->jacobian,NORM_1,&asls->identifier);CHKERRQ(ierr);
    asls->identifier = PetscMin(asls->merit, 1e-2) / (1 + asls->identifier);

    ierr = VecSet(asls->t1,-asls->identifier);CHKERRQ(ierr);
    ierr = VecSet(asls->t2, asls->identifier);CHKERRQ(ierr);

    ierr = ISDestroy(&asls->fixed);CHKERRQ(ierr);
    ierr = ISDestroy(&asls->free);CHKERRQ(ierr);
    ierr = VecWhichBetweenOrEqual(asls->t1, asls->db, asls->t2, &asls->fixed);CHKERRQ(ierr);
    ierr = ISComplementVec(asls->fixed,asls->t1, &asls->free);CHKERRQ(ierr);

    ierr = ISGetSize(asls->fixed,&nf);CHKERRQ(ierr);
    ierr = PetscInfo1(tao,"Number of fixed variables: %D\n", nf);CHKERRQ(ierr);

    /* We now have our partition.  Now calculate the direction in the
       fixed variable space. */
    ierr = TaoVecGetSubVec(asls->ff, asls->fixed, tao->subset_type, 0.0, &asls->r1);
    ierr = TaoVecGetSubVec(asls->da, asls->fixed, tao->subset_type, 1.0, &asls->r2);
    ierr = VecPointwiseDivide(asls->r1,asls->r1,asls->r2);CHKERRQ(ierr);
    ierr = VecSet(tao->stepdirection,0.0);CHKERRQ(ierr);
    ierr = VecISAXPY(tao->stepdirection, asls->fixed,1.0,asls->r1);CHKERRQ(ierr);

    /* Our direction in the Fixed Variable Set is fixed.  Calculate the
       information needed for the step in the Free Variable Set.  To
       do this, we need to know the diagonal perturbation and the
       right hand side. */

    ierr = TaoVecGetSubVec(asls->da, asls->free, tao->subset_type, 0.0, &asls->r1);CHKERRQ(ierr);
    ierr = TaoVecGetSubVec(asls->ff, asls->free, tao->subset_type, 0.0, &asls->r2);CHKERRQ(ierr);
    ierr = TaoVecGetSubVec(asls->db, asls->free, tao->subset_type, 1.0, &asls->r3);CHKERRQ(ierr);
    ierr = VecPointwiseDivide(asls->r1,asls->r1, asls->r3);CHKERRQ(ierr);
    ierr = VecPointwiseDivide(asls->r2,asls->r2, asls->r3);CHKERRQ(ierr);

    /* r1 is the diagonal perturbation
       r2 is the right hand side
       r3 is no longer needed

       Now need to modify r2 for our direction choice in the fixed
       variable set:  calculate t1 = J*d, take the reduced vector
       of t1 and modify r2. */

    ierr = MatMult(tao->jacobian, tao->stepdirection, asls->t1);CHKERRQ(ierr);
    ierr = TaoVecGetSubVec(asls->t1,asls->free,tao->subset_type,0.0,&asls->r3);CHKERRQ(ierr);
    ierr = VecAXPY(asls->r2, -1.0, asls->r3);CHKERRQ(ierr);

    /* Calculate the reduced problem matrix and the direction */
    if (!asls->w && (tao->subset_type == TAO_SUBSET_MASK || tao->subset_type == TAO_SUBSET_MATRIXFREE)) {
      ierr = VecDuplicate(tao->solution, &asls->w);CHKERRQ(ierr);
    }
    ierr = TaoMatGetSubMat(tao->jacobian, asls->free, asls->w, tao->subset_type,&asls->J_sub);CHKERRQ(ierr);
    if (tao->jacobian != tao->jacobian_pre) {
      ierr = TaoMatGetSubMat(tao->jacobian_pre, asls->free, asls->w, tao->subset_type, &asls->Jpre_sub);CHKERRQ(ierr);
    } else {
      ierr = MatDestroy(&asls->Jpre_sub);CHKERRQ(ierr);
      asls->Jpre_sub = asls->J_sub;
      ierr = PetscObjectReference((PetscObject)(asls->Jpre_sub));CHKERRQ(ierr);
    }
    ierr = MatDiagonalSet(asls->J_sub, asls->r1,ADD_VALUES);CHKERRQ(ierr);
    ierr = TaoVecGetSubVec(tao->stepdirection, asls->free, tao->subset_type, 0.0, &asls->dxfree);CHKERRQ(ierr);
    ierr = VecSet(asls->dxfree, 0.0);CHKERRQ(ierr);

    /* Calculate the reduced direction.  (Really negative of Newton
       direction.  Therefore, rest of the code uses -d.) */
    ierr = KSPReset(tao->ksp);
    ierr = KSPSetOperators(tao->ksp, asls->J_sub, asls->Jpre_sub);CHKERRQ(ierr);
    ierr = KSPSolve(tao->ksp, asls->r2, asls->dxfree);CHKERRQ(ierr);

    /* Add the direction in the free variables back into the real direction. */
    ierr = VecISAXPY(tao->stepdirection, asls->free, 1.0,asls->dxfree);CHKERRQ(ierr);

    /* Check the real direction for descent and if not, use the negative
       gradient direction. */
    ierr = VecNorm(tao->stepdirection, NORM_2, &normd);CHKERRQ(ierr);
    ierr = VecDot(tao->stepdirection, asls->dpsi, &innerd);CHKERRQ(ierr);

    if (innerd <= asls->delta*pow(normd, asls->rho)) {
      ierr = PetscInfo1(tao,"Gradient direction: %5.4e.\n", (double)innerd);CHKERRQ(ierr);
      ierr = PetscInfo1(tao, "Iteration %D: newton direction not descent\n", iter);CHKERRQ(ierr);
      ierr = VecCopy(asls->dpsi, tao->stepdirection);CHKERRQ(ierr);
      ierr = VecDot(asls->dpsi, tao->stepdirection, &innerd);CHKERRQ(ierr);
    }

    ierr = VecScale(tao->stepdirection, -1.0);CHKERRQ(ierr);
    innerd = -innerd;

    /* We now have a correct descent direction.  Apply a linesearch to
       find the new iterate. */
    ierr = TaoLineSearchSetInitialStepLength(tao->linesearch, 1.0);CHKERRQ(ierr);
    ierr = TaoLineSearchApply(tao->linesearch, tao->solution, &psi,asls->dpsi, tao->stepdirection, &t, &ls_reason);CHKERRQ(ierr);
    ierr = VecNorm(asls->dpsi, NORM_2, &ndpsi);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}