Example #1
/*************************************************************************
* This function checks if the balance achieved is better than the diff
* For now, it uses a 2-norm measure
**************************************************************************/
int Serial_BetterBalance(int ncon, float *npwgts, float *tpwgts, float *diff)
{
  int i;
  float ndiff[MAXNCON];

  for (i=0; i<ncon; i++)
    ndiff[i] = fabs(tpwgts[i]-npwgts[i]);

  return snorm2(ncon, ndiff) < snorm2(ncon, diff);
}
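snorm2 is not defined in the snippet; from its use here it is evidently the Euclidean (2-)norm of an ncon-element vector. A minimal sketch of such a helper, assuming single-precision floats as in the signature above:

#include <math.h>

/* Presumed helper (not part of the example): 2-norm of an n-element vector */
float snorm2(int n, float *v)
{
  int i;
  float sum = 0.0;

  for (i=0; i<n; i++)
    sum += v[i]*v[i];

  return (float)sqrt(sum);
}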
Example #2
/*************************************************************************
* This function checks if the balance achieved is better than the diff 
* For now, it uses a 2-norm measure
**************************************************************************/ 
int BetterBalance(int ncon, floattype *npwgts, floattype *tpwgts, floattype *diff)
{
  int i;
  floattype ndiff[MAXNCON];

  for (i=0; i<ncon; i++)
    ndiff[i] = fabs(tpwgts[i]-npwgts[i]);
   
  return snorm2(ncon, ndiff) < snorm2(ncon, diff);
}
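In both variants the test reads: prefer the candidate weights npwgts if their deviation from the targets tpwgts is smaller, in the 2-norm, than the current deviation vector diff. A hypothetical two-constraint call (the values, and floattype being a float typedef, are made up for illustration; MAXNCON >= 2 is assumed):

floattype npwgts[2] = {0.52f, 0.49f};  /* achieved per-constraint weights (made up) */
floattype tpwgts[2] = {0.50f, 0.50f};  /* target weights (made up) */
floattype diff[2]   = {0.05f, 0.03f};  /* current deviation vector (made up) */

if (BetterBalance(2, npwgts, tpwgts, diff)) {
  /* candidate is closer to the target balance; keep it */
}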
Example #3
/*************************************************************************
* This function implements the CG solver used during the directed diffusion
**************************************************************************/
void ConjGrad2(MatrixType *A, floattype *b, floattype *x, floattype tol, floattype *workspace)
{
  int i, k, n;
  floattype *p, *r, *q, *z, *M;
  floattype alpha, beta, rho, rho_1 = -1.0, error, bnrm2, tmp;
  idxtype *rowptr, *colind;
  floattype *values;

  n = A->nrows;
  rowptr = A->rowptr;
  colind = A->colind;
  values = A->values;

  /* Initial Setup */
  p = workspace;
  r = workspace + n;
  q = workspace + 2*n;
  z = workspace + 3*n;
  M = workspace + 4*n;

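  /* Zero the initial iterate and build a Jacobi preconditioner: M holds the
   * reciprocal of each row's diagonal entry, which this sparse layout
   * apparently stores first in the row (values[rowptr[i]]); a zero diagonal
   * falls back to M[i] = 0. */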
  for (i=0; i<n; i++) {
    x[i] = 0.0;
    if (values[rowptr[i]] != 0.0)
      M[i] = 1.0/values[rowptr[i]];
    else
      M[i] = 0.0;
  }

  /* r = b - Ax */
  mvMult2(A, x, r);
  for (i=0; i<n; i++)
    r[i] = b[i]-r[i];

  bnrm2 = snorm2(n, b);
  if (bnrm2 > 0.0) {
    error = snorm2(n, r) / bnrm2;

    if (error > tol) {
      /* Begin Iterations */
      for (k=0; k<n; k++) {
        for (i=0; i<n; i++)
          z[i] = r[i]*M[i];

        rho = sdot(n, r, z);

        if (k == 0)
          scopy(n, z, p);
        else {
          if (rho_1 != 0.0)
            beta = rho/rho_1;
          else
            beta = 0.0;
          for (i=0; i<n; i++)
            p[i] = z[i] + beta*p[i];
        }

        mvMult2(A, p, q); /* q = A*p */

        tmp = sdot(n, p, q);
        if (tmp != 0.0)
          alpha = rho/tmp;
        else
          alpha = 0.0;
        saxpy(n, alpha, p, x);    /* x = x + alpha*p */
        saxpy(n, -alpha, q, r);   /* r = r - alpha*q */
        error = snorm2(n, r) / bnrm2;
        if (error < tol)
          break;

        rho_1 = rho;
      }
    }
  }
}
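Besides snorm2 (sketched after Example #1), ConjGrad2 leans on three BLAS-like helpers that the snippet does not show. Their semantics follow from the call sites and inline comments above; a minimal sketch, assuming floattype is a floating-point typedef:

/* dot product of two n-element vectors */
floattype sdot(int n, floattype *x, floattype *y)
{
  int i;
  floattype sum = 0.0;

  for (i=0; i<n; i++)
    sum += x[i]*y[i];

  return sum;
}

/* y = y + alpha*x */
void saxpy(int n, floattype alpha, floattype *x, floattype *y)
{
  int i;

  for (i=0; i<n; i++)
    y[i] += alpha*x[i];
}

/* copy x into y */
void scopy(int n, floattype *x, floattype *y)
{
  int i;

  for (i=0; i<n; i++)
    y[i] = x[i];
}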
Example #4
  void run( Vector<Real> &s, Real &snorm, Real &del, int &iflag, int &iter, const Vector<Real> &x,
            const Vector<Real> &grad, const Real &gnorm, ProjectedObjective<Real> &pObj ) { 
    Real tol = std::sqrt(ROL_EPSILON<Real>()), zero(0), one(1), two(2), half(0.5);
    const Real gtol = std::min(tol1_,tol2_*gnorm);

    // Gradient Vector
    g_->set(grad);
    Real normg = gnorm;
    if ( pObj.isConActivated() ) {
      primalVector_->set(grad.dual());
      pObj.pruneActive(*primalVector_,grad.dual(),x);
      g_->set(primalVector_->dual());
      normg = g_->norm();
    }

    // Old and New Step Vectors
    s.zero(); s_->zero();
    snorm = zero;
    Real snorm2(0), s1norm2(0);

    // Preconditioned Gradient Vector
    //pObj.precond(*v,*g,x,tol);
    pObj.reducedPrecond(*v_,*g_,x,grad.dual(),x,tol);

    // Basis Vector
    p_->set(*v_);
    p_->scale(-one);
    Real pnorm2 = v_->dot(g_->dual());

    iter = 0; iflag = 0;
    Real kappa(0), beta(0), sigma(0), alpha(0), tmp(0), sMp(0);
    Real gv = v_->dot(g_->dual());
    pRed_ = zero;

    for (iter = 0; iter < maxit_; iter++) {
      //pObj.hessVec(*Hp,*p,x,tol);
      pObj.reducedHessVec(*Hp_,*p_,x,grad.dual(),x,tol);

      kappa = p_->dot(Hp_->dual());
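      // kappa = p'Hp is the curvature along p; if it is not positive, the
      // quadratic model is unbounded below in this direction, so step
      // straight to the trust-region boundary (iflag = 2).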
      if (kappa <= zero) {
        sigma = (-sMp+std::sqrt(sMp*sMp+pnorm2*(del*del-snorm2)))/pnorm2;
        s.axpy(sigma,*p_);
        iflag = 2; 
        break;
      }

      alpha = gv/kappa;
      s_->set(s); 
      s_->axpy(alpha,*p_);
      s1norm2 = snorm2 + two*alpha*sMp + alpha*alpha*pnorm2;

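      // The full CG update would leave the trust region; truncate the step
      // at the boundary ||s|| = del instead (iflag = 3).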
      if (s1norm2 >= del*del) {
        sigma = (-sMp+std::sqrt(sMp*sMp+pnorm2*(del*del-snorm2)))/pnorm2;
        s.axpy(sigma,*p_);
        iflag = 3; 
        break;
      }

      pRed_ += half*alpha*gv;

      s.set(*s_);
      snorm2 = s1norm2;  

      g_->axpy(alpha,*Hp_);
      normg = g_->norm();
      if (normg < gtol) {
        break;
      }

      //pObj.precond(*v,*g,x,tol);
      pObj.reducedPrecond(*v_,*g_,x,grad.dual(),x,tol);
      tmp   = gv; 
      gv    = v_->dot(g_->dual());
      beta  = gv/tmp;    

      p_->scale(beta);
      p_->axpy(-one,*v_);
      sMp    = beta*(sMp+alpha*pnorm2);
      pnorm2 = gv + beta*beta*pnorm2; 
    }
    if (iflag > 0) {
      pRed_ += sigma*(gv-half*sigma*kappa);
    }

    if (iter == maxit_) {
      iflag = 1;
    }
    if (iflag != 1) { 
      iter++;
    }

    snorm = s.norm();
    TrustRegion<Real>::setPredictedReduction(pRed_);
  }
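In both boundary exits the scalar sigma is the positive root of the quadratic pnorm2*sigma^2 + 2*sMp*sigma + (snorm2 - del*del) = 0, i.e. the step length along p_ that lands the updated iterate exactly on the trust-region boundary (in the norm the recurrences maintain); sMp and snorm2 are the recurrently updated inner product and squared norm of the current step s, so no extra norm computations are needed per iteration. Reading off the code, iflag reports how the solver stopped: 0 when the preconditioned residual meets gtol, 1 when the iteration cap maxit_ is hit, 2 on negative curvature, and 3 when the step is truncated at the boundary.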