Example #1
/*  Conjugate Gradients solution of A*x = b

        | A(1,1) A(1,2) A(1,3) .  A(1,N)| |x(1)|     |b(1)|
        | A(2,1) A(2,2) A(2,3) .  A(2,N)| |x(2)|     |b(2)|
        | A(3,1) A(3,2) A(3,3) .  A(3,N)| |x(3)|     |b(3)|
        |   .      .      .    .    .   | | .  |  =  | .  |
        | A(N,1) A(N,2) A(N,3) .  A(N,N)| |x(N)|     |b(N)|
INPUT:
   A is N x N array
   x - initial guess for solution vector
   b - r.h.s.
OUTPUT:
   x - solution vector          */
bool ConjGrad(FArr2D& A, FArr1D& b, FArr1D& x, real eps, int itermax)
{
  double Pold, Pnew, beta, gamma;
  int h1 = A.H1();
  int l1 = A.L1();
  int iter = 0;
  int k = sqrt((float)A.D2());
  //  Temp Arrays d, r, y
  FArr1D y(l1,h1);
  FArr1D d(l1,h1);
  FArr1D r(l1,h1);

  assert( l1 == A.L2() && h1 == A.H2() );

  r = b - A*x;
  Pold = Norm2(r);  //  Pold = r*r;
  d = r;

  while ((Pold > eps) && (iter <= itermax))
  {
    y = A*d;
    beta = Pold/(d*y);
    x = x + beta*d;
    if (iter % k == 0) r = b - A*x;
    else r = r - beta*y;
    Pnew = Norm2(r);  // Pnew = r*r;
    gamma = Pnew/Pold;
    d = r + gamma*d;
    Pold = Pnew;
    iter++;
  }

  return (iter < itermax);
} // ConjGrad <--------------------------------------------------------------
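For readers without the FArr classes, here is a minimal self-contained sketch of the same restarted CG loop on std::vector (illustrative only: ConjGradSketch and its helpers are hypothetical names; the periodic residual refresh every k ~ sqrt(N) steps mirrors the code above):

#include <algorithm>
#include <cmath>
#include <vector>

using Vec = std::vector<double>;
using Mat = std::vector<Vec>;

static double dot(const Vec& a, const Vec& b) {
  double s = 0.0;
  for (size_t i = 0; i < a.size(); ++i) s += a[i]*b[i];
  return s;
}

static Vec matvec(const Mat& A, const Vec& x) {
  Vec y(A.size(), 0.0);
  for (size_t i = 0; i < A.size(); ++i)
    for (size_t j = 0; j < x.size(); ++j) y[i] += A[i][j]*x[j];
  return y;
}

bool ConjGradSketch(const Mat& A, const Vec& b, Vec& x, double eps, int itermax) {
  const int n = (int)b.size();
  const int k = std::max(1, (int)std::sqrt((double)n));  // residual-refresh interval
  Vec Ax = matvec(A, x), r(n), d(n), y(n);
  for (int i = 0; i < n; ++i) r[i] = b[i] - Ax[i];
  double Pold = dot(r, r);                               // Pold = r*r
  d = r;
  int iter = 0;
  while (Pold > eps && iter <= itermax) {
    y = matvec(A, d);
    double beta = Pold / dot(d, y);
    for (int i = 0; i < n; ++i) x[i] += beta*d[i];
    if (iter % k == 0) {                                 // recompute residual from scratch
      Ax = matvec(A, x);
      for (int i = 0; i < n; ++i) r[i] = b[i] - Ax[i];
    } else {
      for (int i = 0; i < n; ++i) r[i] -= beta*y[i];
    }
    double Pnew = dot(r, r);
    double gamma = Pnew / Pold;                          // Fletcher-Reeves update
    for (int i = 0; i < n; ++i) d[i] = r[i] + gamma*d[i];
    Pold = Pnew;
    ++iter;
  }
  return iter < itermax;
}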
Example #2
int OptCGLike::checkConvg() // check convergence
{
  NLP1* nlp = nlprob();
  ColumnVector xc(nlp->getXc());

// Test 1. step tolerance 

  double step_tol = tol.getStepTol();
  double snorm = stepTolNorm();
  double xnorm =  Norm2(xc);
  double stol  = step_tol*max(1.0,xnorm);
  if (snorm  <= stol) {
    strcpy(mesg,"Algorithm converged - Norm of last step is less than step tolerance");
    *optout << "checkConvg: snorm = " << e(snorm,12,4) 
      << "  stol = " << e(stol,12,4) << "\n";
    return 1;
  }
  
// Test 2. function tolerance
  double ftol = tol.getFTol();
  double fvalue = nlp->getF();
  double rftol = ftol*max(1.0,fabs(fvalue));
  Real deltaf = fprev - fvalue;
  if (deltaf <= rftol) {
    strcpy(mesg,"Algorithm converged - Difference in successive fcn values less than tolerance");
    *optout << "checkConvg: deltaf = " << e(deltaf,12,4) 
         << "  ftol = " << e(ftol,12,4) << "\n";
    return 2;
  }
  

// Test 3. gradient tolerance 

  ColumnVector grad(nlp->getGrad());
  double gtol = tol.getGTol();
  double rgtol = gtol*max(1.0,fabs(fvalue));
  double gnorm = Norm2(grad);
  if (gnorm <= rgtol) {
    strcpy(mesg,"Algorithm converged - Norm of gradient is less than gradient tolerance");
    *optout << "checkConvg: gnorm = " << e(gnorm,12,4) 
      << "  gtol = " << e(rgtol, 12,4) << "\n";
    return 3;
  }
  

// Test 4. absolute gradient tolerance 

  if (gnorm <= gtol) {
    strcpy(mesg,"Algorithm converged - Norm of gradient is less than gradient tolerance");
    *optout << "checkConvg: gnorm = " << e(gnorm,12,4) 
      << "  gtol = " << e(gtol, 12,4) << "\n";
    return 4;
  }
  
  // Nothing to report 

  return 0;

}
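Tests 1-3 above share one pattern: compare a quantity against tol*max(1, scale), so the criterion is relative when the scale is large and degrades gracefully to an absolute tolerance near zero. A minimal sketch (withinRelTol is a hypothetical helper, not part of OPT++):

#include <algorithm>
#include <cmath>

// quantity <= tol*max(1, |scale|): relative far from zero, absolute near it.
inline bool withinRelTol(double quantity, double tol, double scale) {
  return quantity <= tol * std::max(1.0, std::fabs(scale));
}
// e.g. withinRelTol(snorm, step_tol, xnorm) reproduces Test 1.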
Example #3
    BiCGStabResult BiCGstab::solve(const Array& b, const Array& x0) const {
        Real bnorm2 = Norm2(b);
        if (bnorm2 == 0.0) {
            BiCGStabResult result = { 0, 0.0, b};
            return result;
        }

        Array x = ((!x0.empty()) ? x0 : Array(b.size(), 0.0));
        Array r = b - A_(x);

        Array rTld = r;
        Array p, pTld, v, s, sTld, t;
        Real omega = 1.0;
        Real rho, rhoTld=1.0;
        Real alpha = 0.0, beta;
        Real error = Norm2(r)/bnorm2;

        Size i;
        for (i=0; i < maxIter_ && error >= relTol_; ++i) {
           rho = DotProduct(rTld, r);
           if  (rho == 0.0 || omega == 0.0)
               break;

           if (i) {
              beta = (rho/rhoTld)*(alpha/omega);
              p = r + beta*(p - omega*v);
           }
           else {
              p = r;
           }

           pTld = ((M_)? M_(p) : p);
           v     = A_(pTld);

           alpha = rho/DotProduct(rTld, v);
           s     = r-alpha*v;
           if (Norm2(s) < relTol_*bnorm2) {
              x += alpha*pTld;
              error = Norm2(s)/bnorm2;
              break;
           }

           sTld = ((M_) ? M_(s) : s);
           t = A_(sTld);
           omega = DotProduct(t,s)/DotProduct(t,t);
           x += alpha*pTld + omega*sTld;
           r = s - omega*t;
           error = Norm2(r)/bnorm2;
           rhoTld = rho;
        }

        QL_REQUIRE(i < maxIter_, "max number of iterations exceeded");
        QL_REQUIRE(error < relTol_, "could not converge");

        BiCGStabResult result = { i, error, x};
        return result;
    }
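A hedged usage sketch: as their use above suggests, A_ and M_ are callables mapping Array to Array. The constructor signature below is an assumption; check bicgstab.hpp in your QuantLib version.

#include <ql/math/matrixutilities/bicgstab.hpp>
using namespace QuantLib;

// y = A*x for a tridiagonal Toeplitz operator (2 on the diagonal, -1 beside it).
Array applyA(const Array& x) {
    Array y(x.size());
    for (Size i = 0; i < x.size(); ++i) {
        y[i] = 2.0*x[i];
        if (i > 0)          y[i] -= x[i-1];
        if (i+1 < x.size()) y[i] -= x[i+1];
    }
    return y;
}

void bicgstabDemo() {
    Array b(10, 1.0);
    BiCGstab solver(applyA, /*maxIter=*/100, /*relTol=*/1e-8); // no preconditioner
    BiCGStabResult res = solver.solve(b, Array());             // empty x0 => zero initial guess
    // res.iterations, res.error and res.x hold the outcome (see the struct above).
}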
Example #4
// Computes the furthest point in a triangle from a reference point.
Vector3 FurthestToPointInTriangle(const Vector3 &p, const Vector3 &v0, 
        const Vector3 &v1, const Vector3 &v2)
{
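	// Note: Norm2 is used here only in comparisons, so the result is the same
	// whether it returns the Euclidean norm or its square (both are monotone
	// in the distance).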
	double d0 = Norm2(v0 - p);
	double d1 = Norm2(v1 - p);
	double d2 = Norm2(v2 - p);
	if (d0 > d1) {
		if (d0 > d2) return v0;
		else return v2;
	} else if (d1 > d2) return v1;
	else return v2;
}
Example #5
void rl_nac::farm_out_grad( vector_t& converged_grad ) {

  // now we need to farm these out to the actions!  we start i at
  // state_feat_cnt because converged_grad contains [w_t+1 v_t+1], but we only
  // want v_t+1 to add to the parameter vectors.
  int i = state_feat_cnt;
  for ( int a=0; a<act_cnt; a++ ) {
    int act_a_cnt = (*acts)[a]->grad.GetM();

    for ( int j=0; j<act_a_cnt; j++ ) {
      (*acts)[a]->natural_grad(j) = converged_grad(i);
      i++;
    }

    // make all vectors have unit length
    double n2 = Norm2( (*acts)[a]->natural_grad );

    if ( n2 > 1e-10 ) {
      // it's pretty easy to have a zero norm natural gradient, if you
      // received zero total reward, so to avoid nans, we only
      // normalize if we have a nonzero vector...
      Mlt( 1.0/n2, (*acts)[a]->natural_grad );
    }

#ifdef DEBUG
    printf( "Act %d natural grad:\n", a );
    (*acts)[a]->natural_grad.Print();
#endif
  }

}
Example #6
Quaternion::Quaternion(const double angle, const dVector & axis)
{
	if(axis.size() != 3) {
		assert (0 && "Quaternion::Quaternion, size of axis != 3");
		return;
	}

	// make sure axis is a unit vector
	v_ = sin(angle/2) * axis/Norm2(axis);
	s_ = cos(angle/2);
}
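A hedged usage sketch for the angle-axis constructor above (the dVector construction and indexing syntax are assumptions based only on the size() check in the code):

#include <cmath>

void quaternionDemo() {
	dVector axis(3);                  // assumed: 3-element vector type with operator[]
	axis[0] = 0.0; axis[1] = 0.0; axis[2] = 1.0;
	Quaternion q(M_PI/2, axis);
	// v_ = sin(pi/4)*(0,0,1), s_ = cos(pi/4): a 90-degree rotation about z.
}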
Example #7
double TAGMFast::LikelihoodForRow(const int UID, const TIntFltH& FU) {
  double L = 0.0;
  TFltV HOSumFV; // adjustment to SumFV for UID's held-out neighbors
  if (HOVIDSV[UID].Len() > 0) {
    HOSumFV.Gen(SumFV.Len());
    
    for (int e = 0; e < HOVIDSV[UID].Len(); e++) {
      for (int c = 0; c < SumFV.Len(); c++) {
        HOSumFV[c] += GetCom(HOVIDSV[UID][e], c);
      }
    }
  }

  TUNGraph::TNodeI NI = G->GetNI(UID);
  if (DoParallel && NI.GetDeg() > 10) {
#pragma omp parallel for schedule(static, 1)
    for (int e = 0; e < NI.GetDeg(); e++) {
      int v = NI.GetNbrNId(e);
      if (v == UID) { continue; }
      if (HOVIDSV[UID].IsKey(v)) { continue; }
      double LU = log (1.0 - Prediction(FU, F[v])) + NegWgt * DotProduct(FU, F[v]);
#pragma omp atomic
      L += LU;
    }
    for (TIntFltH::TIter HI = FU.BegI(); HI < FU.EndI(); HI++) {
      double HOSum = HOVIDSV[UID].Len() > 0?  HOSumFV[HI.GetKey()].Val: 0.0;//subtract Hold out pairs only if hold out pairs exist
      double LU = NegWgt * (SumFV[HI.GetKey()] - HOSum - GetCom(UID, HI.GetKey())) * HI.GetDat();
      L -= LU;
    }
  } else {
    for (int e = 0; e < NI.GetDeg(); e++) {
      int v = NI.GetNbrNId(e);
      if (v == UID) { continue; }
      if (HOVIDSV[UID].IsKey(v)) { continue; }
      L += log (1.0 - Prediction(FU, F[v])) + NegWgt * DotProduct(FU, F[v]);
    }
    for (TIntFltH::TIter HI = FU.BegI(); HI < FU.EndI(); HI++) {
      double HOSum = HOVIDSV[UID].Len() > 0?  HOSumFV[HI.GetKey()].Val: 0.0;//subtract Hold out pairs only if hold out pairs exist
      L -= NegWgt * (SumFV[HI.GetKey()] - HOSum - GetCom(UID, HI.GetKey())) * HI.GetDat();
    }
  }
  //add regularization
  if (RegCoef > 0.0) { //L1
    L -= RegCoef * Sum(FU);
  }
  if (RegCoef < 0.0) { //L2
    L += RegCoef * Norm2(FU);
  }

  return L;
}
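For reference, a hedged reading of the code above (assuming Prediction(FU, F[v]) = exp(-Fu.Fv), as in BIGCLAM): the per-row log-likelihood is

    L(u) = sum_{v in N(u)} log(1 - exp(-Fu.Fv))  -  NegWgt * sum_{v not in N(u)} Fu.Fv

and the expensive non-neighbor sum is never enumerated. The second loop subtracts the global quantity NegWgt * Fu.(SumFV - HOSum - Fu), and the NegWgt*DotProduct(FU, F[v]) term added per neighbor in the first loop cancels the neighbor contributions hidden inside it, leaving exactly the non-neighbor sum.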
Example #8
/*  Conjugate Gradients solution of C*x = b

        | C(1,1) C(1,2) .  C(1,N)| |x(1)|     |b(1)|
        | C(2,1) C(2,2) .  C(2,N)| |x(2)|     |b(2)|
        | C(3,1) C(3,2) .  C(3,N)| | .  |  =  |b(3)|
        |   .      .    .    .   | |x(N)|     | .  |
        | C(M,1) C(M,2) .  C(M,N)|            |b(M)|
INPUT:
   C(M,N) array    M >= N
   x(N) - initial guess for solution vector
   b(M) - r.h.s.
OUTPUT:
   x(N) - solution vector     */
bool ConjGradLS(FArr2D& C, FArr1D& b, FArr1D& x, real eps, int itermax)
{
  double Pold, Pnew, beta, gamma;
  int h1 = C.H1();
  int l1 = C.L1();
  int h2 = C.H2();
  int l2 = C.L2();
  int iter = 0;
  int k = sqrt((float)C.D2());
  // Temp Arrays  g(N), d(N), r(M), y(M)
  FArr1D y(l1,h1);
  FArr1D d(l2,h2);
  FArr1D r(l1,h1);
  FArr1D g(l2,h2);

  r = b - C*x;
  g = r*C;
  Pold = Norm2(g);  //  Pold = g*g;
  d = g;

  while ((Pold > eps) && (iter <= itermax))
  {
    y = C*d;
    beta = Pold/Norm2(y);  //  beta = Pold/(y*y);
    x = x + beta*d;
    if (iter % k == 0) r = b - C*x;
    else r = r - beta*y;
    g = r*C;
    Pnew = Norm2(g);  //  Pnew = g*g;
    gamma = Pnew/Pold;
    d = g + gamma*d;
    Pold = Pnew;
    iter++;
  }
  return (iter < itermax);
} // ConjGradLS <------------------------------------------------------------
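Example #8 is CG applied to the normal equations (CGNR): with g = C^T r, the recurrence solves C^T C x = C^T b, i.e. the least-squares problem min ||b - C x||, without ever forming C^T C explicitly. Note that beta = Pold/Norm2(y) uses ||C d||^2, the same quantity as the d^T (C^T C) d denominator of ordinary CG, computed from the M-vector y = C*d.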
Example #9
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Method for finding boost to special r.f.
inline Vec3D Vec4D::SRFBoost() const
{
    VEC3D_T n=Norm2();
    if( n > 0.) {            // time-like vector
        // e.g. physical particle

        return r/r0;           // boost to r.f. where r=0
        // i.e. particle's rest frame

    } else if ( n < 0.) {    // space-like vector
        // e.g. tachyon

        return r0*r/(r*r);     // boost to r.f. where r0=0

    }
    // light-like
    return Vec3D();          // boost=0. is returned
}
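A hedged reading of SRFBoost: Norm2() here is evidently the Minkowski square r0^2 - |r|^2 of the 4-vector (r0, r). For a time-like vector the boost velocity beta = r/r0 has |beta| < 1 and reaches the frame where the spatial part vanishes (the rest frame); for a space-like vector beta = r0*r/|r|^2 (again |beta| < 1) reaches the frame where the time component vanishes; a light-like vector admits neither, so the zero boost is returned.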
Example #10
//***** NOTE: the centroids and pca_proj_matrix files should be read only once *****
// input:
//	pca_dim: the number of dimensions to reduce to
int get_pca_vlad_feature(unsigned char *data,int nw,int nh,float *centroids,float *mean,float *pca_proj,int pca_dim,float *features)
{
	float *vlad_features = (float *)malloc(SIFT_DESCRIPTOR_DIM * VLAD_CENTROIDS_NUM *sizeof(float));
	if(vlad_features == NULL)
	{
		printf("Memory overflow error:can't apply vlad features memory!\n");
		return -1;
	}
	memset(vlad_features,0,SIFT_DESCRIPTOR_DIM * VLAD_CENTROIDS_NUM *sizeof(float));
	ExtractVladFeatures(data,nw,nh,centroids,vlad_features);
	//printf("here 1\n");

	int vlad_feature_dim = SIFT_DESCRIPTOR_DIM * VLAD_CENTROIDS_NUM;
	// char *f_pca_proj = "data/pca_proj_matrix_vladk64_flickr1Mstar.fvecs";
	// float *mean = (float *)malloc(vlad_feature_dim * sizeof(float));
	// float *pca_proj = (float *)malloc(vlad_feature_dim * 1024 *sizeof(float));	// only the 1024 eigenvectors are stored
	// ReadPCAProj(f_pca_proj,mean,pca_proj);
	// printf("here 2\n");
	
	float *pca_vlad_features = (float *)malloc(pca_dim * sizeof(float));
	if(pca_vlad_features == NULL)
	{
		printf("Memory allocation error: can't allocate pca vlad features memory!\n");
		free(vlad_features);
		return -1;
	}
	int i = 0;
	for(i = 0;i < vlad_feature_dim;++i)
	{
		vlad_features[i] -= mean[i];
	}
	int j = 0;
	for(i = 0;i < pca_dim;++i)
	{
		float sum = 0;
		for(j = 0;j < vlad_feature_dim;++j)	
		{
			sum += pca_proj[i * vlad_feature_dim + j] * vlad_features[j];
		}
		pca_vlad_features[i] = sum;
	}
	//printf("here 3\n");
	Norm2(pca_dim,pca_vlad_features);
	memcpy(features,pca_vlad_features,pca_dim * sizeof(float));
	// free(mean);
	// free(pca_proj);
	free(vlad_features);
	free(pca_vlad_features);
	return 0;
}
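In effect, the two loops above compute the PCA projection features = P*(v - mean) one output coordinate at a time, with P stored row-major in pca_proj (pca_dim rows of length vlad_feature_dim), followed by L2 normalization; Norm2(n, x) in this codebase evidently normalizes the n-vector x in place rather than returning a scalar.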
Example #11
SymmetricMatrix OptQNIPS::updateH(SymmetricMatrix&Hk, int k)
{ 
  Real mcheps  = FloatingPointPrecision::Epsilon();
  Real sqrteps = sqrt(mcheps);

  NLP1* nlp1 = nlprob();
  int ndim  = nlp1->getDim();
  real yts, snrm, ynrm, sBs, maxres;
  ColumnVector xc, yk, sk, res, Bsk;
  ColumnVector gradl_curr, gradl_prev, yzmultiplier;
  Matrix Htmp(ndim,ndim);

  if(k == 0){
     initHessian();
     Hk = hessl;
     return Hk;
  }

  // Compute change in x and Lagrangian gradient 
  xc = nlp1->getXc();
  sk = xc - xprev; 
  yzmultiplier = y & z;
  gradl_curr   = getGradL();

  if( nlp->hasConstraints() )
      gradl_prev = gprev - constraintGradientPrev*yzmultiplier;
  else
      gradl_prev = gprev;

  yk = gradl_curr - gradl_prev; 

  // If yts too small, skip bfgs
  yts  = Dot(sk,yk);
  snrm = Norm2(sk);
  ynrm = Norm2(yk);

  if(yts <= sqrteps*snrm*ynrm){
    if (debug_) {
      *optout << "UpdateH: <y,s> = " << e(yts,12,4) << " is too small\n";
      *optout << "UpdateH: The BFGS update is skipped\n";
    }
    hessl = Hk;
    return Hk;
  }

  res = yk - Hk*sk;
  maxres = res.NormInfinity() ;
     
  if(maxres <= sqrteps){
    if (debug_) {
      *optout << "UpdateH: y-Hs = " << e(maxres,12,4) << " is too small\n";
      *optout << "UpdateH: The BFGS update is skipped\n";
    }
    hessl = Hk;
    return Hk;
  }

  // If s'Hs too small, skip bfgs
  Bsk = Hk*sk;
  sBs  = Dot(sk,Bsk);

  if(sBs <= sqrteps*snrm*snrm){
    if (debug_) {
      *optout << "UpdateH: The BFGS update is skipped\n";
    }
    hessl = Hk;
    return Hk;
  }

  // Perfom BFGS update 
  Htmp  = -(Bsk * Bsk.t()) / sBs;
  Htmp +=  (yk * yk.t()) / yts;
  Htmp  = Hk + Htmp;
  Hk << Htmp;
  Htmp.Release();
  if (debug_) {
    *optout << "\nUpdateH: after update, k = " << k << "\n";
    *optout << "UpdateH: sBs  = " << sBs << "\n";
  }
  hessl = Hk;
  return Hk;
}
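For reference, the update performed above is the textbook BFGS formula

    H_{k+1} = H_k - (H_k s_k s_k^T H_k)/(s_k^T H_k s_k) + (y_k y_k^T)/(y_k^T s_k)

with s_k the step in x and y_k the change in the Lagrangian gradient. The early returns skip the update whenever y_k^T s_k or s_k^T H_k s_k is too small (protecting positive definiteness of H_{k+1}) or when ||y_k - H_k s_k||_inf is negligible (the correction would be numerical noise).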
Example #12
void OptCG::optimize()
//------------------------------------------------------------------------
// Nonlinear Preconditioned Conjugate Gradient
// 
// Given a nonlinear operator objfcn find the minimizer using a
// nonlinear conjugate gradient method
// This version uses the Polak-Ribiere formula.
// and a line search routine due to More and Thuente as implemented
// in the routines mcsrch and mcstep
//
// Notes: The parameters ftol and gtol should be set so that
//        0 < ftol < gtol < 0.5
//        Default values: ftol = 1.e-1, gtol = 5.e-1
//        This results in a fairly accurate line search
//
// Here is the mathematical description of the algorithm
// (g = grad f).
//                     -1
//        1.  set z = M  g, search = -z; 
//
//        2.  for i=0 until convergence
//
//                 find alpha that minimizes f(x + alpha*search)
//                 subject to the strong Wolfe conditions
//
//                 Test for convergence
//
//
//                 beta = -( g  ,  (z     - z ) ) / ( g  , z  )
//                            i+1    i + 1   i         i    i
//
//                 search     =  - z     +   beta * search
//                       i+1        i+1                   i
//
//----------------------------------------------------------------------------
     
{

  int i, nlcg_iter;
  int convgd = 0;
  int step_type;

  double beta;
  double delta, delta_old, delta_mid, delta_new;
  double slope, gnorm;

  double step;
  double zero = 0.;
  
// Allocate local vectors 

  int n = dim;
  int maxiter;
  double fvalue;
  ColumnVector search(n), grad(n), z(n), diag(n), xc(n);

// Initialize iteration

  maxiter = tol.getMaxIter();

  initOpt();

  if (ret_code == 0) {
    //  compute preconditioned gradient

    diag = getFcnScale();
    grad = nlp->getGrad();
    for (i=1; i<=n; i++) z(i) = grad(i)/diag(i);

    search    = -z;
    delta_old = delta_new = Dot(grad,z);
    gnorm     = sqrt(Dot(grad,grad));

    step    = 1.0/gnorm;


    for (nlcg_iter=1; nlcg_iter <= maxiter; nlcg_iter++) {

      iter_taken = nlcg_iter;

      //  compute a step along the direction search 

      if ((step_type = computeStep(search)) < 0) {
	setMesg("Algorithm terminated - No longer able to compute step with sufficient decrease");
	ret_code = step_type;
        setReturnCode(ret_code);
	return;
      }
    
      //  Accept this step and update the nonlinear model

      acceptStep(nlcg_iter, step_type);
      updateModel(nlcg_iter, n, xprev);

      xc         = nlp->getXc();
      mem_step   = xc - xprev;
      step       = Norm2(mem_step);

      fvalue     = nlp->getF();
      grad       = nlp->getGrad();
      gnorm      = sqrt(Dot(grad,grad));
      slope      = Dot(grad,search);

      //  Test for Convergence

      convgd = checkConvg();
      if (convgd > 0) {
	ret_code = convgd;
        setReturnCode(ret_code);
	*optout  << d(nlcg_iter,5) << " " << e(fvalue,12,4)  << " "
		 << e(gnorm,12,4)  << e(step,12,4) << "\n";
	return;
      }

      //
      //  compute a new search direction
      //  1. compute preconditioned gradient,  z = grad;
      //  2. beta is computed using Polak-Ribiere Formula constrained 
      //     so that beta > 0
      //  3  Update search direction and norms 
  
      delta_old = delta_new; delta_mid = Dot(grad,z);

      for (i=1; i<=n; i++) z(i) = grad(i)/diag(i);

      delta_new = Dot(grad,z);
      delta     = delta_new - delta_mid;
      beta      = max(zero,delta/delta_old);

      search = -z + search*beta;

      xprev  = nlp->getXc();
      fprev  = fvalue;
      gprev  = grad;

      *optout 
	<< d(nlcg_iter,5) << " " << e(fvalue,12,4) << " " << e(gnorm,12,4) 
	<< e(step,12,4)   << " " << e(beta,12,4)   << " " << e(slope,12,4) 
	<< d(fcn_evals,4) << " " << d(grad_evals,4) << endl;
    }

    setMesg("Algorithm terminated - Number of iterations exceeds the specified limit");
    ret_code = 4;
    setReturnCode(ret_code);
  }
}
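For reference, with the diagonal preconditioner z = g/diag, the beta computed above is the "PR+" variant of Polak-Ribiere:

    beta_k = max(0, (g_{k+1}^T z_{k+1} - g_{k+1}^T z_k) / (g_k^T z_k))

where delta_mid supplies the cross term g_{k+1}^T z_k. Clamping at zero restarts the iteration with the steepest-descent direction whenever the formula would go negative.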
Example #13
void OptCG::initOpt()
{
  time_t t;
  char *c;

// get date and print out header

  t = time(NULL);
  c = asctime(localtime(&t));

  *optout << "************************************************************\n";
  *optout << "OPT++ version " << OPT_GLOBALS::OPT_VERSION << "\n";
  *optout << "Job run at " << c << "\n";
  copyright();
  *optout << "************************************************************\n";

  if (debug_)
    nlp->setDebug();

  nlp->initFcn();
  ret_code = 0;

  if(nlp->hasConstraints()){
    CompoundConstraint* constraints = nlp->getConstraints();
    ColumnVector xstart = nlp->getXc();
    double feas_tol = tol.getCTol();
    bool feasible = constraints->amIFeasible(xstart, feas_tol);
    if (!feasible) {
      *optout << "OptCG WARNING:  Initial guess not feasible.\n"
              << "CG may be unable to make progress." << endl;
    }
    //cerr << "Error: OptCG does not support bound, linear, or nonlinear "
    //     << "constraints.\n       Please select a different method for "
    //     << "constrained problems." << endl;
    //abort_handler(-1);
  }

  if (ret_code == 0) {
    double fvalue, gnorm;

    int n     = nlp->getDim();
    nlp->eval();

    fvalue  = nlp->getF();
    fprev   = fvalue;
    xprev   = nlp->getXc();
    gprev   = nlp->getGrad();  gnorm = Norm2(gprev);


    *optout << "\n\t\t\t\tNonlinear CG"
	    << "\n  Iter      F(x)       ||grad||    "
	    << "||step||     beta       gtp        fcn\n\n"
	    << d(0,5) << " " << e(fvalue,12,4) << " " << e(gnorm,12,4) << endl;

    if (debug_) {
      nlp->fPrintState(optout, "qnewton: Initial Guess");
      *optout << "xc, grad, step\n";
      for(int i=1; i<=n; i++)
	*optout << d(i,6) << e(xprev(i),24,16) << e(gprev(i),24,16) << "\n";
    }
  }
}
Example #14
real OptCG::stepTolNorm() const
{
  return Norm2(nlp->getXc()-xprev);
}
Example #15
bool rl_nac::check_for_convergence( vector_t& guess, vector_t& prev_guess ) {
    Copy( guess, tmpvec );
    Add( -1, prev_guess, tmpvec ); // Add(a,B,C) computes C = C + a*B
    return ( Norm2(tmpvec) < epsilon );
}
Example #16
 Real BiCGstab::norm2(const Array& a) const {
     return Norm2(a);
 }
Example #17
 // returns K(||X-Y||) where X,Y are vectors
 Real kernelAbs(const Array& X, const Array& Y)const{
     return kernel_(Norm2(X-Y));
 }
Example #18
int main() try 
{
    // Several ways to create and initialize band matrices:

    // Create with uninitialized values
    tmv::BandMatrix<double> m1(6,6,1,2);
    for(int i=0;i<m1.nrows();i++) 
        for(int j=0;j<m1.ncols();j++) 
            if (i<=j+m1.nlo() && j<=i+m1.nhi())
                m1(i,j) = 3.*i-j*j+7.; 
    std::cout<<"m1 =\n"<<m1;
    //! m1 =
    //! 6  6  
    //! ( 7  6  3  0  0  0 )
    //! ( 10  9  6  1  0  0 )
    //! ( 0  12  9  4  -3  0 )
    //! ( 0  0  12  7  0  -9 )
    //! ( 0  0  0  10  3  -6 )
    //! ( 0  0  0  0  6  -3 )

    // Create with all 2's.
    tmv::BandMatrix<double> m2(6,6,1,3,2.);
    std::cout<<"m2 =\n"<<m2;
    //! m2 =
    //! 6  6  
    //! ( 2  2  2  2  0  0 )
    //! ( 2  2  2  2  2  0 )
    //! ( 0  2  2  2  2  2 )
    //! ( 0  0  2  2  2  2 )
    //! ( 0  0  0  2  2  2 )
    //! ( 0  0  0  0  2  2 )

    // A BandMatrix can be non-square:
    tmv::BandMatrix<double> m3(6,8,1,3,2.);
    std::cout<<"m3 =\n"<<m3;
    //! m3 =
    //! 6  8  
    //! ( 2  2  2  2  0  0  0  0 )
    //! ( 2  2  2  2  2  0  0  0 )
    //! ( 0  2  2  2  2  2  0  0 )
    //! ( 0  0  2  2  2  2  2  0 )
    //! ( 0  0  0  2  2  2  2  2 )
    //! ( 0  0  0  0  2  2  2  2 )

    // Create from given elements:
    double mm[20] = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20};
    tmv::BandMatrix<double,tmv::ColMajor> m4(6,6,2,1);
    std::copy(mm,mm+20,m4.colmajor_begin());
    std::cout<<"m4 (ColMajor) =\n"<<m4;
    //! m4 (ColMajor) =
    //! 6  6  
    //! ( 1  4  0  0  0  0 )
    //! ( 2  5  8  0  0  0 )
    //! ( 3  6  9  12  0  0 )
    //! ( 0  7  10  13  16  0 )
    //! ( 0  0  11  14  17  19 )
    //! ( 0  0  0  15  18  20 )
    tmv::BandMatrix<double,tmv::RowMajor> m5(6,6,2,1);
    std::copy(mm,mm+20,m5.rowmajor_begin());
    std::cout<<"m5 (RowMajor) =\n"<<m5;
    //! m5 (RowMajor) =
    //! 6  6  
    //! ( 1  2  0  0  0  0 )
    //! ( 3  4  5  0  0  0 )
    //! ( 6  7  8  9  0  0 )
    //! ( 0  10  11  12  13  0 )
    //! ( 0  0  14  15  16  17 )
    //! ( 0  0  0  18  19  20 )
    tmv::BandMatrix<double,tmv::DiagMajor> m6(6,6,2,1);
    std::copy(mm,mm+20,m6.diagmajor_begin());
    std::cout<<"m6 (DiagMajor) =\n"<<m6;
    //! m6 (DiagMajor) =
    //! 6  6  
    //! ( 10  16  0  0  0  0 )
    //! ( 5  11  17  0  0  0 )
    //! ( 1  6  12  18  0  0 )
    //! ( 0  2  7  13  19  0 )
    //! ( 0  0  3  8  14  20 )
    //! ( 0  0  0  4  9  15 )

    // Can make from the banded portion of a regular Matrix:
    tmv::Matrix<double> xm(6,6);
    for(int i=0;i<xm.nrows();i++) 
        for(int j=0;j<xm.ncols();j++) 
            xm(i,j) = 5.*i-j*j+3.; 
    tmv::BandMatrix<double> m7(xm,3,2);
    std::cout<<"m7 =\n"<<m7;
    //! m7 =
    //! 6  6  
    //! ( 3  2  -1  0  0  0 )
    //! ( 8  7  4  -1  0  0 )
    //! ( 13  12  9  4  -3  0 )
    //! ( 18  17  14  9  2  -7 )
    //! ( 0  22  19  14  7  -2 )
    //! ( 0  0  24  19  12  3 )
    // Or from a wider BandMatrix:
    tmv::BandMatrix<double> m8(m7,3,0);
    std::cout<<"m8 =\n"<<m8;
    //! m8 =
    //! 6  6  
    //! ( 3  0  0  0  0  0 )
    //! ( 8  7  0  0  0  0 )
    //! ( 13  12  9  0  0  0 )
    //! ( 18  17  14  9  0  0 )
    //! ( 0  22  19  14  7  0 )
    //! ( 0  0  24  19  12  3 )

    // Shortcuts to Bi- and Tri-diagonal matrices:
    tmv::Vector<double> v1(5,1.);
    tmv::Vector<double> v2(6,2.);
    tmv::Vector<double> v3(5,3.);
    tmv::BandMatrix<double> m9 = LowerBiDiagMatrix(v1,v2);
    tmv::BandMatrix<double> m10 = UpperBiDiagMatrix(v2,v3);
    tmv::BandMatrix<double> m11 = TriDiagMatrix(v1,v2,v3);
    std::cout<<"LowerBiDiagMatrix(v1,v2) =\n"<<m9;
    //! LowerBiDiagMatrix(v1,v2) =
    //! 6  6  
    //! ( 2  0  0  0  0  0 )
    //! ( 1  2  0  0  0  0 )
    //! ( 0  1  2  0  0  0 )
    //! ( 0  0  1  2  0  0 )
    //! ( 0  0  0  1  2  0 )
    //! ( 0  0  0  0  1  2 )
    std::cout<<"UpperBiDiagMatrix(v2,v3) =\n"<<m10;
    //! UpperBiDiagMatrix(v2,v3) =
    //! 6  6  
    //! ( 2  3  0  0  0  0 )
    //! ( 0  2  3  0  0  0 )
    //! ( 0  0  2  3  0  0 )
    //! ( 0  0  0  2  3  0 )
    //! ( 0  0  0  0  2  3 )
    //! ( 0  0  0  0  0  2 )
    std::cout<<"TriDiagMatrix(v1,v2,v3) =\n"<<m11;
    //! TriDiagMatrix(v1,v2,v3) =
    //! 6  6  
    //! ( 2  3  0  0  0  0 )
    //! ( 1  2  3  0  0  0 )
    //! ( 0  1  2  3  0  0 )
    //! ( 0  0  1  2  3  0 )
    //! ( 0  0  0  1  2  3 )
    //! ( 0  0  0  0  1  2 )


    // Norms, etc. 

    std::cout<<"Norm1(m1) = "<<Norm1(m1)<<std::endl;
    //! Norm1(m1) = 30
    std::cout<<"Norm2(m1) = "<<Norm2(m1)<<std::endl;
    //! Norm2(m1) = 24.0314
    std::cout<<"NormInf(m1) = "<<NormInf(m1)<<std::endl;
    //! NormInf(m1) = 28
    std::cout<<"NormF(m1) = "<<NormF(m1)<<" = "<<Norm(m1)<<std::endl;
    //! NormF(m1) = 32.0312 = 32.0312
    std::cout<<"MaxAbsElement(m1) = "<<MaxAbsElement(m1)<<std::endl;
    //! MaxAbsElement(m1) = 12
    std::cout<<"Trace(m1) = "<<Trace(m1)<<std::endl;
    //! Trace(m1) = 32
    std::cout<<"Det(m1) = "<<Det(m1)<<std::endl;
    //! Det(m1) = 67635
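    // Note: in TMV, Norm2(m) is the operator 2-norm (the largest singular
    // value), while NormF(m) == Norm(m) is the Frobenius norm
    // sqrt(sum |m(i,j)|^2); hence Norm2(m1) <= NormF(m1) above.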


    // Views:

    std::cout<<"m1 =\n"<<m1;
    //! m1 =
    //! 6  6  
    //! ( 7  6  3  0  0  0 )
    //! ( 10  9  6  1  0  0 )
    //! ( 0  12  9  4  -3  0 )
    //! ( 0  0  12  7  0  -9 )
    //! ( 0  0  0  10  3  -6 )
    //! ( 0  0  0  0  6  -3 )
    std::cout<<"m1.diag() = "<<m1.diag()<<std::endl;
    //! m1.diag() = 6  ( 7  9  9  7  3  -3 )
    std::cout<<"m1.diag(1) = "<<m1.diag(1)<<std::endl;
    //! m1.diag(1) = 5  ( 6  6  4  0  -6 )
    std::cout<<"m1.diag(-1) = "<<m1.diag(-1)<<std::endl;
    //! m1.diag(-1) = 5  ( 10  12  12  10  6 )
    std::cout<<"m1.subBandMatrix(0,3,0,3,1,1) =\n"<<
        m1.subBandMatrix(0,3,0,3,1,1);
    //! m1.subBandMatrix(0,3,0,3,1,1) =
    //! 3  3  
    //! ( 7  6  0 )
    //! ( 10  9  6 )
    //! ( 0  12  9 )
    std::cout<<"m1.transpose() =\n"<<m1.transpose();
    //! m1.transpose() =
    //! 6  6  
    //! ( 7  10  0  0  0  0 )
    //! ( 6  9  12  0  0  0 )
    //! ( 3  6  9  12  0  0 )
    //! ( 0  1  4  7  10  0 )
    //! ( 0  0  -3  0  3  6 )
    //! ( 0  0  0  -9  -6  -3 )

    // rowRange, colRange shrink both dimensions of the matrix to include only
    // the portions that are in those rows or columns:
    std::cout<<"m1.rowRange(0,4) =\n"<<m1.rowRange(0,4);
    //! m1.rowRange(0,4) =
    //! 4  6  
    //! ( 7  6  3  0  0  0 )
    //! ( 10  9  6  1  0  0 )
    //! ( 0  12  9  4  -3  0 )
    //! ( 0  0  12  7  0  -9 )
    std::cout<<"m1.colRange(1,4) =\n"<<m1.colRange(1,4);
    //! m1.colRange(1,4) =
    //! 5  3  
    //! ( 6  3  0 )
    //! ( 9  6  1 )
    //! ( 12  9  4 )
    //! ( 0  12  7 )
    //! ( 0  0  10 )
    std::cout<<"m1.diagRange(0,2) =\n"<<m1.diagRange(0,2);
    //! m1.diagRange(0,2) =
    //! 6  6  
    //! ( 7  6  0  0  0  0 )
    //! ( 0  9  6  0  0  0 )
    //! ( 0  0  9  4  0  0 )
    //! ( 0  0  0  7  0  0 )
    //! ( 0  0  0  0  3  -6 )
    //! ( 0  0  0  0  0  -3 )
    std::cout<<"m1.diagRange(-1,1) =\n"<<m1.diagRange(-1,1);
    //! m1.diagRange(-1,1) =
    //! 6  6  
    //! ( 7  0  0  0  0  0 )
    //! ( 10  9  0  0  0  0 )
    //! ( 0  12  9  0  0  0 )
    //! ( 0  0  12  7  0  0 )
    //! ( 0  0  0  10  3  0 )
    //! ( 0  0  0  0  6  -3 )


    // Fortran Indexing:

    tmv::BandMatrix<double,tmv::FortranStyle> fm1 = m1;
    std::cout<<"fm1 = m1 =\n"<<fm1;
    //! fm1 = m1 =
    //! 6  6  
    //! ( 7  6  3  0  0  0 )
    //! ( 10  9  6  1  0  0 )
    //! ( 0  12  9  4  -3  0 )
    //! ( 0  0  12  7  0  -9 )
    //! ( 0  0  0  10  3  -6 )
    //! ( 0  0  0  0  6  -3 )
    std::cout<<"fm1(1,1) = "<<fm1(1,1)<<std::endl;
    //! fm1(1,1) = 7
    std::cout<<"fm1(4,3) = "<<fm1(4,3)<<std::endl;
    //! fm1(4,3) = 12
    std::cout<<"fm1.subBandMatrix(1,3,1,3,1,1) =\n"<<
        fm1.subBandMatrix(1,3,1,3,1,1);
    //! fm1.subBandMatrix(1,3,1,3,1,1) =
    //! 3  3  
    //! ( 7  6  0 )
    //! ( 10  9  6 )
    //! ( 0  12  9 )
    std::cout<<"fm1.rowRange(1,4) =\n"<<fm1.rowRange(1,4);
    //! fm1.rowRange(1,4) =
    //! 4  6  
    //! ( 7  6  3  0  0  0 )
    //! ( 10  9  6  1  0  0 )
    //! ( 0  12  9  4  -3  0 )
    //! ( 0  0  12  7  0  -9 )
    std::cout<<"fm1.colRange(2,4) =\n"<<fm1.colRange(2,4);
    //! fm1.colRange(2,4) =
    //! 5  3  
    //! ( 6  3  0 )
    //! ( 9  6  1 )
    //! ( 12  9  4 )
    //! ( 0  12  7 )
    //! ( 0  0  10 )
    std::cout<<"fm1.diagRange(0,1) =\n"<<fm1.diagRange(0,1);
    //! fm1.diagRange(0,1) =
    //! 6  6  
    //! ( 7  6  0  0  0  0 )
    //! ( 0  9  6  0  0  0 )
    //! ( 0  0  9  4  0  0 )
    //! ( 0  0  0  7  0  0 )
    //! ( 0  0  0  0  3  -6 )
    //! ( 0  0  0  0  0  -3 )
    std::cout<<"fm1.diagRange(-1,0) =\n"<<fm1.diagRange(-1,0);
    //! fm1.diagRange(-1,0) =
    //! 6  6  
    //! ( 7  0  0  0  0  0 )
    //! ( 10  9  0  0  0  0 )
    //! ( 0  12  9  0  0  0 )
    //! ( 0  0  12  7  0  0 )
    //! ( 0  0  0  10  3  0 )
    //! ( 0  0  0  0  6  -3 )


    // Matrix arithmetic:

    tmv::BandMatrix<double> m1pm2 = m1 + m2;
    std::cout<<"m1 + m2 =\n"<<m1pm2;
    //! m1 + m2 =
    //! 6  6  
    //! ( 9  8  5  2  0  0 )
    //! ( 12  11  8  3  2  0 )
    //! ( 0  14  11  6  -1  2 )
    //! ( 0  0  14  9  2  -7 )
    //! ( 0  0  0  12  5  -4 )
    //! ( 0  0  0  0  8  -1 )
    // Works correctly even if matrices are stored in different order:
    tmv::BandMatrix<double> m5pm6 = m5 + m6; 
    std::cout<<"m5 + m6 =\n"<<m5pm6;
    //! m5 + m6 =
    //! 6  6  
    //! ( 11  18  0  0  0  0 )
    //! ( 8  15  22  0  0  0 )
    //! ( 7  13  20  27  0  0 )
    //! ( 0  12  18  25  32  0 )
    //! ( 0  0  17  23  30  37 )
    //! ( 0  0  0  22  28  35 )
    // Also expands the number of off-diagonals appropriately as needed:
    tmv::BandMatrix<double> m2pm4 = m2 + m4; 
    std::cout<<"m2 + m4 =\n"<<m2pm4;
    //! m2 + m4 =
    //! 6  6  
    //! ( 3  6  2  2  0  0 )
    //! ( 4  7  10  2  2  0 )
    //! ( 3  8  11  14  2  2 )
    //! ( 0  7  12  15  18  2 )
    //! ( 0  0  11  16  19  21 )
    //! ( 0  0  0  15  20  22 )

    m1 *= 2.;
    std::cout<<"m1 *= 2 =\n"<<m1;
    //! m1 *= 2 =
    //! 6  6  
    //! ( 14  12  6  0  0  0 )
    //! ( 20  18  12  2  0  0 )
    //! ( 0  24  18  8  -6  0 )
    //! ( 0  0  24  14  0  -18 )
    //! ( 0  0  0  20  6  -12 )
    //! ( 0  0  0  0  12  -6 )

    m2 += m1;
    std::cout<<"m2 += m1 =\n"<<m2;
    //! m2 += m1 =
    //! 6  6  
    //! ( 16  14  8  2  0  0 )
    //! ( 22  20  14  4  2  0 )
    //! ( 0  26  20  10  -4  2 )
    //! ( 0  0  26  16  2  -16 )
    //! ( 0  0  0  22  8  -10 )
    //! ( 0  0  0  0  14  -4 )

    tmv::Vector<double> v = xm.col(0);
    std::cout<<"v = "<<v<<std::endl;
    //! v = 6  ( 3  8  13  18  23  28 )
    std::cout<<"m1 * v = "<<m1*v<<std::endl;
    //! m1 * v = 6  ( 216  396  432  60  162  108 )
    std::cout<<"v * m1 = "<<v*m1<<std::endl;
    //! v * m1 = 6  ( 202  492  780  832  396  -768 )

    // Matrix * matrix product also expands bands appropriately:
    tmv::BandMatrix<double> m1m2 = m1 * m2; 
    std::cout<<"m1 * m2 =\n"<<m1m2;
    //! m1 * m2 =
    //! 6  6  
    //! ( 488  592  400  136  0  12 )
    //! ( 716  952  704  264  -8  -8 )
    //! ( 528  948  904  272  -56  -32 )
    //! ( 0  624  844  464  -320  -104 )
    //! ( 0  0  520  452  -80  -332 )
    //! ( 0  0  0  264  12  -96 )

    // Can mix BandMatrix with other kinds of matrices:
    std::cout<<"xm * m1 =\n"<<xm*m1;
    //! xm * m1 =
    //! 6  6  
    //! ( 82  48  -120  -348  -336  396 )
    //! ( 252  318  180  -128  -276  216 )
    //! ( 422  588  480  92  -216  36 )
    //! ( 592  858  780  312  -156  -144 )
    //! ( 762  1128  1080  532  -96  -324 )
    //! ( 932  1398  1380  752  -36  -504 )
    tmv::UpperTriMatrix<double> um(xm);
    std::cout<<"um + m1 =\n"<<um+m1;
    //! um + m1 =
    //! 6  6  
    //! ( 17  14  5  -6  -13  -22 )
    //! ( 20  25  16  1  -8  -17 )
    //! ( 0  24  27  12  -9  -12 )
    //! ( 0  0  24  23  2  -25 )
    //! ( 0  0  0  20  13  -14 )
    //! ( 0  0  0  0  12  -3 )
    tmv::LowerTriMatrix<double> lm(xm);
    lm *= m8;
    std::cout<<"lm *= m8 =\n"<<lm;
    //! lm *= m8 =
    //! 6  6  
    //! ( 9  0  0  0  0  0 )
    //! ( 80  49  0  0  0  0 )
    //! ( 252  192  81  0  0  0 )
    //! ( 534  440  252  81  0  0 )
    //! ( 744  774  500  224  49  0 )
    //! ( 954  1064  782  396  120  9 )
    tmv::DiagMatrix<double> dm(xm);
    m1 *= dm;
    std::cout<<"m1 *= dm =\n"<<m1;
    //! m1 *= dm =
    //! 6  6  
    //! ( 42  84  54  0  0  0 )
    //! ( 60  126  108  18  0  0 )
    //! ( 0  168  162  72  -42  0 )
    //! ( 0  0  216  126  0  -54 )
    //! ( 0  0  0  180  42  -36 )
    //! ( 0  0  0  0  84  -18 )
    return 0;
} catch (tmv::Error& e) {
    std::cerr<<e<<std::endl;
    return 1;
}
Example #19
 template <typename T2, typename A>
 inline const typename A::BASE_TYPE Norm(const VectorExpression<T2, A> &expr, bool concurrent = false) {
     return std::sqrt(Norm2(expr,concurrent));
 }
Example #20
int TAGMFast::MLEGradAscent(const double& Thres, const int& MaxIter, const TStr PlotNm, const double StepAlpha, const double StepBeta) {
  time_t InitTime = time(NULL);
  TExeTm ExeTm, CheckTm;
  int iter = 0, PrevIter = 0;
  TIntFltPrV IterLV;
  TUNGraph::TNodeI UI;
  double PrevL = TFlt::Mn, CurL = 0.0;
  TIntV NIdxV(F.Len(), 0);
  for (int i = 0; i < F.Len(); i++) { NIdxV.Add(i); }
  IAssert(NIdxV.Len() == F.Len());
  TIntFltH GradV;
  while(iter < MaxIter) {
    NIdxV.Shuffle(Rnd);
    for (int ui = 0; ui < F.Len(); ui++, iter++) {
      int u = NIdxV[ui]; //
      //find the set of candidate communities c (we only need to consider those to which a neighbor of u belongs)
      UI = G->GetNI(u);
      TIntSet CIDSet(5 * UI.GetDeg());
      for (int e = 0; e < UI.GetDeg(); e++) {
        if (HOVIDSV[u].IsKey(UI.GetNbrNId(e))) { continue; }
        TIntFltH& NbhCIDH = F[UI.GetNbrNId(e)];
        for (TIntFltH::TIter CI = NbhCIDH.BegI(); CI < NbhCIDH.EndI(); CI++) {
          CIDSet.AddKey(CI.GetKey());
        }
      }
      for (TIntFltH::TIter CI = F[u].BegI(); CI < F[u].EndI(); CI++) { //remove the community membership which U does not share with its neighbors
        if (! CIDSet.IsKey(CI.GetKey())) {
          DelCom(u, CI.GetKey());
        }
      }
      if (CIDSet.Empty()) { continue; }
      GradientForRow(u, GradV, CIDSet);
      if (Norm2(GradV) < 1e-4) { continue; }
      double LearnRate = GetStepSizeByLineSearch(u, GradV, GradV, StepAlpha, StepBeta);
      if (LearnRate == 0.0) { continue; }
      for (int ci = 0; ci < GradV.Len(); ci++) {
        int CID = GradV.GetKey(ci);
        double Change = LearnRate * GradV.GetDat(CID);
        double NewFuc = GetCom(u, CID) + Change;
        if (NewFuc <= 0.0) {
          DelCom(u, CID);
        } else {
          AddCom(u, CID, NewFuc);
        }
      }
      if (! PlotNm.Empty() && (iter + 1) % G->GetNodes() == 0) {
        IterLV.Add(TIntFltPr(iter, Likelihood(false)));
      }
    }
    printf("\r%d iterations (%f) [%lu sec]", iter, CurL, time(NULL) - InitTime);
    fflush(stdout);
    if (iter - PrevIter >= 2 * G->GetNodes() && iter > 10000) {
      PrevIter = iter;
      CurL = Likelihood();
      if (PrevL > TFlt::Mn && ! PlotNm.Empty()) {
        printf("\r%d iterations, Likelihood: %f, Diff: %f", iter, CurL,  CurL - PrevL);
      }
      fflush(stdout);
      if (CurL - PrevL <= Thres * fabs(PrevL)) { break; }
      else { PrevL = CurL; }
    }
    
  }
  printf("\n");
  printf("MLE for Lambda completed with %d iterations(%s)\n", iter, ExeTm.GetTmStr());
  if (! PlotNm.Empty()) {
    TGnuPlot::PlotValV(IterLV, PlotNm + ".likelihood_Q");
  }
  return iter;
}
Example #21
int TAGMFast::MLEGradAscentParallel(const double& Thres, const int& MaxIter, const int ChunkNum, const int ChunkSize, const TStr PlotNm, const double StepAlpha, const double StepBeta) {
  //parallel
  time_t InitTime = time(NULL);
  uint64 StartTm = TSecTm::GetCurTm().GetAbsSecs();
  TExeTm ExeTm, CheckTm;
  double PrevL = Likelihood(true);
  TIntFltPrV IterLV;
  int PrevIter = 0;
  int iter = 0;
  TIntV NIdxV(F.Len(), 0);
  for (int i = 0; i < F.Len(); i++) { NIdxV.Add(i); }
  TIntV NIDOPTV(F.Len()); //check if a node needs optimization or not 1: does not require optimization
  NIDOPTV.PutAll(0);
  TVec<TIntFltH> NewF(ChunkNum * ChunkSize);
  TIntV NewNIDV(ChunkNum * ChunkSize);
  for (iter = 0; iter < MaxIter; iter++) {
    NIdxV.Clr(false);
    for (int i = 0; i < F.Len(); i++) { 
      if (NIDOPTV[i] == 0) {  NIdxV.Add(i); }
    }
    IAssert (NIdxV.Len() <= F.Len());
    NIdxV.Shuffle(Rnd);
    // compute gradient for chunk of nodes
#pragma omp parallel for schedule(static, 1)
    for (int TIdx = 0; TIdx < ChunkNum; TIdx++) {
      TIntFltH GradV;
      for (int ui = TIdx * ChunkSize; ui < (TIdx + 1) * ChunkSize; ui++) {
        NewNIDV[ui] = -1;
        if (ui >= NIdxV.Len()) { continue; } // >= guards the last, possibly partial chunk
        int u = NIdxV[ui]; //
        //find the set of candidate communities c (we only need to consider those to which a neighbor of u belongs)
        TUNGraph::TNodeI UI = G->GetNI(u);
        TIntSet CIDSet(5 * UI.GetDeg());
        TIntFltH CurFU = F[u];
        for (int e = 0; e < UI.GetDeg(); e++) {
          if (HOVIDSV[u].IsKey(UI.GetNbrNId(e))) { continue; }
          TIntFltH& NbhCIDH = F[UI.GetNbrNId(e)];
          for (TIntFltH::TIter CI = NbhCIDH.BegI(); CI < NbhCIDH.EndI(); CI++) {
            CIDSet.AddKey(CI.GetKey());
          }
        }
        if (CIDSet.Empty()) { 
          CurFU.Clr();
        }
        else {
          for (TIntFltH::TIter CI = CurFU.BegI(); CI < CurFU.EndI(); CI++) { //remove the community membership which U does not share with its neighbors
            if (! CIDSet.IsKey(CI.GetKey())) {
              CurFU.DelIfKey(CI.GetKey());
            }
          }
          GradientForRow(u, GradV, CIDSet);
          if (Norm2(GradV) < 1e-4) { NIDOPTV[u] = 1; continue; }
          double LearnRate = GetStepSizeByLineSearch(u, GradV, GradV, StepAlpha, StepBeta, 5);
          if (LearnRate <= 1e-5) { NewNIDV[ui] = -2; continue; }
          for (int ci = 0; ci < GradV.Len(); ci++) {
            int CID = GradV.GetKey(ci);
            double Change = LearnRate * GradV.GetDat(CID);
            double NewFuc = CurFU.IsKey(CID)? CurFU.GetDat(CID) + Change : Change;
            if (NewFuc <= 0.0) {
              CurFU.DelIfKey(CID);
            } else {
              CurFU.AddDat(CID) = NewFuc;
            }
          }
          CurFU.Defrag();
        }
        //store changes
        NewF[ui] = CurFU;
        NewNIDV[ui] = u;
      }
    }
    int NumNoChangeGrad = 0;
    int NumNoChangeStepSize = 0;
    for (int ui = 0; ui < NewNIDV.Len(); ui++) {
      int NewNID = NewNIDV[ui];
      if (NewNID == -1) { NumNoChangeGrad++; continue; }
      if (NewNID == -2) { NumNoChangeStepSize++; continue; }
      for (TIntFltH::TIter CI = F[NewNID].BegI(); CI < F[NewNID].EndI(); CI++) {
        SumFV[CI.GetKey()] -= CI.GetDat();
      }
    }
#pragma omp parallel for
    for (int ui = 0; ui < NewNIDV.Len(); ui++) {
      int NewNID = NewNIDV[ui];
      if (NewNID < 0) { continue; }
      F[NewNID] = NewF[ui];
    }
    for (int ui = 0; ui < NewNIDV.Len(); ui++) {
      int NewNID = NewNIDV[ui];
      if (NewNID < 0) { continue; }
      for (TIntFltH::TIter CI = F[NewNID].BegI(); CI < F[NewNID].EndI(); CI++) {
        SumFV[CI.GetKey()] += CI.GetDat();
      }
    }
    // update the nodes who are optimal
    for (int ui = 0; ui < NewNIDV.Len(); ui++) {
      int NewNID = NewNIDV[ui];
      if (NewNID < 0) { continue; }
      TUNGraph::TNodeI UI = G->GetNI(NewNID);
      NIDOPTV[NewNID] = 0;
      for (int e = 0; e < UI.GetDeg(); e++) {
        NIDOPTV[UI.GetNbrNId(e)] = 0;
      }
    }
    int OPTCnt = 0;
    for (int i = 0; i < NIDOPTV.Len(); i++) { if (NIDOPTV[i] == 1) { OPTCnt++; } }
    if (! PlotNm.Empty()) {
      printf("\r%d iterations [%s] %d secs", iter * ChunkSize * ChunkNum, ExeTm.GetTmStr(), int(TSecTm::GetCurTm().GetAbsSecs() - StartTm));
      if (PrevL > TFlt::Mn) { printf(" (%f) %d g %d s %d OPT", PrevL, NumNoChangeGrad, NumNoChangeStepSize, OPTCnt); }
      fflush(stdout);
    }
    if ((iter - PrevIter) * ChunkSize * ChunkNum >= G->GetNodes()) {
      PrevIter = iter;
      double CurL = Likelihood(true);
      IterLV.Add(TIntFltPr(iter * ChunkSize * ChunkNum, CurL));
      printf("\r%d iterations, Likelihood: %f, Diff: %f [%d secs]", iter, CurL,  CurL - PrevL, int(time(NULL) - InitTime));
       fflush(stdout);
      if (CurL - PrevL <= Thres * fabs(PrevL)) { 
        break;
      }
      else {
        PrevL = CurL;
      }
    }
  }
  if (! PlotNm.Empty()) {
    printf("\nMLE completed with %d iterations(%s secs)\n", iter, int(TSecTm::GetCurTm().GetAbsSecs() - StartTm));
    TGnuPlot::PlotValV(IterLV, PlotNm + ".likelihood_Q");[]
  } else {
Example #22
Matrix GenSetBase::pllMesh(int P, ColumnVector& xc, ColumnVector& xn, double r) 
  // P : num points we want ( ~ num of processors)
  // xc: the current point; 
  // xn: the "newton" point, tells the dir in which the mesh is grown
  //  r: ||xc-xn||, if known.
  //
  // return matrix M with genset generated at y_k = xc + k*(xn-xc)
  // with radius r_k = k*r_0, r_0 = ||xc-xn||/4 (cf. r0 = 0.25*r below)
  //
  // *** current implementation not optimized for efficiency ***
  //
{

    int k = 0;
    ColumnVector xk; 
    double       rk; 
    Matrix M, A, B, C;

    ColumnVector ns = xn - xc;  // newton step

    int m = Vdim;
    int n = Size;

    // return at least the base point
    M = xn;
    int nump = P-1; 

    //--
    // main loop:
    //--
    if (r <= 0) r = Norm2(ns);
    double r0 = 0.25*r;
    double pert = r0*.05;
    while (nump > 0) {

      // generate points xc + k*newton_step + r^k*d(i), i=1..size()
      ++k;
      xk = xc + k*ns;
      rk = k*r0;
      A = generateAll(xk,rk);

      // perturbation to avoid potential overlaps
      int RAND_HALF = RAND_MAX / 2;
      for (int i=1; i<=n; i++)
	for (int j=1; j<=m; j++) {
	  int sig = (std::rand() > RAND_HALF)? 1 : -1;
	  double amp = std::rand() / (double)RAND_MAX * pert;
	  A(j,i) = A(j,i) + sig * amp ;
	}

      // after the first iteration want to add xk to M
      if (k>1) {
	B = M | xk;
	M = B;
	--nump;
      }

      // add to M as many columns as we can afford
      if (nump > n) {
	B = M | A;
      }
      else if (nump>0) { 
	C = A.SubMatrix(1,m,1,nump);
	B = M | C;
      }
      
      M = B;
      nump = nump - n;

    }
    return M;
}
Example #23
int ExtractVladFeatures(unsigned char* data, int nw, int nh, float *centroids, float* vlad_features)
{
	//printf("here 0\n");
	FILE * f_fea_time = fopen("./result/fea_time.txt","w");
	struct timeval start,end;
	int time_used;
	float *sift_features = (float *)malloc(SIFT_DESCRIPTOR_DIM * MAX_SIFT_FEATURE_NUM * sizeof(float));	// not initialized
	if(sift_features == NULL)
	{
		printf("Memory overflow error:can't apply sift features memory!\n");
		return -1;
	}
	gettimeofday(&start,NULL);
	int sift_features_num = 0;
	GetSiftFeatures(data, nw, nh, sift_features,&sift_features_num);
	printf("sift_num = %d \n",sift_features_num);
	gettimeofday(&end,NULL);
	time_used = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
	time_used /= 1000;
    printf("Get SIFT Feature Time = %d ms\n",time_used);
    fprintf(f_fea_time,"Get SIFT Feature Time = %d ms\n",time_used);
	//printf("here 1\n");

	// char *centroids_file = "data/clust_k64.fvecs";
	// int centroids_dim = 0;
	// float *centroids = (float *)malloc(SIFT_DESCRIPTOR_DIM * VLAD_CENTROIDS_NUM * sizeof(float));
	// ReadFvecs(centroids_file,&centroids_dim,centroids);
	// int cn = 0;
	// for(cn = 0;cn < SIFT_DESCRIPTOR_DIM * VLAD_CENTROIDS_NUM;cn += SIFT_DESCRIPTOR_DIM)
	// {
	// 	Norm2(SIFT_DESCRIPTOR_DIM,centroids + cn);
	// }
	// printf("here 2\n");
	gettimeofday(&start,NULL);
	int *idx = (int *)malloc(sift_features_num * sizeof(int));
	float *dis = (float *)malloc(sift_features_num * sizeof(float));
	memset(idx,0,sift_features_num * sizeof(int));
	memset(dis,0,sift_features_num * sizeof(float));
	FindNearestNeighbors(centroids,VLAD_CENTROIDS_NUM,sift_features,sift_features_num,SIFT_DESCRIPTOR_DIM,idx,dis);
	// printf("here 3\n");
	
	// float *vlad_features = (float *)malloc(SIFT_DESCRIPTOR_DIM * VLAD_CENTROIDS_NUM *sizeof(float));
	// memset(vlad_features,0,SIFT_DESCRIPTOR_DIM * VLAD_CENTROIDS_NUM *sizeof(float));
	int i = 0,j = 0;
	for(i = 0;i < sift_features_num;++i)
	{
		for(j = 0;j < SIFT_DESCRIPTOR_DIM;++j)
		{
			vlad_features[idx[i] *  SIFT_DESCRIPTOR_DIM + j] += sift_features[i * SIFT_DESCRIPTOR_DIM + j] -
																centroids[idx[i] * SIFT_DESCRIPTOR_DIM + j];
		}
	}
	// printf("here 4.1\n");
	//exit(1);
	// L2-normalized
	Norm2(SIFT_DESCRIPTOR_DIM * VLAD_CENTROIDS_NUM,vlad_features);	
	gettimeofday(&end,NULL);
	time_used = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
	time_used /= 1000;
    printf("SIFT Feature Convert to VLAD Time = %d ms\n",time_used);
    fprintf(f_fea_time,"SIFT Feature Convert to VLAD Time = %d ms\n\n",time_used);
	fclose(f_fea_time);
	// printf("here 4.2\n");
	
	//memcpy(features,vlad_features,INDEX_FEATURE_DIM * sizeof(float));	
	//printf("here 5");
	// fcolse(centroids_file);
	free(idx);
	free(dis);
	// free(centroids);
	free(sift_features);
	// free(vlad_features);
	return 0;
}