Example No. 1
File: Radii.C Project: mindis/ligra
void Compute(graph<vertex>& GA, commandLine P) {
  long n = GA.n;
  intE* radii = newA(intE,n);
  long* Visited = newA(long,n), *NextVisited = newA(long,n);
  {parallel_for(long i=0;i<n;i++) {
    radii[i] = -1;
    Visited[i] = NextVisited[i] = 0;
    }}
  long sampleSize = min(n,(long)64);
  uintE* starts = newA(uintE,sampleSize);
  
  {parallel_for(ulong i=0;i<sampleSize;i++) { //initial set of vertices
      uintE v = hashInt(i) % n;
    radii[v] = 0;
    starts[i] = v;
    NextVisited[v] = (long) 1<<i;
    }}

  vertexSubset Frontier(n,sampleSize,starts); //initial frontier of size 64

  intE round = 0;
  while(!Frontier.isEmpty()){
    round++;
    vertexMap(Frontier, Radii_Vertex_F(Visited,NextVisited));
    vertexSubset output = edgeMap(GA, Frontier, Radii_F(Visited,NextVisited,radii,round),GA.m/20);
    Frontier.del();
    Frontier = output;
  }
  free(Visited); free(NextVisited); Frontier.del(); free(radii); 
}
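All of the Ligra/PBBS examples on this page allocate raw arrays with the newA macro and release them with free(). For reference, here is a minimal sketch of the definition commonly found in those projects' utility headers (the exact fork, e.g. mindis/ligra, may differ slightly):

// Sketch of a typical newA definition from Ligra/PBBS-style utility headers:
// allocate __n elements of type __E with malloc and cast the result to __E*.
// Callers release the memory with free(), as the examples here do.
#define newA(__E, __n) ((__E*) malloc((__n) * sizeof(__E)))

Note that Example No. 2 below uses newA as an ordinary local variable name (an augmented copy of the matrix A), not this macro.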
Example No. 2
Vector<T> GaussianElimination<T>::operator()(MatrixBase<T>& A, const Vector<T>& b)
{
  Vector<T> x( b.getSize() );
  
  Matrix<T> newA(A.getNumRows(),A.getNumCols());
  newA = A;
  newA.addColumn(b);
  
  // Forward Elimination
  for(int i=1; i < newA.getNumRows(); i++)
  {
    reduceDown(newA, i);
  }
  // Backward Elimination
  for(int i=newA.getNumRows()-2; i >= 0; i--)
  {
    reduceUp(newA, i);
  }
  // Solve for x
  for(int i=0; i < x.getSize(); i++)
  {
    x[i] = newA(i,newA.getNumCols()-1) / newA(i,i);
  }
  
  return x;
}
Example No. 3
void TPtrTest::TestLinkedPtr() {
    {
        TLinkedPtr<A> a1(newA());
        TLinkedPtr<A> a2(newA());
        TLinkedPtr<A> a3 = a2;

        a1 = a2;
        a2 = a3;
    }

    UNIT_ASSERT_EQUAL(cnt, 0);
}
Example No. 4
void TPtrTest::TestSimpleIntrPtr() {
    {
        TSimpleIntrusivePtr<A> a1(newA());
        TSimpleIntrusivePtr<A> a2(newA());
        TSimpleIntrusivePtr<A> a3 = a2;

        a1 = a2;
        a2 = a3;
    }

    UNIT_ASSERT_EQUAL(cnt, 0);
}
Example No. 5
void TPtrTest::TestCopyPtr() {
    TCopyPtr<A> a1(newA());
    {
        TCopyPtr<A> a2(newA());
        TCopyPtr<A> a3 = a2;
        UNIT_ASSERT_EQUAL(cnt, 3);

        a1 = a2;
        a2 = a3;
    }
    UNIT_ASSERT_EQUAL(cnt, 1);
    a1.Destroy();

    UNIT_ASSERT_EQUAL(cnt, 0);
}
Example No. 6
void Compute(graph<vertex>& GA, commandLine P) {
  long ratio = P.getOptionLongValue("-r",1000);
  long n = GA.n;
  printf("Nodes: %d Ratio %d\n", n, ratio);

  for(size_t i(0); i < n; ++i) {
     if(i % ratio != 0)
        continue;
     intE* ShortestPathLen = newA(intE,n);
     int* Visited = newA(int,n);

     //initialize ShortestPathLen to "infinity"
     {parallel_for(long i=0;i<n;i++) ShortestPathLen[i] = INT_MAX/2;}
     ShortestPathLen[i] = 0;

     {parallel_for(long i=0;i<n;i++) Visited[i] = 0;}

     vertexSubset Frontier(n,i); //initial frontier

     long round = 0;
     while(!Frontier.isEmpty()){
        if(round == n) {
           //negative weight cycle
           {parallel_for(long i=0;i<n;i++) ShortestPathLen[i] = -(INT_E_MAX/2);}
           break;
        }
        vertexSubset output = edgeMap(GA, Frontier, BF_F(ShortestPathLen,Visited), GA.m/20, DENSE_FORWARD);
        vertexMap(output,BF_Vertex_F(Visited));
        Frontier.del();
        Frontier = output;
        round++;
     } 
Example No. 7
File: BFSCC.C Project: mindis/ligra
void Compute(graph<vertex>& GA, commandLine P) {
  long n = GA.n;
  //creates Parents array, initialized to all UINT_E_MAX (unvisited)
  uintE* Parents = newA(uintE,GA.n);
  parallel_for(long i=0;i<GA.n;i++) Parents[i] = UINT_E_MAX;
  long numVisited = 0;

  for(long i=0;i<n;i++) {
    uintE start = i;
    if(Parents[start] == UINT_E_MAX) {
      Parents[start] = start;
      vertexSubset Frontier(n,start); //creates initial frontier
      long round = 0;
      while(!Frontier.isEmpty()){ //loop until frontier is empty
	round++;
	numVisited+=Frontier.numNonzeros();
	//apply edgemap
	vertexSubset output = edgeMap(GA, Frontier, BFS_F(Parents,start),GA.m/20);    
	Frontier.del();
	Frontier = output; //set new frontier
      } 
      Frontier.del();
      if(numVisited == n) break;
    }
  }
  free(Parents); 
}
Example No. 8
void TPtrTest::TestHolderPtr() {
    {
        THolder<A> a1(newA());
        THolder<A> a2(a1.Release());
    }

    UNIT_ASSERT_EQUAL(cnt, 0);
}
Example No. 9
void Compute(graph<vertex>& GA, commandLine P) {
  long n = GA.n;
  uintE* IDs = newA(uintE,n), *prevIDs = newA(uintE,n);
  {parallel_for(long i=0;i<n;i++) IDs[i] = i;} //initialize unique IDs

  bool* frontier = newA(bool,n);
  {parallel_for(long i=0;i<n;i++) frontier[i] = 1;} 
  vertexSubset Frontier(n,n,frontier); //initial frontier contains all vertices
 
  while(!Frontier.isEmpty()){ //iterate until IDS converge
    vertexMap(Frontier,CC_Vertex_F(IDs,prevIDs));
    vertexSubset output = edgeMap(GA, Frontier, CC_F(IDs,prevIDs),GA.m/20);
    Frontier.del();
    Frontier = output;
  }
  Frontier.del(); free(IDs); free(prevIDs);
}
Example No. 10
void speculative_for(S step, int s, int e, int granularity, 
		     bool hasState=1, int maxTries=-1) {
  if (maxTries < 0) maxTries = 2*granularity;
  int maxRoundSize = (e-s)/granularity+1;
  vindex *I = newA(vindex,maxRoundSize);
  vindex *Ihold = newA(vindex,maxRoundSize);
  bool *keep = newA(bool,maxRoundSize);
  S *state;
  if (hasState) {
    state = newA(S, maxRoundSize);
    for (int i=0; i < maxRoundSize; i++) state[i] = step;
  }

  int round = 0; 
  int numberDone = s; // number of iterations done
  int numberKeep = 0; // number of iterations to carry to next round
  int failed = 0;

  while (numberDone < e) {
    //cout << "numberDone=" << numberDone << endl;
    if (round++ > maxTries) {
//      cerr << "speculativeLoop: too many iterations, increase maxTries parameter\n";
//      abort();
    }
    int size = min(maxRoundSize, e - numberDone);

    if (hasState) {
//      parallel_for (int i =0; i < size; i++) {
      parallel_doall(int, i, 0, size)  {
	if (i >= numberKeep) I[i] = numberDone + i;
	keep[i] = state[i].reserve(I[i]);
      } parallel_doall_end
    } else {
//      parallel_for (int i =0; i < size; i++) {
      parallel_doall(int, i, 0, size)  {
	if (i >= numberKeep) I[i] = numberDone + i;
	keep[i] = step.reserve(I[i]);
      } parallel_doall_end
    }

    if (hasState) {
//      parallel_for (int i =0; i < size; i++) 
      parallel_doall(int, i, 0, size) {
	if (keep[i]) keep[i] = !state[i].commit(I[i]);
      } parallel_doall_end
    } else {
Example No. 11
void TPtrTest::TestTrulePtr() {
    {
        TAutoPtr<A> a1(newA());
        TAutoPtr<A> a2(a1);
        a1 = a2;
    }

    UNIT_ASSERT_EQUAL(cnt, 0);
}
Example No. 12
void Compute(graph<vertex>& GA, commandLine P) {
  long n = GA.n, m = GA.m;
  uintE* IDs = newA(uintE,n), *prevIDs = newA(uintE,n);
  {parallel_for(long i=0;i<n;i++) {prevIDs[i] = i; IDs[i] = i;}} //initialize unique IDs

  bool* all = newA(bool,n);
  {parallel_for(long i=0;i<n;i++) all[i] = 1;} 
  vertexSubset All(n,n,all); //all vertices
  bool* active = newA(bool,n);
  {parallel_for(long i=0;i<n;i++) active[i] = 1;} 
  vertexSubset Active(n,n,active); //initial frontier contains all vertices

  while(!Active.isEmpty()){ //iterate until IDS converge
    edgeMap(GA, Active, CC_F(IDs,prevIDs),m/20,no_output);
    vertexSubset output = vertexFilter(All,CC_Shortcut(IDs,prevIDs));
    Active.del();
    Active = output;
  }
  Active.del(); All.del(); free(IDs); free(prevIDs);
}
Example No. 13
void Compute(graph<vertex>& GA, commandLine P) {
  long start = P.getOptionLongValue("-r",0);
  long n = GA.n;
  //initialize ShortestPathLen to "infinity"
  intE* ShortestPathLen = newA(intE,n);
  {parallel_for(long i=0;i<n;i++) ShortestPathLen[i] = INT_MAX/2;}
  ShortestPathLen[start] = 0;

  int* Visited = newA(int,n);
  {parallel_for(long i=0;i<n;i++) Visited[i] = 0;}

  vertexSubset Frontier(n,start); //initial frontier

  long round = 0;
  while(!Frontier.isEmpty()){
    if(round == n) {
      //negative weight cycle
      {parallel_for(long i=0;i<n;i++) ShortestPathLen[i] = -(INT_E_MAX/2);}
      break;
    }
Example No. 14
void Compute(graph<vertex>& GA, commandLine P) {
  long start = P.getOptionLongValue("-r",0);
  long n = GA.n;
  //creates Parents array, initialized to all -1, except for start
  uintE* Parents = newA(uintE,n);
  parallel_for(long i=0;i<n;i++) Parents[i] = UINT_E_MAX;
  Parents[start] = start;
  //create bitvector to mark visited vertices
  long numWords = (n+63)/64;
  long* Visited = newA(long,numWords);
  {parallel_for(long i=0;i<numWords;i++) Visited[i] = 0;}
  Visited[start/64] = (long)1 << (start % 64);
  vertexSubset Frontier(n,start); //creates initial frontier

  while(!Frontier.isEmpty()){ //loop until frontier is empty
    vertexSubset output = edgeMap(GA, Frontier, BFS_F(Parents,Visited),GA.m/20);    
    Frontier.del();
    Frontier = output; //set new frontier
  } 
  Frontier.del();
  free(Parents); free(Visited);
}
Example No. 15
void Compute(graph<vertex>& GA, commandLine P) {
  t10.start();
  char* oFile = P.getOptionValue("-out"); //file to write eccentricites
  srand (time(NULL));
  uintT seed = rand();
  cout << "seed = " << seed << endl;
  t0.start();
  long n = GA.n;
  uintE* ecc = newA(uintE,n);
  {parallel_for(long i=0;i<n;i++) ecc[i] = UINT_E_MAX;}
  t0.stop();

  //BEGIN COMPUTE CONNECTED COMPONENTS
  t1.start();
  intE* Labels = newA(intE,n);
  {parallel_for(long i=0;i<n;i++) {
    if(GA.V[i].getOutDegree() == 0) Labels[i] = -i-1; //singletons
    else Labels[i] = INT_E_MAX;
    }}
  //get max degree vertex
  uintE maxV = sequence::reduce<uintE>((intE)0,(intE)n,maxF<intE>(),getDegree<vertex>(GA.V));
  //visit large component with BFS
  CCBFS(maxV,GA,Labels);
  //visit small components with label propagation
  Components(GA, Labels);
  //sort by component ID
  intPair* CCpairs = newA(intPair,n);
  {parallel_for(long i=0;i<n;i++)
    if(Labels[i] < 0)
      CCpairs[i] = make_pair(-Labels[i]-1,i);
    else CCpairs[i] = make_pair(Labels[i],i);
  }
  free(Labels);

  intSort::iSort(CCpairs, n, n+1, firstF<uintE,uintE>());

  uintE* changes = newA(uintE,n);
  changes[0] = 0;
  {parallel_for(long i=1;i<n;i++) 
      changes[i] = (CCpairs[i].first != CCpairs[i-1].first) ? i : UINT_E_MAX;}

  uintE* CCoffsets = newA(uintE,n);
  uintE numCC = sequence::filter(changes, CCoffsets, n, nonMaxF());
  CCoffsets[numCC] = n;
  free(changes);
  t1.stop();
  //END COMPUTE CONNECTED COMPONENTS

  uintE maxS = min((uintE)n,(uintE)sqrt(n*log2(n)));
  uintE maxSampleSize = max((uintE)10,max((uintE)((n/maxS)*log2(n)),maxS));
  //data structures to be shared by all components
  uintE** Dists = newA(uintE*,maxSampleSize);
  uintE* Dist = newA(uintE,maxSampleSize*n);
  {parallel_for(long i=0;i<maxSampleSize;i++) Dists[i] = Dist+i*n;}
  {parallel_for(long i=0;i<n*maxSampleSize;i++) Dist[i] = UINT_E_MAX;}
  intPair* wDist = newA(intPair,n); 
  {parallel_for(long i=0;i<n;i++)
      wDist[i] = make_pair(UINT_E_MAX,UINT_E_MAX);}
  intPair* minDists = newA(intPair,n);
  uintE* starts = newA(uintE,n);
  uintE* starts2 = newA(uintE,n);
  uintE* maxDists = newA(uintE,n);

  //BEGIN COMPUTE ECCENTRICITES PER COMPONENT
  t4.start();
  for(long k = 0; k < numCC; k++) {
    uintE o = CCoffsets[k];
    uintE CCsize = CCoffsets[k+1] - o;
    if(CCsize == 1) ecc[CCpairs[o].second] = 0; //singletons have ecc of 0
    if(CCsize == 2) { //size 2 CC's have ecc of 1
      ecc[CCpairs[o].second] = ecc[CCpairs[o+1].second] = 1;
    } else if(CCsize > 1) {
      //do main computation
      t2.start();
      uintE s = min(CCsize,(uintE)sqrt(CCsize*log2(CCsize)));
      //pick sample of about \sqrt{n\log n} vertices
      long sampleSize = min(CCsize,max((uintE)10,(uintE)((CCsize/s)*log2(CCsize))));
      //pick random vertices
      {parallel_for(ulong i=0;i<CCsize;i++) {
	  //pick with probability sampleSize/CCsize
	  uintT index = hash(i+seed) % CCsize; 
	if(index < sampleSize) starts[i] = CCpairs[o+i].second;
	else starts[i] = UINT_E_MAX;
       	}}
      //pack down
      uintE numUnique = sequence::filter(starts,starts2,CCsize,nonMaxF());
      //sample cannot be empty!
      if(numUnique == 0) { starts2[0] = CCpairs[o+(hash(seed)%CCsize)].second; numUnique++; }
      if(numUnique > maxSampleSize) numUnique = maxSampleSize; //cap at maxSampleSize
      t2.stop();
      t3.start();
      //execute BFS per sample
      {for(long i=0;i<numUnique;i++) {
	uintE v = starts2[i];
	Dists[i][v] = 0; //set source dist to 0
	vertexSubset Frontier(n,v);
	uintE round = 0;
	while(!Frontier.isEmpty()){
	  round++;
	  vertexSubset output = 
	    edgeMap(GA, Frontier, BFS_F(Dists[i],round),GA.m/20);
	  Frontier.del();
	  Frontier = output;
	}
	Frontier.del();
	ecc[v] = round-1; //set radius for sample vertex
	}}
      t3.stop();
      t4.start();
      //store max distance from sample for each vertex so that we can
      //reuse Distance arrays
      {parallel_for(long i=0;i<CCsize;i++) {
	uintE v = CCpairs[o+i].second;
	//if not one of the vertices we did BFS on
	if(ecc[v] == UINT_E_MAX) {
	  uintE max_from_sample = 0;
	  //compute max distance from sampled vertex
	  for(long j=0;j<numUnique;j++) {
	    uintE d = Dists[j][v];
	    if(d > max_from_sample) max_from_sample = d;
	  }
	  maxDists[i] = max_from_sample;
	}}}
      t4.stop();
      t5.start();
      //find furthest vertex from sample set S
      {parallel_for(long j=0;j<CCsize;j++) {
	uintE v = CCpairs[o+j].second;
	uintE m = UINT_E_MAX;
	for(long i=0;i<numUnique;i++) {
	  uintE d = Dists[i][v];
	  if(d < m) m = d;
	  if(d == 0) break;
	}
	minDists[j] = make_pair(m,v);
	}}
      
      intPair furthest = 
	sequence::reduce<intPair>(minDists,(intE)CCsize,maxFirstF());
      uintE w = furthest.second;
      t5.stop();
      t3.start();
      //reset Dist array entries
      {parallel_for(long i=0;i<numUnique;i++) {
	  parallel_for(long j=0;j<CCsize;j++) {
	    uintE v = CCpairs[o+j].second;
	    Dists[i][v] = UINT_E_MAX;
	  }
	}}
      t3.stop();
      t6.start();
      //execute BFS from w and find \sqrt{n log n} neighborhood of w
      uintE nghSize = min(CCsize,max((uintE)10,s));
      uintE* Ngh_s = starts; //reuse starts array
      bool filled_Ngh = 0;
      //stores distance from w and index of closest vertex in Ngh_s on
      //path from w to v
      wDist[w] = make_pair(0,0); //set source dist to 0
      vertexSubset Frontier(n,w);
      uintE round = 0;
      uintE numVisited = 0;
      while(!Frontier.isEmpty()){
	round++;
	if(!filled_Ngh) { 
	  Frontier.toSparse();
	  //Note: if frontier size < nghSize - visited, there is non-determinism in which vertices 
	  //get added to Ngh_s as the ordering of vertices on the frontier is non-deterministic
	  {parallel_for(long i=0;i<min(nghSize-numVisited,(uintE)Frontier.numNonzeros());i++) {
	    Ngh_s[numVisited+i] = Frontier.s[i];
	    wDist[Frontier.s[i]].second = numVisited+i;
	  }
	  numVisited += Frontier.numNonzeros();
	  if(numVisited >= nghSize) filled_Ngh = 1;
	  }}
	vertexSubset output = 
	  edgeMap(GA, Frontier, BFS_Pair_F(wDist,round),GA.m/20);
	Frontier.del();
	Frontier = output;
      }
      Frontier.del();
      ecc[w] = round-1; //set radius for w
      t6.stop();
      t7.start();
      //execute BFS from each vertex in neighborhood of w
      uintE** Dists2 = Dists; //reuse distance array
      uintE* Dist2 = Dist;
    
      {for(long i=0;i<nghSize;i++) {
	uintE v = Ngh_s[i];
	Dists2[i][v] = 0; //set source dist to 0
	vertexSubset Frontier(n,v);
	uintE round = 0;
	while(!Frontier.isEmpty()){
	  round++;
	  vertexSubset output = 
	    edgeMap(GA, Frontier, BFS_F(Dists2[i],round),GA.m/20);
	  Frontier.del();
	  Frontier = output;
	}
	Frontier.del();
	ecc[v] = round-1; //set radius of vertex in Ngh_s
	}}
      t7.stop();
      t8.start();
      //min radius of sample
      parallel_for(long i=0;i<numUnique;i++) starts2[i] = ecc[starts2[i]];
      uintE min_r_sample = 
	sequence::reduce<uintE>(starts2,numUnique,minF<uintE>());
      //compute ecc values
      {parallel_for(long i=0;i<CCsize;i++) {
	uintE v = CCpairs[o+i].second;
	//if not one of the vertices we did BFS on
	if(ecc[v] == UINT_E_MAX) {
	  uintE d_vw = wDist[v].first;
	  uintE rv = max(maxDists[i],d_vw);
	  //index in Ngh_s of closest vertex in Ngh_s on path from w to v
	  uintE index_vt = wDist[v].second;
	  uintE vt = Ngh_s[index_vt];
	  uintE d_vt_v = Dists2[index_vt][v];
	  uintE d_vt_w = Dists2[index_vt][w];
	  if(d_vt_v <= d_vt_w) ecc[v] = max(rv,ecc[vt]);
	  else ecc[v] = max(rv,min_r_sample);
	}
	}}
      t8.stop();
      t7.start();
      //reset Dist array entries
      {parallel_for(long i=0;i<nghSize;i++) {
	  parallel_for(long j=0;j<CCsize;j++) {
	    uintE v = CCpairs[o+j].second;
	    Dists2[i][v] = UINT_E_MAX;
	  }
	}}
      t7.stop();
      t6.start();
      //reset wDist array entries
      {parallel_for(long i=0;i<CCsize;i++) {
	  uintE v = CCpairs[o+i].second;
	  wDist[v] = make_pair(UINT_E_MAX,UINT_E_MAX);
	}}
      t6.stop();
    }
Example No. 16
void Compute(graph<vertex>& GA, commandLine P) {
  t1.start();
  long start = P.getOptionLongValue("-r",0);
  if(GA.V[start].getOutDegree() == 0) { 
    cout << "starting vertex has degree 0" << endl;
    return;
  }
  const int procs = P.getOptionIntValue("-p",0);
  if(procs > 0) setWorkers(procs);
  const double t = P.getOptionDoubleValue("-t",3);
  const double epsilon = P.getOptionDoubleValue("-e",0.000000001);
  const uintE N = P.getOptionIntValue("-N",1);
  const intE n = GA.n;
  const double constant = exp(t)*epsilon/(2*(double)N);
  double* psis = newA(double,N);
  double* fact = newA(double,N);
  fact[0] = 1;
  for(long k=1;k<N;k++) fact[k] = k*fact[k-1];
  double* tm = newA(double,N);
  {parallel_for(long m=0;m<N;m++) tm[m]  = pow(t,m);}
  {parallel_for(long k=0;k<N;k++) {
    psis[k] = 0;
    for(long m=0;m<N-k;m++)
      psis[k] += fact[k]*tm[m]/(double)fact[m+k];
    }}

  sparseAdditiveSet<float> x = sparseAdditiveSet<float>(10000,1,0.0);
  sparseAdditiveSet<float> r = sparseAdditiveSet<float>(2,1,0.0);
  x.insert(make_pair(start,0.0));
  r.insert(make_pair(start,1.0));
  vertexSubset Frontier(n,start);

  long j = 0, totalPushes = 0;
  while(Frontier.numNonzeros() > 0){
    totalPushes += Frontier.numNonzeros();
    uintT* Degrees = newA(uintT,Frontier.numNonzeros());
    {parallel_for(long i=0;i<Frontier.numNonzeros();i++) Degrees[i] = GA.V[Frontier.s[i]].getOutDegree();}
    long totalDegree = sequence::plusReduce(Degrees,Frontier.numNonzeros());
    free(Degrees);
    if(j+1 < N) {
      long rCount = r.count();
      //make bigger hash table initialized to 0.0's
      sparseAdditiveSet<float> new_r = sparseAdditiveSet<float>(max(100L,min((long)n,totalDegree+rCount)),LOAD_FACTOR,0.0); 
      vertexMap(Frontier,Local_Update(x,r));
      vertexSubset output = edgeMap(GA, Frontier, HK_F<vertex>(x,r,new_r,GA.V,t/(double)(j+1)));
      r.del(); 
      r = new_r;
      if(x.m < ((uintT) 1 << log2RoundUp((uintT)(LOAD_FACTOR*min((long)n,rCount+output.numNonzeros()))))) {
	sparseAdditiveSet<float> new_x = sparseAdditiveSet<float>(LOAD_FACTOR*min((long)n,rCount+output.numNonzeros()),LOAD_FACTOR,0.0); //make bigger hash table
	new_x.copy(x);
	x.del();
	x = new_x;
      }
      output.del();

      //compute active set (faster in practice to just scan over r)
      _seq<ACLpair> vals = r.entries(activeF<vertex>(GA.V,constant/psis[j+1]));
      uintE* Active = newA(uintE,vals.n);
      parallel_for(long i=0;i<vals.n;i++) Active[i] = vals.A[i].first;
      Frontier.del(); vals.del();
      Frontier = vertexSubset(n,vals.n,Active);
      j++;
    } else { //last iteration
Example No. 17
void Compute(graph<vertex>& GA, commandLine P) {
  t5.start();
  long length = P.getOptionLongValue("-r",0); //number of words per vertex
  char* oFile = P.getOptionValue("-out"); //file to write eccentricites
  srand (time(NULL));
  uintT seed = rand();
  cout << "seed = " << seed << endl;
  t0.start();
  long n = GA.n;
  
  uintE* ecc = newA(uintE,n);
  uintE* ecc2 = newA(uintE,n);
  {parallel_for(long i=0;i<n;i++) {
      ecc[i] = ecc2[i] = 0;
    }}

  t0.stop();

  //BEGIN COMPUTE CONNECTED COMPONENTS
  t1.start();
  intE* Labels = newA(intE,n);
  {parallel_for(long i=0;i<n;i++) {
    if(GA.V[i].getOutDegree() == 0) Labels[i] = -i-1; //singletons
    else Labels[i] = INT_E_MAX;
    }}

  //get max degree vertex
  uintE maxV = sequence::reduce<uintE>((intE)0,(intE)n,maxF<intE>(),getDegree<vertex>(GA.V));

  //visit large component with BFS
  CCBFS(maxV,GA,Labels);
  //visit small components with label propagation
  Components(GA, Labels);

  //sort by component ID
  intPair* CCpairs = newA(intPair,n);
  {parallel_for(long i=0;i<n;i++)
    if(Labels[i] < 0)
      CCpairs[i] = make_pair(-Labels[i]-1,i);
    else CCpairs[i] = make_pair(Labels[i],i);
  }
  free(Labels);

  intSort::iSort(CCpairs, n, n+1,firstF<uintE,uintE>());

  uintE* changes = newA(uintE,n);
  changes[0] = 0;
  {parallel_for(long i=1;i<n;i++) 
      changes[i] = (CCpairs[i].first != CCpairs[i-1].first) ? i : UINT_E_MAX;}

  uintE* CCoffsets = newA(uintE,n);
  uintE numCC = sequence::filter(changes, CCoffsets, n, nonMaxF());
  CCoffsets[numCC] = n;
  free(changes);
  t1.stop();
  //END COMPUTE CONNECTED COMPONENTS

  //init data structures
  t0.start();
  length = max((long)1,min((n+63)/64,(long)length));
  long* VisitedArray = newA(long,n*length);
  long* NextVisitedArray = newA(long,n*length); 
  int* flags = newA(int,n);
  {parallel_for(long i=0;i<n;i++) flags[i] = -1;}
  uintE* starts = newA(uintE,n);
  intPair* pairs = newA(intPair,n);
  t0.stop();

  //BEGIN COMPUTE ECCENTRICITES PER COMPONENT
  for(long k = 0; k < numCC; k++) {
    t2.start();
    uintE o = CCoffsets[k];
    uintE CCsize = CCoffsets[k+1] - o;
    if(CCsize == 2) { //size 2 CC's have ecc of 1
      ecc[CCpairs[o].second] = ecc[CCpairs[o+1].second] = 1;
      t2.stop();
    } else if(CCsize > 1) { //size 1 CC's already have ecc of 0
      //do main computation
      long myLength = min((long)length,((long)CCsize+63)/64);

      //initialize bit vectors for component vertices
      {parallel_for(long i=0;i<CCsize;i++) {
	uintT v = CCpairs[o+i].second;
	parallel_for(long j=0;j<myLength;j++)
	  VisitedArray[v*myLength+j] = NextVisitedArray[v*myLength+j] = 0;
	}}

      long sampleSize = min((long)CCsize,(long)64*myLength);

      uintE* starts2 = newA(uintE,sampleSize);

      //pick random vertices (could have duplicates)
      {parallel_for(ulong i=0;i<sampleSize;i++) {
	uintT index = hashInt(i+seed) % CCsize;
	if(flags[index] == -1 && CAS(&flags[index],-1,(int)i)) {
	  starts[i] = CCpairs[o+index].second;
	  NextVisitedArray[CCpairs[o+index].second*myLength + i/64] = (long) 1<<(i%64);
	} else starts[i] = UINT_E_MAX;
	}}

      //remove duplicates
      uintE numUnique = sequence::filter(starts,starts2,sampleSize,nonMaxF());

      //reset flags
      parallel_for(ulong i=0;i<sampleSize;i++) {
	uintT index = hashInt(i+seed) % CCsize;
	if(flags[index] == i) flags[index] = -1;
      }

      //first phase
      vertexSubset Frontier(n,numUnique,starts2); //initial frontier
      //note: starts2 will be freed inside the following loop
      uintE round = 0;
      while(!Frontier.isEmpty()){
	round++;
	vertexMap(Frontier, Ecc_Vertex_F(myLength,VisitedArray,NextVisitedArray));
	vertexSubset output = 
	  edgeMap(GA, Frontier, 
		  Ecc_F(myLength,VisitedArray,NextVisitedArray,ecc,round),
		  GA.m/20);
	Frontier.del();
	Frontier = output;
      }
      Frontier.del();
      t2.stop();
      //second phase if size of CC > 1024
      if(CCsize > 1024) {
	//sort by ecc
	t3.start();
	{parallel_for(long i=0;i<CCsize;i++) {
	  pairs[i] = make_pair(ecc[CCpairs[o+i].second],CCpairs[o+i].second);
	  }}
	intPair maxR = sequence::reduce(pairs,CCsize,maxFirstF());
	intSort::iSort(pairs, CCsize, 1+maxR.first, firstF<uintE,uintE>());
	t3.stop();

	t4.start();

	//reset bit vectors for component vertices
	{parallel_for(long i=0;i<CCsize;i++) {
	  uintT v = CCpairs[o+i].second;
	  parallel_for(long j=0;j<myLength;j++)
	    VisitedArray[v*myLength+j] = NextVisitedArray[v*myLength+j] = 0;
	  }}

	starts2 = newA(uintE,sampleSize);
	//pick starting points with highest ecc ("fringe" vertices)
	{parallel_for(long i=0;i<sampleSize;i++) {
	  intE v = pairs[CCsize-i-1].second;
	  starts2[i] = v;
	  NextVisitedArray[v*myLength + i/64] = (long) 1<<(i%64);
	  }}

	vertexSubset Frontier2(n,sampleSize,starts2); //initial frontier
	//note: starts2 will be freed inside the following loop
	round = 0;
	while(!Frontier2.isEmpty()){
	  round++;
	  vertexMap(Frontier2, Ecc_Vertex_F(myLength,VisitedArray,NextVisitedArray));
	  vertexSubset output = 
	    edgeMap(GA, Frontier2,Ecc_F(myLength,VisitedArray,NextVisitedArray,ecc2,round), GA.m/20);
	  Frontier2.del();
	  Frontier2 = output;
	}
	Frontier2.del();
	{parallel_for(long i=0;i<n;i++) ecc[i] = max(ecc[i],ecc2[i]);}
	t4.stop();
      }
    }