int diagonalize_bisection(localized_matrix<double, MATRIX_MAJOR>& mata, localized_matrix<double, MATRIX_MAJOR>& matb,
			  double* eigvals,
			  rokko::parameters const& params, timer& timer) {
  rokko::parameters params_out;
  char jobz = 'N';  // only eigenvalues
  int dim = mata.innerSize();
  int lda = mata.outerSize();
  int ldb = matb.outerSize();
  lapack_int m;  // output: number of eigenvalues found
  double abstol;
  if (params.defined("abstol")) {
    get_key(params, "abstol", abstol);
    if (abstol < 0) {
      std::cerr << "Error in diagonalize_bisection:" << std::endl
                << "abstol is negative, which would select the QR method instead." << std::endl
                << "To use dsygvx as a bisection solver, set abstol to a positive value." << std::endl;
      throw std::invalid_argument("diagonalize_bisection: abstol must be non-negative");
    }
  } else {  // default: optimal value for the bisection method
    abstol = 2 * LAPACKE_dlamch('S');
  }
  params_out.set("abstol", abstol);
  char uplow = get_matrix_part(params);

  lapack_int il, iu;
  double vl, vu;
  char range = get_eigenvalues_range(params, vl, vu, il, iu);

  std::vector<lapack_int> ifail(dim);
  timer.start(timer_id::diagonalize_diagonalize);
  int info;
  if(mata.is_col_major())
    info = LAPACKE_dsygvx(LAPACK_COL_MAJOR, 1, jobz, range, uplow, dim,
			  &mata(0,0), lda, &matb(0,0), ldb, vl, vu, il, iu,
			  abstol, &m, eigvals, NULL, lda, &ifail[0]);
  else
    info = LAPACKE_dsygvx(LAPACK_ROW_MAJOR, 1, jobz, range, uplow, dim,
			  &mata(0,0), lda, &matb(0,0), ldb, vl, vu, il, iu,
			  abstol, &m, eigvals, NULL, lda, &ifail[0]);
  timer.stop(timer_id::diagonalize_diagonalize);
  timer.start(timer_id::diagonalize_finalize);
  if (info) {
    std::cerr << "error in LAPACKE_dsygvx. info=" << info << std::endl;
    if (info < 0) {
      std::cerr << "This means that the " << -info
                << "-th argument had an illegal value." << std::endl;
    }
    exit(1);
  }
  params_out.set("m", m);
  params_out.set("ifail", ifail);
  
  if (params.get_bool("verbose")) {
    print_verbose("dsygvx (bisection)", jobz, range, uplow, vl, vu, il, iu, params_out);
  }
  timer.stop(timer_id::diagonalize_finalize);
  return info;
}
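// --- Usage sketch (not from the source): one plausible way to drive the
// routine above. The localized_matrix/parameters types and the timer
// interface are assumed from the snippet; the key values are illustrative.
template<typename MATRIX_MAJOR>
int example_bisection_call(localized_matrix<double, MATRIX_MAJOR>& a,
                           localized_matrix<double, MATRIX_MAJOR>& b,
                           timer& t) {
  std::vector<double> eigvals(a.innerSize());
  rokko::parameters params;
  params.set("abstol", 1e-12);   // a positive value selects dsygvx bisection
  params.set("verbose", true);
  return diagonalize_bisection(a, b, eigvals.data(), params, t);
}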
Example #2
0
void push(Queue& queue, size_t n, timer& t)
{
    t.restart();
    for(size_t i=0; i<n; i++)
        queue.push();
    t.stop();
}
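// --- Companion sketch (not from the source): the matching pop benchmark,
// mirroring push() above. Queue::pop() taking no arguments is an assumption.
void pop(Queue& queue, size_t n, timer& t)
{
    t.restart();
    for(size_t i=0; i<n; i++)
        queue.pop();
    t.stop();
}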
Example #3
0
int main()
{

	vector<recorder<timer> > stats(2);

	for(int i = 0; i < 2; i++)
		stats[i].reset();

	int size = 1000000;
	Resistive gh(6);
	//for(int i =0; i < size; i++)
	//	gh.addEdge(i, i+1, i+1);

	//for(int i = 0; i < size; i++)
	//	gh.addEdge(0, 1, i+1);

//	for(int i = 0; i < size; i++) {
//		for(int jazz = 0; jazz < 1000; jazz++)
//			gh.addEdge(i, i+1, 100);
//	}

	gh.addEdge(0, 1, 2);
	gh.addEdge(0, 1, 8);
	gh.addEdge(0, 5, 10);
	gh.addEdge(0, 2, 20);
	gh.addEdge(0, 2, 30);
	gh.addEdge(0, 2, 60);
	gh.addEdge(1, 3, 9);
	gh.addEdge(1, 3, 6);
	gh.addEdge(2, 4, 90);
	gh.addEdge(3, 5, 3);
	gh.addEdge(4, 5, 3);
	gh.addEdge(4, 5, 100);
	gh.addEdge(4, 5, 15);

	// print the adjacency list representation of the graph
	gh.printGraph();

	timer1.restart();
	double ans = gh.traverse(0, 5);
	timer1.stop();
	stats[0].record(timer1);
	cout << "-----------------" << endl;

	gh.printGraph();

	cout << endl << "TIME: ";
	stats[0].report(cout);

	cout << endl << ans << endl;
	// warm-up/busy loop; the computed sum is never used
	double test = 0;
	for(int n = 1; n <= size; n++)
		test += n;
	(void) test;   // suppress unused-variable warnings
 
    return 0;
}
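// --- Sketch (not from the source): if Resistive models a resistor network,
// the two parallel 0->1 edges above (2 and 8 ohms) would combine as shown
// below. This helper is illustrative only; the real traverse() is not shown.
double parallelResistance(double r1, double r2)
{
	return (r1 * r2) / (r1 + r2);   // from 1/R = 1/r1 + 1/r2
}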
Example #4
0
void Compute(graph<vertex>& GA, commandLine P) {
  t1.start();
  long start = P.getOptionLongValue("-r",0);
  if(GA.V[start].getOutDegree() == 0) { 
    cout << "starting vertex has degree 0" << endl;
    return;
  }
  const uintE K = P.getOptionIntValue("-K",10);
  const uintE N = P.getOptionIntValue("-N",10);
  const double t = P.getOptionDoubleValue("-t",3);
  srand (time(NULL));
  uintE seed = rand();
  const intE n = GA.n;

  //walk length probabilities: probs[k] = e^{-t} t^k / k! is the Poisson(t)
  //pmf, truncated at K steps
  double* fact = newA(double,K);
  fact[0] = 1;
  for(long k=1;k<K;k++) fact[k] = k*fact[k-1];
  double* probs = newA(double,K);
  for(long k=0;k<K;k++) probs[k] = exp(-t)*pow(t,k)/fact[k];

  unordered_map<uintE,double> p;
  for(long i=0;i<N;i++) {
    double randDouble = (double) hashInt(seed++) / UINT_E_MAX;
    long j = 0;
    double mass = 0;
    uintE x = start;
    do {
      mass += probs[j];
      if(randDouble < mass) break;
      x = walk(x,GA.V,seed++);
      j++;
    } while(j < K);  //probs has K entries (0..K-1), so stop before j == K
    p[x]++;
  }
  for(auto it=p.begin();it!=p.end();it++) {
    p[it->first] /= N;
  }

  free(probs); free(fact);
  t1.stop();
  pairIF* A = newA(pairIF,p.size());

  long numNonzerosQ = 0;
  for(auto it = p.begin(); it != p.end(); it++) {
    A[numNonzerosQ++] = make_pair(it->first,it->second);
  }
  sweepObject sweep = sweepCut(GA,A,numNonzerosQ,start);
  free(A);
  cout << "number of vertices touched = " << p.size() << endl;
  cout << "number of edges touched = " << sweep.vol << endl;
  cout << "conductance = " << sweep.conductance << " |S| = " << sweep.sizeS << " vol(S) = " << sweep.volS << " edgesCrossing = " << sweep.edgesCrossing << endl; 
  t1.reportTotal("computation time");
}
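// --- Sketch (not from the source): a plausible implementation of the walk()
// step used above -- move to a uniformly random out-neighbor. The Ligra-style
// vertex interface (getOutDegree/getOutNeighbor) and hashInt are assumed from
// the surrounding code.
template <class vertex>
uintE walk(uintE x, vertex* V, uintE seed) {
  uintE d = V[x].getOutDegree();
  if(d == 0) return x;  //dead end: stay put
  return V[x].getOutNeighbor(hashInt(seed) % d);
}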
Example #5
0
void Compute(graph<vertex>& GA, commandLine P) {
  t10.start();
  char* oFile = P.getOptionValue("-out"); //file to write eccentricities
  srand (time(NULL));
  uintT seed = rand();
  cout << "seed = " << seed << endl;
  t0.start();
  long n = GA.n;
  uintE* ecc = newA(uintE,n);
  {parallel_for(long i=0;i<n;i++) ecc[i] = UINT_E_MAX;}
  t0.stop();

  //BEGIN COMPUTE CONNECTED COMPONENTS
  t1.start();
  intE* Labels = newA(intE,n);
  {parallel_for(long i=0;i<n;i++) {
    if(GA.V[i].getOutDegree() == 0) Labels[i] = -i-1; //singletons
    else Labels[i] = INT_E_MAX;
    }}
  //get max degree vertex
  uintE maxV = sequence::reduce<uintE>((intE)0,(intE)n,maxF<intE>(),getDegree<vertex>(GA.V));
  //visit large component with BFS
  CCBFS(maxV,GA,Labels);
  //visit small components with label propagation
  Components(GA, Labels);
  //sort by component ID
  intPair* CCpairs = newA(intPair,n);
  {parallel_for(long i=0;i<n;i++)
    if(Labels[i] < 0)
      CCpairs[i] = make_pair(-Labels[i]-1,i);
    else CCpairs[i] = make_pair(Labels[i],i);
  }
  free(Labels);

  intSort::iSort(CCpairs, n, n+1, firstF<uintE,uintE>());

  uintE* changes = newA(uintE,n);
  changes[0] = 0;
  {parallel_for(long i=1;i<n;i++) 
      changes[i] = (CCpairs[i].first != CCpairs[i-1].first) ? i : UINT_E_MAX;}

  uintE* CCoffsets = newA(uintE,n);
  uintE numCC = sequence::filter(changes, CCoffsets, n, nonMaxF());
  CCoffsets[numCC] = n;
  free(changes);
  t1.stop();
  //END COMPUTE CONNECTED COMPONENTS

  uintE maxS = min((uintE)n,(uintE)sqrt(n*log2(n)));
  uintE maxSampleSize = max((uintE)10,max((uintE)((n/maxS)*log2(n)),maxS));
  //data structures to be shared by all components
  uintE** Dists = newA(uintE*,maxSampleSize);
  uintE* Dist = newA(uintE,maxSampleSize*n);
  {parallel_for(long i=0;i<maxSampleSize;i++) Dists[i] = Dist+i*n;}
  {parallel_for(long i=0;i<n*maxSampleSize;i++) Dist[i] = UINT_E_MAX;}
  intPair* wDist = newA(intPair,n); 
  {parallel_for(long i=0;i<n;i++)
      wDist[i] = make_pair(UINT_E_MAX,UINT_E_MAX);}
  intPair* minDists = newA(intPair,n);
  uintE* starts = newA(uintE,n);
  uintE* starts2 = newA(uintE,n);
  uintE* maxDists = newA(uintE,n);

  //BEGIN COMPUTE ECCENTRICITIES PER COMPONENT
  t4.start();
  for(long k = 0; k < numCC; k++) {
    uintE o = CCoffsets[k];
    uintE CCsize = CCoffsets[k+1] - o;
    if(CCsize == 1) ecc[CCpairs[o].second] = 0; //singletons have ecc of 0
    if(CCsize == 2) { //size 2 CC's have ecc of 1
      ecc[CCpairs[o].second] = ecc[CCpairs[o+1].second] = 1;
    } else if(CCsize > 1) {
      //do main computation
      t2.start();
      uintE s = min(CCsize,(uintE)sqrt(CCsize*log2(CCsize)));
      //pick sample of about \sqrt{n\log n} vertices
      long sampleSize = min(CCsize,max((uintE)10,(uintE)((CCsize/s)*log2(CCsize))));
      //pick random vertices
      {parallel_for(ulong i=0;i<CCsize;i++) {
	  //pick with probability sampleSize/CCsize
	  uintT index = hash(i+seed) % CCsize; 
	if(index < sampleSize) starts[i] = CCpairs[o+i].second;
	else starts[i] = UINT_E_MAX;
       	}}
      //pack down
      uintE numUnique = sequence::filter(starts,starts2,CCsize,nonMaxF());
      //sample cannot be empty!
      if(numUnique == 0) { starts2[0] = CCpairs[o+(hash(seed)%CCsize)].second; numUnique++; }
      if(numUnique > maxSampleSize) numUnique = maxSampleSize; //cap at maxSampleSize
      t2.stop();
      t3.start();
      //execute BFS per sample
      {for(long i=0;i<numUnique;i++) {
	uintE v = starts2[i];
	Dists[i][v] = 0; //set source dist to 0
	vertexSubset Frontier(n,v);
	uintE round = 0;
	while(!Frontier.isEmpty()){
	  round++;
	  vertexSubset output = 
	    edgeMap(GA, Frontier, BFS_F(Dists[i],round),GA.m/20);
	  Frontier.del();
	  Frontier = output;
	}
	Frontier.del();
	ecc[v] = round-1; //set radius for sample vertex
	}}
      t3.stop();
      t4.start();
      //store max distance from sample for each vertex so that we can
      //reuse Distance arrays
      {parallel_for(long i=0;i<CCsize;i++) {
	uintE v = CCpairs[o+i].second;
	//if not one of the vertices we did BFS on
	if(ecc[v] == UINT_E_MAX) {
	  uintE max_from_sample = 0;
	  //compute max distance from sampled vertex
	  for(long j=0;j<numUnique;j++) {
	    uintE d = Dists[j][v];
	    if(d > max_from_sample) max_from_sample = d;
	  }
	  maxDists[i] = max_from_sample;
	}}}
      t4.stop();
      t5.start();
      //find furthest vertex from sample set S
      {parallel_for(long j=0;j<CCsize;j++) {
	uintE v = CCpairs[o+j].second;
	uintE m = UINT_E_MAX;
	for(long i=0;i<numUnique;i++) {
	  uintE d = Dists[i][v];
	  if(d < m) m = d;
	  if(d == 0) break;
	}
	minDists[j] = make_pair(m,v);
	}}
      
      intPair furthest = 
	sequence::reduce<intPair>(minDists,(intE)CCsize,maxFirstF());
      uintE w = furthest.second;
      t5.stop();
      t3.start();
      //reset Dist array entries
      {parallel_for(long i=0;i<numUnique;i++) {
	  parallel_for(long j=0;j<CCsize;j++) {
	    uintE v = CCpairs[o+j].second;
	    Dists[i][v] = UINT_E_MAX;
	  }
	}}
      t3.stop();
      t6.start();
      //execute BFS from w and find \sqrt{n log n} neighborhood of w
      uintE nghSize = min(CCsize,max((uintE)10,s));
      uintE* Ngh_s = starts; //reuse starts array
      bool filled_Ngh = 0;
      //stores distance from w and index of closest vertex in Ngh_s on
      //path from w to v
      wDist[w] = make_pair(0,0); //set source dist to 0
      vertexSubset Frontier(n,w);
      uintE round = 0;
      uintE numVisited = 0;
      while(!Frontier.isEmpty()){
	round++;
	if(!filled_Ngh) { 
	  Frontier.toSparse();
	  //Note: if frontier size < nghSize - visited, there is non-determinism in which vertices 
	  //get added to Ngh_s as the ordering of vertices on the frontier is non-deterministic
	  {parallel_for(long i=0;i<min(nghSize-numVisited,(uintE)Frontier.numNonzeros());i++) {
	    Ngh_s[numVisited+i] = Frontier.s[i];
	    wDist[Frontier.s[i]].second = numVisited+i;
	  }
	  numVisited += Frontier.numNonzeros();
	  if(numVisited >= nghSize) filled_Ngh = 1;
	  }}
	vertexSubset output = 
	  edgeMap(GA, Frontier, BFS_Pair_F(wDist,round),GA.m/20);
	Frontier.del();
	Frontier = output;
      }
      Frontier.del();
      ecc[w] = round-1; //set radius for w
      t6.stop();
      t7.start();
      //execute BFS from each vertex in neighborhood of w
      uintE** Dists2 = Dists; //reuse distance array
      uintE* Dist2 = Dist;
    
      {for(long i=0;i<nghSize;i++) {
	uintE v = Ngh_s[i];
	Dists2[i][v] = 0; //set source dist to 0
	vertexSubset Frontier(n,v);
	uintE round = 0;
	while(!Frontier.isEmpty()){
	  round++;
	  vertexSubset output = 
	    edgeMap(GA, Frontier, BFS_F(Dists2[i],round),GA.m/20);
	  Frontier.del();
	  Frontier = output;
	}
	Frontier.del();
	ecc[v] = round-1; //set radius of vertex in Ngh_s
	}}
      t7.stop();
      t8.start();
      //min radius of sample
      parallel_for(long i=0;i<numUnique;i++) starts2[i] = ecc[starts2[i]];
      uintE min_r_sample = 
	sequence::reduce<uintE>(starts2,numUnique,minF<uintE>());
      //compute ecc values
      {parallel_for(long i=0;i<CCsize;i++) {
	uintE v = CCpairs[o+i].second;
	//if not one of the vertices we did BFS on
	if(ecc[v] == UINT_E_MAX) {
	  uintE d_vw = wDist[v].first;
	  uintE rv = max(maxDists[i],d_vw);
	  //index in Ngh_s of closest vertex in Ngh_s on path from w to v
	  uintE index_vt = wDist[v].second;
	  uintE vt = Ngh_s[index_vt];
	  uintE d_vt_v = Dists2[index_vt][v];
	  uintE d_vt_w = Dists2[index_vt][w];
	  if(d_vt_v <= d_vt_w) ecc[v] = max(rv,ecc[vt]);
	  else ecc[v] = max(rv,min_r_sample);
	}
	}}
      t8.stop();
      t7.start();
      //reset Dist array entries
      {parallel_for(long i=0;i<nghSize;i++) {
	  parallel_for(long j=0;j<CCsize;j++) {
	    uintE v = CCpairs[o+j].second;
	    Dists2[i][v] = UINT_E_MAX;
	  }
	}}
      t7.stop();
      t6.start();
      //reset wDist array entries
      {parallel_for(long i=0;i<CCsize;i++) {
	  uintE v = CCpairs[o+i].second;
	  wDist[v] = make_pair(UINT_E_MAX,UINT_E_MAX);
	}}
      t6.stop();
    }
  }
  //END COMPUTE ECCENTRICITIES PER COMPONENT
  //(the excerpt ends here; writing out ecc and freeing the shared arrays would follow)
}
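// --- Sketch (not from the source): the shape of the BFS_F functor passed to
// edgeMap above, following the standard Ligra update/updateAtomic/cond
// pattern; the field names are assumptions.
struct BFS_F {
  uintE* Dist; uintE round;
  BFS_F(uintE* _Dist, uintE _round) : Dist(_Dist), round(_round) {}
  inline bool update(uintE s, uintE d) {  //sequential visit
    if(Dist[d] == UINT_E_MAX) { Dist[d] = round; return 1; }
    else return 0;
  }
  inline bool updateAtomic(uintE s, uintE d) {  //atomic visit
    return CAS(&Dist[d],(uintE)UINT_E_MAX,round);
  }
  inline bool cond(uintE d) { return Dist[d] == UINT_E_MAX; }  //only unvisited
};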
Example #6
0
int main()
{
    const unsigned int START_SIZE = 32768;
    unsigned int largest_size = START_SIZE * pow(2,10);
    unsigned int elems_tested = START_SIZE * 16;
    //unsigned int current_size = START_SIZE;
    //unsigned int old_size = 0;
//    int max_size = START_SIZE^(INCREMENT_FACTOR*number_of_trials);
// for our outputting of the results
    ofstream ofs("results.txt");

// this is going to hold the measurements
    vector<recorder<timer> > stats(number_of_algorithms);

// The "U" is the type for the queues x and y (poorly named, i know). Using the largest sequence multiplied by factor to allocate memory
    //EMAQueue<U> x(current_size);

    cout << "________";
    for (int i = 0; i < number_of_algorithms; ++i)
      cout << headings[i];
    cout << endl;

    cout << "  Range ";
    for (int i = 0; i < number_of_algorithms; ++i)
      cout << "|      Time      ";
    cout << endl;

    //initialize vector of ints
    vector<int> testVector;
    //initialize vector of keys
    vector<int> keyVector;
    //initialize random stuff
    std::random_device rd; // obtain a random number from hardware
    std::mt19937 eng(rd()); // seed the generator
    std::uniform_int_distribution<> distr(START_SIZE, largest_size); // define the range
    for (unsigned int i = 0; i < elems_tested; ++i) {
        testVector.push_back(distr(eng));
        keyVector.push_back(testVector[i]%101);
    }



    for (int count = 0; count < number_of_trials; count ++)
    {
        //displays the number of elements that will be added to the data structures
        cout << setw(8) << 1 + largest_size - START_SIZE << flush;
        ofs << setw(8) << 1 + largest_size - START_SIZE;
        //resets stats
        for (int i = 0; i < number_of_algorithms; ++i)
            stats[i].reset();

        //start of testing
        for (int j = 0; j < number_of_trials; ++j)
        {
            //initialize data structures each trial
            Hashtable<unsigned int> emHash;
            unordered_map<unsigned int, unsigned int> stlMap;
            HashMap sepChain;
            hashdict<unsigned int, unsigned int> bookHash(elems_tested, -1);
            //runs the test for each algorithm
            for (int i = 0; i < number_of_algorithms; ++i)
            {
                //resets timer
                timer1.restart();
                //performs the operation "elems_tested" times
                for (unsigned int k = 0; k < elems_tested; ++k)
                {
                    //data structure operations to be tested
                    switch (i)
                    {
                        //insert values into Emily's Hash
                        case 0: emHash.insert(testVector[k]);
                            break;
                        case 1: stlMap.insert(make_pair(keyVector[k], testVector[k]));
                            break;
                        //case 2: sepChain.insert(testVector[k]);
                        //    break;
                        //case 3: bookHash.insert(keyVector[k], testVector[k]);
                        //    break;
                        case 2: emHash.remove(testVector[k]);
                            break;
                        case 3: stlMap.erase(testVector[k]);
                            break;
                        //case 6: sepChain.remove(testVector[k]);
                        //    break;
                        //case 7: bookHash.removeAny();
                        //    break;
                    }
                }
                //stops timer
                timer1.stop();
                //records stats
                stats[i].record(timer1);
            }
            //cout << "insert: " << START_SIZE << "to: " << largest_size << endl;
        } // end of trials loop

        for (int i = 0; i < number_of_algorithms; ++i)
        {
            //outputs results console
            stats[i].report(cout);
            //outputs results to file
            stats[i].report(ofs);
        }

        cout << endl;
        ofs << endl;

        //delete vector
        testVector.clear();
        keyVector.clear();
        largest_size = largest_size/2;
        //repopulate with smaller distribution
        std::uniform_int_distribution<> distr(START_SIZE, largest_size); // define the range
        for (unsigned int m = 0; m < elems_tested; ++m) {
            testVector.push_back(distr(eng));
            keyVector.push_back(testVector[m]%101);
        }
    }
    return 0;
}
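// --- Sketch (not from the source): plausible definitions of the globals the
// benchmark above relies on. The names match their usage in main(); the
// counts, labels, and types are assumptions.
const int number_of_algorithms = 4;
const int number_of_trials = 10;
const char* headings[number_of_algorithms] = {
    "|   emHash ins  ", "|   stlMap ins  ",
    "|   emHash rem  ", "|   stlMap era  "
};
timer timer1;   // shared timer restarted/stopped around each inner loop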
Example #7
0
void Compute(graph<vertex>& GA, commandLine P) {
  t5.start();
  long length = P.getOptionLongValue("-r",0); //number of words per vertex
  char* oFile = P.getOptionValue("-out"); //file to write eccentricities
  srand (time(NULL));
  uintT seed = rand();
  cout << "seed = " << seed << endl;
  t0.start();
  long n = GA.n;
  
  uintE* ecc = newA(uintE,n);
  uintE* ecc2 = newA(uintE,n);
  {parallel_for(long i=0;i<n;i++) {
      ecc[i] = ecc2[i] = 0;
    }}

  t0.stop();

  //BEGIN COMPUTE CONNECTED COMPONENTS
  t1.start();
  intE* Labels = newA(intE,n);
  {parallel_for(long i=0;i<n;i++) {
    if(GA.V[i].getOutDegree() == 0) Labels[i] = -i-1; //singletons
    else Labels[i] = INT_E_MAX;
    }}

  //get max degree vertex
  uintE maxV = sequence::reduce<uintE>((intE)0,(intE)n,maxF<intE>(),getDegree<vertex>(GA.V));

  //visit large component with BFS
  CCBFS(maxV,GA,Labels);
  //visit small components with label propagation
  Components(GA, Labels);

  //sort by component ID
  intPair* CCpairs = newA(intPair,n);
  {parallel_for(long i=0;i<n;i++)
    if(Labels[i] < 0)
      CCpairs[i] = make_pair(-Labels[i]-1,i);
    else CCpairs[i] = make_pair(Labels[i],i);
  }
  free(Labels);

  intSort::iSort(CCpairs, n, n+1,firstF<uintE,uintE>());

  uintE* changes = newA(uintE,n);
  changes[0] = 0;
  {parallel_for(long i=1;i<n;i++) 
      changes[i] = (CCpairs[i].first != CCpairs[i-1].first) ? i : UINT_E_MAX;}

  uintE* CCoffsets = newA(uintE,n);
  uintE numCC = sequence::filter(changes, CCoffsets, n, nonMaxF());
  CCoffsets[numCC] = n;
  free(changes);
  t1.stop();
  //END COMPUTE CONNECTED COMPONENTS

  //init data structures
  t0.start();
  length = max((long)1,min((n+63)/64,(long)length));
  long* VisitedArray = newA(long,n*length);
  long* NextVisitedArray = newA(long,n*length); 
  int* flags = newA(int,n);
  {parallel_for(long i=0;i<n;i++) flags[i] = -1;}
  uintE* starts = newA(uintE,n);
  intPair* pairs = newA(intPair,n);
  t0.stop();

  //BEGIN COMPUTE ECCENTRICITIES PER COMPONENT
  for(long k = 0; k < numCC; k++) {
    t2.start();
    uintE o = CCoffsets[k];
    uintE CCsize = CCoffsets[k+1] - o;
    if(CCsize == 2) { //size 2 CC's have ecc of 1
      ecc[CCpairs[o].second] = ecc[CCpairs[o+1].second] = 1;
      t2.stop();
    } else if(CCsize > 1) { //size 1 CC's already have ecc of 0
      //do main computation
      long myLength = min((long)length,((long)CCsize+63)/64);

      //initialize bit vectors for component vertices
      {parallel_for(long i=0;i<CCsize;i++) {
	uintT v = CCpairs[o+i].second;
	parallel_for(long j=0;j<myLength;j++)
	  VisitedArray[v*myLength+j] = NextVisitedArray[v*myLength+j] = 0;
	}}

      long sampleSize = min((long)CCsize,(long)64*myLength);

      uintE* starts2 = newA(uintE,sampleSize);

      //pick random vertices (could have duplicates)
      {parallel_for(ulong i=0;i<sampleSize;i++) {
	uintT index = hashInt(i+seed) % CCsize;
	if(flags[index] == -1 && CAS(&flags[index],-1,(int)i)) {
	  starts[i] = CCpairs[o+index].second;
	  NextVisitedArray[CCpairs[o+index].second*myLength + i/64] = (long) 1<<(i%64);
	} else starts[i] = UINT_E_MAX;
	}}

      //remove duplicates
      uintE numUnique = sequence::filter(starts,starts2,sampleSize,nonMaxF());

      //reset flags
      parallel_for(ulong i=0;i<sampleSize;i++) {
	uintT index = hashInt(i+seed) % CCsize;
	if(flags[index] == i) flags[index] = -1;
      }

      //first phase
      vertexSubset Frontier(n,numUnique,starts2); //initial frontier
      //note: starts2 will be freed inside the following loop
      uintE round = 0;
      while(!Frontier.isEmpty()){
	round++;
	vertexMap(Frontier, Ecc_Vertex_F(myLength,VisitedArray,NextVisitedArray));
	vertexSubset output = 
	  edgeMap(GA, Frontier, 
		  Ecc_F(myLength,VisitedArray,NextVisitedArray,ecc,round),
		  GA.m/20);
	Frontier.del();
	Frontier = output;
      }
      Frontier.del();
      t2.stop();
      //second phase if size of CC > 1024
      if(CCsize > 1024) {
	//sort by ecc
	t3.start();
	{parallel_for(long i=0;i<CCsize;i++) {
	  pairs[i] = make_pair(ecc[CCpairs[o+i].second],CCpairs[o+i].second);
	  }}
	intPair maxR = sequence::reduce(pairs,CCsize,maxFirstF());
	intSort::iSort(pairs, CCsize, 1+maxR.first, firstF<uintE,uintE>());
	t3.stop();

	t4.start();

	//reset bit vectors for component vertices
	{parallel_for(long i=0;i<CCsize;i++) {
	  uintT v = CCpairs[o+i].second;
	  parallel_for(long j=0;j<myLength;j++)
	    VisitedArray[v*myLength+j] = NextVisitedArray[v*myLength+j] = 0;
	  }}

	starts2 = newA(uintE,sampleSize);
	//pick starting points with highest ecc ("fringe" vertices)
	{parallel_for(long i=0;i<sampleSize;i++) {
	  intE v = pairs[CCsize-i-1].second;
	  starts2[i] = v;
	  NextVisitedArray[v*myLength + i/64] = (long) 1<<(i%64);
	  }}

	vertexSubset Frontier2(n,sampleSize,starts2); //initial frontier
	//note: starts2 will be freed inside the following loop
	round = 0;
	while(!Frontier2.isEmpty()){
	  round++;
	  vertexMap(Frontier2, Ecc_Vertex_F(myLength,VisitedArray,NextVisitedArray));
	  vertexSubset output = 
	    edgeMap(GA, Frontier2,Ecc_F(myLength,VisitedArray,NextVisitedArray,ecc2,round), GA.m/20);
	  Frontier2.del();
	  Frontier2 = output;
	}
	Frontier2.del();
	{parallel_for(long i=0;i<n;i++) ecc[i] = max(ecc[i],ecc2[i]);}
	t4.stop();
      }
    }
  }
  //END COMPUTE ECCENTRICITIES PER COMPONENT
  //(the excerpt ends here; writing ecc to oFile and cleanup would follow)
}
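// --- Sketch (not from the source): the vertexMap functor used above plausibly
// merges each vertex's NextVisited words into Visited between rounds; the
// Ligra-style operator() signature and the field names are assumptions.
struct Ecc_Vertex_F {
  long length; long *Visited, *NextVisited;
  Ecc_Vertex_F(long _length, long* _Visited, long* _NextVisited)
    : length(_length), Visited(_Visited), NextVisited(_NextVisited) {}
  inline bool operator() (uintE i) {
    long o = i*length;
    for(long j=0;j<length;j++) Visited[o+j] |= NextVisited[o+j];
    return 1;
  }
};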
Example #8
0
_seq<intT> setCover(Graph GS) {
    double epsilon = 0.01;
    intT m = maxElt(GS);
    cout << "m = " << m << endl;

    bucketTime.start();
    pair<bucket*, int> B = putInBuckets(GS, epsilon);
    bucketTime.stop();
    bucket* allBuckets = B.first;
    int numBuckets = B.second;

    set* S = newA(set, GS.n);    // holds sets for current bucket
    set* ST = newA(set, GS.n);   // temporary copy of S (pack is not in place)
    int l = 0;                   // size of S
    bool* flag = newA(bool, GS.n);
    intT* inCover = newA(intT, GS.n);
    intT nInCover = 0;
    intT totalWork = 0;
    intT* elts = newA(intT,m);
    intT threshold = GS.n;
    for (int i = 0; i < m; i++) elts[i] = INT_MAX;

    // loop over all buckets, largest degree first
    for (int i = numBuckets-1; i >= 0; i--) {
        bucket currentB = allBuckets[i];

        intT degreeThreshold = ceil(pow(1.0+epsilon,i));
        if (degreeThreshold == threshold && currentB.n == 0) continue;
        else threshold = degreeThreshold;
        packTime.start();

        // pack leftover sets that are below threshold down for the next round
        for (int j = 0; j < l; j++)
            flag[j] = (S[j].degree > 0 && S[j].degree < threshold);
        intT ln = sequence::pack(S, ST, flag, l);

        // pack leftover sets greater than threshold above for this round
        for (int j = 0; j < l; j++)
            flag[j] = (S[j].degree >= threshold);
        intT lb = sequence::pack(S, ST+ln, flag, l);

        // copy prebucketed bucket i to end, also for this round
        for (int j = 0; j < currentB.n; j++)
            ST[j+ln+lb] = currentB.S[j];

        lb = lb + currentB.n;   // total number in this round
        l = ln + lb;            // total number including those for next round
        swap(ST,S);             // since pack is not in place
        set* SB = S + ln;       // pointer to bottom of sets for this round
        packTime.stop();

        if (lb > 0) { // is there anything to do in this round?

            manisTime.start();
            intT work = processBucket(SB, elts, lb, threshold);
            totalWork += work;
            manisTime.stop();
            packTime.start();

            // check which sets were selected by manis to be in the set cover
            for (int j = 0; j < lb; j++)
                flag[j] = SB[j].degree < 0;

            // add these to inCover and label by their original ID
            int nNew = sequence::packIndex(inCover+nInCover, flag, lb);
            for (int j = nInCover; j < nInCover + nNew; j++)
                inCover[j] = SB[inCover[j]].id;
            nInCover = nInCover + nNew;
            packTime.stop();
            cout << "i = " << i << " bc = " << currentB.n << " l = " << l << " lb = " << lb
                 << " work = " << work << " new = " << nNew << " threshold = " << threshold << endl;
        }
    }
    cout << "Set cover size = " << nInCover << endl;
    cout << "Total work = " << totalWork << endl;
    cout << "Bucket Time = " << bucketTime.total() << endl;
    cout << "Manis Time = " << manisTime.total() << endl;
    cout << "Pack Time = " << packTime.total() << endl;

    free(elts);
    free(S);
    free(ST);
    free(flag);
    freeBuckets(allBuckets);
    return _seq<intT>(inCover, nInCover);
}
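// --- Sketch (not from the source): the bucket index consistent with the
// threshold ceil((1+epsilon)^i) used above -- a set of degree d would land in
// bucket floor(log(d)/log(1+epsilon)). putInBuckets' real logic is not shown
// here; this helper is illustrative only (log/floor come from <cmath>, which
// ceil/pow above already require).
inline int bucketOf(double degree, double epsilon) {
    return (int) floor(log(degree) / log(1.0 + epsilon));
}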