Example #1
void pageRank(Graph g, float* solution, float damping, float convergence)
{
  int numNodes = num_nodes(g);
  State<float> s(g, damping, convergence);

  // Start with every vertex in the frontier.
  VertexSet* frontier = newVertexSet(SPARSE, numNodes, numNodes);
  for (int i = 0; i < numNodes; i++) {
    addVertex(frontier, i);
  }

  float error = INFINITY;
  // Iterate until the error reported by the state falls below the convergence threshold.
  while (error > convergence) {
    Local<float> local(g, s.pcurr, s.pnext, s.diff, damping);

    VertexSet* frontier2 = edgeMap<State<float> >(g, frontier, s);
    VertexSet* frontier3 = vertexMap<Local<float> >(frontier2, local);

    freeVertexSet(frontier);
    freeVertexSet(frontier2);
    frontier = frontier3;

    error = s.getError();
    std::swap(s.pcurr, s.pnext);
  }

  freeVertexSet(frontier);

  // Copy the converged ranks into the output array.
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < numNodes; i++) {
    solution[i] = s.pcurr[i];
  }
}
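
// A hypothetical sketch of the Local<T> vertex operator constructed inside the
// loop above. The real definition lives elsewhere; everything beyond the
// constructor arguments visible in the driver (g, pcurr, pnext, diff, damping)
// is an assumption, including the operator() signature vertexMap expects.
template <typename T>
struct Local {
  Graph g;
  T* pcurr;   // ranks from the previous iteration
  T* pnext;   // ranks accumulated by edgeMap this iteration
  T* diff;    // per-vertex rank change, presumably summed by State::getError()
  T damping;

  Local(Graph g, T* pcurr, T* pnext, T* diff, T damping)
    : g(g), pcurr(pcurr), pnext(pnext), diff(diff), damping(damping) {}

  // Finish the PageRank update for vertex i: apply the damping term to the
  // accumulated neighbor contributions and record how much the rank moved.
  bool operator()(int i) {
    pnext[i] = damping * pnext[i] + (1.0 - damping) / num_nodes(g);
    T d = pnext[i] - pcurr[i];
    diff[i] = d < 0 ? -d : d;
    return true;  // keep every vertex active for the next iteration
  }
};
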
void decompose(graph *g, int *decomp, int* dus, int maxVal, int maxId) {
  int* cluster;
  int* nextcluster;
  int iter = 0;
  int total_num = g->num_nodes;

  // One entry per vertex; NA marks a vertex not yet assigned to a cluster.
  cluster = (int*) malloc(sizeof(int) * total_num);
  nextcluster = (int*) malloc(sizeof(int) * total_num);
  memset(cluster, NA, sizeof(int) * total_num);
  memset(nextcluster, NA, sizeof(int) * total_num);
  
  // int* dus = getDus(total_num, decomp, &maxVal, &maxId); // rate = 1/beta; where does beta come from?

  // Seed the frontier with the single start vertex maxId.
  VertexSet* frontier = newVertexSet(SPARSE, 1, total_num);

  addVertex(frontier, maxId);

  VertexSet *newFrontier;

  // max_dus is the largest dus value; vertex i becomes active once
  // iter > max_dus - dus[i] (see the pseudocode inside the loop below).
  int max_dus = 0;
  for (int i = 0; i < total_num; i++) {
    max_dus = max_dus >= dus[i] ? max_dus : dus[i];
  }

  while (frontier->size > 0) {
    iter += 1;
    // Grow every active ball by one hop.
    Decomposition dec(cluster, nextcluster);
    newFrontier = edgeMap(g, frontier, dec, NORETURN);

    freeVertexSet(frontier);
    frontier = newFrontier;
    // At the next iteration, start growing a ball around every vertex i that
    // is still unclustered and whose delay has elapsed:
    //   foreach unclustered vertex i:
    //     if (iter > max_dus - dus[i]) frontier->add(i);
    // (a hypothetical sketch of the UpdateFrontier operator follows this function)
    UpdateFrontier uf(max_dus, iter, dus);
    vertexMap(frontier, uf, YESRETURN);

    ClusterCopy vc(cluster, nextcluster);
    vertexMap(frontier, vc, NORETURN);
  }

  freeVertexSet(frontier);
  free(cluster);
  free(nextcluster);
}
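
// A hypothetical sketch of the UpdateFrontier vertex operator constructed in
// the loop above. The real definition is not shown here; the operator()
// signature is an assumption. The pseudocode additionally restricts the check
// to vertices that are not yet clustered, which is presumably handled by the
// set the operator is applied to.
struct UpdateFrontier {
  int max_dus;  // largest dus value over all vertices
  int iter;     // current ball-growing iteration
  int* dus;     // per-vertex growth offsets

  UpdateFrontier(int max_dus, int iter, int* dus)
    : max_dus(max_dus), iter(iter), dus(dus) {}

  // Returning true keeps vertex i in the output set: i starts growing its
  // ball once its delay max_dus - dus[i] has elapsed.
  bool operator()(int i) {
    return iter > max_dus - dus[i];
  }
};
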
Example #3
// Finds the BFS distance to each node starting from node 0.
void bfs(graph *g, int *solution) {
  Bfs f(g, solution);

  // Initialize frontier.
  VertexSet* frontier = newVertexSet(SPARSE, 1, num_nodes(g));
  addVertex(frontier, 0);

  VertexSet *newFrontier;

  while (frontier->size != 0) {
    newFrontier = edgeMap<Bfs>(g, frontier, f);
    freeVertexSet(frontier);
    frontier = newFrontier;
    f.currentDistance++;
  }

  freeVertexSet(frontier);
}
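
// A hypothetical sketch of the Bfs edge operator used by the driver above.
// The real definition is elsewhere; the update/cond method names and the
// NA "unvisited" convention are assumptions.
struct Bfs {
  graph* g;
  int* solution;        // BFS distance per node, NA while unvisited
  int currentDistance;  // distance of the current frontier from node 0

  Bfs(graph* g, int* solution) : g(g), solution(solution), currentDistance(0) {
    // Assumed initialization: every node unvisited, source node 0 at distance 0.
    for (int i = 0; i < num_nodes(g); i++) solution[i] = NA;
    solution[0] = 0;
  }

  // Returning true places dst in the next frontier. A parallel edgeMap would
  // need a compare-and-swap here to claim dst exactly once.
  bool update(int src, int dst) {
    if (solution[dst] == NA) {
      solution[dst] = currentDistance + 1;  // one hop farther than the frontier
      return true;
    }
    return false;
  }

  // Only consider destinations that have not been visited yet.
  bool cond(int v) { return solution[v] == NA; }
};
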
/**
  Given a graph, select k random start vertices. Call this set of k
  vertices S. For every vertex v in the graph, find the farthest distance
  between v and any of the vertices in S, and store it in distField.

  Note that this implementation is faster than running K separate BFSs in
  parallel because all K BFSs share a single frontier. To make this work,
  each node stores a bit vector of K bits indicating whether or not it has
  been visited yet by the kth BFS.

  A node v keeps being added to the frontier as long as some source vertex u
  on the frontier has a bit vector different from v's, meaning that v is
  being visited for the first time by at least one of the K BFSs. In that
  case we must also update our estimate of v's distance, since distField
  stores the FARTHEST distance from v to any of the source vertices.

  At the end of the algorithm distField contains the maximum distance from
  each node v in the graph to any of the K source nodes. The final radius
  estimate of the graph is obtained by taking the maximum over all nodes.
  Note that this is only an estimate because we ran a BFS from just K nodes;
  an exact radius would require running a BFS from every single node in the
  graph.
 **/
void kBFS(graph *g, int *distField) {

  int** visited;
  int** nextVisited;
  int* radii;
  int iter = 0;

  // Set up globals. distField doubles as the per-node radius estimate:
  // radii is simply an alias for it.
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < g->num_nodes; i++)
    distField[i] = NA;
  radii = distField;

  // Per-node visitation bit vectors: NUMWORDS ints per node (enough for K bits).
  visited = (int**) malloc(sizeof(int*) * g->num_nodes);
  nextVisited = (int**) malloc(sizeof(int*) * g->num_nodes);

  for (int i = 0; i < g->num_nodes; i++) {
    visited[i] = (int*) malloc(sizeof(int) * NUMWORDS);
    nextVisited[i] = (int*) malloc(sizeof(int) * NUMWORDS);
    memset(visited[i], 0, sizeof(int) * NUMWORDS);
    memset(nextVisited[i], 0, sizeof(int) * NUMWORDS);
  }


  // initialize the frontier with K random nodes
  srand(0);
  int numSources = std::min(K, g->num_nodes);
  int S[numSources]; // the set of source nodes
  for (int i = 0; i < numSources; i++)
    S[i] = std::rand() % g->num_nodes;  // modulo keeps the index strictly in range

  VertexSet* frontier = newVertexSet(SPARSE, numSources, g->num_nodes);
  for (int i = 0; i < numSources; i++) {
    addVertex(frontier, S[i]);
  }

  // Iterate over BFS indices 0 through numSources-1 to do per-source initialization.
  VertexSet* ks = newVertexSet(SPARSE, numSources, g->num_nodes);
  for (int i = 0; i < numSources; i++) 
    addVertex(ks, i);

  Init i(S, visited, nextVisited, radii);
  vertexMap(ks, i, NORETURN);

  freeVertexSet(ks);

  VertexSet *newFrontier;

  while (frontier->size > 0) {
    // Advance all K BFSs by one level; iter is the current BFS depth.
    iter = iter + 1;
    RadiiUpdate ru(visited, nextVisited, radii, iter);
    newFrontier = edgeMap(g, frontier, ru);

    freeVertexSet(frontier);
    frontier = newFrontier;

    // Fold the bits gathered this iteration into visited for the new frontier.
    VisitedCopy vc(visited, nextVisited);
    vertexMap(frontier, vc, NORETURN);
  }

  for (int i = 0; i < g->num_nodes; i++) {
    free(visited[i]);
    free(nextVisited[i]);
  }

  freeVertexSet(frontier);
  free(visited);
  free(nextVisited);
}
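
// A hypothetical sketch of the RadiiUpdate edge operator described in the
// comment above kBFS. The real definition is not shown; the update/cond
// names and the atomic-OR choice are assumptions.
struct RadiiUpdate {
  int** visited;      // visited[v]: bit vector of the BFSs that have reached v
  int** nextVisited;  // bits gathered during the current iteration
  int* radii;         // farthest distance observed so far for each node
  int iter;           // current BFS depth

  RadiiUpdate(int** visited, int** nextVisited, int* radii, int iter)
    : visited(visited), nextVisited(nextVisited), radii(radii), iter(iter) {}

  // Returning true places dst in the next frontier. dst keeps being visited
  // as long as some source src has a bit set that dst does not, i.e. at least
  // one of the K BFSs is reaching dst for the first time.
  bool update(int src, int dst) {
    bool changed = false;
    for (int w = 0; w < NUMWORDS; w++) {
      int newBits = visited[src][w] & ~visited[dst][w];
      if (newBits != 0) {
        // A parallel edgeMap needs an atomic OR here; every writer stores the
        // same iter into radii[dst], so that plain write is benign.
        __sync_fetch_and_or(&nextVisited[dst][w], newBits);
        radii[dst] = iter;
        changed = true;
      }
    }
    return changed;
  }

  // Every destination is a candidate; the bit comparison in update() decides.
  bool cond(int v) { return true; }
};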