// parallel_dc: per-vertex degree centrality. Each thread takes a contiguous
// chunk of vertex IDs; out-degree writes are private to the owning thread,
// while in-degree updates may target another thread's chunk and therefore
// use an atomic fetch-and-add.
void parallel_dc(graph_t& g, unsigned threadnum, gBenchPerf_multi& perf, int perf_group)
{
    uint64_t chunk = (unsigned)ceil(g.num_vertices() / (double)threadnum);
    #pragma omp parallel num_threads(threadnum)
    {
        unsigned tid = omp_get_thread_num();
        perf.open(tid, perf_group);
        perf.start(tid, perf_group);

        unsigned start = tid * chunk;
        unsigned end = start + chunk;
        if (end > g.num_vertices()) end = g.num_vertices();

        for (unsigned vid = start; vid < end; vid++)
        {
            vertex_iterator vit = g.find_vertex(vid);

            // out degree: only this thread writes vertices in its chunk
            vit->property().outdegree = vit->edges_size();

            // in degree: the edge target may belong to another thread's
            // chunk, so increment atomically
            edge_iterator eit;
            for (eit = vit->edges_begin(); eit != vit->edges_end(); eit++)
            {
                vertex_iterator targ = g.find_vertex(eit->target());
                __sync_fetch_and_add(&(targ->property().indegree), 1);
            }
        }
        perf.stop(tid, perf_group);
    }
}
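/*
 * Illustration (not part of the benchmark; compile separately): the same
 * chunked degree-counting pattern on a plain adjacency list, with
 * std::atomic standing in for __sync_fetch_and_add. All names and the
 * graph layout below are assumptions made for this sketch only.
 */
#include <algorithm>
#include <atomic>
#include <cstdio>
#include <vector>
#include <omp.h>

static void degree_sketch(const std::vector<std::vector<int> >& adj, unsigned threadnum)
{
    const size_t n = adj.size();
    std::vector<size_t> outdeg(n, 0);
    std::vector<std::atomic<size_t> > indeg(n);  // value-initialized to zero

    const size_t chunk = (n + threadnum - 1) / threadnum;  // integer ceil(n/threadnum)
    #pragma omp parallel num_threads(threadnum)
    {
        const unsigned tid = omp_get_thread_num();
        const size_t start = tid * chunk;
        const size_t end = std::min(start + chunk, n);
        for (size_t v = start; v < end; ++v)
        {
            outdeg[v] = adj[v].size();   // private to this thread's chunk
            for (int w : adj[v])         // target may be in another chunk:
                indeg[w].fetch_add(1, std::memory_order_relaxed);  // atomic add
        }
    }
    for (size_t v = 0; v < n; ++v)
        std::printf("v%zu: out=%zu in=%zu\n", v, outdeg[v], indeg[v].load());
}

int main()
{
    std::vector<std::vector<int> > adj = { {1, 2}, {2}, {0} };  // tiny test graph
    degree_sketch(adj, 2);
    return 0;
}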
// graph_update: deletes the given vertex IDs one by one, stopping early if
// the graph becomes empty.
void graph_update(graph_t& g, vector<uint64_t> IDs)
{
    for (size_t i = 0; i < IDs.size(); i++)
    {
        if (g.num_vertices() == 0) break;
        g.delete_vertex(IDs[i]);
    }
}
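// Usage sketch (illustrative only; the ID values and the driver code around
// the call are assumptions, not part of this file):
//
//     std::vector<uint64_t> victims = {3, 7, 42};  // hypothetical vertex IDs
//     graph_update(g, victims);                    // g shrinks by up to 3 vertices
//
// Whether delete_vertex() invalidates or renumbers the remaining IDs is up
// to the graph_t implementation; the loop above passes the IDs as given.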
// parallel_bc, HMC/SIM-instrumented variant of betweenness centrality.
// It shares its signature with the baseline parallel_bc further below, so
// the two are evidently meant for different build configurations; this one
// adds HMC atomic intrinsics and SIM region markers, and uses 16-bit
// counters to match the width of the HMC_*_16B operations.
void parallel_bc(graph_t& g, unsigned threadnum, bool undirected,
                 gBenchPerf_multi& perf, int perf_group)
{
    typedef list<size_t> vertex_list_t;
    size_t vnum = g.num_vertices();
    uint64_t chunk = (unsigned)ceil(vnum / (double)threadnum);
    double normalizer = (undirected) ? 2.0 : 1.0;  // undirected: every pair is visited from both endpoints

    #pragma omp parallel num_threads(threadnum)
    {
        unsigned tid = omp_get_thread_num();
        perf.open(tid, perf_group);
        perf.start(tid, perf_group);

        unsigned start = tid * chunk;
        unsigned end = start + chunk;
        if (maxiter != 0 && chunk > maxiter) end = start + maxiter;
        if (end > vnum) end = vnum;

        // initialization: per-thread working state
        vector<vertex_list_t> shortest_path_parents(vnum);
        vector<int16_t> num_of_paths(vnum);        // 16-bit signed, matching HMC_ADD_16B
        vector<uint16_t> depth_of_vertices(vnum);  // 16-bit unsigned, matching HMC_CAS_equal_16B
        vector<float> centrality_update(vnum);
#ifdef SIM
        unsigned iter = 0;
#endif
        for (uint64_t vid = start; vid < end; vid++)
        {
#ifdef SIM
            SIM_BEGIN(iter == beginiter);
            iter++;
#endif
            size_t vertex_s = vid;
            stack<size_t> order_seen_stack;
            queue<size_t> BFS_queue;
            BFS_queue.push(vertex_s);

            for (size_t i = 0; i < vnum; i++)
            {
                shortest_path_parents[i].clear();
                num_of_paths[i] = (i == vertex_s) ? 1 : 0;
                depth_of_vertices[i] = (i == vertex_s) ? 0 : MY_INFINITY;
                centrality_update[i] = 0;
            }

            // BFS traversal: count shortest paths and record predecessors
            while (!BFS_queue.empty())
            {
                size_t v = BFS_queue.front();
                BFS_queue.pop();
                order_seen_stack.push(v);

                vertex_iterator vit = g.find_vertex(v);
                uint16_t newdepth = depth_of_vertices[v] + 1;
                for (edge_iterator eit = vit->edges_begin(); eit != vit->edges_end(); eit++)
                {
                    size_t w = eit->target();
#ifdef HMC
                    // CAS returns the previously observed depth: MY_INFINITY
                    // means this update discovered w for the first time
                    if (HMC_CAS_equal_16B(&(depth_of_vertices[w]), MY_INFINITY, newdepth) == MY_INFINITY)
                    {
                        BFS_queue.push(w);
                    }
                    if (depth_of_vertices[w] == newdepth)
                    {
                        HMC_ADD_16B(&(num_of_paths[w]), num_of_paths[v]);
                        shortest_path_parents[w].push_back(v);
                    }
#else
                    if (depth_of_vertices[w] == MY_INFINITY)
                    {
                        BFS_queue.push(w);
                        depth_of_vertices[w] = newdepth;
                    }
                    if (depth_of_vertices[w] == newdepth)
                    {
                        num_of_paths[w] += num_of_paths[v];
                        shortest_path_parents[w].push_back(v);
                    }
#endif
                }
            }

            // dependency accumulation in reverse BFS order
            while (!order_seen_stack.empty())
            {
                size_t w = order_seen_stack.top();
                order_seen_stack.pop();

                float coeff = (1 + centrality_update[w]) / (double)num_of_paths[w];
                vertex_list_t::iterator iter;
                for (iter = shortest_path_parents[w].begin(); iter != shortest_path_parents[w].end(); iter++)
                {
                    size_t v = *iter;
#ifdef HMC
                    HMC_FP_ADD(&(centrality_update[v]), (num_of_paths[v] * coeff));
#else
                    centrality_update[v] += (num_of_paths[v] * coeff);
#endif
                }
                if (w != vertex_s)
                {
                    // BC is shared across threads: accumulate atomically
                    vertex_iterator vit = g.find_vertex(w);
                    #pragma omp atomic
                    vit->property().BC += centrality_update[w] / normalizer;
                }
            }
#ifdef SIM
            SIM_END(iter == enditer);
#endif
        }
#ifdef SIM
        SIM_END(enditer == 0);
#endif
        perf.stop(tid, perf_group);
    }
    return;
}
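/*
 * The HMC_* primitives above (HMC_CAS_equal_16B, HMC_ADD_16B, HMC_FP_ADD)
 * are provided elsewhere in the benchmark and model Hybrid Memory Cube
 * atomic operations. For reference only, here is one plausible software
 * model of the 16-bit compare-and-swap, built on std::atomic. This is an
 * assumption about its semantics, not the benchmark's implementation: it
 * returns the value observed before the operation, so a return value of
 * MY_INFINITY tells the caller it won the race to set the depth and should
 * enqueue the vertex.
 */
#include <atomic>
#include <cstdint>

static uint16_t cas_equal_16b_model(uint16_t* addr, uint16_t expected, uint16_t desired)
{
    // Modeling shortcut: treat the plain location as an atomic. A real
    // implementation would declare the storage atomic to begin with.
    std::atomic<uint16_t>* a = reinterpret_cast<std::atomic<uint16_t>*>(addr);
    uint16_t observed = expected;
    // On success, 'observed' keeps the expected value (which is what was
    // seen); on failure, compare_exchange_strong writes the current value
    // into it. Either way it holds the pre-operation value.
    a->compare_exchange_strong(observed, desired);
    return observed;
}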
// bc: sequential betweenness centrality (Brandes' algorithm).
void bc(graph_t& g, bool undirected, gBenchPerf_event& perf, int perf_group)
{
    typedef list<size_t> vertex_list_t;

    // initialization
    size_t vnum = g.num_vertices();
    vector<vertex_list_t> shortest_path_parents(vnum);
    vector<size_t> num_of_paths(vnum);
    vector<int8_t> depth_of_vertices(vnum);  // 8-bit signed: caps BFS depth at 127
    vector<double> centrality_update(vnum);

    double normalizer = (undirected) ? 2.0 : 1.0;  // undirected: every pair is visited from both endpoints

    perf.open(perf_group);
    perf.start(perf_group);

    vertex_iterator vit;
    for (vit = g.vertices_begin(); vit != g.vertices_end(); vit++)
    {
        size_t vertex_s = vit->id();
        stack<size_t> order_seen_stack;
        queue<size_t> BFS_queue;
        BFS_queue.push(vertex_s);

        for (size_t i = 0; i < vnum; i++)
        {
            shortest_path_parents[i].clear();
            num_of_paths[i] = (i == vertex_s) ? 1 : 0;
            depth_of_vertices[i] = (i == vertex_s) ? 0 : -1;  // -1 marks "unvisited"
            centrality_update[i] = 0;
        }

        // BFS traversal: count shortest paths and record predecessors
        while (!BFS_queue.empty())
        {
            size_t v = BFS_queue.front();
            BFS_queue.pop();
            order_seen_stack.push(v);

            vertex_iterator vit = g.find_vertex(v);
            for (edge_iterator eit = vit->edges_begin(); eit != vit->edges_end(); eit++)
            {
                size_t w = eit->target();
                if (depth_of_vertices[w] < 0)
                {
                    BFS_queue.push(w);
                    depth_of_vertices[w] = depth_of_vertices[v] + 1;
                }
                if (depth_of_vertices[w] == (depth_of_vertices[v] + 1))
                {
                    num_of_paths[w] += num_of_paths[v];
                    shortest_path_parents[w].push_back(v);
                }
            }
        }

        // dependency accumulation in reverse BFS order
        while (!order_seen_stack.empty())
        {
            size_t w = order_seen_stack.top();
            order_seen_stack.pop();

            double coeff = (1 + centrality_update[w]) / (double)num_of_paths[w];
            vertex_list_t::iterator iter;
            for (iter = shortest_path_parents[w].begin(); iter != shortest_path_parents[w].end(); iter++)
            {
                size_t v = *iter;
                centrality_update[v] += (num_of_paths[v] * coeff);
            }
            if (w != vertex_s)
            {
                vertex_iterator vit = g.find_vertex(w);
                vit->property().BC += centrality_update[w] / normalizer;
            }
        }
    }
    perf.stop(perf_group);
    return;
}
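/*
 * For reference: the routine above is Brandes' algorithm (2001), a BFS from
 * each source that counts shortest paths and records predecessors, followed
 * by a reverse-order dependency accumulation. The standalone sketch below
 * (compile separately; all names are illustrative, not from this file)
 * reproduces the same two phases on plain adjacency lists.
 */
#include <cstdio>
#include <queue>
#include <stack>
#include <vector>

static std::vector<double> brandes_bc(const std::vector<std::vector<int> >& adj)
{
    const int n = (int)adj.size();
    std::vector<double> bc_score(n, 0.0);
    for (int s = 0; s < n; ++s)
    {
        std::vector<std::vector<int> > parents(n);
        std::vector<long long> sigma(n, 0);  // shortest-path counts
        std::vector<int> dist(n, -1);        // -1 marks "unvisited"
        std::vector<double> delta(n, 0.0);   // dependency accumulator
        sigma[s] = 1;
        dist[s] = 0;
        std::queue<int> q;
        q.push(s);
        std::stack<int> order;               // vertices in order of discovery
        while (!q.empty())
        {
            int v = q.front(); q.pop();
            order.push(v);
            for (int w : adj[v])
            {
                if (dist[w] < 0) { dist[w] = dist[v] + 1; q.push(w); }
                if (dist[w] == dist[v] + 1)
                {
                    sigma[w] += sigma[v];
                    parents[w].push_back(v);
                }
            }
        }
        while (!order.empty())               // accumulate in reverse BFS order
        {
            int w = order.top(); order.pop();
            double coeff = (1.0 + delta[w]) / (double)sigma[w];
            for (int v : parents[w]) delta[v] += sigma[v] * coeff;
            if (w != s) bc_score[w] += delta[w];
        }
    }
    return bc_score;
}

int main()
{
    // Directed path 0 -> 1 -> 2: only vertex 1 lies on a shortest path
    // between two other vertices, so BC(1) = 1 and the rest are 0.
    std::vector<std::vector<int> > adj = { {1}, {2}, {} };
    std::vector<double> bc_score = brandes_bc(adj);
    for (size_t v = 0; v < bc_score.size(); ++v)
        std::printf("BC(%zu) = %.1f\n", v, bc_score[v]);
    return 0;
}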
// parallel_bc: baseline OpenMP betweenness centrality. Source vertices are
// split into contiguous chunks; all per-source state is thread-private, and
// only the final BC accumulation touches shared vertex properties, under
// #pragma omp atomic.
void parallel_bc(graph_t& g, unsigned threadnum, bool undirected,
                 gBenchPerf_multi& perf, int perf_group)
{
    typedef list<size_t> vertex_list_t;
    size_t vnum = g.num_vertices();
    uint64_t chunk = (unsigned)ceil(vnum / (double)threadnum);
    double normalizer = (undirected) ? 2.0 : 1.0;  // undirected: every pair is visited from both endpoints

    #pragma omp parallel num_threads(threadnum)
    {
        unsigned tid = omp_get_thread_num();
        perf.open(tid, perf_group);
        perf.start(tid, perf_group);

        unsigned start = tid * chunk;
        unsigned end = start + chunk;
        if (end > vnum) end = vnum;

        // initialization: per-thread working state
        vector<vertex_list_t> shortest_path_parents(vnum);
        vector<size_t> num_of_paths(vnum);
        vector<int8_t> depth_of_vertices(vnum);  // 8-bit signed: caps BFS depth at 127
        vector<double> centrality_update(vnum);

        for (uint64_t vid = start; vid < end; vid++)
        {
            size_t vertex_s = vid;
            stack<size_t> order_seen_stack;
            queue<size_t> BFS_queue;
            BFS_queue.push(vertex_s);

            for (size_t i = 0; i < vnum; i++)
            {
                shortest_path_parents[i].clear();
                num_of_paths[i] = (i == vertex_s) ? 1 : 0;
                depth_of_vertices[i] = (i == vertex_s) ? 0 : -1;  // -1 marks "unvisited"
                centrality_update[i] = 0;
            }

            // BFS traversal: count shortest paths and record predecessors
            while (!BFS_queue.empty())
            {
                size_t v = BFS_queue.front();
                BFS_queue.pop();
                order_seen_stack.push(v);

                vertex_iterator vit = g.find_vertex(v);
                for (edge_iterator eit = vit->edges_begin(); eit != vit->edges_end(); eit++)
                {
                    size_t w = eit->target();
                    if (depth_of_vertices[w] < 0)
                    {
                        BFS_queue.push(w);
                        depth_of_vertices[w] = depth_of_vertices[v] + 1;
                    }
                    if (depth_of_vertices[w] == (depth_of_vertices[v] + 1))
                    {
                        num_of_paths[w] += num_of_paths[v];
                        shortest_path_parents[w].push_back(v);
                    }
                }
            }

            // dependency accumulation in reverse BFS order
            while (!order_seen_stack.empty())
            {
                size_t w = order_seen_stack.top();
                order_seen_stack.pop();

                double coeff = (1 + centrality_update[w]) / (double)num_of_paths[w];
                vertex_list_t::iterator iter;
                for (iter = shortest_path_parents[w].begin(); iter != shortest_path_parents[w].end(); iter++)
                {
                    size_t v = *iter;
                    centrality_update[v] += (num_of_paths[v] * coeff);
                }
                if (w != vertex_s)
                {
                    // BC is shared across threads: accumulate atomically
                    vertex_iterator vit = g.find_vertex(w);
                    #pragma omp atomic
                    vit->property().BC += centrality_update[w] / normalizer;
                }
            }
        }
        perf.stop(tid, perf_group);
    }
    return;
}
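// Usage sketch (an assumption about the surrounding benchmark driver, not
// code from this file; the perf-wrapper construction in particular is
// elided because its exact signature lives elsewhere in the benchmark).
// A typical main() loads the graph and dispatches on thread count:
//
//     graph_t g;
//     // ... load vertices and edges into g ...
//     unsigned threadnum = 4;
//     gBenchPerf_event perf_event = /* event setup elided */;
//     gBenchPerf_multi perf(threadnum, perf_event);   // assumed constructor
//     if (threadnum == 1)
//         bc(g, true, perf_event, 0);                 // true == undirected
//     else
//         parallel_bc(g, threadnum, true, perf, 0);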