main(int argc, char *argv[]) { int streamnum, commNum, nstreams, *stream, *commonStream; double rn; int i, myid, nprocs; /************************** MPI calls ***********************************/ MPI_Init(&argc, &argv); /* Initialize MPI */ MPI_Comm_rank(MPI_COMM_WORLD, &myid); /* find process id */ MPI_Comm_size(MPI_COMM_WORLD, &nprocs); /* find number of processes */ /****************** Initialization values *******************************/ streamnum = myid; /*This stream is different on each process*/ commNum = nprocs; /* This stream is common to all processes */ nstreams = nprocs + 1; /* extra stream is common to all processes*/ /*********************** Initialize streams *****************************/ /* This stream is different on each process */ stream = init_sprng(streamnum,nstreams,SEED,SPRNG_DEFAULT); printf("Process %d: Print information about new stream\n", myid); print_sprng(stream); /* This stream is identical on each process */ commonStream = init_sprng(commNum,nstreams,SEED,SPRNG_DEFAULT); printf("Process %d: This stream is identical on all processes\n", myid); print_sprng(commonStream); /*********************** print random numbers ***************************/ for (i=0;i<2;i++) /* random numbers from distinct stream */ { rn = sprng(stream); /* generate double precision random number*/ printf("Process %d, random number (distinct stream) %d: %f\n", myid, i+1, rn); } for (i=0;i<2;i++) /* random number from common stream */ { rn = sprng(commonStream); /*generate double precision random number */ printf("Process %d, random number (shared stream) %d: %f\n", myid, i+1, rn); } /*************************** free memory ********************************/ free_sprng(stream); /* free memory used to store stream state */ free_sprng(commonStream); MPI_Finalize(); /* terminate MPI */ }
/*
 * Serial SPRNG demo: seed a single stream from the current time
 * (make_sprng_seed) and print three doubles in [0,1).
 *
 * Fixes: added the `int` return type (implicit int is invalid since C99)
 * and an explicit `return 0;`.
 */
int main(void)
{
    int streamnum, nstreams, seed, *stream, i;
    double rn;

    /************************** Initialization *******************************/
    streamnum = 0;
    nstreams = 1;
    seed = make_sprng_seed();  /* make new seed each time program is run */

    /* initialize stream */
    stream = init_sprng(streamnum, nstreams, seed, SPRNG_DEFAULT);
    printf(" Printing information about new stream\n");
    print_sprng(stream);

    /************************ print random numbers ***************************/
    printf(" Printing 3 random numbers in [0,1):\n");
    for (i = 0; i < 3; i++) {
        rn = sprng(stream);  /* generate double precision random number */
        printf("%f\n", rn);
    }

    free_sprng(stream);  /* free memory used to store stream state */

    return 0;
}
main() { int streamnum, nstreams, *stream, **new; double rn; int i, irn, nspawned; int gtype; /*--- */ /*--- reading in a generator type */ #include "gen_types_menu.h" printf("Type in a generator type (integers: 0,1,2,3,4,5): "); scanf("%d", >ype); /****************** Initialization values *******************************/ streamnum = 0; nstreams = 1; stream = init_sprng(gtype,streamnum,nstreams,SEED,SPRNG_DEFAULT); /* initialize stream */ printf(" Print information about stream:\n"); print_sprng(stream); /*********************** print random numbers ***************************/ printf(" Printing 2 random numbers in [0,1):\n"); for (i=0;i<2;i++) { rn = sprng(stream); /* generate double precision random number*/ printf("%f\n", rn); } /**************************** spawn streams *****************************/ printf(" Spawned two streams\n"); nspawned = 2; nspawned = spawn_sprng(stream,2,&new); /* spawn 2 streams */ if(nspawned != 2) { fprintf(stderr,"Error: only %d streams spawned\n", nspawned); exit(1); } printf(" Information on first spawned stream:\n"); print_sprng(new[0]); printf(" Information on second spawned stream:\n"); print_sprng(new[1]); printf(" Printing 2 random numbers from second spawned stream:\n"); for (i=0;i<2;i++) { rn = sprng(new[1]); /* generate a random number */ printf("%f\n", rn); } /*************************** free memory ********************************/ free_sprng(stream); /* free memory used to store stream state */ free_sprng(new[0]); /* free memory used to store stream state */ free_sprng(new[1]); /* free memory used to store stream state */ free(new); }
main(int argc, char *argv[]) { int streamnum, nstreams, seed, *stream, i, myid, nprocs; double rn; int gtype; /*--- */ /*************************** MPI calls ***********************************/ MPI_Init(&argc, &argv); /* Initialize MPI */ MPI_Comm_rank(MPI_COMM_WORLD, &myid); /* find process id */ MPI_Comm_size(MPI_COMM_WORLD, &nprocs); /* find number of processes */ /************************** Initialization *******************************/ streamnum = myid; nstreams = nprocs; /* one stream per processor */ seed = make_sprng_seed(); /* make new seed each time program is run */ /*--- node 0 is reading in a generator type */ if(myid == 0) { #include "gen_types_menu.h" printf("Type in a generator type (integers: 0,1,2,3,4,5): "); scanf("%d", >ype); } MPI_Bcast(>ype,1,MPI_INT,0,MPI_COMM_WORLD ); /* Seed should be the same on all processes */ printf("Process %d: seed = %16d\n", myid, seed); stream = init_sprng(gtype,streamnum,nstreams,seed,SPRNG_DEFAULT); /*initialize stream*/ printf("\n\nProcess %d: Print information about stream:\n",myid); print_sprng(stream); /************************ print random numbers ***************************/ for (i=0;i<3;i++) { rn = sprng(stream); /* generate double precision random number */ printf("process %d, random number %d: %f\n", myid, i+1, rn); } free_sprng(stream); /* free memory used to store stream state */ MPI_Finalize(); /* Terminate MPI */ }
main(int argc, char *argv[]) { int streamnum, nstreams, *stream; double rn; int i, myid, nprocs; /*************************** MPI calls ***********************************/ MPI_Init(&argc, &argv); /* Initialize MPI */ MPI_Comm_rank(MPI_COMM_WORLD, &myid); /* find process id */ MPI_Comm_size(MPI_COMM_WORLD, &nprocs); /* find number of processes */ /************************** Initialization *******************************/ streamnum = myid; nstreams = nprocs; /* one stream per processor */ stream = init_sprng(streamnum,nstreams,SEED,SPRNG_DEFAULT); /* initialize stream */ printf("Process %d, print information about stream:\n", myid); print_sprng(stream); /*********************** print random numbers ****************************/ for (i=0;i<3;i++) { rn = sprng(stream); /* generate double precision random number */ printf("Process %d, random number %d: %.14f\n", myid, i+1, rn); } /*************************** free memory *********************************/ free_sprng(stream); /* free memory used to store stream state */ MPI_Finalize(); /* Terminate MPI */ }
/*
 * Brandes-style betweenness centrality where each level-synchronous BFS
 * is itself parallelized: all threads cooperate on one source at a time.
 * Per-phase frontiers are built in thread-local stacks (myS) and merged
 * into the shared stack S via a prefix sum over psCount; per-vertex
 * OpenMP locks guard the shared d/sig/P arrays.
 *
 * G       : input graph (CSR-style: numEdges offsets, endV targets)
 * BC      : output array of n betweenness scores (accumulated in place)
 * numSrcs : number of (non-isolated) source vertices to run BFS from
 *
 * Fix: the final diagnostic message used the broken format string
 * "Time taken: %lf\n seconds" (newline in the middle); corrected to
 * "Time taken: %lf seconds\n". No other behavior change.
 */
void vertex_betweenness_centrality_parBFS(graph_t* G, double* BC, long numSrcs)
{
    attr_id_t *S;      /* stack of vertices in the order of non-decreasing
                          distance from s. Also used to implicitly
                          represent the BFS queue */
    plist_t* P;        /* predecessors of a vertex v on shortest paths from s */
    double* sig;       /* No. of shortest paths */
    attr_id_t* d;      /* Length of the shortest path between every pair */
    double* del;       /* dependency of vertices */
    attr_id_t *in_degree, *numEdges, *pSums;
    attr_id_t* pListMem;
#if RANDSRCS
    attr_id_t* Srcs;   /* randomly permuted source ordering */
#endif
    attr_id_t *start, *end;   /* per-phase [start,end) ranges into S */
    long MAX_NUM_PHASES;
    attr_id_t *psCount;       /* per-thread frontier counts for prefix sum */
#ifdef _OPENMP
    omp_lock_t* vLock;
    long chunkSize;
#endif
#ifdef DIAGNOSTIC
    double elapsed_time;
#endif
    int seed = 2387;

#ifdef _OPENMP
#pragma omp parallel firstprivate(G)
    {
#endif
        attr_id_t *myS, *myS_t;   /* thread-local frontier stack (+resize tmp) */
        attr_id_t myS_size;
        long i, j, k, p, count, myCount;
        long v, w, vert;
        long k0, k1;
        long numV, num_traversals, n, m, phase_num;
        long start_iter, end_iter;
        long tid, nthreads;
        int* stream;              /* per-thread SPRNG stream */
#ifdef DIAGNOSTIC
        double elapsed_time_part;
#endif
#ifdef _OPENMP
        int myLock;
        tid = omp_get_thread_num();
        nthreads = omp_get_num_threads();
#else
        tid = 0;
        nthreads = 1;
#endif

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time = get_seconds();
            elapsed_time_part = get_seconds();
        }
#endif

        /* numV: no. of vertices to run BFS from = numSrcs */
        numV = numSrcs;
        n = G->n;
        m = G->m;

        /* Permute vertices */
        if (tid == 0) {
#if RANDSRCS
            Srcs = (attr_id_t *) malloc(n*sizeof(attr_id_t));
#endif
#ifdef _OPENMP
            vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
#endif
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
        for (i=0; i<n; i++) {
            omp_init_lock(&vLock[i]);
        }
#endif

        /* Initialize RNG stream */
        stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT);

#if RANDSRCS
#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            Srcs[i] = i;
        }

        /* Random transpositions; omp_test_lock skips contended swaps
           rather than blocking, so the permutation is best-effort. */
#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            j = n * sprng(stream);
            if (i != j) {
#ifdef _OPENMP
                int l1 = omp_test_lock(&vLock[i]);
                if (l1) {
                    int l2 = omp_test_lock(&vLock[j]);
                    if (l2) {
#endif
                        k = Srcs[i];
                        Srcs[i] = Srcs[j];
                        Srcs[j] = k;
#ifdef _OPENMP
                        omp_unset_lock(&vLock[j]);
                    }
                    omp_unset_lock(&vLock[i]);
                }
#endif
            }
        }
#endif

#ifdef _OPENMP
#pragma omp barrier
#endif

        if (tid == 0) {
            MAX_NUM_PHASES = 500;
        }

#ifdef _OPENMP
#pragma omp barrier
#endif

        /* Initialize predecessor lists */
        /* The size of the predecessor list of each vertex is bounded by
           its in-degree. So we first compute the in-degree of every
           vertex */
        if (tid == 0) {
            P = (plist_t *) calloc(n, sizeof(plist_t));
            in_degree = (attr_id_t *) calloc(n+1, sizeof(attr_id_t));
            numEdges = (attr_id_t *) malloc((n+1)*sizeof(attr_id_t));
            pSums = (attr_id_t *) malloc(nthreads*sizeof(attr_id_t));
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
        for (i=0; i<m; i++) {
            v = G->endV[i];
#ifdef _OPENMP
            omp_set_lock(&vLock[v]);
#endif
            in_degree[v]++;
#ifdef _OPENMP
            omp_unset_lock(&vLock[v]);
#endif
        }

        prefix_sums(in_degree, numEdges, pSums, n);

        if (tid == 0) {
            pListMem = (attr_id_t *) malloc(m*sizeof(attr_id_t));
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            P[i].list = pListMem + numEdges[i];
            P[i].degree = in_degree[i];
            P[i].count = 0;
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() -elapsed_time_part;
            fprintf(stderr, "In-degree computation time: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        /* Allocate shared memory */
        if (tid == 0) {
            free(in_degree);
            free(numEdges);
            free(pSums);

            S = (attr_id_t *) malloc(n*sizeof(attr_id_t));
            sig = (double *) malloc(n*sizeof(double));
            d = (attr_id_t *) malloc(n*sizeof(attr_id_t));
            del = (double *) calloc(n, sizeof(double));
            start = (attr_id_t *) malloc(MAX_NUM_PHASES*sizeof(attr_id_t));
            end = (attr_id_t *) malloc(MAX_NUM_PHASES*sizeof(attr_id_t));
            psCount = (attr_id_t *) malloc((nthreads+1)*sizeof(attr_id_t));
        }

        /* local memory for each thread */
        myS_size = (2*n)/nthreads;
        myS = (attr_id_t *) malloc(myS_size*sizeof(attr_id_t));
        num_traversals = 0;
        myCount = 0;

#ifdef _OPENMP
#pragma omp barrier
#endif

#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            d[i] = -1;
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "BC initialization time: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        for (p=0; p<n; p++) {
#if RANDSRCS
            i = Srcs[p];
#else
            i = p;
#endif
            /* skip isolated sources; stop after numV real traversals */
            if (G->numEdges[i+1] - G->numEdges[i] == 0) {
                continue;
            } else {
                num_traversals++;
            }

            if (num_traversals == numV + 1) {
                break;
            }

            if (tid == 0) {
                sig[i] = 1;
                d[i] = 0;
                S[0] = i;
                start[0] = 0;
                end[0] = 1;
            }

            count = 1;
            phase_num = 0;

#ifdef _OPENMP
#pragma omp barrier
#endif

            /* Level-synchronous BFS: one iteration per distance phase */
            while (end[phase_num] - start[phase_num] > 0) {

                myCount = 0;
                start_iter = start[phase_num];
                end_iter = end[phase_num];
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for schedule(dynamic) nowait
#endif
                for (vert = start_iter; vert < end_iter; vert++) {
                    v = S[vert];
                    for (j=G->numEdges[v]; j<G->numEdges[v+1]; j++) {
                        w = G->endV[j];
                        if (v != w) {
#ifdef _OPENMP
                            myLock = omp_test_lock(&vLock[w]);
                            if (myLock) {
#endif
                                /* w found for the first time? */
                                if (d[w] == -1) {
                                    if (myS_size == myCount) {
                                        /* Resize myS */
                                        myS_t = (attr_id_t *)
                                            malloc(2*myS_size*sizeof(attr_id_t));
                                        memcpy(myS_t, myS,
                                               myS_size*sizeof(attr_id_t));
                                        free(myS);
                                        myS = myS_t;
                                        myS_size = 2*myS_size;
                                    }
                                    myS[myCount++] = w;
                                    d[w] = d[v] + 1;
                                    sig[w] = sig[v];
                                    P[w].list[P[w].count++] = v;
                                } else if (d[w] == d[v] + 1) {
                                    sig[w] += sig[v];
                                    P[w].list[P[w].count++] = v;
                                }
#ifdef _OPENMP
                                omp_unset_lock(&vLock[w]);
                            } else {
                                /* test_lock failed: another thread already
                                   discovered w; block and just accumulate */
                                if ((d[w] == -1) || (d[w] == d[v]+ 1)) {
                                    omp_set_lock(&vLock[w]);
                                    sig[w] += sig[v];
                                    P[w].list[P[w].count++] = v;
                                    omp_unset_lock(&vLock[w]);
                                }
                            }
#endif
                        }
                    }
                }

                /* Merge all local stacks for next iteration */
                phase_num++;

                if (tid == 0) {
                    if (phase_num >= MAX_NUM_PHASES) {
                        fprintf(stderr, "Error: Max num phases set to %ld\n",
                                MAX_NUM_PHASES);
                        fprintf(stderr, "Diameter of input network greater than"
                                " this value. Increase MAX_NUM_PHASES"
                                " in vertex_betweenness_centrality_parBFS()\n");
                        exit(-1);
                    }
                }

                psCount[tid+1] = myCount;

#ifdef _OPENMP
#pragma omp barrier
#endif

                if (tid == 0) {
                    start[phase_num] = end[phase_num-1];
                    psCount[0] = start[phase_num];
                    for(k=1; k<=nthreads; k++) {
                        psCount[k] = psCount[k-1] + psCount[k];
                    }
                    end[phase_num] = psCount[nthreads];
                }

#ifdef _OPENMP
#pragma omp barrier
#endif

                /* copy local frontier into its slice of the shared stack */
                k0 = psCount[tid];
                k1 = psCount[tid+1];
                for (k = k0; k < k1; k++) {
                    S[k] = myS[k-k0];
                }

                count = end[phase_num];
            }

            phase_num--;

            /* Dependency accumulation: walk phases in reverse order */
            while (phase_num > 0) {
                start_iter = start[phase_num];
                end_iter = end[phase_num];
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
                for (j=start_iter; j<end_iter; j++) {
                    w = S[j];
                    for (k = 0; k<P[w].count; k++) {
                        v = P[w].list[k];
#ifdef _OPENMP
                        omp_set_lock(&vLock[v]);
#endif
                        del[v] = del[v] + sig[v]*(1+del[w])/sig[w];
#ifdef _OPENMP
                        omp_unset_lock(&vLock[v]);
#endif
                    }
                    BC[w] += del[w];
                }

                phase_num--;

#ifdef _OPENMP
#pragma omp barrier
#endif
            }

            /* reset per-source state for the vertices touched this round */
#ifdef _OPENMP
            chunkSize = n/nthreads;
#pragma omp for schedule(static, chunkSize) nowait
#endif
            for (j=0; j<count; j++) {
                w = S[j];
                d[w] = -1;
                del[w] = 0;
                P[w].count = 0;
            }

#ifdef _OPENMP
#pragma omp barrier
#endif
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "BC computation time: %lf seconds\n",
                    elapsed_time_part);
        }
#endif

#ifdef _OPENMP
#pragma omp barrier
#endif

#ifdef _OPENMP
#pragma omp for
        for (i=0; i<n; i++) {
            omp_destroy_lock(&vLock[i]);
        }
#endif

        free(myS);

        if (tid == 0) {
            free(S);
            free(pListMem);
            free(P);
            free(sig);
            free(d);
            free(del);
#ifdef _OPENMP
            free(vLock);
#endif
            free(start);
            free(end);
            free(psCount);
#ifdef DIAGNOSTIC
            elapsed_time = get_seconds() - elapsed_time;
            /* FIX: was "Time taken: %lf\n seconds" (newline mid-message) */
            fprintf(stderr, "Time taken: %lf seconds\n", elapsed_time);
#endif
#if RANDSRCS
            free(Srcs);
#endif
        }

        free_sprng(stream);
#ifdef _OPENMP
    }
#endif
}
/*
 * Brandes-style betweenness centrality, coarse-grained parallelization:
 * the loop over BFS sources is split across threads, each thread runs a
 * complete sequential BFS with its own S/P/sig/d/del arrays, and only
 * the final BC accumulation is protected by per-vertex locks.
 *
 * G       : input graph (CSR-style: numEdges offsets, endV targets)
 * BC      : output array of n betweenness scores (accumulated in place)
 * numSrcs : number of source vertices to attempt BFS from
 */
void vertex_betweenness_centrality_simple(graph_t* G, double* BC, long numSrcs)
{
    attr_id_t *in_degree, *numEdges, *pSums;
#if RANDSRCS
    attr_id_t* Srcs;          /* randomly permuted source ordering */
#endif
    long num_traversals = 0;
#ifdef _OPENMP
    omp_lock_t* vLock;
    long chunkSize;           /* NOTE(review): declared but unused here */
#endif
#ifdef DIAGNOSTIC
    double elapsed_time;
#endif
    int seed = 2387;

    /* The outer loop is parallelized in this case. Each thread does a BFS
       and the vertex BC values are incremented atomically */
#ifdef _OPENMP
#pragma omp parallel firstprivate(G)
    {
#endif
        attr_id_t *S;         /* stack of vertices in the order of
                                 non-decreasing distance from s. Also used to
                                 implicitly represent the BFS queue */
        plist_t* P;           /* predecessors of a vertex v on shortest paths
                                 from s */
        attr_id_t* pListMem;  /* backing storage for all predecessor lists */
        double* sig;          /* No. of shortest paths */
        attr_id_t* d;         /* Length of the shortest path between every
                                 pair */
        double* del;          /* dependency of vertices */
        attr_id_t *start, *end;  /* per-phase [start,end) ranges into S */
        long MAX_NUM_PHASES;
        long i, j, k, p, count;
        long v, w, vert;
        long numV, n, m, phase_num;
        long tid, nthreads;
        int* stream;          /* per-thread SPRNG stream */
#ifdef DIAGNOSTIC
        double elapsed_time_part;
#endif
#ifdef _OPENMP
        int myLock;           /* NOTE(review): unused in this variant */
        tid = omp_get_thread_num();
        nthreads = omp_get_num_threads();
#else
        tid = 0;
        nthreads = 1;
#endif

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time = get_seconds();
            elapsed_time_part = get_seconds();
        }
#endif

        /* numV: no. of vertices to run BFS from = numSrcs */
        numV = numSrcs;
        n = G->n;
        m = G->m;

        /* Permute vertices */
        if (tid == 0) {
#if RANDSRCS
            Srcs = (attr_id_t *) malloc(n*sizeof(attr_id_t));
#endif
#ifdef _OPENMP
            vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
#endif
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
        for (i=0; i<n; i++) {
            omp_init_lock(&vLock[i]);
        }
#endif

        /* Initialize RNG stream */
        stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT);

#if RANDSRCS
#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            Srcs[i] = i;
        }

        /* Random transpositions; omp_test_lock skips contended swaps
           rather than blocking, so the permutation is best-effort. */
#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            j = n * sprng(stream);
            if (i != j) {
#ifdef _OPENMP
                int l1 = omp_test_lock(&vLock[i]);
                if (l1) {
                    int l2 = omp_test_lock(&vLock[j]);
                    if (l2) {
#endif
                        k = Srcs[i];
                        Srcs[i] = Srcs[j];
                        Srcs[j] = k;
#ifdef _OPENMP
                        omp_unset_lock(&vLock[j]);
                    }
                    omp_unset_lock(&vLock[i]);
                }
#endif
            }
        }
#endif

#ifdef _OPENMP
#pragma omp barrier
#endif

        MAX_NUM_PHASES = 50;

        /* Initialize predecessor lists */
        /* The size of the predecessor list of each vertex is bounded by
           its in-degree. So we first compute the in-degree of every
           vertex */
        if (tid == 0) {
            in_degree = (attr_id_t *) calloc(n+1, sizeof(attr_id_t));
            numEdges = (attr_id_t *) malloc((n+1)*sizeof(attr_id_t));
            pSums = (attr_id_t *) malloc(nthreads*sizeof(attr_id_t));
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
        for (i=0; i<m; i++) {
            v = G->endV[i];
#ifdef _OPENMP
            omp_set_lock(&vLock[v]);
#endif
            in_degree[v]++;
#ifdef _OPENMP
            omp_unset_lock(&vLock[v]);
#endif
        }

        prefix_sums(in_degree, numEdges, pSums, n);

        /* Unlike the parBFS variant, every thread has its OWN predecessor
           lists since each thread runs independent traversals. */
        P = (plist_t *) calloc(n, sizeof(plist_t));
        pListMem = (attr_id_t *) malloc(m*sizeof(attr_id_t));
        for (i=0; i<n; i++) {
            P[i].list = pListMem + numEdges[i];
            P[i].degree = in_degree[i];
            P[i].count = 0;
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() -elapsed_time_part;
            fprintf(stderr, "In-degree computation time: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

#ifdef _OPENMP
#pragma omp barrier
#endif

        /* Allocate shared memory */
        if (tid == 0) {
            free(in_degree);
            free(numEdges);
            free(pSums);
        }

        /* Thread-local BFS workspace */
        S = (attr_id_t *) malloc(n*sizeof(attr_id_t));
        sig = (double *) malloc(n*sizeof(double));
        d = (attr_id_t *) malloc(n*sizeof(attr_id_t));
        del = (double *) calloc(n, sizeof(double));
        start = (attr_id_t *) malloc(MAX_NUM_PHASES*sizeof(attr_id_t));
        end = (attr_id_t *) malloc(MAX_NUM_PHASES*sizeof(attr_id_t));

#ifdef _OPENMP
#pragma omp barrier
#endif

        for (i=0; i<n; i++) {
            d[i] = -1;
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "BC initialization time: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        /* Each thread handles a subset of the sources end-to-end */
#ifdef _OPENMP
#pragma omp for reduction(+:num_traversals)
#endif
        for (p=0; p<numV; p++) {
#if RANDSRCS
            i = Srcs[p];
#else
            i = p;
#endif
            /* skip isolated sources */
            if (G->numEdges[i+1] - G->numEdges[i] == 0) {
                continue;
            } else {
                num_traversals++;
            }

            sig[i] = 1;
            d[i] = 0;
            S[0] = i;
            start[0] = 0;
            end[0] = 1;

            count = 1;
            phase_num = 0;

            /* Level-synchronous BFS, entirely sequential per thread */
            while (end[phase_num] - start[phase_num] > 0) {

                for (vert = start[phase_num]; vert < end[phase_num]; vert++) {
                    v = S[vert];
                    for (j=G->numEdges[v]; j<G->numEdges[v+1]; j++) {
                        w = G->endV[j];
                        if (v != w) {
                            /* w found for the first time? */
                            if (d[w] == -1) {
                                S[count++] = w;
                                d[w] = d[v] + 1;
                                sig[w] = sig[v];
                                P[w].list[P[w].count++] = v;
                            } else if (d[w] == d[v] + 1) {
                                sig[w] += sig[v];
                                P[w].list[P[w].count++] = v;
                            }
                        }
                    }
                }

                phase_num++;
                start[phase_num] = end[phase_num-1];
                end[phase_num] = count;
            }

            phase_num--;

            /* Dependency accumulation in reverse phase order; only the
               shared BC update needs the per-vertex lock */
            while (phase_num > 0) {
                for (j=start[phase_num]; j<end[phase_num]; j++) {
                    w = S[j];
                    for (k = 0; k<P[w].count; k++) {
                        v = P[w].list[k];
                        del[v] = del[v] + sig[v]*(1+del[w])/sig[w];
                    }
#ifdef _OPENMP
                    omp_set_lock(&vLock[w]);
                    BC[w] += del[w];
                    omp_unset_lock(&vLock[w]);
#else
                    BC[w] += del[w];
#endif
                }
                phase_num--;
            }

            /* reset per-source state for the vertices touched this round */
            for (j=0; j<count; j++) {
                w = S[j];
                d[w] = -1;
                del[w] = 0;
                P[w].count = 0;
            }
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "BC computation time: %lf seconds\n",
                    elapsed_time_part);
        }
#endif

#ifdef _OPENMP
#pragma omp barrier
#endif

#ifdef _OPENMP
#pragma omp for
        for (i=0; i<n; i++) {
            omp_destroy_lock(&vLock[i]);
        }
#endif

        /* thread-local workspace */
        free(S);
        free(pListMem);
        free(P);
        free(sig);
        free(d);
        free(del);
        free(start);
        free(end);

        if (tid == 0) {
#ifdef _OPENMP
            free(vLock);
#endif
#if RANDSRCS
            free(Srcs);
#endif
#ifdef DIAGNOSTIC
            elapsed_time = get_seconds() - elapsed_time;
            fprintf(stderr, "Total time taken: %lf seconds\n", elapsed_time);
#endif
        }

        free_sprng(stream);
#ifdef _OPENMP
#pragma omp barrier
    }
#endif
}
/*
 * Betweenness centrality kernel (SSCA#2 Kernel 4 style): parallel
 * per-source BFS over 2^K4approx randomly chosen sources, with an
 * optional edge filter. Sources are a random permutation of all vertex
 * IDs; per-phase frontiers are kept in thread-local stacks (myS) and
 * merged into the shared stack S via a prefix sum over psCount.
 *
 * G      : input graph (CSR-style: numEdges offsets, endV, weight)
 * BC     : output array of G->n centrality scores (accumulated in place)
 * filter : if 1, edges whose weight has its low 3 bits zero are skipped
 * returns: elapsed wall-clock time of the timed section (seconds)
 *
 * Under VERIFYK4 the result is checked against a closed-form value for
 * the synthetic torus input.
 */
double betweennessCentrality(graph* G, DOUBLE_T* BC, int filter)
{
    VERT_T *S;         /* stack of vertices in the order of non-decreasing
                          distance from s. Also used to implicitly
                          represent the BFS queue */
    plist* P;          /* predecessors of a vertex v on shortest paths from
                          s */
    DOUBLE_T* sig;     /* No. of shortest paths */
    LONG_T* d;         /* Length of the shortest path between every pair */
    DOUBLE_T* del;     /* dependency of vertices */
    LONG_T *in_degree, *numEdges, *pSums;
    LONG_T *pListMem;
    LONG_T* Srcs;      /* randomly permuted source ordering */
    LONG_T *start, *end;  /* per-phase [start,end) ranges into S */
    LONG_T MAX_NUM_PHASES;
    LONG_T *psCount;   /* per-thread frontier counts for prefix sum */
#ifdef _OPENMP
    omp_lock_t* vLock;
    LONG_T chunkSize;
#endif
    int seed = 2387;
    double elapsed_time;

#ifdef _OPENMP
#pragma omp parallel
    {
#endif
        VERT_T *myS, *myS_t;  /* thread-local frontier stack (+resize tmp) */
        LONG_T myS_size;
        LONG_T i, j, k, p, count, myCount;
        LONG_T v, w, vert;
        LONG_T numV, num_traversals, n, m, phase_num;
        LONG_T tid, nthreads;
        int* stream;          /* per-thread SPRNG stream */
#ifdef DIAGNOSTIC
        double elapsed_time_part;
#endif
#ifdef _OPENMP
        int myLock;
        tid = omp_get_thread_num();
        nthreads = omp_get_num_threads();
#else
        tid = 0;
        nthreads = 1;
#endif

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds();
        }
#endif

        /* numV: no. of vertices to run BFS from = 2^K4approx */
        numV = 1<<K4approx;
        n = G->n;
        m = G->m;

        /* Permute vertices */
        if (tid == 0) {
            Srcs = (LONG_T *) malloc(n*sizeof(LONG_T));
#ifdef _OPENMP
            vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
#endif
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
        for (i=0; i<n; i++) {
            omp_init_lock(&vLock[i]);
        }
#endif

        /* Initialize RNG stream */
        stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT);

#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            Srcs[i] = i;
        }

        /* Random transpositions; omp_test_lock skips contended swaps
           rather than blocking, so the permutation is best-effort. */
#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            j = n*sprng(stream);
            if (i != j) {
#ifdef _OPENMP
                int l1 = omp_test_lock(&vLock[i]);
                if (l1) {
                    int l2 = omp_test_lock(&vLock[j]);
                    if (l2) {
#endif
                        k = Srcs[i];
                        Srcs[i] = Srcs[j];
                        Srcs[j] = k;
#ifdef _OPENMP
                        omp_unset_lock(&vLock[j]);
                    }
                    omp_unset_lock(&vLock[i]);
                }
#endif
            }
        }

#ifdef _OPENMP
#pragma omp barrier
#endif

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() -elapsed_time_part;
            fprintf(stderr, "Vertex ID permutation time: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        /* Start timing code from here */
        if (tid == 0) {
            elapsed_time = get_seconds();
#ifdef VERIFYK4
            /* Exact BC is validated, so allow many more phases */
            MAX_NUM_PHASES = 2*sqrt(n);
#else
            MAX_NUM_PHASES = 50;
#endif
        }

#ifdef _OPENMP
#pragma omp barrier
#endif

        /* Initialize predecessor lists */
        /* The size of the predecessor list of each vertex is bounded by
           its in-degree. So we first compute the in-degree of every
           vertex */
        if (tid == 0) {
            P = (plist *) calloc(n, sizeof(plist));
            in_degree = (LONG_T *) calloc(n+1, sizeof(LONG_T));
            numEdges = (LONG_T *) malloc((n+1)*sizeof(LONG_T));
            pSums = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
        for (i=0; i<m; i++) {
            v = G->endV[i];
#ifdef _OPENMP
            omp_set_lock(&vLock[v]);
#endif
            in_degree[v]++;
#ifdef _OPENMP
            omp_unset_lock(&vLock[v]);
#endif
        }

        prefix_sums(in_degree, numEdges, pSums, n);

        if (tid == 0) {
            pListMem = (LONG_T *) malloc(m*sizeof(LONG_T));
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            P[i].list = pListMem + numEdges[i];
            P[i].degree = in_degree[i];
            P[i].count = 0;
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "In-degree computation time: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        /* Allocate shared memory */
        if (tid == 0) {
            free(in_degree);
            free(numEdges);
            free(pSums);

            S = (VERT_T *) malloc(n*sizeof(VERT_T));
            sig = (DOUBLE_T *) malloc(n*sizeof(DOUBLE_T));
            d = (LONG_T *) malloc(n*sizeof(LONG_T));
            del = (DOUBLE_T *) calloc(n, sizeof(DOUBLE_T));
            start = (LONG_T *) malloc(MAX_NUM_PHASES*sizeof(LONG_T));
            end = (LONG_T *) malloc(MAX_NUM_PHASES*sizeof(LONG_T));
            psCount = (LONG_T *) malloc((nthreads+1)*sizeof(LONG_T));
        }

        /* local memory for each thread */
        myS_size = (2*n)/nthreads;
        myS = (LONG_T *) malloc(myS_size*sizeof(LONG_T));
        num_traversals = 0;
        myCount = 0;

#ifdef _OPENMP
#pragma omp barrier
#endif

#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            d[i] = -1;
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() -elapsed_time_part;
            fprintf(stderr, "BC initialization time: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        for (p=0; p<n; p++) {
            i = Srcs[p];
            //printf ("%d \n", i);
            // i = p;
            /* skip isolated sources; stop after numV real traversals */
            if (G->numEdges[i+1] - G->numEdges[i] == 0) {
                continue;
            } else {
                num_traversals++;
            }

            if (num_traversals == numV + 1) {
                break;
            }

            if (tid == 0) {
                sig[i] = 1;
                d[i] = 0;
                S[0] = i;
                start[0] = 0;
                end[0] = 1;
            }

            count = 1;
            phase_num = 0;

#ifdef _OPENMP
#pragma omp barrier
#endif

            /* Level-synchronous BFS: one iteration per distance phase */
            while (end[phase_num] - start[phase_num] > 0) {

                myCount = 0;
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for schedule(dynamic)
#endif
                for (vert = start[phase_num]; vert < end[phase_num]; vert++) {
                    v = S[vert];
                    for (j=G->numEdges[v]; j<G->numEdges[v+1]; j++) {
                        /* optionally ignore edges whose weight has the low
                           3 bits clear (multiple-of-8 weights) */
                        if ((G->weight[j] & 7) == 0 && filter==1)
                            continue;
                        w = G->endV[j];
                        if (v != w) {
#ifdef _OPENMP
                            myLock = omp_test_lock(&vLock[w]);
                            if (myLock) {
#endif
                                /* w found for the first time? */
                                if (d[w] == -1) {
                                    if (myS_size == myCount) {
                                        /* Resize myS */
                                        myS_t = (LONG_T *)
                                            malloc(2*myS_size*sizeof(VERT_T));
                                        memcpy(myS_t, myS,
                                               myS_size*sizeof(VERT_T));
                                        free(myS);
                                        myS = myS_t;
                                        myS_size = 2*myS_size;
                                    }
                                    myS[myCount++] = w;
                                    d[w] = d[v] + 1;
                                    sig[w] = sig[v];
                                    P[w].list[P[w].count++] = v;
                                } else if (d[w] == d[v] + 1) {
                                    sig[w] += sig[v];
                                    P[w].list[P[w].count++] = v;
                                }
#ifdef _OPENMP
                                omp_unset_lock(&vLock[w]);
                            } else {
                                /* test_lock failed: another thread already
                                   discovered w; block and just accumulate */
                                if ((d[w] == -1) || (d[w] == d[v]+ 1)) {
                                    omp_set_lock(&vLock[w]);
                                    sig[w] += sig[v];
                                    P[w].list[P[w].count++] = v;
                                    omp_unset_lock(&vLock[w]);
                                }
                            }
#endif
                        }
                    }
                }

                /* Merge all local stacks for next iteration */
                phase_num++;

                psCount[tid+1] = myCount;

#ifdef _OPENMP
#pragma omp barrier
#endif

                if (tid == 0) {
                    start[phase_num] = end[phase_num-1];
                    psCount[0] = start[phase_num];
                    for(k=1; k<=nthreads; k++) {
                        psCount[k] = psCount[k-1] + psCount[k];
                    }
                    end[phase_num] = psCount[nthreads];
                }

#ifdef _OPENMP
#pragma omp barrier
#endif

                /* copy local frontier into its slice of the shared stack */
                for (k = psCount[tid]; k < psCount[tid+1]; k++) {
                    S[k] = myS[k-psCount[tid]];
                }

#ifdef _OPENMP
#pragma omp barrier
#endif
                count = end[phase_num];
            }

            phase_num--;

#ifdef _OPENMP
#pragma omp barrier
#endif
            //printf ("%d\n", phase_num);

            /* Dependency accumulation: walk phases in reverse order */
            while (phase_num > 0) {
#ifdef _OPENMP
#pragma omp for
#endif
                for (j=start[phase_num]; j<end[phase_num]; j++) {
                    w = S[j];
                    for (k = 0; k<P[w].count; k++) {
                        v = P[w].list[k];
#ifdef _OPENMP
                        omp_set_lock(&vLock[v]);
#endif
                        del[v] = del[v] + sig[v]*(1+del[w])/sig[w];
#ifdef _OPENMP
                        omp_unset_lock(&vLock[v]);
#endif
                    }
                    BC[w] += del[w];
                }

                phase_num--;

#ifdef _OPENMP
#pragma omp barrier
#endif
            }

            /* reset per-source state for the vertices touched this round */
#ifdef _OPENMP
            chunkSize = n/nthreads;
#pragma omp for schedule(static, chunkSize)
#endif
            for (j=0; j<count; j++) {
                w = S[j];
                //fprintf (stderr, "w: %d\n", w);
                d[w] = -1;
                del[w] = 0;
                P[w].count = 0;
            }

#ifdef _OPENMP
#pragma omp barrier
#endif
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() -elapsed_time_part;
            fprintf(stderr, "BC computation time: %lf seconds\n",
                    elapsed_time_part);
        }
#endif

#ifdef _OPENMP
#pragma omp for
        for (i=0; i<n; i++) {
            omp_destroy_lock(&vLock[i]);
        }
#endif

        free(myS);

        if (tid == 0) {
            free(S);
            free(pListMem);
            free(P);
            free(sig);
            free(d);
            free(del);
#ifdef _OPENMP
            free(vLock);
#endif
            free(start);
            free(end);
            free(psCount);
            elapsed_time = get_seconds() - elapsed_time;
            free(Srcs);
        }

        free_sprng(stream);
#ifdef _OPENMP
    }
#endif

    /* Verification */
#ifdef VERIFYK4
    /* Closed-form expected BC value for the synthetic input, by SCALE
       parity (NOTE(review): formula assumed correct for the benchmark's
       generator — confirm against the benchmark spec) */
    double BCval;
    if (SCALE % 2 == 0) {
        BCval = 0.5*pow(2, 3*SCALE/2)-pow(2, SCALE)+1.0;
    } else {
        BCval = 0.75*pow(2, (3*SCALE-1)/2)-pow(2, SCALE)+1.0;
    }
    int failed = 0;
    for (int i=0; i<G->n; i++) {
        if (round(BC[i] - BCval) != 0) {
            failed = 1;
            break;
        }
    }
    if (failed) {
        fprintf(stderr, "Kernel 4 failed validation!\n");
    } else {
        fprintf(stderr, "Kernel 4 validation successful!\n");
    }
#endif

    for (int i = 0; i < G->n; i++)
        printf ("BC: %d %f\n",i, BC[i]);

    return elapsed_time;
}
/*
 * Scalable synthetic graph generator (R-MAT style recursive matrix
 * partitioning): produces M directed edge tuples over N vertices with
 * integer weights in [1, MaxIntWeight], applies a random vertex-ID
 * permutation, and stores the tuples in SDGdata.
 *
 * SDGdata : output; n, m, startVertex, endVertex, weight are filled in
 *           (arrays are heap-allocated here; ownership passes to caller)
 * returns : elapsed wall-clock generation time in seconds
 *
 * N, M, A, B, C, D, SCALE, MaxIntWeight are compile-time/global
 * parameters defined elsewhere in the project.
 */
double genScalData(graphSDG* SDGdata)
{
    VERT_T *src, *dest;
    WEIGHT_T *wt;
    LONG_T n, m;
    VERT_T *permV;      /* random vertex-ID permutation */
#ifdef _OPENMP
    omp_lock_t* vLock;
#endif
    double elapsed_time;
    int seed;

    n = N;
    m = M;

    /* allocate memory for edge tuples */
    src = (VERT_T *) malloc(M*sizeof(VERT_T));
    dest = (VERT_T *) malloc(M*sizeof(VERT_T));
    assert(src != NULL);
    assert(dest != NULL);

    /* sprng seed */
    seed = 2387;

    elapsed_time = get_seconds();

#ifdef _OPENMP
#if PARALLEL_SDG
    omp_set_num_threads(omp_get_max_threads());
    // omp_set_num_threads(16);
#else
    omp_set_num_threads(1);
#endif
#endif

#ifdef _OPENMP
#pragma omp parallel
    {
#endif
        int tid, nthreads;
#ifdef DIAGNOSTIC
        double elapsed_time_part;
#endif
        int *stream;    /* per-thread SPRNG stream */
        LONG_T i, j, u, v, step;
        DOUBLE_T av, bv, cv, dv, p, S, var;
        LONG_T tmpVal;

#ifdef _OPENMP
        nthreads = omp_get_num_threads();
        tid = omp_get_thread_num();
#else
        nthreads = 1;
        tid = 0;
#endif

        /* Initialize RNG stream */
        stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT);

#ifdef DIAGNOSTIC
        if (tid == 0)
            elapsed_time_part = get_seconds();
#endif

        /* Start adding edges */
#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<m; i++) {
            /* Descend SCALE levels of the 2x2 recursive partition,
               choosing a quadrant with probabilities (av,bv,cv,dv) */
            u = 1;
            v = 1;
            step = n/2;
            av = A;
            bv = B;
            cv = C;
            dv = D;

            p = sprng(stream);
            if (p < av) {
                /* Do nothing */
            } else if ((p >= av) && (p < av+bv)) {
                v += step;
            } else if ((p >= av+bv) && (p < av+bv+cv)) {
                u += step;
            } else {
                u += step;
                v += step;
            }

            for (j=1; j<SCALE; j++) {
                step = step/2;

                /* Vary a,b,c,d by up to 10% */
                var = 0.1;
                av *= 0.95 + var * sprng(stream);
                bv *= 0.95 + var * sprng(stream);
                cv *= 0.95 + var * sprng(stream);
                dv *= 0.95 + var * sprng(stream);

                S = av + bv + cv + dv;
                av = av/S;
                bv = bv/S;
                cv = cv/S;
                dv = dv/S;

                /* Choose partition */
                p = sprng(stream);
                if (p < av) {
                    /* Do nothing */
                } else if ((p >= av) && (p < av+bv)) {
                    v += step;
                } else if ((p >= av+bv) && (p < av+bv+cv)) {
                    u += step;
                } else {
                    u += step;
                    v += step;
                }
            }

            /* u,v were built 1-based; store 0-based IDs */
            src[i] = u-1;
            dest[i] = v-1;
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() -elapsed_time_part;
            fprintf(stderr, "Tuple generation time: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        /* Generate vertex ID permutations */
        if (tid == 0) {
            permV = (VERT_T *) malloc(N*sizeof(VERT_T));
            assert(permV != NULL);
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            permV[i] = i;
        }

#ifdef _OPENMP
        if (tid == 0) {
            vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
            assert(vLock != NULL);
        }
#pragma omp barrier
#pragma omp for
        for (i=0; i<n; i++) {
            omp_init_lock(&vLock[i]);
        }
#endif

        /* Random transpositions; omp_test_lock skips contended swaps
           rather than blocking, so the permutation is best-effort. */
#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            j = n*sprng(stream);
            if (i != j) {
#ifdef _OPENMP
                int l1 = omp_test_lock(&vLock[i]);
                if (l1) {
                    int l2 = omp_test_lock(&vLock[j]);
                    if (l2) {
#endif
                        tmpVal = permV[i];
                        permV[i] = permV[j];
                        permV[j] = tmpVal;
#ifdef _OPENMP
                        omp_unset_lock(&vLock[j]);
                    }
                    omp_unset_lock(&vLock[i]);
                }
#endif
            }
        }

#ifdef _OPENMP
#pragma omp for
        for (i=0; i<n; i++) {
            omp_destroy_lock(&vLock[i]);
        }
#pragma omp barrier
        if (tid == 0) {
            free(vLock);
        }
#endif

        /* Relabel endpoints through the permutation */
#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<m; i++) {
            src[i] = permV[src[i]];
            dest[i] = permV[dest[i]];
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "Permuting vertex IDs: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        if (tid == 0) {
            free(permV);
        }

        /* Generate edge weights */
        if (tid == 0) {
            wt = (WEIGHT_T *) malloc(M*sizeof(WEIGHT_T));
            assert(wt != NULL);
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
        for (i=0; i<m; i++) {
            wt[i] = 1 + MaxIntWeight * sprng(stream);
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "Generating edge weights: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        /* hand results (and array ownership) to the caller */
        SDGdata->n = n;
        SDGdata->m = m;
        SDGdata->startVertex = src;
        SDGdata->endVertex = dest;
        SDGdata->weight = wt;

        free_sprng(stream);
#ifdef _OPENMP
    }
#endif

    elapsed_time = get_seconds() - elapsed_time;
    return elapsed_time;
}