// Fuzz-style regression test: fill a packet with random bytes and verify
// that the legacy RAW_PACKET_OLD and the current RAW_PACKET implementations
// agree on every header accessor, and that two identically-filled new
// packets compare equal in both directions.
void testobject::test<2>()
{
    srand(time(NULL));
    for (size_t iteration = 0; iteration < ITERATIONS; ++iteration) {
        RAW_PACKET_OLD oldPacket;
        RAW_PACKET newPacket;
        RAW_PACKET twinPacket;

        // One shared 68-byte random payload for all three packets.
        uint8_t randomBytes[68];
        genVector(randomBytes);
        memcpy(oldPacket.pckt, randomBytes, 68);
        memcpy(newPacket.rawPacket.pckt, randomBytes, 68);
        memcpy(twinPacket.rawPacket.pckt, randomBytes, 68);

        // Old vs. new implementation must decode identically.
        ensure_equals("IP versions", oldPacket.GetIPVersion(), newPacket.GetIPVersion());
        ensure_equals("IP headers length", oldPacket.GetHeaderLen(), newPacket.GetHeaderLen());
        ensure_equals("Protocols", oldPacket.GetProto(), newPacket.GetProto());
        ensure_equals("Source IPs", oldPacket.GetSrcIP(), newPacket.GetSrcIP());
        ensure_equals("Destination IPs", oldPacket.GetDstIP(), newPacket.GetDstIP());
        ensure_equals("Source ports", oldPacket.GetSrcPort(), newPacket.GetSrcPort());
        ensure_equals("Destination ports", oldPacket.GetDstPort(), newPacket.GetDstPort());

        // Equality of identically-built packets must be symmetric.
        ensure_equals("Self equallity", newPacket, twinPacket);
        ensure_equals("Reverse self equallity", twinPacket, newPacket);
    }
}
int main( int argc, const char** argv ) { // Info for user std::cout << "GenDataMP5: Generates data files to use as input for assignment MP5.\n"; std::cout << "Invoke as: GenDataMP5 [VectorLength]\n\n"; // Read input if ( 2 != argc ) { std::cout << "Error! Wrong number of arguments to program.\n"; return 0; } // Create vectors const int vecLen = atoi( argv[1] ); FloatVec vecA; FloatVec vecB; genVector( vecA, vecLen ); scanVector( vecA, vecB ); // Write to files writeVector( vecA, "vecA.txt" ); writeVector( vecB, "vecB.txt" ); return 0; }
// For n = 2^4 .. 2^14, prints the difference between the computed partial
// sum of sum(1/k^2) and the analytic limit pi^2/6, plus wall-clock time
// per problem size.
void printDiff()
{
    const double limit = (M_PI*M_PI)/6;
    for (int k = 4; k < 15; ++k) {
        double start = WallTime();
        double partial = doSum(genVector(pow(2, k)));
        printf("Diff (n=%f) = %f,", pow(2, k), partial - limit);
        printf(" Elapsed: %fs\n", WallTime() - start);
    }
}
int main() { srand(time(NULL)); vector<int> v; genVector(v,2); sort(v.begin(), v.end()); printVector(v); vector<string> ans = summaryRanges(v); printVector(ans); return 0; }
int main(int argc, char** argv) { int size, rank; #ifdef HAVE_MPI MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); #endif if (!(size & (size-1))==0) { printf("Number of processes must be power of two"); #ifdef HAVE_MPI MPI_Finalize(); #endif return 1; } double time = WallTime(); double Sn=(M_PI*M_PI)/6; double sum=0; for (int i = 4; i <15 ; ++i) { int n= pow(2, i); int *startIndex, *len; splitVector(n, size, &len, &startIndex); Vector vec = genVector(startIndex[rank],startIndex[rank]+len[rank]); sum = doSum(vec); #ifdef HAVE_MPI double s2=sum; MPI_Reduce(&s2, &sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); #endif if (rank == 0) { printf("Diff (n=%d) = %f,",n, sum-Sn); printf(" Elapsed: %fs\n", WallTime()-time); } } #ifdef HAVE_MPI MPI_Finalize(); #endif return 0; }
int main(int argc, char* argv[]) { double *A, *B, *b, *y; int n; int my_rank, p; int i; /* Obtain number of rows and columns. We do not check for eroneous input. */ n = atoi(argv[1]); MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &p); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); /* Find how many rows per process. */ int *rows; rows = (int*)Malloc(p, sizeof(int), MPI_COMM_WORLD, my_rank); calcNumsPerProcess(n, p, rows); /* Allocate memory. */ b = Malloc(n, sizeof(double), MPI_COMM_WORLD, my_rank); if (my_rank == 0) { A = (double*)Malloc(n*n, sizeof(double), MPI_COMM_WORLD, my_rank); y = (double*)Malloc(n, sizeof(double), MPI_COMM_WORLD, my_rank); } B = (double*)Malloc(rows[my_rank]*n, sizeof(double), MPI_COMM_WORLD, my_rank); /* Generate matrix and vector */ if (my_rank == 0) { genMatrix(n, n, A); genVector(n, b); } /* Distribute A */ int *displs; int *sendcounts; if (my_rank == 0) { displs = malloc(sizeof(int)*p); sendcounts = malloc(sizeof(int)*p); for (i=0; i<p; i++) sendcounts[i] = rows[i]*n; displs[0] = 0; for (i=1; i<p; i++) displs[i] = displs[i-1] + sendcounts[i-1]; } MPI_Scatterv(A, sendcounts, displs, MPI_DOUBLE, B, rows[my_rank]*n, MPI_DOUBLE, 0, MPI_COMM_WORLD); /* Distribute b */ MPI_Bcast(b, n, MPI_DOUBLE, 0, MPI_COMM_WORLD); double time = MPI_Wtime(); parallelMatrixTimesVector(rows[my_rank], n, B, b, y, 0, my_rank, p, MPI_COMM_WORLD); time = MPI_Wtime()-time; /* Collect the max time from all processes. */ double timerecv; MPI_Reduce(&time,&timerecv, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (my_rank==0) { printf("%d %d % .2e\n", p, n, timerecv); } if (my_rank==0){ free(sendcounts); free(displs); free(y); } free(A); free(b); free(rows); MPI_Finalize(); return 0; }
int main(int argc, char* argv[]) { double *A, *B, *b, *y; int num_rows, num_cols; int my_rank, p; int i; /* Obtain number of rows and columns. We do not check for eroneous input. */ num_rows = atoi(argv[1]); num_cols= atoi(argv[2]); MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &p); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); /* number of rows on my_rank */ int local_num_rows = NUM_ROWS(my_rank, p, num_rows); /* Allocate memory */ b = Malloc(num_cols, sizeof(double), MPI_COMM_WORLD, my_rank); if (my_rank == 0) { A = (double*)Malloc(num_rows*num_cols, sizeof(double), MPI_COMM_WORLD, my_rank); y = (double*)Malloc(num_rows, sizeof(double), MPI_COMM_WORLD, my_rank); } B = (double*)Malloc(local_num_rows*num_cols, sizeof(double), MPI_COMM_WORLD, my_rank); /* Generate matrix and vector */ if (my_rank == 0) { genMatrix(num_rows, num_cols, A); genVector(num_cols, b); } /* Distribute A */ int *displs; int *sendcounts; if (my_rank == 0) { displs = malloc(sizeof(int)*p); sendcounts = malloc(sizeof(int)*p); sendcounts[0] = NUM_ROWS(0,p,num_rows)*num_cols; displs[0] = 0; for (i=1; i<p; i++) { displs[i] = displs[i-1] + sendcounts[i-1]; sendcounts[i] = NUM_ROWS(i,p,num_rows)*num_cols; } } MPI_Scatterv(A, sendcounts, displs, MPI_DOUBLE, B, local_num_rows*num_cols, MPI_DOUBLE, 0, MPI_COMM_WORLD); /* Distribute b */ MPI_Bcast(b, num_cols, MPI_DOUBLE, 0, MPI_COMM_WORLD); /* Multiply */ double time = MPI_Wtime(); parallelMatrixTimesVector(local_num_rows, num_cols, B, b, y, 0, my_rank, p, MPI_COMM_WORLD); time = MPI_Wtime()-time; /* Collect the max time from all processes. */ double timerecv; MPI_Reduce(&time,&timerecv, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (my_rank==0) printf("Computed with p = %d, m = %d, n = %d in % .2e seconds\n", p, num_rows, num_cols, timerecv); if (my_rank == 0) getResult(num_rows, num_cols, A, b, y); if (my_rank==0){ free(sendcounts); free(displs); free(y); free(A); free(b); } free(B); MPI_Finalize(); return 0; }
int main(int argc, char* argv[]) { double *A, *b, *y; int m, n; int my_rank, p; int i, dest, source; int offset; MPI_Status status; m = atoi(argv[1]); n = atoi(argv[2]); MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &p); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Find how many rows per process. int *rows; rows = (int*)malloc(sizeof(int)*p); for (i=0; i < p; i++) rows[i] = m/p; for (i=0; i < m % p; i++) rows[i]++; // Allocate memory. b = malloc( n*sizeof(double)); if (my_rank == 0) { A = (double*)malloc(m * n * sizeof(double)); y = (double*)malloc(m * sizeof(double)); } else { A = (double*)malloc(rows[my_rank] * n * sizeof(double)); y = (double*)malloc(rows[my_rank] * sizeof(double)); } // Generate matrix and vector if (my_rank == 0) { genMatrix(m, n, A); genVector(n, b); } // PUT THIS INTO SEPARATE FUNCTION parallelMatrixVectorProduct and in a file parallmatvec.c int tag = 0; if (my_rank ==0) { int offset = 0; for (dest = 1; dest < p; dest++) { offset += rows[dest-1]; MPI_Send(A + offset*n, rows[dest]*n, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD); } } else MPI_Recv(A, rows[my_rank]*n, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD, &status); MPI_Bcast(b, n, MPI_DOUBLE, 0, MPI_COMM_WORLD); compDotProduct(rows[my_rank], n, A, b, y); // Get the data if (my_rank !=0) MPI_Send(y, rows[my_rank], MPI_DOUBLE, 0, tag, MPI_COMM_WORLD); else { offset = 0; for (source = 1; source < p; source++) { offset += rows[source-1]; MPI_Recv(y+offset, rows[source], MPI_DOUBLE, source, tag, MPI_COMM_WORLD, &status); } } // END OF parallMatrixVectorProduct if (my_rank == 0) { getResult(m, n, A, b, y); } free(A); free(y); free(b); free(rows); MPI_Finalize(); return 0; }
/*
 * Master/worker y = A*b over MPI: rank 0 generates the matrix and vector,
 * distributes work via split_send(), computes its own block with calc_res(),
 * and collects the partial results; worker ranks (< n) receive their rows,
 * compute, and send their partial result back.
 *
 * NOTE(review): nothing allocated here (b, rowResult, rowofA, dest, numjobs)
 * is ever freed — all heap memory leaks until MPI_Finalize/exit.
 * NOTE(review): MPI_Bcast(b, ...) is called only on the worker branch; this
 * only works if split_send() performs the matching broadcast on rank 0 —
 * verify, otherwise the collective deadlocks.
 * NOTE(review): b is allocated and broadcast with length m, while the result
 * has length n — presumably A is stored so each "row" has m entries; confirm
 * against genMatrix/calc_res.
 */
int main(int argc, char* argv[]) {
    int my_rank;        /* rank of process */
    int p;              /* number of processes */
    int source=0;       /* rank of sender */
    int destination;
    int tag = 0;        /* tag for messages */
    MPI_Status status;  /* status for receive */
    int rowsPerProcess; /* rows this rank is responsible for */
    int processesInUse; /* number of ranks that actually get work */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);

    /* Problem size from the command line (not validated). */
    int m = atoi(argv[1]);
    int n = atoi(argv[2]);

    /* Allocate b (length m on every rank). */
    double *b = (double *)malloc(m*sizeof(double));

    /* Assign number of rows this process is responsible for:
       n/p each, with the first n%p ranks taking one extra. */
    if (my_rank > n%p - 1)
        rowsPerProcess = n/p;
    else
        rowsPerProcess = n/p+1;

    /* Result buffer: rank 0 holds the full result, workers their slice. */
    double *rowResult;
    if (my_rank == 0)
        rowResult = (double *) malloc(n*sizeof(double));
    else
        rowResult = (double *) malloc(rowsPerProcess*sizeof(double));

    /* Clear memory contents of the result vector.  The code walks the
       pointer forward while zeroing and then rewinds it to the start. */
    if(my_rank ==0){
        for ( int i = 0; i < n; i ++){
            *rowResult = 0;
            rowResult++;
        }
        rowResult = rowResult - n;
    }
    else{
        for ( int i = 0; i < rowsPerProcess; i ++){
            *rowResult = 0;
            rowResult++;
        }
        rowResult = rowResult - rowsPerProcess;
    }

    /* If there are more processes than rows, only n of them get work. */
    if (p>n){
        processesInUse = n;
    }
    else
        processesInUse = p;

    /* Allocate storage for the local rows of A
       (full n*m matrix on rank 0, rowsPerProcess*m on workers). */
    double *rowofA;
    if (my_rank == 0){
        rowofA = (double *) malloc(n*m*sizeof(double));
    }
    else{
        rowofA = (double *) malloc(rowsPerProcess*m*sizeof(double));
    }

    /* ---- rank 0: master path ---- */
    if (my_rank == 0){
        int *dest = (int *) malloc((processesInUse)*sizeof(int));
        double *totalResult; // = (double *)malloc(n*sizeof(double));
        int *numjobs = (int *)malloc(processesInUse*sizeof(int));

        /* Build the destination list 0..processesInUse-1
           (pointer walked forward, then rewound). */
        for (int i = 0; i < processesInUse; i++){
            *dest = i;
            //printf("dest:%d\n", *dest);
            dest = dest + 1;
        }
        dest = dest - (processesInUse);

        /* numjobs[i] = rows assigned to rank i (same split rule as above). */
        for (int i = 0; i < processesInUse; i++){
            if ( *dest > n%processesInUse - 1){
                *numjobs = n/processesInUse;
            }
            else{
                *numjobs = n/processesInUse+1;
            }
            dest++;
            numjobs++;
        }
        dest = dest - processesInUse;
        numjobs = numjobs - (processesInUse);

        /* Generate the matrix. */
        genMatrix(m,n, rowofA);

        /* Generate the vector b. */
        genVector(m, b);

        /* Split and send A (and presumably broadcast b) to the workers. */
        split_send(rowofA, b, rowofA, processesInUse, m, n, numjobs, my_rank, dest, tag);

        /* Rank 0's own share: result[] = matrix rows * vector. */
        calc_res(rowofA, b, m, n, processesInUse, rowsPerProcess, rowResult);
        totalResult = rowResult;                 /* remember start of full result */
        rowResult = rowResult + rowsPerProcess;  /* next free slot */
        numjobs++;                               /* skip own entry; start at rank 1 */

        /* Receive partial results from the workers, appending in rank order. */
        for (int i = 1; i < processesInUse; i ++){
            MPI_Recv(rowResult, *numjobs, MPI_DOUBLE, i, tag, MPI_COMM_WORLD, &status);
            //printf("0 Received from %f from %d\n", *rowResult, i);
            rowResult = rowResult + *numjobs;
            numjobs++;
        }
        rowResult = totalResult;

        /* Print the assembled result vector. */
        printf("Total Result: [ ");
        for (int i = 0; i < n; i++){
            printf("%f ", *totalResult);
            totalResult++;
        }
        printf("]\n");
        totalResult = rowResult;

        /* Validate against the serial computation. */
        getResult(m,n, rowofA,b,totalResult);
    }
    /* ---- workers: only ranks below n have rows to process ---- */
    else if (my_rank < n){
        /* Receive this rank's rows of A from the master. */
        MPI_Recv(rowofA, m*rowsPerProcess, MPI_DOUBLE, source, tag, MPI_COMM_WORLD, &status);
        /* Receive the broadcast of b (root side must be inside split_send). */
        MPI_Bcast(b, m, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        /* Local product: result[] = matrix rows * vector. */
        calc_res(rowofA, b, m, n,processesInUse,rowsPerProcess, rowResult);
        /* Send the partial result back to rank 0. */
        destination = 0;
        MPI_Send(rowResult, rowsPerProcess, MPI_DOUBLE, destination, tag, MPI_COMM_WORLD);
    }
    MPI_Finalize();
    return 0;
}