示例#1
0
// Computes the average of a distributed random array: the root scatters
// equal slices to every rank, each rank averages its slice, and the
// partial averages are all-gathered so every rank can print the result.
int main(int argc, char** argv) {
  if (argc != 2) {
    fprintf(stderr, "Usage: avg num_elements_per_proc\n");
    exit(1);
  }

  // atoi() returns 0 on unparseable input, and a non-positive count would
  // make every buffer below zero-sized — reject it up front.
  int num_elements_per_proc = atoi(argv[1]);
  if (num_elements_per_proc <= 0) {
    fprintf(stderr, "num_elements_per_proc must be a positive integer\n");
    exit(1);
  }

  // Seed the random number generator to get different results each time
  srand(time(NULL));

  MPI_Init(NULL, NULL);

  int world_rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  int world_size;
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  // Create the random array on the root process only; MPI_Scatter ignores
  // the send buffer on non-root ranks. Total size is elements-per-process
  // times the number of processes.
  float *rand_nums = NULL;
  if (world_rank == 0) {
    rand_nums = create_rand_nums(num_elements_per_proc * world_size);
  }

  // Per-rank receive buffer for this process's slice of the array.
  float *sub_rand_nums = (float *)malloc(sizeof(float) * num_elements_per_proc);
  assert(sub_rand_nums != NULL);

  // Scatter the random numbers from the root process to all processes.
  MPI_Scatter(rand_nums, num_elements_per_proc, MPI_FLOAT, sub_rand_nums,
              num_elements_per_proc, MPI_FLOAT, 0, MPI_COMM_WORLD);

  // Average of this rank's slice.
  float sub_avg = compute_avg(sub_rand_nums, num_elements_per_proc);

  // Gather every rank's partial average onto every process.
  float *sub_avgs = (float *)malloc(sizeof(float) * world_size);
  assert(sub_avgs != NULL);
  MPI_Allgather(&sub_avg, 1, MPI_FLOAT, sub_avgs, 1, MPI_FLOAT, MPI_COMM_WORLD);

  // Average of the per-rank averages. This equals the overall average only
  // because every rank averaged the same number of elements.
  float avg = compute_avg(sub_avgs, world_size);
  printf("Avg of all elements from proc %d is %f\n", world_rank, avg);

  // Clean up
  if (world_rank == 0) {
    free(rand_nums);
  }
  free(sub_avgs);
  free(sub_rand_nums);

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
  return 0;
}
示例#2
0
// Computes the mean and standard deviation of random numbers generated
// independently on every rank, via MPI_Allreduce (mean) and MPI_Reduce
// (sum of squared deviations, reported by the root).
int main(int argc, char **argv) {

  if (argc != 2) {
    fprintf(stderr, "Usage: <num_elements_per_proc>\n");
    // A usage error must report failure: the original exited with status 0,
    // which makes wrapper scripts and mpirun believe the run succeeded.
    exit(1);
  }

  // Read argv[1] directly (the original used *++argv pointer arithmetic).
  // atoi() returns 0 on unparseable input, so reject non-positive counts.
  int num_elements_per_proc = atoi(argv[1]);
  if (num_elements_per_proc <= 0) {
    fprintf(stderr, "num_elements_per_proc must be a positive integer\n");
    exit(1);
  }

  MPI_Init(NULL, NULL);

  int world_rank;
  int world_size;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  // Per-rank seed so each process draws different numbers.
  // NOTE(review): rank 0 always seeds with 0 (time(NULL) * 0) and therefore
  // produces an identical stream every run — confirm that is intended.
  srand(time(NULL) * world_rank);
  float *rand_nums = create_rand_nums(num_elements_per_proc);

  // Local sum of this rank's elements.
  float local_sum = 0;
  int i;
  for (i = 0; i < num_elements_per_proc; i ++) {
    local_sum += rand_nums[i];
  }

  // Global mean across every element on every rank.
  float global_sum;
  MPI_Allreduce(&local_sum, &global_sum, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
  float mean = global_sum / (num_elements_per_proc * world_size);

  // Local sum of squared deviations from the global mean.
  float local_sq_diff = 0;
  for (i = 0; i < num_elements_per_proc; i ++) {
    local_sq_diff += (rand_nums[i] - mean) * (rand_nums[i] - mean);
  }

  // Root collects the total squared deviation and reports the result.
  float global_sq_diff;
  MPI_Reduce(&local_sq_diff, &global_sq_diff, 1, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);

  if (world_rank == 0) {
    float stddev = sqrt(global_sq_diff / (num_elements_per_proc * world_size));
    printf("Mean - %f, Standard deviation = %f\n", mean, stddev);
  }

  free(rand_nums);

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();

  return 0;
}
示例#3
0
// Scatter/gather average demo with a fixed 4 elements per process: the root
// scatters slices, each rank averages its slice, and the root gathers the
// partial averages and prints their mean.
int main(int argc, char** argv){
	int elements_per_proc = 4;
	srand(time(NULL));
	MPI_Init(NULL, NULL);

	int world_size;
	MPI_Comm_size(MPI_COMM_WORLD, &world_size);
	int world_rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

	// Root creates the full random array; MPI_Scatter ignores the send
	// buffer on every other rank, which keeps NULL.
	float *rand_nums = NULL;
	if(world_rank == 0){
		rand_nums = create_rand_nums(elements_per_proc * world_size);
	}

	// Receive buffer for this rank's slice of the array.
	// BUG FIX: the original never checked this allocation.
	float *sub_rand_nums = malloc(sizeof(float) * elements_per_proc);
	if(sub_rand_nums == NULL){
		fprintf(stderr, "malloc of sub_rand_nums failed\n");
		MPI_Abort(MPI_COMM_WORLD, 1);
	}

	//Scatter the random numbers to all processes
	MPI_Scatter(rand_nums, elements_per_proc, MPI_FLOAT, sub_rand_nums, elements_per_proc, MPI_FLOAT, 0, MPI_COMM_WORLD);

	float sub_avg = compute_avg(sub_rand_nums, elements_per_proc);

	// Root gathers one partial average from each rank.
	// BUG FIX: the original never checked this allocation either.
	float *sub_avgs = NULL;
	if(world_rank == 0){
		sub_avgs = malloc(sizeof(float) * world_size);
		if(sub_avgs == NULL){
			fprintf(stderr, "malloc of sub_avgs failed\n");
			MPI_Abort(MPI_COMM_WORLD, 1);
		}
	}
	MPI_Gather(&sub_avg, 1, MPI_FLOAT, sub_avgs, 1, MPI_FLOAT, 0, MPI_COMM_WORLD);

	// Average of the per-rank averages; correct because every rank averaged
	// the same number of elements.
	if(world_rank == 0){
		float avg = compute_avg(sub_avgs, world_size);
		printf("Avg:%f\n", avg);
	}

	if(world_rank == 0){
		free(rand_nums);
		free(sub_avgs);
	}
	free(sub_rand_nums);

	MPI_Barrier(MPI_COMM_WORLD);
	MPI_Finalize();
	return 0;
}
示例#4
0
// Weak-scaling benchmark for the pssort parallel sample sort: fixed element
// count per process, sweeping the number of samples and the number of
// sample-sorter processes, timing each configuration.
int main(int argc, char** argv) {
  MPI_Init(NULL, NULL);
  int world_rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  int world_size;
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  Pssort_Object pssort_object;
  pssort_init(&pssort_object, MPI_COMM_WORLD);

  /* Weak scaling start.
     (Removed from the original: an unused loop variable `i` and dead
     initializers of the two sweep variables, both overwritten by the
     for-loops before any use.) */
  int num_samples;
  int num_sample_sorters;
  for (num_samples = 32; num_samples <= 128; num_samples *= 2) {
    for (num_sample_sorters = 1; num_sample_sorters <= world_size;
         num_sample_sorters *= 2) {
      pssort_set_num_samples(&pssort_object, num_samples);
      pssort_set_sample_sorter_factor(&pssort_object,
                                      world_size / num_sample_sorters);

      // Per-process input size; saved in initial_num_elements because
      // pssort_a updates num_elements in place.
      int num_elements = 41943;
      int initial_num_elements = num_elements;
      Record* rand_nums = create_rand_nums(num_elements);

      // Barriers bracket the sort so every rank times the same region.
      MPI_Barrier(MPI_COMM_WORLD);
      double t = -MPI_Wtime();
      pssort_a(&pssort_object, (void**)&rand_nums, &num_elements,
               sizeof(Record), compare_record);
      MPI_Barrier(MPI_COMM_WORLD);
      t += MPI_Wtime();

      if (world_rank == 0) {
        fprintf(stderr, "weak:%d:integers_per_proc:%d:total_GB:"
                "%lf:num_sample_sorters:%d:num_samples:%d:time:%lf\n",
                world_size, initial_num_elements,
                ((int64_t)initial_num_elements * sizeof(Record) * world_size)
                / (1024.0 * 1024.0 * 1024.0), num_sample_sorters,
                num_samples, t);
      }
      MPI_Barrier(MPI_COMM_WORLD);

      free(rand_nums);
    }
  }
  /* Weak scaling done */

  pssort_finalize(&pssort_object);
  MPI_Finalize();
  return 0;
}
示例#5
0
// Fenix fault-tolerance demo: computes the mean and standard deviation of
// per-rank random numbers, deliberately SIGKILLs one rank mid-computation,
// and relies on Fenix to recover the job and finish.
int main(int argc, char **argv) {

  // Standard MPI identifiers (relative to the Fenix-managed communicator).
  int world_rank;
  int world_size;

  // Fenix state.
  int fenix_role;              // FENIX_ROLE_INITIAL_RANK on first entry; a recovery role after a failure
  MPI_Comm world_comm = NULL;  // NOTE(review): initialized with NULL, not MPI_COMM_NULL — works where MPI_Comm is a pointer type; confirm portability
  MPI_Comm new_comm = NULL;    // resilient communicator produced by Fenix_Init
  int spawn_mode = 0;
  int error;
  float local_sum = 0;
  int i;
  float *rand_nums = NULL;
  int kill_rank;               // rank that kills itself once (argv[3])
  int spare_ranks;             // ranks Fenix holds back as spares (argv[2])
  int num_elements_per_proc;   // elements generated per rank (argv[1])
  int recovered;               // 0 on the initial pass, 1 after recovery

  if (argc != 4) {
    fprintf(stderr, "Usage: <# elements> <# spare ranks> <rank ID for killing>\n");
    exit(0);
  }
  kill_rank = atoi(argv[3]);
  spare_ranks = atoi(argv[2]);
  num_elements_per_proc = atoi(argv[1]);


  MPI_Init(&argc, &argv);
  MPI_Comm_dup(MPI_COMM_WORLD, &world_comm);
  // Execution resumes from Fenix_Init after a rank failure (per Fenix
  // semantics — confirm against the library docs); everything below this
  // point may therefore run more than once.
  Fenix_Init(&fenix_role, world_comm, &new_comm, &argc, &argv, 
              spare_ranks, spawn_mode, MPI_INFO_NULL, &error );

  MPI_Comm_rank(new_comm, &world_rank);
  MPI_Comm_size(new_comm, &world_size);

  if (fenix_role == FENIX_ROLE_INITIAL_RANK) {
    recovered = 0;
  } else {
    recovered = 1;
    // On a recovery pass the previous pass's allocation may still be live
    // in a surviving rank's stack frame; release it before reallocating.
    if( rand_nums != NULL ) {
       free( rand_nums );
    }
  }

  // NOTE(review): rank 0 always seeds with 0 (time(NULL) * 0). Also,
  // local_sum was initialized before Fenix_Init and is NOT reset here, so
  // on a recovery pass surviving ranks may accumulate on top of a stale
  // partial sum — TODO confirm intended behavior.
  srand(time(NULL) * world_rank);
  rand_nums = create_rand_nums(num_elements_per_proc);

  // Local sum of this rank's elements.
  for (i = 0; i < num_elements_per_proc; i ++) {
    local_sum += rand_nums[i];
  }
  // Global mean over all ranks' elements.
  float global_sum;
  MPI_Allreduce(&local_sum, &global_sum, 1, MPI_FLOAT, MPI_SUM, new_comm);
  float mean = global_sum / (num_elements_per_proc * world_size);

  // Local sum of squared deviations from the global mean.
  float local_sq_diff = 0;
  for (i = 0; i < num_elements_per_proc; i ++) {
    local_sq_diff += (rand_nums[i] - mean) * (rand_nums[i] - mean);
  }



  // Inject the failure exactly once: only on the first (non-recovered) pass.
  if (world_rank == kill_rank && recovered == 0) {
    pid_t pid = getpid();
    kill(pid, SIGKILL);
  }



  // The MPI_Reduce after the kill is the operation that observes the
  // failure and triggers Fenix recovery on the surviving ranks.
  float global_sq_diff;
  MPI_Reduce(&local_sq_diff, &global_sq_diff, 1, MPI_FLOAT, MPI_SUM, 0, new_comm);

  free(rand_nums);
  rand_nums = NULL;
  Fenix_Finalize();

  // Root reports the standard deviation from the reduced squared deviations.
  if (world_rank == 0) {
    float stddev = sqrt(global_sq_diff / (num_elements_per_proc * world_size));
    printf("Mean - %f, Standard deviation = %f\n", mean, stddev);
  }
  MPI_Finalize();

  return 0;
}
// Min/max/avg demo: the root builds a world_size x world_size random matrix,
// scatters one row to each rank, each rank computes a (min, max, avg) triple
// for its row, and the root combines the gathered triples and cross-checks
// against a direct computation over the full matrix.
int main(int argc, char** argv) {
  // Seed once so each run produces different data.
  srand(time(NULL));

  MPI_Init(NULL, NULL);

  int world_rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  int world_size;
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  // Root creates and echoes the full matrix; other ranks keep NULL since
  // MPI_Scatter reads the send buffer only on the root.
  float *all_nums = NULL;
  if (world_rank == 0) {
    all_nums = create_rand_nums(world_size * world_size);

    int row, col;
    for (row = 0; row < world_size; row++) {
      for (col = 0; col < world_size; col++) {
        printf("%f ", all_nums[row * world_size + col]);
      }
      printf("\n");
    }
  }

  // Each rank receives exactly one row of the matrix.
  float *my_row = (float *)malloc(sizeof(float) * world_size);
  assert(my_row != NULL);
  MPI_Scatter(all_nums, world_size, MPI_FLOAT, my_row,
              world_size, MPI_FLOAT, 0, MPI_COMM_WORLD);

  // compute_avg yields a 3-element (min, max, avg) summary of the row.
  float *my_stats = compute_avg(my_row, world_size);
  printf("I am the processs %d and my (min, max, avg) is: (%f %f %f)\n",
         world_rank, my_stats[0], my_stats[1], my_stats[2]);

  // Root gathers every rank's 3-float summary into one flat array.
  float *all_stats = NULL;
  if (world_rank == 0) {
    all_stats = (float *)malloc(sizeof(float) * 3 * world_size);
    assert(all_stats != NULL);
  }
  MPI_Gather(my_stats, 3, MPI_FLOAT, all_stats, 3, MPI_FLOAT, 0,
             MPI_COMM_WORLD);

  if (world_rank == 0) {
    // Combine the per-rank summaries; valid because every rank summarized
    // the same number of elements.
    float *combined = compute_avg_end(all_stats, 3 * world_size);
    printf("(min, max, avg) of all elements is (%f %f %f)\n",
           combined[0], combined[1], combined[2]);
    // Cross-check by recomputing directly over the original matrix.
    float *direct = compute_avg(all_nums, world_size * world_size);
    printf("(min, max, avg) computed across original data is (%f %f %f)\n",
           direct[0], direct[1], direct[2]);
    // NOTE(review): my_stats, combined, and direct are never freed —
    // whether they are heap-allocated depends on compute_avg's contract;
    // verify against its definition.
  }

  if (world_rank == 0) {
    free(all_nums);
    free(all_stats);
  }
  free(my_row);

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
}
示例#7
0
// Distributed sum over a 3x3x3 periodic Cartesian ("hypercube") topology:
// the root scatters an integer array, each rank sums its slice, and the
// root gathers and totals the partial sums.
int main(int argc, char** argv) {
  MPI_Init(NULL, NULL);

  // Total number of elements to distribute.
  int n = 90;

  int world_rank, world_size, hypercube_size, hypercube_rank;
  MPI_Comm hypercube_3d;

  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  // 3x3x3 fully periodic Cartesian grid.
  // NOTE(review): this requires (at least) 27 processes; ranks outside the
  // grid would receive MPI_COMM_NULL — the example assumes exactly 27.
  int ndims = 3;
  int processPerDims[3] = {3, 3, 3};
  int periods[3] = {1, 1, 1};  // BUG FIX: statement was missing its ';'

  // BUG FIX: the original passed an undeclared 'dims'; the dimension array
  // is processPerDims.
  MPI_Cart_create(MPI_COMM_WORLD, ndims, processPerDims, periods, 1,
                  &hypercube_3d);

  // BUG FIX: the original queried an undeclared 'ring_1d' communicator.
  MPI_Comm_rank(hypercube_3d, &hypercube_rank);
  MPI_Comm_size(hypercube_3d, &hypercube_size);

  printf("the ranks are %d\n", hypercube_rank);
  printf("the size is %d\n", hypercube_size);

  // BUG FIX: the original used n/9 elements per process, which over-reads
  // the n-element root buffer whenever the topology has more than 9 ranks
  // (27 ranks x 10 elements = 270 > 90). Divide by the actual rank count.
  int num_elements_per_proc = n / hypercube_size;

  // Root creates the random array; MPI_Scatter ignores the send buffer on
  // the other ranks, which keep NULL. (BUG FIX: the original malloc'd n
  // ints on every rank and then leaked that buffer on the root by
  // overwriting the pointer with create_rand_nums.)
  int *rand_nums = NULL;
  if (hypercube_rank == 0) {
    rand_nums = create_rand_nums(n);
  }

  // Receive buffer for this rank's slice.
  int *sub_rand_nums = (int*)malloc(sizeof(int) * num_elements_per_proc);
  if (sub_rand_nums == NULL) {
    fprintf(stderr, "malloc of sub_rand_nums failed\n");
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  // Scatter the random numbers from the root across the topology.
  MPI_Scatter(rand_nums, num_elements_per_proc, MPI_INT, sub_rand_nums,
              num_elements_per_proc, MPI_INT, 0, hypercube_3d);

  int k;
  for (k = 0; k < num_elements_per_proc; k++) {
    printf("\n My rank is %d and value is: %d", hypercube_rank,
           sub_rand_nums[k]);
  }

  // Local partial sum of this rank's slice.
  int sub_sum = compute_sum(sub_rand_nums, num_elements_per_proc);
  printf("\nI am rank %d and my sum is %d\n", hypercube_rank, sub_sum);

  int *sub_sums = NULL;
  if (hypercube_rank == 0) {
    sub_sums = (int*)malloc(sizeof(int) * hypercube_size);
    if (sub_sums == NULL) {
      fprintf(stderr, "malloc of sub_sums failed\n");
      MPI_Abort(MPI_COMM_WORLD, 1);
    }
  }

  MPI_Gather(&sub_sum, 1, MPI_INT, sub_sums, 1, MPI_INT, 0, hypercube_3d);

  // BUG FIX: only the root owns sub_sums; the original looped over it on
  // every rank and dereferenced NULL on the non-root ranks.
  if (hypercube_rank == 0) {
    int j;
    for (j = 0; j < hypercube_size; j++) {
      printf("I am after gather sum %d\n", sub_sums[j]);
    }
    int sum = compute_sum(sub_sums, hypercube_size);
    printf("Sum of all hypercube_3d is %d\n", sum);
  }

  // BUG FIX: the original guarded cleanup with 'hypercube_size == 0',
  // which is never true, so these buffers were always leaked.
  if (hypercube_rank == 0) {
    free(rand_nums);
    free(sub_sums);
  }
  free(sub_rand_nums);

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
  return 0;
}
示例#8
0
// Timed mean/standard-deviation benchmark: each rank generates its own
// random numbers, the mean is all-reduced, the squared deviations are
// reduced to the root, and MYTIMESTAMP brackets the whole run.
int main(int argc, char** argv)
{
  if (argc != 2)
  {
    fprintf(stderr, "Usage: avg num_elements_per_proc\n");
    exit(1);
  }

  double start, stop;

  int num_elements_per_proc = atoi(argv[1]);
  // Wall-clock timing starts before MPI_Init so startup cost is included.
  MYTIMESTAMP(start);

  MPI_Init(NULL, NULL);

  int world_rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  int world_size;
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  // Per-rank seed so every process draws a different stream.
  // BUG FIX: the original also called srand(time(NULL)) earlier; that seed
  // was dead code because it was overwritten here before any rand() call.
  // NOTE(review): seeding with the rank alone makes every run reproduce
  // the same numbers — confirm that is intended for this benchmark.
  srand(world_rank);
  float *rand_nums = create_rand_nums(num_elements_per_proc);

  // Sum the numbers locally
  float local_sum = 0;
  int i;
  for (i = 0; i < num_elements_per_proc; i++)
  {
    local_sum += rand_nums[i];
  }

  // Reduce all of the local sums into the global sum in order to
  // calculate the mean
  float global_sum;
  MPI_Allreduce(&local_sum, &global_sum, 1, MPI_FLOAT, MPI_SUM,
                MPI_COMM_WORLD);
  float mean = global_sum / (num_elements_per_proc * world_size);

  // Compute the local sum of the squared differences from the mean
  float local_sq_diff = 0;
  for (i = 0; i < num_elements_per_proc; i++)
  {
    local_sq_diff += (rand_nums[i] - mean) * (rand_nums[i] - mean);
  }

  // Reduce the global sum of the squared differences to the root process
  // and print off the answer
  float global_sq_diff;
  MPI_Reduce(&local_sq_diff, &global_sq_diff, 1, MPI_FLOAT, MPI_SUM, 0,
             MPI_COMM_WORLD);

  // The standard deviation is the square root of the mean of the squared
  // differences.
  if (world_rank == 0)
  {
    float stddev = sqrt(global_sq_diff /
                        (num_elements_per_proc * world_size));
    printf("Mean - %f, Standard deviation = %f\n", mean, stddev);
  }

  // Clean up
  free(rand_nums);

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();

  if (world_rank == 0)
  {
    MYTIMESTAMP(stop);
    printf("number of seconds: %f\n", stop - start);
  }
  return 0;
}