void slave(MPI_Comm ring_comm)
{
    int numbers[2];
    int n;

    /* Receive n from root node */
    MPI_Bcast(&n /* Receive n from root */, 1 /* rx 1 number */ , MPI_INT, 0, ring_comm);

    for(int i=0; i<floorf((float)n/(2*p)); i++){
        /* Receive 2 numbers from root */
        MPI_Scatter(NULL, 2 /* root sends 2 ints to each process */, MPI_INT, 
                    numbers, 2 /* receive 2 ints from root */, MPI_INT, 0 /* id of root node */,
                    ring_comm);

        int min_no;
        work_t work;
        work.no1 = numbers[0];
        work.no2 = numbers[1];
        work_result_t work_result;
        do_work(work, &work_result);
        min_no = work_result.min_no;

        /* Slaves send the minimum of two numbers */
        MPI_Reduce(&min_no /* everyone sends 1 number to root */, NULL ,1 /* 1 number */, MPI_INT, 
                MPI_MIN , 0 /* id of root node */, 
                ring_comm);
    }/*for*/

    return ;
}/* slave */
int main(int argc, char *argv[]) {
  int send[DATA_SIZE], recv[DATA_SIZE];
  int rank, size, count, res;

  MPI_Init(&argc, &argv); // initialize MPI
  MPI_Comm_rank(MPI_COMM_WORLD, &rank); // get own rank/ID
  MPI_Comm_size(MPI_COMM_WORLD, &size); // get total number of processes

  if(rank == 0) { //If root: Generate data to be distributed.
  }

  //Distribute the data to all nodes; each process receives an integer array of length "count".
  count = (DATA_SIZE / size); // every process gets a chunk of the same size
  // scatter: if rank=0, send data (and get own share); otherwise: receive data
  MPI_Scatter(send, count, MPI_INT, recv, count, MPI_INT, 0, MPI_COMM_WORLD);

  // Each node processes its share of data and sends the result (here: int "res") to root.
  MPI_Gather(&res, 1, MPI_INT, recv, 1, MPI_INT, 0, MPI_COMM_WORLD);

  if(rank == 0) { //If root: process the received data.
  }

  MPI_Finalize(); // shut down MPI
  return 0;
}
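The two root-only blocks in the skeleton above are intentionally empty. One hedged way to fill them, assuming the per-rank work is simply summing the received chunk (DATA_SIZE as a small compile-time constant, the summing work, and the result handling are assumptions, not part of the original):

/* hypothetical body for the first stub: the root fills the send array */
if (rank == 0) {
    for (int i = 0; i < DATA_SIZE; i++)
        send[i] = i;
}

/* hypothetical per-rank work between the Scatter and the Gather:
   res holds the sum of this rank's "count" integers */
res = 0;
for (int i = 0; i < count; i++)
    res += recv[i];

/* hypothetical body for the second stub: the Gather stored one int per rank
   at the start of recv on the root, so the root just adds them up */
if (rank == 0) {
    int total = 0;
    for (int i = 0; i < size; i++)
        total += recv[i];
    printf("total = %d\n", total);
}
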
int main(int argc,char** argv){
	int rank,size,epp;
	int* dataSend=NULL;
	int i;
	MPI_Init(NULL,NULL);
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);
	MPI_Comm_size(MPI_COMM_WORLD,&size);
	epp=2;
	if(rank==0){
			printf("Master creating data...\n");
			dataSend=(int*)malloc(sizeof(int)*size*epp);
			for(i=0;i<epp*size;i++)
				dataSend[i]=i;
	}
	int* dataRecv1=(int*)malloc(sizeof(int)*epp);
	MPI_Scatter(dataSend,epp,MPI_INT,dataRecv1,epp,MPI_INT,0,MPI_COMM_WORLD);
	float subavg=0.0f;
	for(i=0;i<epp;i++)
		subavg+=dataRecv1[i];
	subavg/=epp;
	printf("%d calculates subavg as %f\n",rank,subavg);
	float finalAvg;
	MPI_Reduce(&subavg,&finalAvg,1,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
	if(rank==0){
		printf("The average is : %f\n",finalAvg/size);
		free(dataSend);
	}
	free(dataRecv1);
	MPI_Finalize();
}
Example #4
void testAllCollective(){
 int count = 1000;
 int root = 2;              /* assumes the communicator has at least 3 ranks */
 int rank, size;
 MPI_Comm comm = MPI_COMM_WORLD;

 MPI_Comm_rank(comm, &rank);
 MPI_Comm_size(comm, &size);

 /* Buffers: the root sends 100 ints to every rank, each rank receives 100 ints,
    and every rank contributes "count" ints to the allreduce. */
 int *buf0 = (rank == root) ? calloc(100 * size, sizeof(int)) : NULL;
 int *buf1 = malloc(100 * sizeof(int));
 int *in   = calloc(count, sizeof(int));
 int *out  = malloc(count * sizeof(int));

 /* Both collectives must be called by every rank with matching arguments,
    so no rank-dependent branching is needed. */
 MPI_Scatter(buf0, 100, MPI_INT, buf1, 100, MPI_INT, root, comm);
 MPI_Allreduce(in, out, count, MPI_INT, MPI_SUM, comm);

 free(out); free(in); free(buf1); free(buf0);
}
Example #5
int main(int argc,char* argv[]){
	int rank,size;
	int i,n;
	int* A=NULL;
	
	int D[2],sum;
	MPI_Init(&argc,&argv);
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);
	MPI_Comm_size(MPI_COMM_WORLD,&size);
	int ctr=size*2;
	A=(int*)malloc(sizeof(int)*size*2);
	if(rank==0){
			printf("Enter %d Elements :\n",size*2);
			for(i=0;i<size*2;i++)
				scanf("%d",&A[i]);
	}
	/* Tree reduction by repeated scatter/gather: every round each rank sums a
	   pair and the partial sums collect at the front of A, so A[0] ends up
	   holding the total. This only works when size is a power of two. */
	while(ctr!=1){		
		MPI_Scatter(A,2,MPI_INT,D,2,MPI_INT,0,MPI_COMM_WORLD);
		sum=D[0]+D[1];
		MPI_Gather(&sum,1,MPI_INT,A,1,MPI_INT,0,MPI_COMM_WORLD);
		ctr/=2;
	}
	if(rank==0)
		printf("Total Sum : %d\n",A[0]);
	MPI_Finalize();
}
Example #6
double parallelsumArray(double * arr, int num, int id, int numProc){
  int root = 0;
  double sum, totalSum, end, start, scatter, s;
  if(id == 0){ //master reads array and scatters value
    MPI_Bcast(&num, 1, MPI_INT, root, MPI_COMM_WORLD);
    printf("%i\t%g\n", numProc, *arr);
  }
  else{ //workers sum their part of the array
    MPI_Bcast(&num, 1, MPI_INT, root, MPI_COMM_WORLD);
  }
  
  int numElements = num/numProc;
  double* localA = malloc(sizeof(double) * numElements);
  
  start = MPI_Wtime();
  MPI_Scatter(arr, numElements, MPI_DOUBLE, localA, numElements, MPI_DOUBLE, root, MPI_COMM_WORLD);
  end = MPI_Wtime();
  scatter = end - start;
  
  start = MPI_Wtime();
  sum = sumArray(localA, numElements);
  MPI_Reduce(&sum, &totalSum, 1, MPI_DOUBLE, MPI_SUM, root, MPI_COMM_WORLD);
  end = MPI_Wtime();
  s = end - start; 
  if(id == 0){
    printf("scatter: %f, sum: %f\n", scatter, s);
  }
  free(localA);
  return totalSum;
}
Example #7
static PyObject *scatter_array(PyObject *self, PyObject *args) {
  PyArrayObject *x;
  PyArrayObject *d;
  int source, error, count, numprocs;
  MPI_Datatype mpi_type;

  /* process the parameters */
  if (!PyArg_ParseTuple(args, "OOi", &x, &d, &source))
    return NULL;

  /* Input check and determination of MPI type */          
  mpi_type = type_map(x, &count);
  if (!mpi_type) return NULL;  
   
  error = MPI_Comm_size(MPI_COMM_WORLD,&numprocs);    
  count = count/numprocs;  
  
  /* call the MPI routine */
  error = MPI_Scatter(x->data, count, mpi_type, d->data, count, 
		      mpi_type, source,	MPI_COMM_WORLD);
	 
  if (error != 0) {
    rank_raise_mpi_runtime(error, "MPI_Scatter");
    return NULL;
  }  
      
  Py_INCREF(Py_None);
  return (Py_None);
}
Example #8
int main(int argc, char* argv[]) {

	
  int numtasks, rank, sendcount, recvcount, source;
  float sendbuf[SIZE][SIZE] = {
    {1.0, 2.0, 3.0, 4.0},
    {5.0, 6.0, 7.0, 8.0},
    {9.0, 10.0, 11.0, 12.0},
    {13.0, 14.0, 15.0, 16.0}  };
  float recvbuf[SIZE];

  MPI_Init(&argc,&argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
	
  if (numtasks == SIZE) {
      source = 1;
      sendcount = SIZE;
      recvcount = SIZE;
      MPI_Scatter(sendbuf,sendcount,MPI_FLOAT,recvbuf,recvcount,
                  MPI_FLOAT,source,MPI_COMM_WORLD);

      printf("rank= %d  Results: %f %f %f %f\n",rank,recvbuf[0],
             recvbuf[1],recvbuf[2],recvbuf[3]);
  }
  else
    printf("Must specify %d processors. Terminating.\n",SIZE);

  MPI_Finalize();
}
Example #9
int main(int argc, char *argv[]){
	int numTasks, rank, sendCount, recvCount, source;
	// Array which is distributed over the processes.
	float sendBuf[SIZE][SIZE] = {{1.0,2.0,3.0,4.0},{5.0,6.0,7.0,8.0},{9.0,10.0,11.0,12.0},{13.0,14.0,15.0,16.0}};
	float recvBuf[SIZE];
	
	// Initialize the MPI environment.
	MPI_Init(&argc,&argv);
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);
	MPI_Comm_size(MPI_COMM_WORLD,&numTasks);
	
	// The number of processes must be exactly SIZE (4).
	if(numTasks == SIZE){
		source = 1;
		sendCount = SIZE;
		recvCount = SIZE;
		// Scatter the array among the processes.
		MPI_Scatter(sendBuf,sendCount,MPI_FLOAT,recvBuf,recvCount,MPI_FLOAT,source,MPI_COMM_WORLD);
		printf("Process#%d Results: %f %f %f %f \n",rank,recvBuf[0],recvBuf[1],recvBuf[2],recvBuf[3]);
	}
	else{
		printf("Must specify %d processors. Terminating \n",SIZE);
	}
	MPI_Finalize();
}
int main(int argc, char** argv) {
  if (argc != 2) {
    fprintf(stderr, "Usage: avg num_elements_per_proc\n");
    exit(1);
  }

  int num_elements_per_proc = atoi(argv[1]);
  // Seed the random number generator to get different results each time
  srand(time(NULL));

  MPI_Init(NULL, NULL);

  int world_rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  int world_size;
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  // Create a random array of elements on the root process. Its total
  // size will be the number of elements per process times the number
  // of processes
  float *rand_nums = NULL;
  if (world_rank == 0) {
    rand_nums = create_rand_nums(num_elements_per_proc * world_size);
  }

  // For each process, create a buffer that will hold a subset of the entire
  // array
  float *sub_rand_nums = (float *)malloc(sizeof(float) * num_elements_per_proc);
  assert(sub_rand_nums != NULL);

  // Scatter the random numbers from the root process to all processes in
  // the MPI world
  MPI_Scatter(rand_nums, num_elements_per_proc, MPI_FLOAT, sub_rand_nums,
              num_elements_per_proc, MPI_FLOAT, 0, MPI_COMM_WORLD);

  // Compute the average of your subset
  float sub_avg = compute_avg(sub_rand_nums, num_elements_per_proc);

  // Gather all partial averages down to all the processes
  float *sub_avgs = (float *)malloc(sizeof(float) * world_size);
  assert(sub_avgs != NULL);
  MPI_Allgather(&sub_avg, 1, MPI_FLOAT, sub_avgs, 1, MPI_FLOAT, MPI_COMM_WORLD);

  // Now that we have all of the partial averages, compute the
  // total average of all numbers. Since we are assuming each process computed
  // an average across an equal amount of elements, this computation will
  // produce the correct answer.
  float avg = compute_avg(sub_avgs, world_size);
  printf("Avg of all elements from proc %d is %f\n", world_rank, avg);

  // Clean up
  if (world_rank == 0) {
    free(rand_nums);
  }
  free(sub_avgs);
  free(sub_rand_nums);

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
}
Example #11
int main (int argc, char* argv[]) {            
  int miID, procesos, local[4], localplus[4], externo;
  
  srand(time(NULL));
  
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &miID);
  MPI_Comm_size(MPI_COMM_WORLD, &procesos);
  
  if (miID == 0)
  {
    local[0] = rand() % 100 + 1;
    local[1] = rand() % 100 + 1;
    local[2] = rand() % 100 + 1;
    local[3] = rand() % 100 + 1;
  }
    
  
  // MPI_Scatter(send buffer, send count per process, send type, receive buffer,
  //             receive count, receive type, root, communicator)
  MPI_Scatter(local, 1, MPI_INT, &externo, 1, MPI_INT, 0, MPI_COMM_WORLD);
  
  printf("Process %d has the number %d\n", miID, externo);
  
  MPI_Gather(&externo, 1, MPI_INT, localplus, 1, MPI_INT, 0, MPI_COMM_WORLD);
  
  if (miID == 0)
    printf ("Datos: [%d,%d,%d,%d]\n",localplus[0], localplus[1], localplus[2], localplus[3]);
  
  MPI_Finalize();
  return 0;
}
Example #12
int main(int argc,char **argv){
	MPI_Init(&argc,&argv);
	int rank,size,r,q,*A,*B,*C,i,min=100;
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);
	MPI_Comm_size(MPI_COMM_WORLD,&size);
	r=N%(size);
	if(r==0) q=N/size;
	else q=(N+size-r)/size;
	B=(int*)calloc(q,sizeof(int));
	if(rank==0){
		/* allocate q*size entries so MPI_Scatter never reads past the data
		   when N is not divisible by size; the padding value 100 can never
		   beat a value from rand()%100 in the minimum search */
		A=(int*)malloc(q*size*sizeof(int));
		C=(int*)calloc(size,sizeof(int));
		for(i=0;i<q*size;i++)
			A[i]=100;
		for(i=0;i<N;i++){
			A[i]=rand()%100;
			printf("%d\n",A[i]);
		}
	}
	MPI_Scatter(A,q,MPI_INT,B,q,MPI_INT,0,MPI_COMM_WORLD);
	for(i=0;i<q;i++) if(min>B[i]) min=B[i];
	MPI_Gather(&min,1,MPI_INT,C,1,MPI_INT,0,MPI_COMM_WORLD);
	if(rank==0){
		for(i=1;i<size;i++){
			if(min>C[i]) min=C[i];
		}
		printf("Wynik to %d\n",min);
		free(A);
		free(C);
	}
	free(B);
	MPI_Finalize();
	return 0;	
}
Example #13
FORT_DLL_SPEC void FORT_CALL mpi_scatter_ ( void*v1, MPI_Fint *v2, MPI_Fint *v3, void*v4, MPI_Fint *v5, MPI_Fint *v6, MPI_Fint *v7, MPI_Fint *v8, MPI_Fint *ierr ){

#ifndef HAVE_MPI_F_INIT_WORKS_WITH_C
    if (MPIR_F_NeedInit){ mpirinitf_(); MPIR_F_NeedInit = 0; }
#endif
    if (v4 == MPIR_F_MPI_IN_PLACE) v4 = MPI_IN_PLACE;
    *ierr = MPI_Scatter( v1, (int)*v2, (MPI_Datatype)(*v3), v4, (int)*v5, (MPI_Datatype)(*v6), (int)*v7, (MPI_Comm)(*v8) );
}
Example #14
void mpi_scatter (void *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, 
		  void *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, 
		  MPI_Fint *root, MPI_Fint *comm, MPI_Fint *__ierr)
{
  *__ierr = MPI_Scatter (sendbuf, *sendcount, MPI_Type_f2c (*sendtype), 
			 recvbuf, *recvcount, MPI_Type_f2c (*recvtype),
			 *root, MPI_Comm_f2c (*comm));
}
Example #15
void mpi_random_seed_slave(int pnode, int cnt) {
  int this_seed;

  MPI_Scatter(NULL, 1, MPI_INT, &this_seed, 1, MPI_INT, 0, comm_cart);

  RANDOM_TRACE(printf("%d: Received seed %d\n", this_node, this_seed));
  init_random_seed(this_seed);
}
void dd_scatter(gmx_domdec_t gmx_unused *dd, int gmx_unused nbytes, void gmx_unused *src, void gmx_unused *dest)
{
#ifdef GMX_MPI
    MPI_Scatter(src, nbytes, MPI_BYTE,
                dest, nbytes, MPI_BYTE,
                DDMASTERRANK(dd), dd->mpi_comm_all);
#endif
}
int main(int argc, char *argv[]){

int numprocs,rank,namelen;
char processor_name[MPI_MAX_PROCESSOR_NAME];

int size = 100000;
int splitsize;
//make arrays
int *a = malloc(size * sizeof(int));
int result = 0;
int idx = 0;
int sub_result = 0;
double start,end;

for(idx = 0;idx <size;idx++){
    a[idx]=idx;
}

MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);

MPI_Barrier(MPI_COMM_WORLD);
if(rank==0)
	start = MPI_Wtime();

splitsize = (int) size/numprocs; /* assumes size is evenly divisible by numprocs */
int *sub_a = malloc(splitsize * sizeof(int));
int *sub_results = malloc(numprocs * sizeof(int));

MPI_Scatter(a,splitsize,MPI_INT,sub_a,splitsize,MPI_INT,0,MPI_COMM_WORLD);

for(idx=0;idx<splitsize;idx++)
    sub_result += sub_a[idx];

MPI_Gather(&sub_result,1,MPI_INT,sub_results,1,MPI_INT,0,MPI_COMM_WORLD);

if(rank==0)
    for(idx=0;idx<numprocs;idx++)
        result += sub_results[idx];

MPI_Barrier(MPI_COMM_WORLD);
if(rank==0)
	end = MPI_Wtime();

MPI_Finalize();

if(rank==0){
	printf("\n results is %i, should be %i  \n ",result,(size*(size-1))/2);
	printf("\n time was : %f \n ",end-start);
}

free(a);

free(sub_a);
free(sub_results);

return 0;
}
Example #18
int main(int argc, char *argv[])
{
int rank,nprocs,n;
int *arr,i,per,t,j;
MPI_Init(&argc,&argv);
MPI_Comm_rank (MPI_COMM_WORLD, &rank);
MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
if(rank==0)
	scanf("%d",&t);
MPI_Bcast(&t,1,MPI_INT,0,MPI_COMM_WORLD);
for(j=0;j<t;j++){
if(rank == 0)
	{
	scanf("%d",&n);
	per = (n + nprocs - 1)/nprocs;                  /* ceil(n/nprocs) */
	arr = (int *)calloc(2*per*nprocs,sizeof(int));  /* padded so Scatter/Gather stay in bounds */
	for(i=0;i<n;i++)
		scanf("%d",&arr[i*2]);
	for(i=0;i<n;i++)
		scanf("%d",&arr[2*i+1]);
	}
MPI_Bcast(&per,1,MPI_INT,0,MPI_COMM_WORLD);

int *client_arr = (int *)malloc(sizeof(int)*2*per);
int *res = (int *)malloc(sizeof(int)*per);
MPI_Scatter(arr, 2*per, MPI_INT, client_arr,
            2*per, MPI_INT, 0, MPI_COMM_WORLD);

for(i=0;i<per;i++)
	res[i] = client_arr[2*i+1] - client_arr[2*i];

MPI_Gather(res, per, MPI_INT, arr, per, MPI_INT, 0,
           MPI_COMM_WORLD);

if(rank == 0)
	{
	for(i=0;i<n;i++)
		printf("%d ",arr[i]);
	printf("\n");
	}
free(client_arr);
free(res);
if(rank == 0)
	free(arr);
}
/*if(rank == 0)
	{
	int i;
	for(i=1;i<nprocs;i++)
		MPI_Send(&arr[i], 1, MPI_INT, i, 0, MPI_COMM_WORLD);
	
	}*/
/*if(rank!=0)
	{
	int x;
	MPI_Recv(&x,1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	printf("%d: %d\n",rank,x);
	}
*/
 MPI_Finalize();
  return 0;
}
void master(MPI_Comm ring_comm)
{
    int n;
    int *array = NULL;
    int *subarray = NULL;
    int numbers[2];
    int lowest_no = 0;
    int lowest_no_so_far = 0;

    read_input(&array, &n);

    /* Let the slaves know too how many numbers we have to work on. */
    MPI_Bcast(&n /* Bcast n to everyone */, 1, MPI_INT, 0, ring_comm);

    lowest_no_so_far = array[0]; /* Assume the first number is the lowest */
    subarray = array;

    for(int i=0; i<floorf((float)n/(2*p)); i++){

        /* Scatter Data to processors including self */
        MPI_Scatter(subarray, 2 /* send 2 ints to each process from the array */, MPI_INT, 
                    numbers, 2 /* receive 2 ints (root keeps its own share) */, MPI_INT, 0 /* id of root node */,
                    ring_comm);



        work_t work;
        work_result_t work_result;

        work.no1 = numbers[0];
        work.no2 = numbers[1];

        int min_no;

        do_work(work, &work_result);

        min_no = work_result.min_no;

        MPI_Reduce(&min_no, &lowest_no /* root receives 1 number from everyone and applies reduction operation on the way*/ ,1 /*1 number */, MPI_INT, 
                MPI_MIN , 0 /* id of root node */,
                ring_comm);

        if(lowest_no < lowest_no_so_far) lowest_no_so_far = lowest_no;

        //printf("Min [ ");
        for(int j=0; j<p*2; j+=2){
            //printf("(%d, %d) ,",subarray[j], subarray[j+1]);
        }
        //printf("\b ] = %d.\n", lowest_no);
        //printf("Lowest no so far is %d\n", lowest_no_so_far);

        subarray = subarray + p*2;
    }/*for*/
    printf("Lowest no is %d.\n", lowest_no_so_far);

    free(array);
    return ;
}/* master */
Example #20
int main(int argc, char *argv[])
{
  int myrank, P, from, to, i, j, k;
  int tag = 666;		/* any value will do */
  MPI_Status status;

  MPI_Init (&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);	/* who am i */
  MPI_Comm_size(MPI_COMM_WORLD, &P); /* number of processors */

  /* Just to use the simple variants of MPI_Gather and MPI_Scatter we */
  /* impose that SIZE is divisible by P. By using the vector versions, */
  /* (MPI_Gatherv and MPI_Scatterv) it is easy to drop this restriction. */

  if (SIZE%P!=0) {
    if (myrank==0) printf("Matrix size not divisible by number of processors\n");
    MPI_Finalize();
    exit(-1);
  }

  from = myrank * SIZE/P;
  to = (myrank+1) * SIZE/P;

  /* Process 0 fills the input matrices and broadcasts them to the rest */
  /* (actually, only the relevant stripe of A is sent to each process) */

  if (myrank==0) {
    fill_matrix(A);
    fill_matrix(B);
  }

  MPI_Bcast (B, SIZE*SIZE, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Scatter (A, SIZE*SIZE/P, MPI_INT, A[from], SIZE*SIZE/P, MPI_INT, 0, MPI_COMM_WORLD);

  printf("computing slice %d (from row %d to %d)\n", myrank, from, to-1);
  for (i=from; i<to; i++)
    for (j=0; j<SIZE; j++) {
      C[i][j]=0;
      for (k=0; k<SIZE; k++)
	C[i][j] += A[i][k]*B[k][j];
    }

  MPI_Gather (C[from], SIZE*SIZE/P, MPI_INT, C, SIZE*SIZE/P, MPI_INT, 0, MPI_COMM_WORLD);

  if (myrank==0) {
    printf("\n\n");
    print_matrix(A);
    printf("\n\n\t       * \n");
    print_matrix(B);
    printf("\n\n\t       = \n");
    print_matrix(C);
    printf("\n\n");
  }

  MPI_Finalize();
  return 0;
}
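The comment in the example above points out that MPI_Scatterv and MPI_Gatherv remove the requirement that SIZE be divisible by P. A minimal sketch of that variant follows, reusing the globals A and SIZE; the names sendcounts, displs, myrows and localA are illustrative, not from the original:

/* Hedged sketch: scatter whole rows of A with MPI_Scatterv so that SIZE need
   not be divisible by P.  Counts and displacements are in ints (whole rows). */
int *sendcounts = malloc(P * sizeof(int));
int *displs     = malloc(P * sizeof(int));
for (int r = 0, offset = 0; r < P; r++) {
    int rows = SIZE / P + (r < SIZE % P ? 1 : 0);   /* spread the remainder */
    sendcounts[r] = rows * SIZE;
    displs[r]     = offset;
    offset       += rows * SIZE;
}
int myrows = sendcounts[myrank] / SIZE;
int (*localA)[SIZE] = malloc(myrows * SIZE * sizeof(int));
MPI_Scatterv(A, sendcounts, displs, MPI_INT,
             localA, myrows * SIZE, MPI_INT, 0, MPI_COMM_WORLD);
/* ...multiply the local rows against B, then return the result rows
   with the matching MPI_Gatherv call... */
free(localA); free(sendcounts); free(displs);
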
Example #21
FC_FUNC( mpi_scatter, MPI_SCATTER )
                         ( void *sendbuf, int *sendcount, int *sendtype,
			 void *recvbuf, int *recvcount, int *recvtype,
			 int *root, int *comm, int *ierror)
{
  *ierror = MPI_Scatter(sendbuf, *sendcount, *sendtype,
  			mpi_c_in_place(recvbuf), *recvcount, *recvtype,
			*root, *comm);
}
Example #22
value caml_mpi_scatter_intarray(value source, value dest,
                                value root, value comm)
{
  mlsize_t len = Wosize_val(dest);
  MPI_Scatter(&Field(source, 0), len, MPI_LONG,
              &Field(dest, 0), len, MPI_LONG,
              Int_val(root), Comm_val(comm));
  return Val_unit;
}
int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int my_rank; // Number of the node
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

  int node_count; // Total number of nodes
  MPI_Comm_size(MPI_COMM_WORLD, &node_count);
  
  // The root must load the input data to distribute to the other nodes
  if(my_rank == 0) {
    // In our case it generates a random array as input data
    srand(time(NULL));
    for(int item = 0; item < items; ++item)
      array[item] = rand();
  }
  
  int items_per_rank = items / node_count;
  int remainder_items = items % node_count;
  int* my_work;
  MPI_Alloc_mem(items_per_rank * sizeof(int), MPI_INFO_NULL, &my_work);
 
  // MPI_Scatter is a collective operation which distributes an equal-sized part of the given array to each node.
  MPI_Scatter(&array[remainder_items] /* send buffer */, items_per_rank /* send count per node */, MPI_INT /* send type */,
	      my_work /* receive buffer on each node */, items_per_rank /* receive count */ , MPI_INT /* receive type */, 
	      0 /* send buffer is stored on this rank */, MPI_COMM_WORLD /* communication channel */);
 
  // This is the actual working-loop
  long sub_sum = 0;
  for(int i=0; i < items_per_rank; i++)
    sub_sum += my_work[i];

  if(my_rank == 0) { // Scatter cannot deal with a division remainder so we manually deal with it
    while(remainder_items > 0)
      sub_sum += array[--remainder_items];
  }

  MPI_Free_mem(my_work);

  // MPI_Reduce with op-code MPI_SUM is a collective operation which sums up the input sub_sum of each node
  // into single a resulting output sum on the master.
  MPI_Reduce(&sub_sum /* input to sum up */, &sum /* output */, 1 /* input count */, MPI_LONG /* input type */,
	     MPI_SUM /* operation */, 0 /* output is stored on this rank */, MPI_COMM_WORLD /* communication channel */);
 
  if(my_rank == 0) {
    // The result of the computation now is available on rank 0.
    // We compare it with the sequential reference implementation to test our parallel implementation.
    if(sum == sum__sequential_reference_implementation())
      fprintf(stderr, "Test OK.\n");
    else
      fprintf(stderr, "Test FAILED!\n");
  }

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
  return EXIT_SUCCESS;
}
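The example above lets the root add the division remainder by hand; an alternative sketch using MPI_Scatterv hands out uneven shares so nothing is left over. It reuses array, items, node_count and my_rank from the example; counts, displs and my_part are illustrative names:

/* Hedged sketch: the first (items % node_count) ranks receive one extra item. */
int *counts = malloc(node_count * sizeof(int));
int *displs = malloc(node_count * sizeof(int));
for (int r = 0, offset = 0; r < node_count; r++) {
    counts[r] = items / node_count + (r < items % node_count ? 1 : 0);
    displs[r] = offset;
    offset   += counts[r];
}
int *my_part = malloc(counts[my_rank] * sizeof(int));
MPI_Scatterv(array, counts, displs, MPI_INT,
             my_part, counts[my_rank], MPI_INT, 0, MPI_COMM_WORLD);

long sub_sum = 0;                       /* same per-rank sum as before */
for (int i = 0; i < counts[my_rank]; i++)
    sub_sum += my_part[i];
/* the MPI_Reduce with MPI_SUM then works unchanged, and no remainder
   handling is needed on the root */
free(my_part); free(counts); free(displs);
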
Example #24
value caml_mpi_scatter_int(value data, value root, value comm)
{
  value n;

  MPI_Scatter(&Field(data, 0), 1, MPI_LONG,
              &n, 1, MPI_LONG,
              Int_val(root), Comm_val(comm));
  return n;
}
Example #25
value caml_mpi_scatter_float(value data, value root, value comm)
{
  mlsize_t len = Wosize_val(data) / Double_wosize;   /* number of doubles in the source array */
  double * src = caml_mpi_input_floatarray(data, len);
  double dst;
  MPI_Scatter(src, 1, MPI_DOUBLE, &dst, 1, MPI_DOUBLE,
              Int_val(root), Comm_val(comm));
  caml_mpi_free_floatarray(src);
  return copy_double(dst);
}
Example #26
int main(int argc,char *argv[]){
	int *myray,*send_ray=NULL,*back_ray=NULL;
#ifndef _CIVL
	int count;
#endif
	int size,mysize,i,k,j,total;
	
	init_it(&argc,&argv);
/* each processor will get count elements from the root */
#ifndef _CIVL
	count=4;
#endif
	myray=(int*)malloc(count*sizeof(int));
/* create the data to be sent on the root */
	if(myid == mpi_root){
	    size=count*numnodes;
		send_ray=(int*)malloc(size*sizeof(int));
		back_ray=(int*)malloc(numnodes*sizeof(int));
		for(i=0;i<size;i++)
			send_ray[i]=i;
		}
/* send different data to each processor */
	mpi_err = MPI_Scatter(	send_ray, count,   MPI_INT,
						    myray,    count,   MPI_INT,
	                 	    mpi_root,
	                 	    MPI_COMM_WORLD);
	                
/* each processor does a local sum */
	total=0;
	for(i=0;i<count;i++)
	    total=total+myray[i];
	printf("myid= %d total= %d\n",myid,total);
#ifdef _CIVL
	$assert(total == myid*25 + 10);
#endif
/* send the local sums back to the root */
    mpi_err = MPI_Gather(&total,    1,  MPI_INT, 
						back_ray, 1,  MPI_INT, 
	                 	mpi_root,                  
	                 	MPI_COMM_WORLD);
/* the root prints the global sum */
	if(myid == mpi_root){
	  total=0;
	  for(i=0;i<numnodes;i++)
	    total=total+back_ray[i];
	  printf("results from all processors= %d \n",total);
#ifdef _CIVL
	  $assert(total == 25*numnodes*(numnodes-1)/2+10*numnodes);
#endif
	}
#ifdef _CIVL
	free(myray);
	free(send_ray);
	free(back_ray);
#endif
    mpi_err = MPI_Finalize();
}
Example #27
/* 
 * ===  FUNCTION  ======================================================================
 *         Name:  Read_vec
 *  Description:  Generate random vectors a and b on process 0, broadcast the
 *                transposed b to all processes, and scatter blocks of a.
 * =====================================================================================
 */
	void
Read_vec (double *local_a,
		double *tmp,
		 int local_n,
		 int n,
		 int myrank,
		 MPI_Comm comm)
{
	double *a = NULL;
	double *b = NULL;
	if (myrank == 0)
	{
		srand(time(NULL));
		a = malloc( n*sizeof(double) );
		int i = 0;
		printf("a is\n");
		for (i=0; i<n;i++)
		{
			a[i] = rand()%10;
			if((i+1)%M == 0)
				printf("%lf\n", a[i]);
			else printf("%lf, ", a[i]);
		}
		printf("\nb is \n");	
		b = malloc( n*sizeof(double) );
		i = 0;
		for (i=0; i<n;i++)
		{
			b[i] = rand()%10 ;
			if((i+1) % N == 0)
				printf("%lf\n",b[i]);
			else printf("%lf, ", b[i]);
		}
		putchar('\n');
		Transpose(b, tmp, n, M, N); /* transpose b and store the result in tmp */
		MPI_Bcast(tmp, M*N,MPI_DOUBLE, 0,MPI_COMM_WORLD);
		MPI_Scatter(a, local_n, MPI_DOUBLE, local_a, local_n, MPI_DOUBLE, 0, comm);
		free(a);
		free(b);	
	}else{
		MPI_Bcast(tmp, N*M, MPI_DOUBLE, 0,MPI_COMM_WORLD);
		MPI_Scatter(a, local_n, MPI_DOUBLE, local_a, local_n, MPI_DOUBLE, 0, comm);	
	}	
}		/* -----  end of function Read_vec  ----- */
Example #28
/*---------------------------------------------------------------------
 * Function:  Read_matrix
 * Purpose:   Read in the matrix on process 0 and scatter it using a 
 *            block row distribution among the processes.
 * In args:   All except local_mat
 * Out arg:   local_mat
 */
void Read_matrix(int local_mat[], int n, int my_rank, int p, 
      MPI_Comm comm) { 
   int i, j;
   int* temp_mat = NULL;

   if (my_rank == 0) {
      temp_mat = malloc(n*n*sizeof(int));
      for (i = 0; i < n; i++)
         for (j = 0; j < n; j++)
            scanf("%d", &temp_mat[i*n+j]);
      MPI_Scatter(temp_mat, n*n/p, MPI_INT, 
                  local_mat, n*n/p, MPI_INT, 0, comm);
      free(temp_mat);
   } else {
      MPI_Scatter(temp_mat, n*n/p, MPI_INT, 
                  local_mat, n*n/p, MPI_INT, 0, comm);
   }

}  /* Read_matrix */
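A minimal sketch of how Read_matrix might be called; the surrounding setup (the value of n and the allocation of local_mat) is assumed and not part of the original:

/* Hypothetical caller: every process ends up with n/p consecutive rows. */
int n = 8, p, my_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
int *local_mat = malloc((n * n / p) * sizeof(int));   /* n*n assumed divisible by p */
Read_matrix(local_mat, n, my_rank, p, MPI_COMM_WORLD);
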
Example #29
void mpi_random_seed(int cnt, vector<int> &seeds) {
  int this_seed;
  mpi_call(mpi_random_seed_slave, -1, cnt);

  MPI_Scatter(&seeds[0], 1, MPI_INT, &this_seed, 1, MPI_INT, 0, comm_cart);

  RANDOM_TRACE(printf("%d: Received seed %d\n", this_node, this_seed));

  init_random_seed(this_seed);
}
Example #30
int main(int argc,char** argv){
	int rank,size,epp;
	int* A1=NULL;
	int* A2=NULL;
	int* Rec1=NULL;
	int* Rec2=NULL;
	int i,n;
	MPI_Init(NULL,NULL);
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);
	MPI_Comm_size(MPI_COMM_WORLD,&size);
	if(rank==0){
			printf("Enter size of arrays...\n");
			scanf("%d",&n);
			epp=n/size+(n%size==0?0:1);
			A1=(int*)malloc(sizeof(int)*size*epp);
			A2=(int*)malloc(sizeof(int)*size*epp);
			for(i=0;i<n;i++){
				printf("Enter A1[%d]:",i);
				scanf("%d",&A1[i]);
				printf("Enter A2[%d]:",i);
				scanf("%d",&A2[i]);
			}
			for(i=n;i<size*epp;i++){
				A1[i]=0;
				A2[i]=0;
			}
	}
	MPI_Bcast(&epp,1,MPI_INT,0,MPI_COMM_WORLD);
	Rec1=(int*)malloc(sizeof(int)*epp);
	Rec2=(int*)malloc(sizeof(int)*epp);
	MPI_Scatter(A1,epp,MPI_INT,Rec1,epp,MPI_INT,0,MPI_COMM_WORLD);
	MPI_Scatter(A2,epp,MPI_INT,Rec2,epp,MPI_INT,0,MPI_COMM_WORLD);
	for(i=0;i<epp;i++)
		Rec1[i]*=Rec2[i];
	MPI_Gather(Rec1,epp,MPI_INT,A1,epp,MPI_INT,0,MPI_COMM_WORLD);
	if(rank==0){
		printf("Data gathered : ");
		for(i=0;i<n;i++)
			printf("%d ",A1[i]);
		printf("\n");
	}
	MPI_Finalize();
}