Example #1
File: partest.c  Project: acaldero/MiMPI
int parclient ( int me, int nproc, char *processor_name, 
  	        int clients, int servers, int file_size, int a_size, int fs )
{

	double start_time;
	double used_time;
	double us_rate;
	int j;
	MPI_Comm world, clients_world;
	MPI_Group world_group, client_group;
	int ranks[32];   /* MPI_Group_excl expects an array of int ranks */

        printf("Client %d; total %d is alive on %s\n",me,nproc,processor_name);
	start_time = MPI_Wtime();	
	read_data(servers, clients, file_size, a_size);
	used_time = (MPI_Wtime() - start_time);

	us_rate = (double)(( file_size )/ (used_time*(double)1000000));
	printf("FS=%d KB, time= %f sec., rate=%f Mbytes/sec\n",
                        fs*KB, used_time, us_rate);


	world = MPI_COMM_WORLD;

        MPI_Comm_group(world, &world_group);
        for (j = 0; j < servers; j++)
                ranks[j] = j;
        MPI_Group_excl(world_group, servers, ranks, &client_group);
        MPI_Comm_create(world, client_group, &clients_world);

        MPI_Barrier(clients_world);

        printf("Client %d finished\n", me);
        return 1 ;
}
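The sequence above (MPI_Comm_group, MPI_Group_excl over the first servers ranks, then MPI_Comm_create) is how a clients-only communicator is carved out of MPI_COMM_WORLD. For reference, the following is a minimal self-contained sketch of that same pattern; the SERVERS constant and the surrounding scaffolding are hypothetical and not part of the project above.

#include <mpi.h>
#include <stdio.h>

#define SERVERS 2  /* hypothetical: number of leading ranks that act as servers */

/* Run with more than SERVERS processes. Ranks 0..SERVERS-1 are excluded
 * from the client group and therefore receive MPI_COMM_NULL. */
int main(int argc, char **argv)
{
    int i, rank, ranks[SERVERS];
    MPI_Group world_group, client_group;
    MPI_Comm client_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    for (i = 0; i < SERVERS; i++)
        ranks[i] = i;                      /* ranks to exclude */

    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Group_excl(world_group, SERVERS, ranks, &client_group);
    /* MPI_Comm_create is collective over MPI_COMM_WORLD */
    MPI_Comm_create(MPI_COMM_WORLD, client_group, &client_comm);

    if (client_comm != MPI_COMM_NULL) {
        MPI_Barrier(client_comm);          /* clients synchronize among themselves */
        printf("rank %d is a client\n", rank);
        MPI_Comm_free(&client_comm);
    }

    MPI_Group_free(&client_group);
    MPI_Group_free(&world_group);
    MPI_Finalize();
    return 0;
}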
Example #2
File: pio_server.c  Project: AZed/cdo
static void
serverWinCreate(void)
{
  int ranks[1], modelID;
  MPI_Comm commCalc = commInqCommCalc ();
  MPI_Group groupCalc;
  int nProcsModel = commInqNProcsModel ();
  MPI_Info no_locks_info;
  xmpi(MPI_Info_create(&no_locks_info));
  xmpi(MPI_Info_set(no_locks_info, "no_locks", "true"));

  xmpi(MPI_Win_create(MPI_BOTTOM, 0, 1, no_locks_info, commCalc, &getWin));

  /* target group */
  ranks[0] = nProcsModel;
  xmpi ( MPI_Comm_group ( commCalc, &groupCalc ));
  xmpi ( MPI_Group_excl ( groupCalc, 1, ranks, &groupModel ));

  rxWin = xcalloc((size_t)nProcsModel, sizeof (rxWin[0]));
  size_t totalBufferSize = collDefBufferSizes();
  rxWin[0].buffer = (unsigned char*) xmalloc(totalBufferSize);
  size_t ofs = 0;
  for ( modelID = 1; modelID < nProcsModel; modelID++ )
    {
      ofs += rxWin[modelID - 1].size;
      rxWin[modelID].buffer = rxWin[0].buffer + ofs;
    }

  xmpi(MPI_Info_free(&no_locks_info));

  xdebug("%s", "created mpi_win, allocated getBuffer");
}
Example #3
int main (int argc, char **argv)
{
  int num, i, rank, localRank;
  MPI_Group all, odd, even;
  MPI_Comm oddComm, evenComm;
  char mess[11];

  MPI_Init (&argc, &argv);
  // copy all the processes in group "all"
  MPI_Comm_group (MPI_COMM_WORLD, &all);
  MPI_Comm_size (MPI_COMM_WORLD, &num);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);

  int grN = 0;
  int ranks[num / 2];

  for (i = 0; i < num; i += 2)
    ranks[grN++] = i;

  // extract from "all" only the odd ones
  MPI_Group_excl (all, grN, ranks, &odd);
  // subtract the odd group from "all" to get the even ones
  MPI_Group_difference (all, odd, &even);

  MPI_Comm_create (MPI_COMM_WORLD, odd, &oddComm);
  MPI_Comm_create (MPI_COMM_WORLD, even, &evenComm);
  
  // check group membership
  MPI_Group_rank (odd, &localRank);
  if (localRank != MPI_UNDEFINED)
    {
      if (localRank == 0)       // local group root sets up the message
        strcpy (mess, "ODD GROUP");
      MPI_Bcast (mess, 11, MPI_CHAR, 0, oddComm);
      MPI_Comm_free (&oddComm);  // free communicator in processes where it is valid
    }
  else
    {
      MPI_Comm_rank (evenComm, &localRank);
      if (localRank == 0)       // local group root sets up the message
        strcpy (mess, "EVEN GROUP");
      MPI_Bcast (mess, 11, MPI_CHAR, 0, evenComm);
      MPI_Comm_free (&evenComm);
    }

  printf ("Process %i with local rank %i received %s\n", rank, localRank, mess);

  // free up memory
  MPI_Group_free (&all);
  MPI_Group_free (&odd);
  MPI_Group_free (&even);
  MPI_Finalize ();
  return 0;
}
Example #4
File: groups.c  Project: DMClambo/pfff
value caml_mpi_group_excl(value group, value vranks)
{
  MPI_Group newgroup;
  int n = Wosize_val(vranks);
  int * ranks = stat_alloc(n * sizeof(int));
  int i;

  for (i = 0; i < n; i++) ranks[i] = Int_val(Field(vranks, i));
  MPI_Group_excl(Group_val(group), n, ranks, &newgroup);
  stat_free(ranks);
  return caml_mpi_alloc_group(newgroup);
}
Example #5
File: mpi_Group.c  Project: 00datman/ompi
JNIEXPORT jlong JNICALL Java_mpi_Group_excl(
        JNIEnv *env, jobject jthis, jlong group, jintArray ranks)
{
    jsize n = (*env)->GetArrayLength(env, ranks);
    jint *jRanks;
    int  *cRanks;
    ompi_java_getIntArray(env, ranks, &jRanks, &cRanks);

    MPI_Group newGroup;
    int rc = MPI_Group_excl((MPI_Group)group, n, cRanks, &newGroup);
    ompi_java_exceptionCheck(env, rc);

    ompi_java_forgetIntArray(env, ranks, jRanks, cRanks);
    return (jlong)newGroup;
}
Example #6
int main(int argc, char *argv[])
{
    int size, rank, i, *excl;
    MPI_Group world_group, even_group;
    MPI_Comm  __attribute__((unused)) even_comm;

    MPI_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (size % 2) {
        fprintf(stderr, "this program requires a multiple of 2 number of processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
        exit(1);
    }

    excl = malloc((size / 2) * sizeof(int));
    assert(excl);

    /* exclude the odd ranks */
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    /* Create some groups */
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Group_excl(world_group, size / 2, excl, &even_group);
    MPI_Group_free(&world_group);

#if !defined(USE_STRICT_MPI) && defined(MPICH)
    if (rank % 2 == 0) {
        /* Even processes create a group for themselves */
        MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, &even_comm);
        MPI_Barrier(even_comm);
        MPI_Comm_free(&even_comm);
    }
#endif /* USE_STRICT_MPI */

    MPI_Group_free(&even_group);
    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0)
        printf(" No errors\n");

    MPI_Finalize();
    return 0;
}
Example #7
/*
 * Class:     mpi_Group
 * Method:    excl
 * Signature: ([I)J
 */
JNIEXPORT jlong JNICALL Java_mpi_Group_excl(JNIEnv *env, jobject jthis, jintArray ranks)
{
    int n;
    jint *rks;
    jboolean isCopy=JNI_TRUE;
    MPI_Group newgroup;

    ompi_java_clearFreeList(env) ;

    n=(*env)->GetArrayLength(env,ranks);
    rks=(*env)->GetIntArrayElements(env,ranks,&isCopy);
    MPI_Group_excl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
                   n, (int*)rks,
                   &newgroup);
    (*env)->ReleaseIntArrayElements(env,ranks,rks,0);
    return (jlong)newgroup;
}
Example #8
int main (int argc, char **argv)
{
  int num, i, rank;
  MPI_Group all, odd, even;

  MPI_Init (&argc, &argv);
  // copy all the processes in group "all"
  MPI_Comm_group (MPI_COMM_WORLD, &all);
  MPI_Comm_size (MPI_COMM_WORLD, &num);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);

  int grN = 0;
  int ranks[num / 2];

  for (i = 0; i < num; i += 2)
    ranks[grN++] = i;

  // extract from "all" only the odd ones
  MPI_Group_excl (all, grN, ranks, &odd);
  // subtract the odd group from "all" to get the even ones
  MPI_Group_difference (all, odd, &even);

  // print group sizes
  if (rank == 0)
    {
      MPI_Group_size (odd, &i);
      printf ("Odd group has %i processes\n", i);
      MPI_Group_size (even, &i);
      printf ("Even group has %i processes\n", i);
    }

  // check group membership
  MPI_Group_rank (odd, &i);
  if (i == MPI_UNDEFINED)
    printf ("Process %i belongs to even group\n", rank);
  else
    printf ("Process %i belongs to odd group\n", rank);

  // free up memory
  MPI_Group_free (&all);
  MPI_Group_free (&odd);
  MPI_Group_free (&even);
  MPI_Finalize ();
  return 0;
}
Example #9
/**
 * this is based on the MPI Forum decision that MPI_COMM_WORLD is a C constant 
 */
void make_tcgmsg_comm()
{
    extern int single_cluster();

#ifdef NXTVAL_SERVER
    if( SR_parallel ){   
        /* data server for a single process */
        int server;
        MPI_Group MPI_GROUP_WORLD, tcgmsg_grp;

        MPI_Comm_size(MPI_COMM_WORLD, &server);
        server --; /* the highest numbered process will be excluded */
        MPI_Comm_group(MPI_COMM_WORLD, &MPI_GROUP_WORLD);
        MPI_Group_excl(MPI_GROUP_WORLD, 1, &server, &tcgmsg_grp); 
        MPI_Comm_create(MPI_COMM_WORLD, tcgmsg_grp, &TCGMSG_Comm); 
    }else
#endif
        TCGMSG_Comm = MPI_COMM_WORLD; 
}
Example #10
static VALUE group_excl(VALUE self, VALUE ary)
{
    int rv, i, len, *ranks;
    MPI_Group *grp, *newgrp;

    Data_Get_Struct(self, MPI_Group, grp);

    newgrp = ALLOC(MPI_Group);

    len = RARRAY(ary)->len;
    ranks = ALLOCA_N(int, len);

    for (i = 0; i < len; i++)
        ranks[i] = FIX2INT(rb_ary_entry(ary, i));

    rv = MPI_Group_excl(*grp, len, ranks, newgrp);
    mpi_exception(rv);

    return group_new(newgrp);
}
Example #11
void ompi_group_excl_f(MPI_Fint *group, MPI_Fint *n,
		      MPI_Fint *ranks, MPI_Fint *newgroup,
		      MPI_Fint *ierr)
{
  int c_ierr;
  ompi_group_t *c_group, *c_newgroup;
  OMPI_ARRAY_NAME_DECL(ranks);

  /* Make the fortran to c representation conversion */
  c_group = MPI_Group_f2c(*group);

  OMPI_ARRAY_FINT_2_INT(ranks, *n);
  c_ierr = MPI_Group_excl(c_group,
                          OMPI_FINT_2_INT(*n),
                          OMPI_ARRAY_NAME_CONVERT(ranks),
                          &c_newgroup);
  if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);

  /* translate the results from c to fortran */
  if (MPI_SUCCESS == c_ierr) {
      *newgroup = c_newgroup->grp_f_to_c_index;
  }
}
Example #12
/*
 * This test attempts collective communication after a process in
 * the communicator has failed.
 */
int main(int argc, char **argv)
{
    int rank, size, i, rc, errclass, toterrs, errs = 0;
    char rbuf[100000];
    char *sendbuf;
    int deadprocs[1] = {1};
    MPI_Group world, newgroup;
    MPI_Comm newcomm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    if (size < 3) {
        fprintf( stderr, "Must run with at least 3 processes\n" );
        MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    if (rank == 1) {
        exit(EXIT_FAILURE);
    }

    /* try a small send first */
    sendbuf = (char *)malloc(10*size*sizeof(char));

    if (rank == 0) {
      for (i=0;i<size;i++) {
          strcpy(sendbuf + i*10, "No Errors");
      }
    }

    rc = MPI_Scatter(sendbuf, 10, MPI_CHAR, rbuf, 10, MPI_CHAR, 0, MPI_COMM_WORLD);

#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
    MPI_Error_class(rc, &errclass);
    if ((rc) && (errclass != MPIX_ERR_PROC_FAILED)) {
        fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n", errclass);
        errs++;
    }
#endif

    /* reset the buffers and try a larger scatter */
    free(sendbuf);
    memset(rbuf, 0, sizeof(rbuf));
    sendbuf = (char *)malloc(100000*size*sizeof(char));

    if (rank == 0) {
      for (i=0;i<size;i++) {
          strcpy(sendbuf + i*100000, "No Errors");
      }
    }

    rc = MPI_Scatter(sendbuf, 100000, MPI_CHAR, rbuf, 100000, MPI_CHAR, 0, MPI_COMM_WORLD);

#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
    MPI_Error_class(rc, &errclass);
    if ((rc) && (errclass != MPIX_ERR_PROC_FAILED)) {
        fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n", errclass);
        errs++;
    }
#endif

    MPI_Comm_group(MPI_COMM_WORLD, &world);
    MPI_Group_excl(world, 1, deadprocs, &newgroup);
    MPI_Comm_create_group(MPI_COMM_WORLD, newgroup, 0, &newcomm);

    rc = MPI_Reduce(&errs, &toterrs, 1, MPI_INT, MPI_SUM, 0, newcomm);
    if(rc)
        fprintf(stderr, "Failed to get errors from other processes\n");

    if (rank == 0) {
        if (toterrs) {
            printf( " Found %d errors\n", toterrs );
        }
        else {
            printf( " No Errors\n" );
        }
        fflush(stdout);
    }

    free(sendbuf);

    MPI_Finalize();

    return 0;
}
Example #13
File: MPI-api.c  Project: 8l/rose
void declareBindings (void)
{
  /* === Point-to-point === */
  void* buf;
  int count;
  MPI_Datatype datatype;
  int dest;
  int tag;
  MPI_Comm comm;
  MPI_Send (buf, count, datatype, dest, tag, comm); // L12
  int source;
  MPI_Status status;
  MPI_Recv (buf, count, datatype, source, tag, comm, &status); // L15
  MPI_Get_count (&status, datatype, &count);
  MPI_Bsend (buf, count, datatype, dest, tag, comm);
  MPI_Ssend (buf, count, datatype, dest, tag, comm);
  MPI_Rsend (buf, count, datatype, dest, tag, comm);
  void* buffer;
  int size;
  MPI_Buffer_attach (buffer, size); // L22
  MPI_Buffer_detach (buffer, &size);
  MPI_Request request;
  MPI_Isend (buf, count, datatype, dest, tag, comm, &request); // L25
  MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Issend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irecv (buf, count, datatype, source, tag, comm, &request);
  MPI_Wait (&request, &status);
  int flag;
  MPI_Test (&request, &flag, &status); // L32
  MPI_Request_free (&request);
  MPI_Request* array_of_requests;
  int index;
  MPI_Waitany (count, array_of_requests, &index, &status); // L36
  MPI_Testany (count, array_of_requests, &index, &flag, &status);
  MPI_Status* array_of_statuses;
  MPI_Waitall (count, array_of_requests, array_of_statuses); // L39
  MPI_Testall (count, array_of_requests, &flag, array_of_statuses);
  int incount;
  int outcount;
  int* array_of_indices;
  MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L44--45
  MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L46--47
  MPI_Iprobe (source, tag, comm, &flag, &status); // L48
  MPI_Probe (source, tag, comm, &status);
  MPI_Cancel (&request);
  MPI_Test_cancelled (&status, &flag);
  MPI_Send_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Recv_init (buf, count, datatype, source, tag, comm, &request);
  MPI_Start (&request);
  MPI_Startall (count, array_of_requests);
  void* sendbuf;
  int sendcount;
  MPI_Datatype sendtype;
  int sendtag;
  void* recvbuf;
  int recvcount;
  MPI_Datatype recvtype;
  int recvtag; /* message tags are plain ints */
  MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag,
		recvbuf, recvcount, recvtype, source, recvtag,
		comm, &status); // L67--69
  MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source, recvtag,
			comm, &status); // L70--71
  MPI_Datatype oldtype;
  MPI_Datatype newtype;
  MPI_Type_contiguous (count, oldtype, &newtype); // L74
  int blocklength;
  {
    int stride;
    MPI_Type_vector (count, blocklength, stride, oldtype, &newtype); // L78
  }
  {
    MPI_Aint stride;
    MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype); // L82
  }
  int* array_of_blocklengths;
  {
    int* array_of_displacements;
    MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements,
		      oldtype, &newtype); // L87--88
  }
  {
    MPI_Aint* array_of_displacements;
    MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements,
                       oldtype, &newtype); // L92--93
    MPI_Datatype* array_of_types;
    MPI_Type_struct (count, array_of_blocklengths, array_of_displacements,
                     array_of_types, &newtype); // L95--96
  }
  void* location;
  MPI_Aint address;
  MPI_Address (location, &address); // L100
  MPI_Aint extent;
  MPI_Type_extent (datatype, &extent); // L102
  MPI_Type_size (datatype, &size);
  MPI_Aint displacement;
  MPI_Type_lb (datatype, &displacement); // L105
  MPI_Type_ub (datatype, &displacement);
  MPI_Type_commit (&datatype);
  MPI_Type_free (&datatype);
  MPI_Get_elements (&status, datatype, &count);
  void* inbuf;
  void* outbuf;
  int outsize;
  int position;
  MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114
  int insize;
  MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype,
	      comm); // L116--117
  MPI_Pack_size (incount, datatype, comm, &size);

  /* === Collectives === */
  MPI_Barrier (comm); // L121
  int root;
  MPI_Bcast (buffer, count, datatype, root, comm); // L123
  MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
	      root, comm); // L124--125
  int* recvcounts;
  int* displs;
  MPI_Gatherv (sendbuf, sendcount, sendtype,
               recvbuf, recvcounts, displs, recvtype,
	       root, comm); // L128--130
  MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
               root, comm); // L131--132
  int* sendcounts;
  MPI_Scatterv (sendbuf, sendcounts, displs, sendtype,
		recvbuf, recvcount, recvtype, root, comm); // L134--135
  MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                 comm); // L136--137
  MPI_Allgatherv (sendbuf, sendcount, sendtype,
		  recvbuf, recvcounts, displs, recvtype,
		  comm); // L138--140
  MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
		comm); // L141--142
  int* sdispls;
  int* rdispls;
  MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype,
                 recvbuf, recvcounts, rdispls, recvtype,
		 comm); // L145--147
  MPI_Op op;
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op); // L153
#endif
  MPI_Op_free (&op); // L155
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size); // L162
  int rank;
  MPI_Group_rank (group, &rank); // L164
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170
  int result;
  MPI_Group_compare (group1, group2, &result); // L172
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup); // L174
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup); // L178
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup); // L181
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm); // L194
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader, tag,
			&newintercomm); // L204--205
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209
  int keyval;
#if 0
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
  MPI_Keyval_free (&keyval); // L217
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val); // L219
  MPI_Attr_get (comm, keyval, attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen); // L226
  MPI_Errhandler errhandler;
#if 0
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler); // L230
#endif
  MPI_Errhandler_set (comm, errhandler); // L232
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen); // L237
  int errorclass;
  MPI_Error_class (errorcode, &errorclass); // L239
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv); // L244
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
Example #14
//main program
int main(int argc, char *argv[])
{
	MPI_Init(&argc, &argv);
	int size, rank;
	long t1, t2;
	static int ranks[1] = { 0 };
	MPI_Request request1, request2, request3, request4;
	MPI_Status status, status1, status2, status3, status4;
	MPI_Group MPI_GROUP_WORLD, grprem;
	MPI_Comm commslave, newcomm;
	MPI_Comm commsp;
	MPI_Comm_group(MPI_COMM_WORLD, &MPI_GROUP_WORLD);
	MPI_Group_excl(MPI_GROUP_WORLD, 1, ranks, &grprem);
	MPI_Comm_create(MPI_COMM_WORLD, grprem, &commslave);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	printf("Node %d in %d is ready\n", rank, size);
	//Initialize
	double *grid0 = creat_grid(x_size, y_size, z_size, size, rank, GRID0);
	double *grid1 = creat_grid(x_size, y_size, z_size, size, rank, GRID1);
	MPI_Barrier(MPI_COMM_WORLD);
	if (size != 1)
	{
		if (rank == 0)
		{
			for (int i = 1; i < size; i++)
			{
				int len = (i == size - 1) ? (x_size / (size - 1)) : (x_size / (size - 1) + x_size % (size - 1));
				MPI_Ssend(grid0 + (i - 1)*x_size / (size - 1), len*y_size*z_size, MPI_DOUBLE, i, i, MPI_COMM_WORLD);
			}
		}
		else
		{
			for (int i = 1; i < size; i++)
			{
				if (rank == i)
				{
					int len = (i == size - 1) ? (x_size / (size - 1)) : (x_size / (size - 1) + x_size % (size - 1));
					MPI_Recv(grid0 + y_size*z_size, len*y_size*z_size, MPI_DOUBLE, 0, i, MPI_COMM_WORLD, &status);
				}
			}

		}
	}
	//Compute
	if (rank == 0) printf("Start computing...\n");
	if (rank != 0 && size > 1)
	{
		for (int t = 0; t < stepnum; t++)
		{
			compute(grid0, grid1, rank, size);
			// send the right slice of data to the next node, then receive the right slice of data from the next node
			if (rank < size - 1)
			{
				MPI_Ssend(grid1 + (1 + x_size / (size - 1))*y_size*z_size,
					y_size*z_size, MPI_DOUBLE, rank + 1, rank, MPI_COMM_WORLD);
				MPI_Recv(grid1 + (1 + x_size / (size - 1))*y_size*z_size,
					y_size*z_size, MPI_DOUBLE, rank + 1, rank + 1, MPI_COMM_WORLD, &status1);
				//MPI_Wait(&request1, &status1);
				//MPI_Wait(&request2, &status2);
			}
			// receive the left slice of data from the previous node, then send the left slice of data to the previous node
			if (rank > 1)
			{
				MPI_Recv(grid1, y_size*z_size, MPI_DOUBLE, rank - 1, rank - 1, MPI_COMM_WORLD, &status2);
				MPI_Ssend(grid1, y_size*z_size, MPI_DOUBLE, rank - 1, rank, MPI_COMM_WORLD);
				//MPI_Wait(&request3, &status3);
				//MPI_Wait(&request4, &status4);
			}
			double *temp;
			temp = grid0;
			grid0 = grid1;
			grid1 = temp;
			MPI_Barrier(commslave);
		}
	}
	else if (size == 1)
	{
		for (int t = 0; t < stepnum; t++)
		{
			compute(grid0, grid1, rank, size);
			double *temp;
			temp = grid0;
			grid0 = grid1;
			grid1 = temp;
		}
	}
	else { ; }
	MPI_Barrier(MPI_COMM_WORLD);
	printf("Rank %d finished computing!\n", rank);
	// Gather data from the nodes to the host
	if (size != 1)
	{
		if (stepnum % 2)
		{
			double *temp;
			temp = grid0;
			grid0 = grid1;
			grid1 = temp;
		}
		for (int i = 1; i < size; i++)
		{
			if (rank == i)
			{
				int len = (i == size - 1) ? (x_size / (size - 1)) : (x_size / (size - 1) + x_size % (size - 1));
				MPI_Ssend(grid0 + y_size*z_size, len*y_size*z_size, MPI_DOUBLE, 0, i, MPI_COMM_WORLD);
				//MPI_Wait(&request2, &status2);
			}
		}
		if (rank == 0)
		{
			for (int i = 1; i < size; i++)
			{
				int len = (i == size - 1) ? (x_size / (size - 1)) : (x_size / (size - 1) + x_size % (size - 1));
				MPI_Recv(grid1, len*y_size*z_size, MPI_DOUBLE, i, i, MPI_COMM_WORLD, &status3);
				//MPI_Wait(&request1, &status1);
			}
		}
	}
	MPI_Barrier(MPI_COMM_WORLD);
	if (rank == 0) printf("All work complete\n");
	MPI_Finalize();
	return 0;
}
Example #15
int main (int argc, char *argv[])
{
    
    int iter;
    int in, out, i, ierr, ranks[1], done;
    double x, y, Pi, error, epsilon;
    double start_time, end_time;
    int numprocs, master, myrank, workerrank; 
    int totalin, totalout;
    long max, total;
    int rands[CHUNKSIZE], request;
    
    MPI_Comm world, workers;    // two communicators
    MPI_Group world_group, worker_group;    // two groups
    MPI_Status status;
    
    if (MPI_Init(&argc, &argv) != MPI_SUCCESS)
    {
        fprintf(stderr, "MPI initialization error\n");
        exit(-1);
    }
    
    // world is the MPI_COMM_WORLD communicator
    world = MPI_COMM_WORLD;     
    // get the processors within world communicator
    MPI_Comm_size(world, &numprocs);  
    // assign the rank for each processor in the communicator  
    MPI_Comm_rank(world, &myrank);
    
    if (argc <= 1) 
    {
        fprintf(stderr, "Usage: mpirun -np number_of_process ./mcpi number_of_epsilon number_of_iterations\n");
        MPI_Finalize();
        exit(-1);
    }
    
    // assign to master the rank of the last process in the world communicator
    master = numprocs - 1;      
    // if it is the first process, read the input from console
    if(myrank == 0)
    {
        sscanf(argv[1], "%lf", &epsilon);
        //sscanf(argv[2], "%ld", &total);
    }

    // then send the epsilon to all the worker process
    MPI_Bcast(&epsilon, 1, MPI_DOUBLE, 0, world);
    //MPI_Bcast(&total, 1, MPI_INT, 0, MPI_COMM_WORLD);
    
    // create the world group associated with world communicator
    MPI_Comm_group(world, &world_group);
    ranks[0] = master;
    // create the worker group from world group, but exclude the master process
    MPI_Group_excl(world_group, 1, ranks, &worker_group);
    // create a new communicator from existing communicator and group
    // this is the communicator we will use when we wish to do collective operations
    // that do not involve the master process
    MPI_Comm_create(world, worker_group, &workers);
    // free the two groups, we don't need them any more,
    // the remaining work is handled through the communicators
    MPI_Group_free(&worker_group);
    MPI_Group_free(&world_group);
    
    
    // record the start time
    start_time = MPI_Wtime();
    // the master process is in charge of producing the random points,
    // and then sends them to the worker processes.
    if(myrank == master)
    {
        #if RANDOM_SEED
        struct timeval time;
        gettimeofday(&time, 0);
        // initialize the random number generator
        srandom((int)(time.tv_usec * 1000000 + time.tv_sec));
        #endif
        do 
        {
            // check whether the worker processes need new random points
            MPI_Recv(&request, 1, MPI_INT, MPI_ANY_SOURCE, REQUEST_TAG, world, &status);
            if(request)
            {
                for(i = 0; i < CHUNKSIZE;)
                {
                    rands[i] = random();
                    if (rands[i] <= INT_MAX) i++;
                }
                MPI_Send(rands, CHUNKSIZE, MPI_INT, status.MPI_SOURCE, REPLY_TAG, world);
            }
        }
        while (request > 0);
    }
    // if it is a worker process
    else
    {
        // if request is 1, the worker process asks the master process for random points
        request = 1;
        done = in = out = 0;
        max = (1 << 31) - 1;      // max int, for normalization
        MPI_Send(&request, 1, MPI_INT, master, REQUEST_TAG, world);
        // get the rank of current process in worker communicator
        MPI_Comm_rank(workers, &workerrank);
        iter = 0;
        while (!done)
        {
            iter++;
            request = 1;
            MPI_Recv(rands, CHUNKSIZE, MPI_INT, master, REPLY_TAG, world, &status);
            for (i = 0; i < CHUNKSIZE-1;)
            {
                #if QUARTER_CIRCLE
                x = ((double) rands[i++])/max;
                y = ((double) rands[i++])/max;
                #else
                // avoid x and y getting the same number
                x = (((double) rands[i++])/max) * 2 - 1;
                y = (((double) rands[i++])/max) * 2 - 1;
                #endif
                // if the point is in the circle, increase in
                if(x*x + y*y < 1.0)
                {
                    in++;
                }
                // if the point is out of the circle, increase out
                else
                {
                    out++;
                }
            }
            // combine the result from each worker process, 
            // and then send back to each worker process
            MPI_Allreduce(&in, &totalin, 1, MPI_INT, MPI_SUM, workers);
            MPI_Allreduce(&out, &totalout, 1, MPI_INT, MPI_SUM, workers);
            
            // if the stop condition is not satisfied,
            // ask the master process to send points again
            if(myrank == 0)
            {
                Pi = (4.0 * totalin)/(totalin + totalout);
                error = fabs(Pi - M_PI);
                // stop when the error is acceptable or the total number of points exceeds TOTAL
                done = (error < epsilon || (totalin + totalout) > TOTAL);
                request = (done) ? 0 : 1;
                MPI_Send(&request, 1, MPI_INT, master, REQUEST_TAG, world);
                MPI_Bcast(&done, 1, MPI_INT, 0, workers);
            }
            else
            {
                MPI_Bcast(&done, 1, MPI_INT, 0, workers);
                if(request)
                {
                    MPI_Send(&request, 1, MPI_INT, master, REQUEST_TAG, world);
                }
            }
        }
        MPI_Comm_free(&workers);
    }
    end_time = MPI_Wtime();
    
    if(myrank == 0)
    {
        printf("pi is: %23.20f\n", Pi);
        printf("\ntotal points: %d\nin: %d, out: %d, <ret> to exit\n", totalin+totalout, totalin, totalout);
        printf("It took %f seconds\n", end_time - start_time);
        getchar();
    }
    
    // MPE_Close_graphics(&graph);
    MPI_Finalize();
    
    return 0;
}
Example #16
File: mtest.c  Project: mpoquet/simgrid
/*
 * Return an intercomm; set isLeftGroup to 1 if the calling process is
 * a member of the "left" group.
 */
int MTestGetIntercomm(MPI_Comm * comm, int *isLeftGroup, int min_size)
{
    int size, rank, remsize, merr;
    int done = 0;
    MPI_Comm mcomm = MPI_COMM_NULL;
    MPI_Comm mcomm2 = MPI_COMM_NULL;
    int rleader;

    /* The while loop allows us to skip communicators that are too small.
     * MPI_COMM_NULL is always considered large enough.  The size is
     * the sum of the sizes of the local and remote groups */
    while (!done) {
        *comm = MPI_COMM_NULL;
        *isLeftGroup = 0;
        interCommName = "MPI_COMM_NULL";

        switch (interCommIdx) {
        case 0:
            /* Split comm world in half */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size > 1) {
                merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);
                if (rank == 0) {
                    rleader = size / 2;
                }
                else if (rank == size / 2) {
                    rleader = 0;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank < size / 2;
                merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm);
                if (merr)
                    MTestPrintError(merr);
                interCommName = "Intercomm by splitting MPI_COMM_WORLD";
            }
            else
                *comm = MPI_COMM_NULL;
            break;
        case 1:
            /* Split comm world in to 1 and the rest */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size > 1) {
                merr = MPI_Comm_split(MPI_COMM_WORLD, rank == 0, rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);
                if (rank == 0) {
                    rleader = 1;
                }
                else if (rank == 1) {
                    rleader = 0;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank == 0;
                merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12346, comm);
                if (merr)
                    MTestPrintError(merr);
                interCommName = "Intercomm by splitting MPI_COMM_WORLD into 1, rest";
            }
            else
                *comm = MPI_COMM_NULL;
            break;

        case 2:
            /* Split comm world in to 2 and the rest */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size > 3) {
                merr = MPI_Comm_split(MPI_COMM_WORLD, rank < 2, rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);
                if (rank == 0) {
                    rleader = 2;
                }
                else if (rank == 2) {
                    rleader = 0;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank < 2;
                merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12347, comm);
                if (merr)
                    MTestPrintError(merr);
                interCommName = "Intercomm by splitting MPI_COMM_WORLD into 2, rest";
            }
            else
                *comm = MPI_COMM_NULL;
            break;

        case 3:
            /* Split comm world in half, then dup */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size > 1) {
                merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);
                if (rank == 0) {
                    rleader = size / 2;
                }
                else if (rank == size / 2) {
                    rleader = 0;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank < size / 2;
                merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm);
                if (merr)
                    MTestPrintError(merr);
                /* avoid leaking after assignment below */
                merr = MPI_Comm_free(&mcomm);
                if (merr)
                    MTestPrintError(merr);

                /* now dup, some bugs only occur for dup's of intercomms */
                mcomm = *comm;
                merr = MPI_Comm_dup(mcomm, comm);
                if (merr)
                    MTestPrintError(merr);
                interCommName = "Intercomm by splitting MPI_COMM_WORLD then dup'ing";
            }
            else
                *comm = MPI_COMM_NULL;
            break;

        case 4:
            /* Split comm world in half, form intercomm, then split that intercomm */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size > 1) {
                merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);
                if (rank == 0) {
                    rleader = size / 2;
                }
                else if (rank == size / 2) {
                    rleader = 0;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank < size / 2;
                merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm);
                if (merr)
                    MTestPrintError(merr);
                /* avoid leaking after assignment below */
                merr = MPI_Comm_free(&mcomm);
                if (merr)
                    MTestPrintError(merr);

                /* now split, some bugs only occur for splits of intercomms */
                mcomm = *comm;
                merr = MPI_Comm_rank(mcomm, &rank);
                if (merr)
                    MTestPrintError(merr);
                /* this split is effectively a dup but tests the split code paths */
                merr = MPI_Comm_split(mcomm, 0, rank, comm);
                if (merr)
                    MTestPrintError(merr);
                interCommName = "Intercomm by splitting MPI_COMM_WORLD then then splitting again";
            }
            else
                *comm = MPI_COMM_NULL;
            break;

        case 5:
            /* split comm world in half discarding rank 0 on the "left"
             * communicator, then form them into an intercommunicator */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size >= 4) {
                int color = (rank < size / 2 ? 0 : 1);
                if (rank == 0)
                    color = MPI_UNDEFINED;

                merr = MPI_Comm_split(MPI_COMM_WORLD, color, rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);

                if (rank == 1) {
                    rleader = size / 2;
                }
                else if (rank == (size / 2)) {
                    rleader = 1;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank < size / 2;
                if (rank != 0) {        /* 0's mcomm is MPI_COMM_NULL */
                    merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm);
                    if (merr)
                        MTestPrintError(merr);
                }
                interCommName =
                    "Intercomm by splitting MPI_COMM_WORLD (discarding rank 0 in the left group) then MPI_Intercomm_create'ing";
            }
            else {
                *comm = MPI_COMM_NULL;
            }
            break;

        case 6:
            /* Split comm world in half then form them into an
             * intercommunicator.  Then discard rank 0 from each group of the
             * intercomm via MPI_Comm_create. */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size >= 4) {
                MPI_Group oldgroup, newgroup;
                int ranks[1];
                int color = (rank < size / 2 ? 0 : 1);

                merr = MPI_Comm_split(MPI_COMM_WORLD, color, rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);

                if (rank == 0) {
                    rleader = size / 2;
                }
                else if (rank == (size / 2)) {
                    rleader = 0;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank < size / 2;
                merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, &mcomm2);
                if (merr)
                    MTestPrintError(merr);

                /* We have an intercomm between the two halves of comm world. Now create
                 * a new intercomm that removes rank 0 on each side. */
                merr = MPI_Comm_group(mcomm2, &oldgroup);
                if (merr)
                    MTestPrintError(merr);
                ranks[0] = 0;
                merr = MPI_Group_excl(oldgroup, 1, ranks, &newgroup);
                if (merr)
                    MTestPrintError(merr);
                merr = MPI_Comm_create(mcomm2, newgroup, comm);
                if (merr)
                    MTestPrintError(merr);

                merr = MPI_Group_free(&oldgroup);
                if (merr)
                    MTestPrintError(merr);
                merr = MPI_Group_free(&newgroup);
                if (merr)
                    MTestPrintError(merr);

                interCommName =
                    "Intercomm by splitting MPI_COMM_WORLD then discarding 0 ranks with MPI_Comm_create";
            }
            else {
                *comm = MPI_COMM_NULL;
            }
            break;

        default:
            *comm = MPI_COMM_NULL;
            interCommIdx = -1;
            break;
        }

        if (*comm != MPI_COMM_NULL) {
            merr = MPI_Comm_size(*comm, &size);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_remote_size(*comm, &remsize);
            if (merr)
                MTestPrintError(merr);
            if (size + remsize >= min_size)
                done = 1;
        }
        else {
            interCommName = "MPI_COMM_NULL";
            done = 1;
        }

        /* we are only done if all processes are done */
        MPI_Allreduce(MPI_IN_PLACE, &done, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);

        /* Advance the comm index whether we are done or not, otherwise we could
         * spin forever trying to allocate a too-small communicator over and
         * over again. */
        interCommIdx++;

        if (!done && *comm != MPI_COMM_NULL) {
            /* avoid leaking communicators */
            merr = MPI_Comm_free(comm);
            if (merr)
                MTestPrintError(merr);
        }

        /* cleanup for common temp objects */
        if (mcomm != MPI_COMM_NULL) {
            merr = MPI_Comm_free(&mcomm);
            if (merr)
                MTestPrintError(merr);
        }
        if (mcomm2 != MPI_COMM_NULL) {
            merr = MPI_Comm_free(&mcomm2);
            if (merr)
                MTestPrintError(merr);
        }
    }

    return interCommIdx;
}
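For context, a routine like the one above is normally driven in a loop: the caller keeps requesting intercommunicators until the routine returns 0 (all construction cases exhausted), skips iterations that yield MPI_COMM_NULL, and frees whatever it was given. The driver below is only an illustrative sketch under that assumption, not part of mtest.c itself.

/* Hypothetical driver; assumes the MTestGetIntercomm definition above
 * and that all processes call it collectively. */
static void exercise_intercomms(int min_size)
{
    MPI_Comm comm;
    int isLeft;

    while (MTestGetIntercomm(&comm, &isLeft, min_size)) {
        if (comm == MPI_COMM_NULL)
            continue;              /* this case did not apply at this size */

        /* any intercommunicator-capable collective could be exercised here */
        MPI_Barrier(comm);

        /* every communicator handed back by MTestGetIntercomm was freshly
         * created, so it is safe to free it directly */
        MPI_Comm_free(&comm);
    }
}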
Example #17
int main( int argc, char **argv )
{
    int errs=0, toterr;
    MPI_Group basegroup;
    MPI_Group g1, g2, g3, g4, g5, g6, g7, g8, g9, g10;
    MPI_Group g3a, g3b;
    MPI_Comm  comm, newcomm, splitcomm, dupcomm;
    int       i, grp_rank, rank, grp_size, size, result;
    int       nranks, *ranks, *ranks_out;
    int       range[1][3];
    int       worldrank;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &worldrank );

    comm = MPI_COMM_WORLD;

    MPI_Comm_group( comm, &basegroup );
    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &size );

/* Get the basic information on this group */
    MPI_Group_rank( basegroup, &grp_rank );
    if (grp_rank != rank) {
	errs++;
	fprintf( stdout, "group rank %d != comm rank %d\n", grp_rank, rank );
    }

    MPI_Group_size( basegroup, &grp_size );
    if (grp_size != size) {
	errs++;
	fprintf( stdout, "group size %d != comm size %d\n", grp_size, size );
    }


/* Form a new communicator with inverted ranking */
    MPI_Comm_split( comm, 0, size - rank, &newcomm );
    MPI_Comm_group( newcomm, &g1 );
    ranks	  = (int *)malloc( size * sizeof(int) );
    ranks_out = (int *)malloc( size * sizeof(int) );
    for (i=0; i<size; i++) ranks[i] = i;
    nranks = size;
    MPI_Group_translate_ranks( g1, nranks, ranks, basegroup, ranks_out );
    for (i=0; i<size; i++) {
	if (ranks_out[i] != (size - 1) - i) {
	    errs++;
	    fprintf( stdout, "Translate ranks got %d expected %d\n", 
		     ranks_out[i], (size - 1) - i );
	}
    }

/* Check Compare */
    MPI_Group_compare( basegroup, g1, &result );
    if (result != MPI_SIMILAR) {
	errs++;
	fprintf( stdout, "Group compare should have been similar, was %d\n",
		 result );
    }
    MPI_Comm_dup( comm, &dupcomm );
    MPI_Comm_group( dupcomm, &g2 );
    MPI_Group_compare( basegroup, g2, &result );
    if (result != MPI_IDENT) {
	errs++;
	fprintf( stdout, "Group compare should have been ident, was %d\n",
		 result );
    }
    MPI_Comm_split( comm, rank < size/2, rank, &splitcomm );
    MPI_Comm_group( splitcomm, &g3 );
    MPI_Group_compare( basegroup, g3, &result );
    if (result != MPI_UNEQUAL) {
	errs++;
	fprintf( stdout, "Group compare should have been unequal, was %d\n",
		 result );
    }

    /* Build two groups that have this process and one other, but do not
       have the same processes */
    ranks[0] = rank;
    ranks[1] = (rank + 1) % size;
    MPI_Group_incl( basegroup, 2, ranks, &g3a );
    ranks[1] = (rank + size - 1) % size;
    MPI_Group_incl( basegroup, 2, ranks, &g3b );
    MPI_Group_compare( g3a, g3b, &result );
    if (result != MPI_UNEQUAL) {
        errs++;
	fprintf( stdout, "Group compare of equal sized but different groups should have been unequal, was %d\n", result );
    }
    

/* Build two new groups by excluding members; use Union to put them
   together again */

/* Exclude 0 */
    for (i=0; i<size; i++) ranks[i] = i;
    MPI_Group_excl( basegroup, 1, ranks, &g4 );
/* Exclude 1-(size-1) */
    MPI_Group_excl( basegroup, size-1, ranks+1, &g5 );
    MPI_Group_union( g5, g4, &g6 );
    MPI_Group_compare( basegroup, g6, &result );
    if (result != MPI_IDENT) {
	int usize;
	errs++;
	/* See ordering requirements on union */
	fprintf( stdout, "Group excl and union did not give ident groups\n" );
	fprintf( stdout, "[%d] result of compare was %d\n", rank, result );
	MPI_Group_size( g6, &usize );
	fprintf( stdout, "Size of union is %d, should be %d\n", usize, size );
    }
    MPI_Group_union( basegroup, g4, &g7 );
    MPI_Group_compare( basegroup, g7, &result );
    if (result != MPI_IDENT) {
	int usize;
	errs++;
	fprintf( stdout, "Group union of overlapping groups failed\n" );
	fprintf( stdout, "[%d] result of compare was %d\n", rank, result );
	MPI_Group_size( g7, &usize );
	fprintf( stdout, "Size of union is %d, should be %d\n", usize, size );
    }

/* Use range_excl instead of ranks */
    /* printf ("range excl\n" ); fflush( stdout ); */
    range[0][0] = 1;
    range[0][1] = size-1;
    range[0][2] = 1;
    MPI_Group_range_excl( basegroup, 1, range, &g8 );
    /* printf( "out  of range excl\n" ); fflush( stdout ); */
    MPI_Group_compare( g5, g8, &result );
    /* printf( "out of compare\n" ); fflush( stdout ); */
    if (result != MPI_IDENT) {
	errs++;
	fprintf( stdout, "Group range excl did not give ident groups\n" );
    }

    /* printf( "intersection\n" ); fflush( stdout ); */
    MPI_Group_intersection( basegroup, g4, &g9 );
    MPI_Group_compare( g9, g4, &result );
    if (result != MPI_IDENT) {
	errs++;
	fprintf( stdout, "Group intersection did not give ident groups\n" );
    }

/* Exclude EVERYTHING and check against MPI_GROUP_EMPTY */
    /* printf( "range excl all\n" ); fflush( stdout ); */
    range[0][0] = 0;
    range[0][1] = size-1;
    range[0][2] = 1;
    MPI_Group_range_excl( basegroup, 1, range, &g10 );

    /* printf( "done range excl all\n" ); fflush(stdout); */
    MPI_Group_compare( g10, MPI_GROUP_EMPTY, &result );
    /* printf( "done compare to MPI_GROUP_EMPTY\n" ); fflush(stdout); */

    if (result != MPI_IDENT) {
	errs++;
	fprintf( stdout, 
		 "MPI_GROUP_EMPTY didn't compare against empty group\n");
    }

    /* printf( "freeing groups\n" ); fflush( stdout ); */
    MPI_Group_free( &basegroup );
    MPI_Group_free( &g1 );
    MPI_Group_free( &g2 );
    MPI_Group_free( &g3 );
    MPI_Group_free( &g3a );
    MPI_Group_free( &g3b );
    MPI_Group_free( &g4 );
    MPI_Group_free( &g5 );
    MPI_Group_free( &g6 );
    MPI_Group_free( &g7 );
    MPI_Group_free( &g8 );
    MPI_Group_free( &g9 );
    MPI_Group_free( &g10 );
    MPI_Comm_free( &dupcomm );
    MPI_Comm_free( &splitcomm );
    MPI_Comm_free( &newcomm );

    MPI_Allreduce( &errs, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
    if (worldrank == 0) {
	if (toterr == 0) 
	    printf( " No Errors\n" );
	else
	    printf( "Found %d errors in MPI Group routines\n", toterr );
    }

    MPI_Finalize();
    return toterr;
}
Example #18
int main(int argc, char *argv[])
{
	double t1,t2,duration;
    
    if (argc > 1){
        Size = atoi(argv[argc-1]);
    }
	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &ProcNum);//Number of processes
	MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank);//Rank of process
	t1 = MPI_Wtime();
	MPI_Group WorldGroup, CalculatorGroup;
	MPI_Comm Calculators;
	int ranks[1];
	ranks[0] = ProcNum-1;
	MPI_Comm_group(MPI_COMM_WORLD, &WorldGroup);
	MPI_Group_excl(WorldGroup, 1, ranks, &CalculatorGroup);
	MPI_Comm_create(MPI_COMM_WORLD,CalculatorGroup,&Calculators);

	
	int GridSize=sqrt((double)(ProcNum-1));//size of virtual topology(Grid)
	N=Size/GridSize+2;//N-size of subsystem; +2 - for boundary condition
    assert(GridSize*GridSize+1 == ProcNum);
	

	if (ProcRank!=ProcNum-1)
	{
		
		MPI_Datatype column;
		MPI_Type_vector(N,1,N,MPI_DOUBLE,&column);
		MPI_Type_commit(&column);

		//creating datatype for square
		MPI_Datatype square;
		MPI_Type_vector(N-2,N-2,N,MPI_DOUBLE,&square);
		MPI_Type_commit(&square);

		MPI_Comm GridComm;
		CreateGrid(GridSize, &GridComm, &Calculators);
		//Latt_Init1(N,"200last_state_gk_d0p1.bin");
		LattInit(N);
        SolveEquations(250, GridComm, GridSize, column, square);
		MPI_Type_free(&square);
		MPI_Type_free(&column);
		MPI_Group_free(&CalculatorGroup);
		MPI_Comm_free(&Calculators);
		delete[] V;
		delete[] Cell;
		delete[] I_ext;
	}
	else
	{
        convert_buf = new short[(N-2)*(N-2)*ProcNum];
		double* V_all = new double[(N-2)*(N-2)*ProcNum];
		double* V_temp = new double[(N-2)*(N-2)];
		short* V_save = new short[(N-2)*(N-2)*ProcNum];
		int ii;
		//printf("Total number of frames: %i\n", DrawNum);
#ifdef OS_WINDOWS
		int fd = open("200snapshots_gk_d0p1.bin",O_RDWR|O_CREAT | O_BINARY,S_IREAD|S_IWRITE);
#else
        int fd = open("snapshots.bin",O_RDWR|O_CREAT ,S_IREAD|S_IWRITE);
#endif
        for (int i=0;i<DrawNum*2;i++)
		{
			
			MPI_Gather(V_temp,(N-2)*(N-2),MPI_DOUBLE,V_all,(N-2)*(N-2),MPI_DOUBLE,ProcNum-1,MPI_COMM_WORLD);
			for (ii=0;ii<(N-2)*(N-2)*(ProcNum-1);ii++)
			{
				V_save[ii]=short(V_all[ii]*250.);
			}
            convertRst(V_save,convert_buf,Size,GridSize);
            save(convert_buf,Size*Size,fd);
        }
		close(fd);

		MPI_Gather(V_temp,(N-2)*(N-2),MPI_DOUBLE,V_all,(N-2)*(N-2),MPI_DOUBLE,ProcNum-1,MPI_COMM_WORLD);
		for (ii=0;ii<(N-2)*(N-2)*(ProcNum-1);ii++)
		{
			V_save[ii]=short(V_all[ii]*250.);
		}
#ifdef OS_WINDOWS
		fd = open("200last_V_gk_d0p1.bin",O_RDWR|O_CREAT | O_BINARY,S_IREAD|S_IWRITE);
#else
        fd = open("lastV.bin",O_RDWR|O_CREAT,S_IREAD|S_IWRITE);
#endif
		save(V_save,(N-2)*(N-2)*(ProcNum-1),fd);
		close(fd);

#ifdef OS_WINDOWS
		fd = open("200last_state_gk_d0p1.bin",O_RDWR|O_CREAT | O_BINARY,S_IREAD|S_IWRITE);
#else
        fd = open("state.bin",O_RDWR|O_CREAT ,S_IREAD|S_IWRITE);
#endif
		for (int i=0; i<9; i++)
		{
		MPI_Gather(V_temp,(N-2)*(N-2),MPI_DOUBLE,V_all,(N-2)*(N-2),MPI_DOUBLE,ProcNum-1,MPI_COMM_WORLD);
		save_double(V_all,(N-2)*(N-2)*(ProcNum-1),fd);
		}
		close(fd);



		delete[] V_all;
		delete[] V_temp;
		delete[] V_save;
        delete[] convert_buf;
	}



	
	
	t2 = MPI_Wtime();
	if (ProcRank==0)
	{
		printf("Experiment duration: %f seconds\n",t2-t1);
	}
	MPI_Finalize();
	
	return 0;
}
Example #19
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  int comm = MPI_COMM_WORLD;
  char processor_name[128];
  int namelen = 128;
  int i;
  int ranks[2], ranges[1][3];
  MPI_Group newgroup[GROUP_CONSTRUCTOR_COUNT]; 
  MPI_Group newgroup2[GROUP_CONSTRUCTOR_COUNT]; 
  MPI_Comm temp;
  MPI_Comm intercomm = MPI_COMM_NULL;

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (comm, &nprocs);
  MPI_Comm_rank (comm, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  ranks[0] = 0;
  ranks[1] = 1;

  ranges[0][0] = 0;
  ranges[0][1] = 2;
  ranges[0][2] = 2;

  MPI_Barrier (comm);

  if (nprocs < 3) {
      printf ("requires at least 3 tasks\n");
  }
  else {
    /* create the groups */
    if (GROUP_CONSTRUCTOR_COUNT > 0)
      MPI_Comm_group (MPI_COMM_WORLD, &newgroup[0]);

    if (GROUP_CONSTRUCTOR_COUNT > 1)
      MPI_Group_incl (newgroup[0], 2, ranks, &newgroup[1]);    

    if (GROUP_CONSTRUCTOR_COUNT > 2)
      MPI_Group_excl (newgroup[0], 2, ranks, &newgroup[2]);

    if (GROUP_CONSTRUCTOR_COUNT > 3)
      MPI_Group_range_incl (newgroup[0], 1, ranges, &newgroup[3]);    

    if (GROUP_CONSTRUCTOR_COUNT > 4)
      MPI_Group_range_excl (newgroup[0], 1, ranges, &newgroup[4]);    

    if (GROUP_CONSTRUCTOR_COUNT > 5)
      MPI_Group_union (newgroup[1], newgroup[3], &newgroup[5]);

    if (GROUP_CONSTRUCTOR_COUNT > 6)
      MPI_Group_intersection (newgroup[5], newgroup[2], &newgroup[6]);

    if (GROUP_CONSTRUCTOR_COUNT > 7)
      MPI_Group_difference (newgroup[5], newgroup[2], &newgroup[7]);

    if (GROUP_CONSTRUCTOR_COUNT > 8) {
      /* need lots of stuff for this constructor... */
      MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &temp);

      if (rank % 3) {
	MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD, 
			      (((nprocs % 3) == 2) && ((rank % 3) == 2)) ?
			      nprocs - 1 : nprocs - (rank % 3) - (nprocs % 3),
			      INTERCOMM_CREATE_TAG, &intercomm);

	MPI_Comm_remote_group (intercomm, &newgroup[8]);

	MPI_Comm_free (&intercomm);
      }
      else {
	MPI_Comm_group (temp, &newgroup[8]);
      }

      MPI_Comm_free (&temp);
    }
  }      

  MPI_Barrier (comm);

  printf ("(%d) Finished normally\n", rank);
  MPI_Finalize ();
}
Example #20
int main( int argc, char *argv[] ) 
{ 
    int iter; 
    int in, out, i, iters, max, ix, iy, ranks[1], done, temp; 
    double x, y, Pi, error, epsilon; 
    int numprocs, myid, server, totalin, totalout, workerid; 
    int rands[CHUNKSIZE], request; 
    MPI_Comm world, workers; 
    MPI_Group world_group, worker_group; 
    MPI_Status status; 
 
    MPI_Init(&argc,&argv); 
    world  = MPI_COMM_WORLD; 
    MPI_Comm_size(world,&numprocs); 
    MPI_Comm_rank(world,&myid); 
    server = numprocs-1;	/* last proc is server */ 


    if (myid == 0) 
      sscanf( argv[1], "%lf", &epsilon ); 
    MPI_Bcast( &epsilon, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD ); 
    
    

    MPI_Comm_group( world, &world_group ); 
    ranks[0] = server; 
    MPI_Group_excl( world_group, 1, ranks, &worker_group ); 
    MPI_Comm_create( world, worker_group, &workers ); 
    MPI_Group_free(&worker_group); 
    if (myid == server) {	/* I am the rand server */ 
	do { 
	    MPI_Recv(&request, 1, MPI_INT, MPI_ANY_SOURCE, REQUEST, 
		     world, &status); 

	   

	    if (request) { 
		
		assert(request>0);

		for (i = 0; i < CHUNKSIZE; ) { 
		        rands[i] = random(); 
			if (rands[i] <= INT_MAX) i++; 
		} 
		MPI_Send(rands, CHUNKSIZE, MPI_INT, 
                         status.MPI_SOURCE, REPLY, world); 

	       
	    } 
	} while( request>0 ); 
	
    } 
    else {			/* I am a worker process */ 
        request = 1; 

	done = in = out = 0; 
	max  = INT_MAX;         /* max int, for normalization */ 
        MPI_Send( &request, 1, MPI_INT, server, REQUEST, world ); 
 
        MPI_Comm_rank( workers, &workerid ); 
	iter = 0; 
	while (!done) { 
	    iter++; 
	    request = 1; 

	    MPI_Recv( rands, CHUNKSIZE, MPI_INT, server, REPLY, 
		     world, &status ); 

	    for (i=0; i<CHUNKSIZE-1; ) { 
	        x = (((double) rands[i++])/max) * 2 - 1; 
		y = (((double) rands[i++])/max) * 2 - 1; 
		if (x*x + y*y < 1.0) 
		    in++; 
		else 
		    out++; 
	    } 
	    MPI_Allreduce(&in, &totalin, 1, MPI_INT, MPI_SUM, 
			  workers); 
	    MPI_Allreduce(&out, &totalout, 1, MPI_INT, MPI_SUM, 
			  workers); 



	    Pi = (4.0*totalin)/(totalin + totalout); 
	    error = fabs( Pi-3.141592653589793238462643); 
	    done = (error < epsilon || (totalin+totalout) > 1000000); 
	    request = (done) ? 0 : 1; 
	    if (myid == 0) { 
	      //printf( "\rpi = %23.20f", Pi ); 
		MPI_Send( &request, 1, MPI_INT, server, REQUEST, 
			 world ); 

	    } 
	    else { 
		if (request) {
			assert(request>0);

		    MPI_Send(&request, 1, MPI_INT, server, REQUEST, 
			     world);
		
		}
	    } 
	} 
	MPI_Comm_free(&workers); 
    } 
 
    if (myid == 0) { 
      //printf( "\npoints: %d\nin: %d, out: %d, <ret> to exit\n", 
      //	       totalin+totalout, totalin, totalout ); 
    } 
    MPI_Finalize(); 
} 
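Because the last rank is excluded from worker_group above, MPI_Comm_create hands that rank MPI_COMM_NULL for workers, which is why only the worker processes ever touch the workers communicator. A small standalone sketch of guarding on that return value, assuming at least 2 processes (not part of the example itself):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size, last, wrank;
    MPI_Group world_group, worker_group;
    MPI_Comm workers;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    last = size - 1;                          /* exclude the last rank, as above */

    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Group_excl(world_group, 1, &last, &worker_group);
    MPI_Comm_create(MPI_COMM_WORLD, worker_group, &workers);

    if (workers == MPI_COMM_NULL) {
        printf("rank %d was excluded and got MPI_COMM_NULL\n", rank);
    }
    else {
        MPI_Comm_rank(workers, &wrank);
        printf("rank %d is worker %d of the new communicator\n", rank, wrank);
        MPI_Comm_free(&workers);
    }

    MPI_Group_free(&worker_group);
    MPI_Group_free(&world_group);
    MPI_Finalize();
    return 0;
}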
Example #21
0
bool pRPL::Process::
grouping(int nGroups,
         bool incldMaster,
         Process *pGrpedPrc,
         Process *pGrpMaster) const {
  if(!initialized()) {
    cerr << __FILE__ << " " << __FUNCTION__ \
         << " Error: Process has NOT been initialized," \
         << " unable to be grouped" << endl;
    return false;
  }

  if(!active()) {
    cerr << __FILE__ << " " << __FUNCTION__ \
         << " Error: inactive Process," \
         << " unable to group a Null communicator." \
         << " id = " << _id << " nTotPrcs = " << _nTotalPrcs << endl;
    return false;
  }

  if(nGroups <= 0 ||
     nGroups > _nTotalPrcs) {
    cerr << __FILE__ << " " << __FUNCTION__ \
         << " Error: invalid number of groups (" \
         << nGroups << ") as the total number of processes is " \
         << _nTotalPrcs << endl;
    return false;
  }

  if(!incldMaster && _nTotalPrcs <= 1) {
    cerr << __FILE__ << " " << __FUNCTION__ \
         << " Error:  " << _nTotalPrcs << " processes can NOT" \
         << " be grouped without the master process" << endl;
    return false;
  }

  MPI_Group glbGrp;
  MPI_Comm glbComm = _comm;
  MPI_Comm_group(glbComm, &glbGrp);
  int myID = -1;
  int grpID = -1;
  MPI_Comm grpComm = MPI_COMM_NULL;

  if(incldMaster) {
    myID = _id;
    grpID = myID % nGroups;
    MPI_Comm_split(glbComm, grpID, myID, &grpComm);
    if(!pGrpedPrc->set(grpComm, _hasWriter, grpID)) {
      return false;
    }
    if(pGrpMaster != NULL) {
      MPI_Group masterGrp= MPI_GROUP_NULL;
      MPI_Comm masterComm = MPI_COMM_NULL;
      int grpMasterRange[1][3] = {{0, nGroups-1, 1}};
      MPI_Group_range_incl(glbGrp, 1, grpMasterRange, &masterGrp);
      MPI_Comm_create(glbComm, masterGrp, &masterComm);
      if(!pGrpMaster->set(masterComm)) {
        return false;
      }
    }
  }
  else {
    int excldRanks[1] = {0};
    MPI_Group glbGrp2 = MPI_GROUP_NULL;
    MPI_Group_excl(glbGrp, 1, excldRanks, &glbGrp2);
    MPI_Comm_create(_comm, glbGrp2, &glbComm);
    glbGrp = glbGrp2;
    if(!isMaster()) {
      MPI_Comm_rank(glbComm, &myID);
      grpID = myID % nGroups;
      MPI_Comm_split(glbComm, grpID, myID, &grpComm);
      if(!pGrpedPrc->set(grpComm, _hasWriter, grpID)) {
        return false;
      }
      if(pGrpMaster != NULL) {
        MPI_Group masterGrp= MPI_GROUP_NULL;
        MPI_Comm masterComm = MPI_COMM_NULL;
        int grpMasterRange[1][3] = {{0, nGroups-1, 1}};
        MPI_Group_range_incl(glbGrp, 1, grpMasterRange, &masterGrp);
        MPI_Comm_create(glbComm, masterGrp, &masterComm);
        if(!pGrpMaster->set(masterComm)) {
          return false;
        }
      }
    }
  }

  return true;
}
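The core of grouping() is the color rule grpID = myID % nGroups fed to MPI_Comm_split. A stripped-down sketch of just that step, with nGroups fixed to 2 for illustration (not the pRPL API):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    const int nGroups = 2;        /* illustrative group count */
    int rank, grpID, grpRank;
    MPI_Comm grpComm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    grpID = rank % nGroups;                        /* same color rule as grouping() */
    MPI_Comm_split(MPI_COMM_WORLD, grpID, rank, &grpComm);
    MPI_Comm_rank(grpComm, &grpRank);

    printf("world rank %d -> group %d, local rank %d\n", rank, grpID, grpRank);

    MPI_Comm_free(&grpComm);
    MPI_Finalize();
    return 0;
}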
Example #22
0
MTEST_THREAD_RETURN_TYPE test_idup(void *arg)
{
    int i;
    int size, rank;
    int ranges[1][3];
    int rleader, isLeft;
    int *excl = NULL;
    int tid = *(int *) arg;

    MPI_Group ingroup, high_group, even_group;
    MPI_Comm local_comm, inter_comm;
    MPI_Comm idupcomms[NUM_IDUPS];
    MPI_Request reqs[NUM_IDUPS];

    MPI_Comm outcomm;
    MPI_Comm incomm = comms[tid];

    MPI_Comm_size(incomm, &size);
    MPI_Comm_rank(incomm, &rank);
    MPI_Comm_group(incomm, &ingroup);

    /* Idup incomm multiple times */
    for (i = 0; i < NUM_IDUPS; i++) {
        MPI_Comm_idup(incomm, &idupcomms[i], &reqs[i]);
    }

    /* Overlap pending idups with various comm generation functions */
    /* Comm_dup */
    MPI_Comm_dup(incomm, &outcomm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_split */
    MPI_Comm_split(incomm, rank % 2, size - rank, &outcomm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create, high half of incomm */
    ranges[0][0] = size / 2;
    ranges[0][1] = size - 1;
    ranges[0][2] = 1;
    MPI_Group_range_incl(ingroup, 1, ranges, &high_group);
    MPI_Comm_create(incomm, high_group, &outcomm);
    MPI_Group_free(&high_group);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create_group, even ranks of incomm */
    /* exclude the odd ranks */
    excl = malloc((size / 2) * sizeof(int));
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    MPI_Group_excl(ingroup, size / 2, excl, &even_group);
    free(excl);

    if (rank % 2 == 0) {
        MPI_Comm_create_group(incomm, even_group, 0, &outcomm);
    }
    else {
        outcomm = MPI_COMM_NULL;
    }
    MPI_Group_free(&even_group);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Intercomm_create & Intercomm_merge */
    MPI_Comm_split(incomm, (rank < size / 2), rank, &local_comm);
    if (rank == 0) {
        rleader = size / 2;
    }
    else if (rank == size / 2) {
        rleader = 0;
    }
    else {
        rleader = -1;
    }
    isLeft = rank < size / 2;

    MPI_Intercomm_create(local_comm, 0, incomm, rleader, 99, &inter_comm);
    MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
    MPI_Comm_free(&local_comm);

    errs[tid] += MTestTestComm(inter_comm);
    MTestFreeComm(&inter_comm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
    for (i = 0; i < NUM_IDUPS; i++) {
        errs[tid] += MTestTestComm(idupcomms[i]);
        MPI_Comm_free(&idupcomms[i]);
    }
    MPI_Group_free(&ingroup);
    return NULL;
}
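MPI_Comm_idup is the nonblocking counterpart of MPI_Comm_dup: the returned communicator may only be used once the request has completed, which is why the test above waits on all requests before exercising idupcomms. A minimal sketch of that ordering in isolation:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, value;
    MPI_Comm dupcomm;
    MPI_Request req;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Comm_idup(MPI_COMM_WORLD, &dupcomm, &req);

    /* unrelated work could overlap with the duplication here */

    MPI_Wait(&req, MPI_STATUS_IGNORE);   /* dupcomm is usable only after completion */

    value = (rank == 0) ? 42 : -1;
    MPI_Bcast(&value, 1, MPI_INT, 0, dupcomm);
    printf("rank %d received %d over the duplicated communicator\n", rank, value);

    MPI_Comm_free(&dupcomm);
    MPI_Finalize();
    return 0;
}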
Example #23
0
int main(int argc, char **argv)
{
    int errs = 0;
    int i;
    int rank, size;
    int *excl;
    int ranges[1][3];
    int isLeft, rleader;
    MPI_Group world_group, high_group, even_group;
    MPI_Comm local_comm, inter_comm, test_comm, outcomm;
    MPI_Comm idupcomms[NUM_IDUPS];
    MPI_Request reqs[NUM_IDUPS];

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    if (size < 2) {
        printf("this test requires at least 2 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Idup MPI_COMM_WORLD multiple times */
    for (i = 0; i < NUM_IDUPS; i++) {
        MPI_Comm_idup(MPI_COMM_WORLD, &idupcomms[i], &reqs[i]);
    }

    /* Overlap pending idups with various comm generation functions */

    /* Comm_dup */
    MPI_Comm_dup(MPI_COMM_WORLD, &outcomm);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_split */
    MPI_Comm_split(MPI_COMM_WORLD, rank % 2, size - rank, &outcomm);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create, high half of MPI_COMM_WORLD */
    ranges[0][0] = size / 2;
    ranges[0][1] = size - 1;
    ranges[0][2] = 1;
    MPI_Group_range_incl(world_group, 1, ranges, &high_group);
    MPI_Comm_create(MPI_COMM_WORLD, high_group, &outcomm);
    MPI_Group_free(&high_group);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create_group, even ranks of MPI_COMM_WORLD */
    /* exclude the odd ranks */
    excl = malloc((size / 2) * sizeof(int));
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    MPI_Group_excl(world_group, size / 2, excl, &even_group);
    free(excl);

    if (rank % 2 == 0) {
        MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, &outcomm);
    } else {
        outcomm = MPI_COMM_NULL;
    }
    MPI_Group_free(&even_group);

    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Intercomm_create & Intercomm_merge */
    MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &local_comm);

    if (rank == 0) {
        rleader = size / 2;
    } else if (rank == size / 2) {
        rleader = 0;
    } else {
        rleader = -1;
    }
    isLeft = rank < size / 2;

    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, rleader, 99, &inter_comm);
    MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
    MPI_Comm_free(&local_comm);

    errs += MTestTestComm(inter_comm);
    MTestFreeComm(&inter_comm);

    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
    for (i = 0; i < NUM_IDUPS; i++) {
        errs += MTestTestComm(idupcomms[i]);
        MPI_Comm_free(&idupcomms[i]);
    }

    MPI_Group_free(&world_group);

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Example #24
0
File: bcast.c Project: NexMirror/MPICH
/*
 * This test attempts collective bcast communication after a process in
 * the communicator has failed.
 */
int main(int argc, char **argv)
{
    int rank, size, rc, errclass, toterrs, errs = 0;
    int deadprocs[] = { 1 };
    char buf[100000];
    MPI_Group world, newgroup;
    MPI_Comm newcomm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    MPI_Comm_group(MPI_COMM_WORLD, &world);
    MPI_Group_excl(world, 1, deadprocs, &newgroup);
    MPI_Comm_create_group(MPI_COMM_WORLD, newgroup, 0, &newcomm);

    if (size < 3) {
        fprintf(stderr, "Must run with at least 3 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (rank == 1) {
        exit(EXIT_FAILURE);
    }

    if (rank == 0) {
        strcpy(buf, "No Errors");
    }

    /* do a small bcast first */
    rc = MPI_Bcast(buf, 10, MPI_CHAR, 0, MPI_COMM_WORLD);

#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
    MPI_Error_class(rc, &errclass);
    if ((rc) && (errclass != MPIX_ERR_PROC_FAILED)) {
        fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n",
                errclass);
        errs++;
    }
#endif

    /* reset the non-root buffers */
    if (rank != 0)
        memset(buf, 0, sizeof(buf));

    /* do a larger bcast */
    rc = MPI_Bcast(buf, 100000, MPI_CHAR, 0, MPI_COMM_WORLD);

#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
    MPI_Error_class(rc, &errclass);
    if ((rc) && (errclass != MPIX_ERR_PROC_FAILED)) {
        fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n",
                errclass);
        errs++;
    }
#endif

    rc = MPI_Reduce(&errs, &toterrs, 1, MPI_INT, MPI_SUM, 0, newcomm);
    if (rc)
        fprintf(stderr, "Failed to get errors from other processes\n");

    if (rank == 0) {
        if (toterrs) {
            printf(" Found %d errors\n", toterrs);
        }
        else {
            printf(" No Errors\n");
        }
        fflush(stdout);
    }

    MPI_Group_free(&world);
    MPI_Group_free(&newgroup);
    MPI_Comm_free(&newcomm);
    MPI_Finalize();

    return 0;

}
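Unlike MPI_Comm_create, which is collective over the whole parent communicator, MPI_Comm_create_group is called only by the members of the group, which is what lets the surviving processes above build newcomm without the dead rank participating. A minimal sketch of that distinction, assuming at least 2 processes (illustrative code only):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, excl0 = 0;
    MPI_Group world_group, sub_group;
    MPI_Comm subcomm = MPI_COMM_NULL;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Group_excl(world_group, 1, &excl0, &sub_group);  /* everyone but rank 0 */

    if (rank != 0) {
        /* only the group members make this call; rank 0 skips it entirely */
        MPI_Comm_create_group(MPI_COMM_WORLD, sub_group, 0, &subcomm);
        printf("rank %d joined the sub-communicator\n", rank);
        MPI_Comm_free(&subcomm);
    }

    MPI_Group_free(&sub_group);
    MPI_Group_free(&world_group);
    MPI_Finalize();
    return 0;
}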
Example #25
0
XdmfHDF5ControllerDSM::XdmfHDF5ControllerDSM(const std::string & hdf5FilePath,
                                             const std::string & dataSetPath,
                                             const shared_ptr<const XdmfArrayType> type,
                                             const std::vector<unsigned int> & start,
                                             const std::vector<unsigned int> & stride,
                                             const std::vector<unsigned int> & dimensions,
                                             const std::vector<unsigned int> & dataspaceDimensions,
                                             MPI_Comm comm,
                                             unsigned int bufferSize,
                                             int startCoreIndex,
                                             int endCoreIndex) :
  XdmfHDF5Controller(hdf5FilePath,
                     dataSetPath,
                     type,
                     start,
                     stride,
                     dimensions,
                     dataspaceDimensions),
#ifdef XDMF_BUILD_DSM_THREADS
  mDSMBuffer(NULL),
  mDSMManager(NULL),
#endif
  mServerMode(true)

{

  int rank, size;

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  // Negative values will be changed to maximum range
  if (startCoreIndex < 0) {
    startCoreIndex = 0;
  }
  if (endCoreIndex < 0) {
    endCoreIndex = size - 1;
  }

  // Ensure start index is less than end index
  if (startCoreIndex > endCoreIndex) {
    int tempholder = startCoreIndex;
    startCoreIndex = endCoreIndex;
    endCoreIndex = tempholder;
  }

  MPI_Comm serverComm;

  MPI_Group workers, dsmgroup, serversplit, servergroup;

  int * ServerIds = (int *)calloc((size_t)(endCoreIndex - startCoreIndex + 1), sizeof(int));  /* one slot per server rank */
  unsigned int index = 0;
  for(int i=startCoreIndex ; i <= endCoreIndex ; ++i) {
    ServerIds[index++] = i;
  }

  MPI_Comm_group(comm, &serversplit);
  MPI_Group_incl(serversplit, index, ServerIds, &servergroup);
  MPI_Comm_create(comm, servergroup, &serverComm);
  MPI_Comm_group(comm, &dsmgroup);
  MPI_Group_excl(dsmgroup, index, ServerIds, &workers);
  MPI_Comm_create(comm, workers, &mWorkerComm);
  free(ServerIds);

  // Create the manager

  mDSMServerManager = new XdmfDSMManager();

  mDSMServerManager->SetLocalBufferSizeMBytes(bufferSize);
  mDSMServerManager->SetInterCommType(XDMF_DSM_COMM_MPI);

  if (rank >= startCoreIndex && rank <= endCoreIndex) {
    mDSMServerManager->SetMpiComm(serverComm);
    mDSMServerManager->Create();
  }
  else {
    mDSMServerManager->SetMpiComm(mWorkerComm);
    mDSMServerManager->SetIsServer(false);
    mDSMServerManager->Create(startCoreIndex, endCoreIndex);
  }

  XDMF_dsm_set_manager(mDSMServerManager);

  mDSMServerBuffer = mDSMServerManager->GetDsmBuffer();

  mDSMServerBuffer->GetComm()->DupInterComm(comm);
  mDSMServerBuffer->SetIsConnected(true);

  if (startCoreIndex < size) {
    if (rank >= startCoreIndex && rank <= endCoreIndex) {
      mDSMServerManager->GetDsmBuffer()->ReceiveInfo();
    }
    else {
      mDSMServerManager->GetDsmBuffer()->SendInfo();
    }
  }

  MPI_Barrier(comm);

  // Loop needs to be started before anything can be done to the file
  // since the service is what sets up the file

  if (rank < startCoreIndex || rank > endCoreIndex) {
    // Turn off the server designation
    mDSMServerBuffer->SetIsServer(false);
    // If this is set to false then the buffer will attempt to
    // connect to the intercomm for DSM stuff
    mDSMServerManager->SetIsServer(false);
  }
  else {
    // On cores where memory is set up, start the service loop
    // This should iterate infinitely until a value to end the loop is passed
    int returnOpCode;
    mDSMServerBuffer->BufferServiceLoop(&returnOpCode);
  }
}
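Since the server ranks above form one contiguous block (startCoreIndex through endCoreIndex), the same server/worker split could also be expressed with a single (first, last, stride) range instead of an explicit id list. A hedged sketch of that alternative, with rank 0 standing in for the server block (illustrative names, not the Xdmf API; assumes at least 2 processes):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size;
    int range[1][3] = { { 0, 0, 1 } };   /* contiguous server block: rank 0 only here */
    MPI_Group all, servers, workers;
    MPI_Comm serverComm, workerComm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size < 2) {
        MPI_Finalize();
        return 0;
    }

    MPI_Comm_group(MPI_COMM_WORLD, &all);
    MPI_Group_range_incl(all, 1, range, &servers);   /* the server block */
    MPI_Group_range_excl(all, 1, range, &workers);   /* everyone else */

    MPI_Comm_create(MPI_COMM_WORLD, servers, &serverComm);
    MPI_Comm_create(MPI_COMM_WORLD, workers, &workerComm);

    printf("rank %d: server comm %s, worker comm %s\n", rank,
           serverComm == MPI_COMM_NULL ? "NULL" : "valid",
           workerComm == MPI_COMM_NULL ? "NULL" : "valid");

    if (serverComm != MPI_COMM_NULL) MPI_Comm_free(&serverComm);
    if (workerComm != MPI_COMM_NULL) MPI_Comm_free(&workerComm);
    MPI_Group_free(&servers);
    MPI_Group_free(&workers);
    MPI_Group_free(&all);
    MPI_Finalize();
    return 0;
}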
Example #26
0
File: mtest.c Project: mpoquet/simgrid
/*
 * Get an intracommunicator with at least min_size members.  If "allowSmaller"
 * is true, allow the communicator to be smaller than MPI_COMM_WORLD and
 * for this routine to return MPI_COMM_NULL for some values.  Returns 0 if
 * no more communicators are available.
 */
int MTestGetIntracommGeneral(MPI_Comm * comm, int min_size, int allowSmaller)
{
    int size, rank, merr;
    int done = 0;
    int isBasic = 0;

    /* The while loop allows us to skip communicators that are too small.
     * MPI_COMM_NULL is always considered large enough */
    while (!done) {
        isBasic = 0;
        intraCommName = "";
        switch (intraCommIdx) {
        case 0:
            *comm = MPI_COMM_WORLD;
            isBasic = 1;
            intraCommName = "MPI_COMM_WORLD";
            break;
        case 1:
            /* dup of world */
            merr = MPI_Comm_dup(MPI_COMM_WORLD, comm);
            if (merr)
                MTestPrintError(merr);
            intraCommName = "Dup of MPI_COMM_WORLD";
            break;
        case 2:
            /* reverse ranks */
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_split(MPI_COMM_WORLD, 0, size - rank, comm);
            if (merr)
                MTestPrintError(merr);
            intraCommName = "Rank reverse of MPI_COMM_WORLD";
            break;
        case 3:
            /* subset of world, with reversed ranks */
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_split(MPI_COMM_WORLD, ((rank < size / 2) ? 1 : MPI_UNDEFINED),
                                  size - rank, comm);
            if (merr)
                MTestPrintError(merr);
            intraCommName = "Rank reverse of half of MPI_COMM_WORLD";
            break;
        case 4:
            *comm = MPI_COMM_SELF;
            isBasic = 1;
            intraCommName = "MPI_COMM_SELF";
            break;
        case 5:
            {
#if MTEST_HAVE_MIN_MPI_VERSION(3,0)
                /* Dup of the world using MPI_Intercomm_merge */
                int rleader, isLeft;
                MPI_Comm local_comm, inter_comm;
                MPI_Comm_size(MPI_COMM_WORLD, &size);
                MPI_Comm_rank(MPI_COMM_WORLD, &rank);
                if (size > 1) {
                    merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &local_comm);
                    if (merr)
                        MTestPrintError(merr);
                    if (rank == 0) {
                        rleader = size / 2;
                    }
                    else if (rank == size / 2) {
                        rleader = 0;
                    }
                    else {
                        rleader = -1;
                    }
                    isLeft = rank < size / 2;
                    merr =
                        MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, rleader, 99,
                                             &inter_comm);
                    if (merr)
                        MTestPrintError(merr);
                    merr = MPI_Intercomm_merge(inter_comm, isLeft, comm);
                    if (merr)
                        MTestPrintError(merr);
                    MPI_Comm_free(&inter_comm);
                    MPI_Comm_free(&local_comm);
                    intraCommName = "Dup of WORLD created by MPI_Intercomm_merge";
                }
                else {
                    *comm = MPI_COMM_NULL;
                }
            }
            break;
        case 6:
            {
                /* Even of the world using MPI_Comm_create_group */
                int i;
                MPI_Group world_group, even_group;
                int *excl = NULL;

                MPI_Comm_size(MPI_COMM_WORLD, &size);
                MPI_Comm_rank(MPI_COMM_WORLD, &rank);
                if (allowSmaller && (size + 1) / 2 >= min_size) {
                    /* exclude the odd ranks */
                    excl = malloc((size / 2) * sizeof(int));
                    for (i = 0; i < size / 2; i++)
                        excl[i] = (2 * i) + 1;

                    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
                    MPI_Group_excl(world_group, size / 2, excl, &even_group);
                    MPI_Group_free(&world_group);
                    free(excl);

                    if (rank % 2 == 0) {
                        /* Even processes create a comm. for themselves */
                        MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, comm);
                        intraCommName = "Even of WORLD created by MPI_Comm_create_group";
                    }
                    else {
                        *comm = MPI_COMM_NULL;
                    }

                    MPI_Group_free(&even_group);
                }
                else {
                    *comm = MPI_COMM_NULL;
                }
#else
                *comm = MPI_COMM_NULL;
#endif
            }
            break;
        case 7:
            {
                /* High half of the world using MPI_Comm_create */
                int ranges[1][3];
                MPI_Group world_group, high_group;
                MPI_Comm_size(MPI_COMM_WORLD, &size);
                MPI_Comm_rank(MPI_COMM_WORLD, &rank);
                ranges[0][0] = size / 2;
                ranges[0][1] = size - 1;
                ranges[0][2] = 1;

                if (allowSmaller && (size + 1) / 2 >= min_size) {
                    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
                    merr = MPI_Group_range_incl(world_group, 1, ranges, &high_group);
                    if (merr)
                        MTestPrintError(merr);
                    merr = MPI_Comm_create(MPI_COMM_WORLD, high_group, comm);
                    if (merr)
                        MTestPrintError(merr);
                    MPI_Group_free(&world_group);
                    MPI_Group_free(&high_group);
                    intraCommName = "High half of WORLD created by MPI_Comm_create";
                }
                else {
                    *comm = MPI_COMM_NULL;
                }
            }
            break;
            /* These next cases are communicators that include some
             * but not all of the processes */
        case 8:
        case 9:
        case 10:
        case 11:
            {
                int newsize;
                merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
                if (merr)
                    MTestPrintError(merr);
                newsize = size - (intraCommIdx - 7);

                if (allowSmaller && newsize >= min_size) {
                    merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
                    if (merr)
                        MTestPrintError(merr);
                    merr = MPI_Comm_split(MPI_COMM_WORLD, rank < newsize, rank, comm);
                    if (merr)
                        MTestPrintError(merr);
                    if (rank >= newsize) {
                        merr = MPI_Comm_free(comm);
                        if (merr)
                            MTestPrintError(merr);
                        *comm = MPI_COMM_NULL;
                    }
                    else {
                        intraCommName = "Split of WORLD";
                    }
                }
                else {
                    /* Act like default */
                    *comm = MPI_COMM_NULL;
                    intraCommIdx = -1;
                }
            }
            break;

            /* Other ideas: dup of self, cart comm, graph comm */
        default:
            *comm = MPI_COMM_NULL;
            intraCommIdx = -1;
            break;
        }

        if (*comm != MPI_COMM_NULL) {
            merr = MPI_Comm_size(*comm, &size);
            if (merr)
                MTestPrintError(merr);
            if (size >= min_size)
                done = 1;
        }
        else {
            intraCommName = "MPI_COMM_NULL";
            isBasic = 1;
            done = 1;
        }

        /* we are only done if all processes are done */
        MPI_Allreduce(MPI_IN_PLACE, &done, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);

        /* Advance the comm index whether we are done or not, otherwise we could
         * spin forever trying to allocate a too-small communicator over and
         * over again. */
        intraCommIdx++;

        if (!done && !isBasic && *comm != MPI_COMM_NULL) {
            /* avoid leaking communicators */
            merr = MPI_Comm_free(comm);
            if (merr)
                MTestPrintError(merr);
        }
    }

    return intraCommIdx;
}
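A typical caller of this routine loops until it reports that no more communicators are available, skipping the MPI_COMM_NULL cases. Roughly (a usage sketch, assuming the MTest helpers declared in mpitest.h are linked in; not verbatim test code):

#include "mpitest.h"
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Comm comm;

    MTest_Init(&argc, &argv);
    while (MTestGetIntracommGeneral(&comm, 2, 0 /* allowSmaller */)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* ... exercise the communicator here ... */
        MTestFreeComm(&comm);
    }
    MTest_Finalize(0);
    return 0;
}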
Example #27
0
int main( int argc, char *argv[] )
{
    MPI_Group g1, g2, g4, g5, g45, selfgroup, g6;
    int ranks[16], size, rank, myrank, range[1][3];
    int errs = 0;
    int i, rin[16], rout[16], result;

    MPI_Init(&argc,&argv);

	MPI_Comm_group( MPI_COMM_WORLD, &g1 );
	MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
	MPI_Comm_size( MPI_COMM_WORLD, &size );
	if (size < 8) {
	    fprintf( stderr, 
		  "Test requires 8 processes (16 preferred), only %d provided\n",
		     size );
	    errs++;
	}

	/* 16 members, this process is rank 0, return in group 1 */
	ranks[0] = myrank; ranks[1] = 2; ranks[2] = 7;
	if (myrank == 2) ranks[1] = 3;
	if (myrank == 7) ranks[2] = 6;
	MPI_Group_incl( g1, 3, ranks, &g2 );
	
	/* Check the resulting group */
	MPI_Group_size( g2, &size );
	MPI_Group_rank( g2, &rank );
	
	if (size != 3) {
	    fprintf( stderr, "Size should be %d, is %d\n", 3, size );
	    errs++;
	}
	if (rank != 0) {
	    fprintf( stderr, "Rank should be %d, is %d\n", 0, rank );
	    errs++;
	}

	rin[0] = 0; rin[1] = 1; rin[2] = 2;
	MPI_Group_translate_ranks( g2, 3, rin, g1, rout );
	for (i=0; i<3; i++) {
	    if (rout[i] != ranks[i]) {
		fprintf( stderr, "translated rank[%d] %d should be %d\n", 
			 i, rout[i], ranks[i] );
		errs++;
	    }
	}
	
	/* Translate the process of the self group against another group */
	MPI_Comm_group( MPI_COMM_SELF, &selfgroup );
	rin[0] = 0;
	MPI_Group_translate_ranks( selfgroup, 1, rin, g1, rout );
	if (rout[0] != myrank) {
	    fprintf( stderr, "translated of self is %d should be %d\n", 
			 rout[0], myrank );
	    errs++;
	}

	for (i=0; i<size; i++) 
	    rin[i] = i;
	MPI_Group_translate_ranks( g1, size, rin, selfgroup, rout );
	for (i=0; i<size; i++) {
	    if (i == myrank && rout[i] != 0) {
		fprintf( stderr, "translated world to self of %d is %d\n",
			 i, rout[i] );
		errs++;
	    }
	    else if (i != myrank && rout[i] != MPI_UNDEFINED) {
		fprintf( stderr, "translated world to self of %d should be undefined, is %d\n",
			 i, rout[i] );
		errs++;
	    }
	}
	MPI_Group_free( &selfgroup );

	/* Exclude everyone in our group */
	{
	    int ii, *lranks, g1size;

	    MPI_Group_size( g1, &g1size );
	    
	    lranks = (int *)malloc( g1size * sizeof(int) );
	    for (ii=0; ii<g1size; ii++) lranks[ii] = ii;
	    MPI_Group_excl( g1, g1size, lranks, &g6 );
	    if (g6 != MPI_GROUP_EMPTY) {
		fprintf( stderr, "Group formed by excluding all ranks not empty\n" );
		errs++;
		MPI_Group_free( &g6 );
	    }
	    free( lranks );
	}
	
	/* Add tests for additional group operations */
	/* 
	   g2 = incl 1,3,7
	   g3 = excl 1,3,7
	   intersect ( w, g2 ) => g2
	   intersect ( w, g3 ) => g3
	   intersect ( g2, g3 ) => empty
	   
	   g4 = rincl 1:n-1:2
	   g5 = rexcl 1:n-1:2
	   union( g4, g5 ) => world
	   g6 = rincl n-1:1:-1 
	   g7 = rexcl n-1:1:-1
	   union( g6, g7 ) => concat of entries, similar to world
	   diff( w, g2 ) => g3
	*/
	MPI_Group_free( &g2 );

	range[0][0] = 1;
	range[0][1] = size-1;
	range[0][2] = 2;
	MPI_Group_range_excl( g1, 1, range, &g5 );

	range[0][0] = 1;
	range[0][1] = size-1;
	range[0][2] = 2;
	MPI_Group_range_incl( g1, 1, range, &g4 );
	MPI_Group_union( g4, g5, &g45 );
	MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
	if (result != MPI_UNEQUAL) {
	    errs++;
	    fprintf( stderr, "Comparison with empty group gave %d, not 3\n",
		     result );
	}
	MPI_Group_free( &g4 );
	MPI_Group_free( &g5 );
	MPI_Group_free( &g45 );

	/* Now, duplicate the test, but using negative strides */
	range[0][0] = size-1;
	range[0][1] = 1;
	range[0][2] = -2;
	MPI_Group_range_excl( g1, 1, range, &g5 );

	range[0][0] = size-1;
	range[0][1] = 1;
	range[0][2] = -2;
	MPI_Group_range_incl( g1, 1, range, &g4 );

	MPI_Group_union( g4, g5, &g45 );

	MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
	if (result != MPI_UNEQUAL) {
	    errs++;
	    fprintf( stderr, "Comparison with empty group (formed with negative strides) gave %d, not 3\n",
		     result );
	}
	MPI_Group_free( &g4 );
	MPI_Group_free( &g5 );
	MPI_Group_free( &g45 );
        MPI_Group_free( &g1 );

    if (myrank == 0) 
    {
	if (errs == 0) {
	    printf( " No Errors\n" );
	}
	else {
	    printf( "Found %d errors\n", errs );
	}
    }

    MPI_Finalize();
    return 0;
}
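The expected value 3 in the comparison checks above is MPI_UNEQUAL in MPICH; MPI_Group_compare can also report MPI_IDENT (same members, same order) or MPI_SIMILAR (same members, different order). A compact sketch of those outcomes (illustrative, needs at least 2 processes):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size, result;
    int fwd[2] = { 0, 1 }, rev[2] = { 1, 0 };
    MPI_Group world, gfwd, grev;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size < 2) {
        MPI_Finalize();
        return 0;
    }

    MPI_Comm_group(MPI_COMM_WORLD, &world);
    MPI_Group_incl(world, 2, fwd, &gfwd);   /* ranks 0,1 */
    MPI_Group_incl(world, 2, rev, &grev);   /* ranks 1,0: same members, other order */

    MPI_Group_compare(gfwd, grev, &result); /* expect MPI_SIMILAR */
    if (rank == 0)
        printf("compare: %s\n",
               result == MPI_IDENT ? "MPI_IDENT" :
               result == MPI_SIMILAR ? "MPI_SIMILAR" : "MPI_UNEQUAL");

    MPI_Group_free(&gfwd);
    MPI_Group_free(&grev);
    MPI_Group_free(&world);
    MPI_Finalize();
    return 0;
}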
Example #28
0
File: phg.c Project: askhl/octopus-dfrt2
int Zoltan_PHG_Initialize_Params(
  ZZ *zz,   /* the Zoltan data structure */
  float *part_sizes,
  PHGPartParams *hgp
)
{
  int err = ZOLTAN_OK;
  char *yo = "Zoltan_PHG_Initialize_Params";
  int nProc;
  int usePrimeComm;
  MPI_Comm communicator;
  char add_obj_weight[MAX_PARAM_STRING_LEN];
  char edge_weight_op[MAX_PARAM_STRING_LEN];
  char cut_objective[MAX_PARAM_STRING_LEN];
  char *package = hgp->hgraph_pkg; 
  char *method = hgp->hgraph_method;
  char buf[1024];

  memset(hgp, 0, sizeof(*hgp)); /* in the future if we forget to initialize
                                   another param at least it will be 0 */
  
  Zoltan_Bind_Param(PHG_params, "HYPERGRAPH_PACKAGE", &hgp->hgraph_pkg);
  Zoltan_Bind_Param(PHG_params, "PHG_MULTILEVEL", &hgp->useMultilevel);
  Zoltan_Bind_Param(PHG_params, "PHG_FROM_GRAPH_METHOD", hgp->convert_str);  
  Zoltan_Bind_Param(PHG_params, "PHG_OUTPUT_LEVEL", &hgp->output_level);
  Zoltan_Bind_Param(PHG_params, "FINAL_OUTPUT", &hgp->final_output); 
  Zoltan_Bind_Param(PHG_params, "CHECK_GRAPH", &hgp->check_graph);   
  Zoltan_Bind_Param(PHG_params, "CHECK_HYPERGRAPH", &hgp->check_graph);   
  Zoltan_Bind_Param(PHG_params, "PHG_NPROC_VERTEX", &hgp->nProc_x_req);
  Zoltan_Bind_Param(PHG_params, "PHG_NPROC_EDGE", &hgp->nProc_y_req);
  Zoltan_Bind_Param(PHG_params, "PHG_COARSENING_LIMIT", &hgp->redl);
  Zoltan_Bind_Param(PHG_params, "PHG_COARSENING_NCANDIDATE", &hgp->nCand);
  Zoltan_Bind_Param(PHG_params, "PHG_COARSENING_METHOD", hgp->redm_str);
  Zoltan_Bind_Param(PHG_params, "PHG_COARSENING_METHOD_FAST", hgp->redm_fast);
  Zoltan_Bind_Param(PHG_params, "PHG_VERTEX_VISIT_ORDER", &hgp->visit_order);
  Zoltan_Bind_Param(PHG_params, "PHG_EDGE_SCALING", &hgp->edge_scaling);
  Zoltan_Bind_Param(PHG_params, "PHG_VERTEX_SCALING", &hgp->vtx_scaling);
  Zoltan_Bind_Param(PHG_params, "PHG_REFINEMENT_METHOD", hgp->refinement_str);
  Zoltan_Bind_Param(PHG_params, "PHG_DIRECT_KWAY", &hgp->kway);
  Zoltan_Bind_Param(PHG_params, "PHG_REFINEMENT_LOOP_LIMIT", 
                                &hgp->fm_loop_limit);
  Zoltan_Bind_Param(PHG_params, "PHG_REFINEMENT_MAX_NEG_MOVE", 
                                &hgp->fm_max_neg_move);  
  Zoltan_Bind_Param(PHG_params, "PHG_REFINEMENT_QUALITY", 
                                &hgp->refinement_quality);  
  Zoltan_Bind_Param(PHG_params, "PHG_COARSEPARTITION_METHOD", 
                                 hgp->coarsepartition_str);
  Zoltan_Bind_Param(PHG_params, "PHG_USE_TIMERS",
                                 (void*) &hgp->use_timers);  
  Zoltan_Bind_Param(PHG_params, "USE_TIMERS",
                                 (void*) &hgp->use_timers);  
  Zoltan_Bind_Param(PHG_params, "PHG_EDGE_SIZE_THRESHOLD",
                                 (void*) &hgp->EdgeSizeThreshold);  
  Zoltan_Bind_Param(PHG_params, "PHG_MATCH_EDGE_SIZE_THRESHOLD",
                                 (void*) &hgp->MatchEdgeSizeThreshold);  
  Zoltan_Bind_Param(PHG_params, "PHG_BAL_TOL_ADJUSTMENT",
                                 (void*) &hgp->bal_tol_adjustment);  
  Zoltan_Bind_Param(PHG_params, "PARKWAY_SERPART",
                                 (void *) hgp->parkway_serpart);
  Zoltan_Bind_Param(PHG_params, "PHG_CUT_OBJECTIVE",
                                 (void *) &cut_objective);
  Zoltan_Bind_Param(PHG_params, "ADD_OBJ_WEIGHT",
                                 (void *) add_obj_weight);
  Zoltan_Bind_Param(PHG_params, "PHG_EDGE_WEIGHT_OPERATION",
                                 (void *) edge_weight_op);
  Zoltan_Bind_Param(PHG_params, "PHG_RANDOMIZE_INPUT",
                                 (void*) &hgp->RandomizeInitDist);  
  Zoltan_Bind_Param(PHG_params, "PHG_PROCESSOR_REDUCTION_LIMIT",
		                 (void*) &hgp->ProRedL);
  Zoltan_Bind_Param(PHG_params, "PHG_REPART_MULTIPLIER",
		                 (void*) &hgp->RepartMultiplier);
  Zoltan_Bind_Param(PHG_params, "PATOH_ALLOC_POOL0",
                                 (void*) &hgp->patoh_alloc_pool0);
  Zoltan_Bind_Param(PHG_params, "PATOH_ALLOC_POOL1",
                                 (void*) &hgp->patoh_alloc_pool1);
  
  
  /* Set default values */
  strncpy(hgp->hgraph_pkg,           "phg", MAX_PARAM_STRING_LEN);
  strncpy(hgp->convert_str,    "neighbors", MAX_PARAM_STRING_LEN);
  strncpy(hgp->redm_str,             "agg", MAX_PARAM_STRING_LEN);
  hgp->match_array_type = 0;
  strncpy(hgp->redm_fast,          "l-ipm", MAX_PARAM_STRING_LEN);
  strncpy(hgp->coarsepartition_str, "auto", MAX_PARAM_STRING_LEN);
  strncpy(hgp->refinement_str,       "fm2", MAX_PARAM_STRING_LEN);
  strncpy(hgp->parkway_serpart,    "patoh", MAX_PARAM_STRING_LEN);
  strncpy(cut_objective,    "connectivity", MAX_PARAM_STRING_LEN);
  strncpy(add_obj_weight,           "none", MAX_PARAM_STRING_LEN);
  strncpy(edge_weight_op,            "max", MAX_PARAM_STRING_LEN);
  /* LB.Approach is initialized to "REPARTITION", and set in Set_Key_Params  */
  strncpy(hgp->hgraph_method,  zz->LB.Approach, MAX_PARAM_STRING_LEN);
  if (!strcasecmp(zz->LB.Approach,"REFINE")) 
    hgp->useMultilevel = 0;
  else
    hgp->useMultilevel = 1;

  hgp->use_timers = 0;
  hgp->LocalCoarsePartition = 0;
  hgp->edge_scaling = 0;
  hgp->vtx_scaling = 0;
  hgp->vtx_scal_size = 0;
  hgp->vtx_scal = NULL;  /* Array for storing vertex degree scale vector. 
                            Should perhaps go in hg structure, not the
                            param struct? */
  hgp->connectivity_cut = 1; 
  hgp->visit_order = 0;  /* Random */
  hgp->check_graph = 0;
  hgp->bal_tol = zz->LB.Imbalance_Tol[0]; /* Make vector for multiconstraint */
  hgp->bal_tol_adjustment = 0.7;
  hgp->nCand = 100;
  hgp->redl = MAX(2*zz->LB.Num_Global_Parts, 100);
  hgp->output_level = PHG_DEBUG_NONE;
  hgp->final_output = 0;
  hgp->nProc_x_req = -1;
  hgp->nProc_y_req = -1;
  hgp->kway = 0;
  hgp->fm_loop_limit = 10;
  hgp->fm_max_neg_move = 250;  
  hgp->refinement_quality = 1;
  hgp->RandomizeInitDist = 0;
  hgp->EdgeSizeThreshold = 0.25;
  hgp->MatchEdgeSizeThreshold = 500;  
  hgp->hybrid_keep_factor = 0.;
  hgp->ProRedL = 0.0; /* UVCUVC: CHECK default set to 0 until we run more experiments */
  hgp->RepartMultiplier = 100.;
  hgp->patoh_alloc_pool0 = 0;
  hgp->patoh_alloc_pool1 = 0;
  hgp->UseFixedVtx = 0;
  hgp->UsePrefPart = 0;
  
  /* Get application values of parameters. */
  err = Zoltan_Assign_Param_Vals(zz->Params, PHG_params, zz->Debug_Level, 
          zz->Proc, zz->Debug_Proc);
  
  nProc = zz->Num_Proc;
  usePrimeComm = 0;

  /* Parse add_obj_weight parameter */

  if (!strcasecmp(add_obj_weight, "none")) {
    hgp->add_obj_weight = PHG_ADD_NO_WEIGHT;
    hgp->part_sizes = part_sizes;
  }
  else if (zz->Obj_Weight_Dim > 0) {
    /* Do not add_obj_weight until multiconstraint PHG is implemented */
    ZOLTAN_PRINT_WARN(zz->Proc, yo,
     "Both application supplied *and* ADD_OBJ_WEIGHT "
     "calculated vertex weights were provided.");
    ZOLTAN_PRINT_WARN(zz->Proc, yo,
      "Only the first application supplied weight per vertex will be used.");
    hgp->add_obj_weight = PHG_ADD_NO_WEIGHT;
    hgp->part_sizes = part_sizes;
  } 
  else {
    if (!strcasecmp(add_obj_weight, "vertices")){
      hgp->add_obj_weight = PHG_ADD_UNIT_WEIGHT;
    } else if (!strcasecmp(add_obj_weight, "unit")){
      hgp->add_obj_weight = PHG_ADD_UNIT_WEIGHT;
    } else if (!strcasecmp(add_obj_weight, "vertex degree")){
      hgp->add_obj_weight = PHG_ADD_PINS_WEIGHT;
    } else if (!strcasecmp(add_obj_weight, "nonzeros")){
      hgp->add_obj_weight = PHG_ADD_PINS_WEIGHT;
    } else if (!strcasecmp(add_obj_weight, "pins")){
      hgp->add_obj_weight = PHG_ADD_PINS_WEIGHT;
    } else{
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Invalid ADD_OBJ_WEIGHT parameter.\n");
      err = ZOLTAN_WARN;
    }
    /* Set hgp->part_sizes to new array of part_sizes with added obj weight. */
    if (part_sizes)
      err = Zoltan_LB_Add_Part_Sizes_Weight(zz, 
                          (zz->Obj_Weight_Dim ? zz->Obj_Weight_Dim : 1), 
                          zz->Obj_Weight_Dim+1, 
                          part_sizes, &(hgp->part_sizes));
  }

  if ((zz->Obj_Weight_Dim==0) &&      /* no application supplied weights */
      (hgp->add_obj_weight==PHG_ADD_NO_WEIGHT)){ /* no calculated weight */

    hgp->add_obj_weight = PHG_ADD_UNIT_WEIGHT; /* default object weight */
  }

  if (!strcasecmp(cut_objective, "default")
      || !strcasecmp(cut_objective, "connectivity"))
      hgp->connectivity_cut = 1;
  else if (!strcasecmp(cut_objective, "hyperedges"))
      hgp->connectivity_cut = 0;
  else {
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Invalid PHG_CUT_OBJECTIVE parameter.\n");
      goto End;
  }

  if (!strcasecmp(edge_weight_op, "max")){
    hgp->edge_weight_op = PHG_MAX_EDGE_WEIGHTS;
  } else if (!strcasecmp(edge_weight_op, "add")){
    hgp->edge_weight_op = PHG_ADD_EDGE_WEIGHTS;
  } else if (!strcasecmp(edge_weight_op, "error")){
    hgp->edge_weight_op = PHG_FLAG_ERROR_EDGE_WEIGHTS;
  } else{
    ZOLTAN_PRINT_ERROR(zz->Proc, yo,
      "Invalid PHG_EDGE_WEIGHT_OPERATION parameter.\n");
    err = ZOLTAN_WARN;
  }

  if ((strcasecmp(method, "PARTITION")) &&
      (strcasecmp(method, "REPARTITION")) &&
      (strcasecmp(method, "REFINE"))) {
    sprintf(buf,"%s is not a valid hypergraph method\n",method);
    ZOLTAN_PRINT_ERROR (zz->Proc, yo, buf);
    err = ZOLTAN_FATAL;
    goto End;
  }

  /* Adjust refinement parameters using hgp->refinement_quality */
  if (hgp->refinement_quality < 0.5/hgp->fm_loop_limit) 
    /* No refinement */
    strncpy(hgp->refinement_str,      "no",   MAX_PARAM_STRING_LEN);
  else {
    /* Scale FM parameters */
    hgp->fm_loop_limit   *= hgp->refinement_quality;
    hgp->fm_max_neg_move *= hgp->refinement_quality;
  }

  if (!strcasecmp(package, "PHG")){
    /* Test to determine whether we should change the number of processors
       used for partitioning to make more efficient 2D decomposition */

    if (hgp->nProc_x_req != 1 && hgp->nProc_y_req != 1)  /* Want 2D decomp */
      if (zz->Num_Proc > SMALL_PRIME && Zoltan_PHG_isPrime(zz->Num_Proc)) 
        /* 2D data decomposition is requested but we have a prime 
         * number of processors. */
        usePrimeComm = 1;

    if ((!strcasecmp(method, "REPARTITION"))){
        zz->LB.Remap_Flag = 0;
    }

    if ((!strcasecmp(method, "REPARTITION")) ||
        (!strcasecmp(method, "REFINE"))) {
        hgp->fm_loop_limit = 4; /* experimental evaluation showed that for
                                repartitioning/refinement a small number of passes
                                is "good enough". These are all heuristics, hence
                                it is possible to create pathological cases;
                                but in general this seems to be sufficient */
    }
    
    if (!hgp->useMultilevel) {
        /* don't do coarsening */
        strncpy(hgp->redm_str, "no", MAX_PARAM_STRING_LEN);

        /* we have modified all coarse partitioners to handle preferred part
           if user wants to choose one she can choose; otherwise default 
           partitioner
           (greedy growing) does work better than previous default partitioning
           for phg_refine ("no"). */        
        hgp->UsePrefPart = 1;

    }
    if (!strcasecmp(method, "REFINE") && hgp->useMultilevel){
        /* UVCUVC: as a heuristic we prefer local matching;
           in our experiments for IPDPS'07 and WileyChapter multilevel_refine
           didn't prove useful; it is too costly even with local matching,
           hence it will not be released yet (i.e. not in v3). */
        strncpy(hgp->redm_str, "l-ipm", MAX_PARAM_STRING_LEN);                
        hgp->UsePrefPart = 1;
    }    
  }
  else if (!strcasecmp(package, "PARKWAY")){
    if (hgp->nProc_x_req>1) {
      err = ZOLTAN_FATAL;
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "ParKway requires nProc_x=1 or -1.");
      goto End;
    }
    hgp->nProc_x_req = 1;
  } 
  else if (!strcasecmp(package, "PATOH")){
    if (zz->Num_Proc>1) {
      err = ZOLTAN_FATAL;
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "PaToH only works with Num_Proc=1.");
      goto End;
    }
  }

  if (!usePrimeComm)
    MPI_Comm_dup(zz->Communicator, &communicator);
  else {
    MPI_Group newgrp, zzgrp;
    nProc--;
    MPI_Comm_group(zz->Communicator, &zzgrp);
    MPI_Group_excl(zzgrp, 1, &nProc, &newgrp);
    MPI_Comm_create(zz->Communicator, newgrp, &communicator);
    MPI_Group_free(&newgrp);
    MPI_Group_free(&zzgrp);
  }

  err = Zoltan_PHG_Set_2D_Proc_Distrib(zz, communicator, zz->Proc, 
                                       nProc, hgp->nProc_x_req, 
                                       hgp->nProc_y_req, 
                                       &hgp->globalcomm);
  if (err != ZOLTAN_OK) 
    goto End;

  /* Convert strings to function pointers. */
  err = Zoltan_PHG_Set_Part_Options (zz, hgp);
  
End:
  return err;
}
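Zoltan_PHG_isPrime is not shown in this excerpt; the sketch below reproduces the same workaround — drop the highest rank when the process count is prime so a 2D decomposition is possible — with a naive trial-division test standing in for the Zoltan helper and a plain size check in place of SMALL_PRIME (illustrative only):

#include <mpi.h>
#include <stdio.h>

/* naive trial-division primality test, a stand-in for Zoltan_PHG_isPrime */
static int is_prime(int n)
{
    int d;
    if (n < 2) return 0;
    for (d = 2; d * d <= n; d++)
        if (n % d == 0) return 0;
    return 1;
}

int main(int argc, char **argv)
{
    int rank, nproc, last;
    MPI_Comm comm2d;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    if (nproc > 2 && is_prime(nproc)) {
        /* exclude the highest rank, as the Zoltan code above does for prime counts */
        MPI_Group world, sub;
        last = nproc - 1;
        MPI_Comm_group(MPI_COMM_WORLD, &world);
        MPI_Group_excl(world, 1, &last, &sub);
        MPI_Comm_create(MPI_COMM_WORLD, sub, &comm2d);
        MPI_Group_free(&sub);
        MPI_Group_free(&world);
    }
    else {
        MPI_Comm_dup(MPI_COMM_WORLD, &comm2d);
    }

    printf("rank %d: partitioning communicator is %s\n", rank,
           comm2d == MPI_COMM_NULL ? "MPI_COMM_NULL" : "valid");

    if (comm2d != MPI_COMM_NULL)
        MPI_Comm_free(&comm2d);
    MPI_Finalize();
    return 0;
}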
Example #29
0
void SpParHelper::MemoryEfficientPSort(pair<KEY,VAL> * array, IT length, IT * dist, const MPI_Comm & comm)
{
	int nprocs, myrank;
	MPI_Comm_size(comm, &nprocs);
	MPI_Comm_rank(comm, &myrank);
	int nsize = nprocs / 2;	// new size
	if(nprocs < 1000)
	{
		bool excluded =  false;
		if(dist[myrank] == 0)	excluded = true;

		int nreals = 0; 
		for(int i=0; i< nprocs; ++i)	
			if(dist[i] != 0) ++nreals;

		if(nreals == nprocs)	// general case
		{
			long * dist_in = new long[nprocs];
                	for(int i=0; i< nprocs; ++i)    dist_in[i] = (long) dist[i];
                	vpsort::parallel_sort (array, array+length,  dist_in, comm);
                	delete [] dist_in;
		}
		else
		{
			long * dist_in = new long[nreals];
			int * dist_out = new int[nprocs-nreals];	// ranks to exclude
			int indin = 0;
			int indout = 0;
			for(int i=0; i< nprocs; ++i)	
			{
				if(dist[i] == 0)
					dist_out[indout++] = i;
				else
					dist_in[indin++] = (long) dist[i];	
			}
		
			#ifdef DEBUG	
			ostringstream outs;
			outs << "To exclude indices: ";
			copy(dist_out, dist_out+indout, ostream_iterator<int>(outs, " ")); outs << endl;
			SpParHelper::Print(outs.str());
			#endif

			MPI_Group sort_group, real_group;
			MPI_Comm_group(comm, &sort_group);
			MPI_Group_excl(sort_group, indout, dist_out, &real_group);
			MPI_Group_free(&sort_group);

			// MPI_Comm_create must be executed by all processes in comm,
			// even those that do not belong to the new group; for them
			// MPI_COMM_NULL is returned as real_comm.
			// (C++ binding: MPI::Intracomm MPI::Intracomm::Create(const MPI::Group& group) const;)
			MPI_Comm real_comm;
			MPI_Comm_create(comm, real_group, &real_comm);
			if(!excluded)
			{
				vpsort::parallel_sort (array, array+length,  dist_in, real_comm);
				MPI_Comm_free(&real_comm);
			}
			MPI_Group_free(&real_group);
			delete [] dist_in;
			delete [] dist_out;
		}
	}
	else
	{
		IT gl_median = accumulate(dist, dist+nsize, static_cast<IT>(0));	// global rank of the first element of the median processor
		sort(array, array+length);	// re-sort because we might have swapped data in previous iterations
		int color = (myrank < nsize)? 0: 1;
		
		pair<KEY,VAL> * low = array;
		pair<KEY,VAL> * upp = array;
		GlobalSelect(gl_median, low, upp, array, length, comm);
		BipartiteSwap(low, array, length, nsize, color, comm);

		if(color == 1)	dist = dist + nsize;	// adjust for the second half of processors

		// recursive call; two implicit 'spawn's where each half of the processors executes with different parameters
		// MPI::Intracomm MPI::Intracomm::Split(int color, int key) const;

		MPI_Comm halfcomm;
		MPI_Comm_split(comm, color, myrank, &halfcomm);	// split into two communicators
		MemoryEfficientPSort(array, length, dist, halfcomm);
	}
}
Example #30
0
File: main.c Project: torgiren/szkola
int main( int argc, char *argv[] )
{
    int numprocs, myid, server, workerid, ranks[1], 
        request, i, iter, done;
    long rands[CHUNKSIZE], max, in, out, totalin, totalout;
    double x, y, Pi, error, epsilon;
    MPI_Comm world, workers;
    MPI_Group world_group, worker_group;
    MPI_Status status;

    MPI_Init( &argc, &argv );
    world  = MPI_COMM_WORLD;
    MPI_Comm_size( world, &numprocs );
    MPI_Comm_rank( world, &myid );
    server = numprocs-1;	// Last process is a random server 

/***
   * Now Master should read epsilon from command line
   * and distribute it to all processes.
   */
    if (myid == 0) {	// Read epsilon from command line
        sscanf( argv[1], "%lf", &epsilon );
    }

//	MPE_Start_log();
	
//	MPE_Log_event(START_BCAST,0,"bcast epsilon");
    MPI_Bcast( &epsilon, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD );
//	MPE_Log_event(END_BCAST,0,"bcast epsilon");

/***
   * Create new process group called world_group containing all 
   * processes and its communicator called world
   * and a group called worker_group containing all processes
   * except the last one (called here server) 
   * and its communicator called workers.
   */
    MPI_Comm_group( world, &world_group );
    ranks[0] = server;
    MPI_Group_excl( world_group, 1, ranks, &worker_group );
    MPI_Comm_create( world, worker_group, &workers );
    MPI_Group_free( &worker_group );

/***
   * Server part
   *
   * Server should loop until request code is 0, in each iteration:
   * - receiving request code from any slave
   * - generating a vector of CHUNKSIZE randoms <= INT_MAX
   * - sending vector back to slave 
   */
    if (myid == server) {	// I am the random generator server

	do {
	    MPI_Recv( &request, 1, MPI_INT, MPI_ANY_SOURCE, REQUEST,
		     world, &status );
	    if (request) {
		for (i = 0; i < CHUNKSIZE; ) {
		        rands[i] = random();
			if ( rands[i] <= INT_MAX ) i++;
		}
		MPI_Send( rands, CHUNKSIZE, MPI_LONG,
                         status.MPI_SOURCE, REPLY, world );
	    }
	}
	while( request > 0 );

    }
/***
   * Workers (including Master) part
   *
   * Worker should send initial request to server.
   * Later, in a loop worker should:
   * - receive vector of randoms
   * - compute x,y point inside unit square
   * - check (and count result) if point is inside/outside 
   *   unit circle
   * - sum both counts over all workers
   * - calculate pi and its error (from "exact" value)
   * - test if error is within epsilon limit
   * - test continuation condition (error and max. points limit)
   * - print pi by master only
   * - send a request to server (all if more or master only if finish)
   * Before finishing workers should free their communicator.
   */ 
    else {			// I am a worker process

        request = 1;
	done = 0; 
	in = out = 0;
	max  = INT_MAX;         // max int, for normalization
        MPI_Send( &request, 1, MPI_INT, server, REQUEST, world );
        MPI_Comm_rank( workers, &workerid );
	iter = 0;
	while (!done) {
	    iter++;
	    request = 1;
	    MPI_Recv( rands, CHUNKSIZE, MPI_LONG, server, REPLY,
		     world, &status );
	    for (i = 0; i < CHUNKSIZE - 1; ) {
	        x = (((double) rands[i++])/max) * 2 - 1;
		y = (((double) rands[i++])/max) * 2 - 1;
		if ( x*x + y*y < 1.0 ) {
		    in++;
		}
		else
		    out++;
	    }
	    MPI_Allreduce( &in, &totalin, 1, MPI_LONG, MPI_SUM, workers );
	    MPI_Allreduce( &out, &totalout, 1, MPI_LONG, MPI_SUM, workers );
	    Pi = ( 4.0 * totalin ) / ( totalin + totalout );
	    error = fabs( Pi - PI );
	    done = ( error < epsilon || (totalin + totalout) > THROW_MAX );
	    request = (done) ? 0 : 1;
	    if (myid == 0) {
		printf( "\rpi = %23.20f", Pi );
		MPI_Send( &request, 1, MPI_INT, server, REQUEST, world );
	    }
	    else {
		if (request)
		    MPI_Send( &request, 1, MPI_INT, server, REQUEST, world );
	    }
	}
	MPI_Comm_free( &workers );
    }

/***
   * Master should print final point counts.
   */
    if (myid == 0) {
        printf( "\npoints: %ld\nin: %ld, out: %ld, <ret> to exit\n",
	       totalin+totalout, totalin, totalout );
	getchar();
    }
    MPI_Finalize();
	return 0;
}
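The worker loop above maps each random value from [0, INT_MAX] to [-1, 1] and counts hits inside the unit circle, so the hit fraction approximates pi/4. A serial sketch of the same estimator, using the example's INT_MAX normalization (random() is POSIX; rand()/RAND_MAX would be the portable analogue):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

int main(void)
{
    long i, n = 1000000, in = 0;
    double x, y;

    for (i = 0; i < n; i++) {
        x = ((double) random() / INT_MAX) * 2 - 1;   /* point in [-1, 1] x [-1, 1] */
        y = ((double) random() / INT_MAX) * 2 - 1;
        if (x * x + y * y < 1.0)
            in++;
    }
    printf("pi ~= %f\n", 4.0 * (double) in / (double) n);
    return 0;
}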