Example #1
0
int main(int argc, char* argv[]) {
    int         test_count = 2;    /* Number of tests  */
    int         max_size = 1000;   /* Max msg. length  */
    int         min_size = 1000;   /* Min msg. length  */
    int         size_incr = 1000;  /* Increment for    */
                                   /*     msg. sizes   */
    float*      x;                 /* Message buffer   */
    double*     times;             /* Elapsed times    */
    double*     max_times;         /* Max times        */
    double*     min_times;         /* Min times        */
    int         time_array_order;  /* Size of timing   */
                                   /*     arrays.      */
    double      start;             /* Start time       */
    double      elapsed;           /* Elapsed time     */
    int         i, test, size;     /* Loop variables   */
    int         p, my_rank, source, dest; 
    MPI_Comm    io_comm;
    MPI_Status  status;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_dup(MPI_COMM_WORLD, &io_comm);
    Cache_io_rank(MPI_COMM_WORLD, io_comm);

    Initialize(max_size, min_size, size_incr, my_rank,
        &x, &times, &max_times, &min_times, 
        &time_array_order);

    source = (my_rank - 1 + p) % p;   /* + p keeps the result nonnegative */
    dest = (my_rank + 1) % p;

    /* For each message size, find average circuit time */
    /*     Loop var size = message size                 */
    /*     Loop var i = index into arrays for timings   */
    for (size = min_size, i = 0; size <= max_size; 
            size = size + size_incr, i++) {
        times[i] = 0.0;
        max_times[i] = 0.0;
        min_times[i] = 1000000.0;
        for (test = 0; test < test_count; test++) {
            start = MPI_Wtime();
            if (my_rank == 0)   /* rank 0 starts the circuit; otherwise    */
                                /* every process would block in MPI_Recv   */
                MPI_Send(x, size, MPI_FLOAT, dest, 0, MPI_COMM_WORLD);
            MPI_Recv(x, size, MPI_FLOAT, source, 0, 
                 MPI_COMM_WORLD, &status);
            if (my_rank != 0)
                MPI_Send(x, size, MPI_FLOAT, dest, 0, 
                     MPI_COMM_WORLD);
            elapsed = MPI_Wtime() - start;
            times[i] = times[i] + elapsed;
            if (elapsed > max_times[i])
                max_times[i] = elapsed;
            if (elapsed < min_times[i])
                min_times[i] = elapsed;
        }
    } /* for size . . . */

    Print_results(io_comm, my_rank, min_size, max_size, 
        size_incr, time_array_order, test_count, times, 
        max_times, min_times);

    MPI_Finalize();
} /* main */
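
/* A minimal sketch of the Initialize() helper called above (the original
 * helper, like Cache_io_rank() and Print_results(), is not shown in this
 * example); it assumes only what the call site implies: allocate the message
 * buffer and the three timing arrays and report how many message sizes will
 * be timed.  <stdio.h>, <stdlib.h> and "mpi.h" are assumed to be included. */
void Initialize(int max_size, int min_size, int size_incr, int my_rank,
        float** x_ptr, double** times_ptr, double** max_times_ptr,
        double** min_times_ptr, int* time_array_order_ptr) {
    int order = (max_size - min_size)/size_incr + 1;

    *x_ptr         = (float*)  malloc(max_size*sizeof(float));
    *times_ptr     = (double*) malloc(order*sizeof(double));
    *max_times_ptr = (double*) malloc(order*sizeof(double));
    *min_times_ptr = (double*) malloc(order*sizeof(double));
    *time_array_order_ptr = order;

    if (*x_ptr == NULL || *times_ptr == NULL ||
            *max_times_ptr == NULL || *min_times_ptr == NULL) {
        fprintf(stderr, "Process %d: malloc failed in Initialize\n", my_rank);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
} /* Initialize */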
Example #2
0
/* @ingroup PIO_init
 *
 * Library initialization used when IO tasks are a subset of compute
 * tasks.
 *
 * This function creates an MPI intracommunicator between a set of IO
 * tasks and one or more sets of computational tasks.
 *
 * The caller must create the comp_comm MPI communicator before calling
 * this function; the IO communicator is created internally from a subset
 * of comp_comm.
 *
 * @param comp_comm the MPI_Comm of the compute tasks
 *
 * @param num_iotasks the number of io tasks to use
 *
 * @param stride the offset between io tasks in the comp_comm
 *
 * @param base the comp_comm index of the first io task
 *
 * @param rearr the rearranger to use by default; this may be
 * overridden in @ref PIO_initdecomp
 *
 * @param iosysidp pointer that gets the index of the defined system descriptor
 *
 * @return 0 on success, otherwise a PIO error code.
 */
int PIOc_Init_Intracomm(const MPI_Comm comp_comm, const int num_iotasks,
			const int stride, const int base, const int rearr,
			int *iosysidp)
{
  iosystem_desc_t *iosys;
  int ierr = PIO_NOERR;
  int ustride;
  int lbase;
  int mpierr;

  iosys = (iosystem_desc_t *) malloc(sizeof(iosystem_desc_t));

  /* Copy the computation communicator into union_comm. */
  mpierr = MPI_Comm_dup(comp_comm, &iosys->union_comm);
  CheckMPIReturn(mpierr, __FILE__, __LINE__);
  if (mpierr)
      ierr = PIO_EIO;

  /* Copy the computation communicator into comp_comm. */
  if (!ierr)
  {
      mpierr = MPI_Comm_dup(comp_comm, &iosys->comp_comm);
      CheckMPIReturn(mpierr, __FILE__, __LINE__);
      if (mpierr)
	  ierr = PIO_EIO;
  }

  if (!ierr)
  {
      iosys->my_comm = iosys->comp_comm;
      iosys->io_comm = MPI_COMM_NULL;
      iosys->intercomm = MPI_COMM_NULL;
      iosys->error_handler = PIO_INTERNAL_ERROR;
      iosys->async_interface = false;
      iosys->compmaster = 0;
      iosys->iomaster = 0;
      iosys->ioproc = false;
      iosys->default_rearranger = rearr;
      iosys->num_iotasks = num_iotasks;

      ustride = stride;

      /* Find MPI rank and number of tasks in comp_comm communicator. */
      CheckMPIReturn(MPI_Comm_rank(iosys->comp_comm, &(iosys->comp_rank)),__FILE__,__LINE__);
      CheckMPIReturn(MPI_Comm_size(iosys->comp_comm, &(iosys->num_comptasks)),__FILE__,__LINE__);
      if(iosys->comp_rank==0)
	  iosys->compmaster = MPI_ROOT;

      /* Ensure that settings for number of computation tasks, number
       * of IO tasks, and the stride are reasonable. */
      if((iosys->num_comptasks == 1) && (num_iotasks*ustride > 1)) {
	  // This is a serial run with a bad configuration. Set up a single task.
	  fprintf(stderr, "PIO_TP PIOc_Init_Intracomm reset stride and tasks.\n");
	  iosys->num_iotasks = 1;
	  ustride = 1;
      }
      if((iosys->num_iotasks < 1) || ((iosys->num_iotasks*ustride) > iosys->num_comptasks)){
	  fprintf(stderr, "PIO_TP PIOc_Init_Intracomm error\n");
	  fprintf(stderr, "num_iotasks=%d, ustride=%d, num_comptasks=%d\n", num_iotasks, ustride, iosys->num_comptasks);
	  return PIO_EBADID;
      }

      /* Create an array that holds the ranks of the tasks to be used for IO. */
      iosys->ioranks = (int *) calloc(iosys->num_iotasks, sizeof(int));
      for(int i=0;i< iosys->num_iotasks; i++){
	  iosys->ioranks[i] = (base + i*ustride) % iosys->num_comptasks;
	  if(iosys->ioranks[i] == iosys->comp_rank)
	      iosys->ioproc = true;
      }
      iosys->ioroot = iosys->ioranks[0];

      /* Create an MPI info object. */
      CheckMPIReturn(MPI_Info_create(&(iosys->info)),__FILE__,__LINE__);

      if(iosys->comp_rank == iosys->ioranks[0])
	  iosys->iomaster = MPI_ROOT;

      /* Create a group for the computation tasks. */
      CheckMPIReturn(MPI_Comm_group(iosys->comp_comm, &(iosys->compgroup)),__FILE__,__LINE__);

      /* Create a group for the IO tasks. */
      CheckMPIReturn(MPI_Group_incl(iosys->compgroup, iosys->num_iotasks, iosys->ioranks,
				    &(iosys->iogroup)),__FILE__,__LINE__);

      /* Create an MPI communicator for the IO tasks. */
      CheckMPIReturn(MPI_Comm_create(iosys->comp_comm, iosys->iogroup, &(iosys->io_comm))
		     ,__FILE__,__LINE__);

      /* For the tasks that are doing IO, get their rank. */
      if (iosys->ioproc)
	  CheckMPIReturn(MPI_Comm_rank(iosys->io_comm, &(iosys->io_rank)),__FILE__,__LINE__);
      else
	  iosys->io_rank = -1;

      iosys->union_rank = iosys->comp_rank;

      /* Add this iosys struct to the list in the PIO library. */
      *iosysidp = pio_add_to_iosystem_list(iosys);

      pio_get_env();

      /* allocate buffer space for compute nodes */
      compute_buffer_init(*iosys);
  }

  return ierr;
}
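
/* Usage sketch (not part of the library source above): a minimal program
 * that brings up one IO task with stride 1 and base 0, then shuts the IO
 * system down again.  PIO_REARR_SUBSET and PIOc_finalize() are assumed to
 * be provided by pio.h; check the installed headers before relying on them. */
#include <mpi.h>
#include <pio.h>

int main(int argc, char **argv)
{
    int iosysid, ret;

    MPI_Init(&argc, &argv);

    /* One IO task, stride 1, first IO task at comp_comm rank 0. */
    ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, 1, 1, 0, PIO_REARR_SUBSET,
                              &iosysid);
    if (ret != PIO_NOERR)
        MPI_Abort(MPI_COMM_WORLD, ret);

    /* ... define decompositions, create files, and write data here ... */

    PIOc_finalize(iosysid);
    MPI_Finalize();
    return 0;
}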
Example #3
0
int main(int argc, char *argv[])
{

    int provided, i[2], k;
    char *buffer, *ptr_dt;
    buffer = (char *) malloc(BUFSIZE * sizeof(char));
    MPI_Status status;
    pthread_t receiver_thread, sender_thread[NUMSENDS];
    pthread_attr_t attr;
    MPI_Comm communicator;
    int bs;

    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);

    if (provided != MPI_THREAD_MULTIPLE) {
        printf("This test requires MPI_THREAD_MULTIPLE support\n");
        MPI_Abort(MPI_COMM_WORLD, 911);
    }

    MPI_Buffer_attach(buffer, BUFSIZE);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_dup(MPI_COMM_WORLD, &communicator);        /* We do not use this communicator in this program, but
                                                           with this call, the problem appears more reliably.
                                                           If the MPI_Comm_dup() call is commented out, it is still
                                                           evident but does not appear that often (don't know why) */

    /* Initialize and set thread detached attribute */
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);

    pthread_create(&receiver_thread, &attr, &receiver, NULL);
    for (k = 0; k < NUMSENDS; k++)
        pthread_create(&sender_thread[k], &attr, &sender_bsend, NULL);
    pthread_join(receiver_thread, NULL);
    for (k = 0; k < NUMSENDS; k++)
        pthread_join(sender_thread[k], NULL);
    MPI_Barrier(MPI_COMM_WORLD);

    pthread_create(&receiver_thread, &attr, &receiver, NULL);
    for (k = 0; k < NUMSENDS; k++)
        pthread_create(&sender_thread[k], &attr, &sender_ibsend, NULL);
    pthread_join(receiver_thread, NULL);
    for (k = 0; k < NUMSENDS; k++)
        pthread_join(sender_thread[k], NULL);
    MPI_Barrier(MPI_COMM_WORLD);

    pthread_create(&receiver_thread, &attr, &receiver, NULL);
    for (k = 0; k < NUMSENDS; k++)
        pthread_create(&sender_thread[k], &attr, &sender_isend, NULL);
    pthread_join(receiver_thread, NULL);
    for (k = 0; k < NUMSENDS; k++)
        pthread_join(sender_thread[k], NULL);
    MPI_Barrier(MPI_COMM_WORLD);

    pthread_create(&receiver_thread, &attr, &receiver, NULL);
    for (k = 0; k < NUMSENDS; k++)
        pthread_create(&sender_thread[k], &attr, &sender_send, NULL);
    pthread_join(receiver_thread, NULL);
    for (k = 0; k < NUMSENDS; k++)
        pthread_join(sender_thread[k], NULL);
    MPI_Barrier(MPI_COMM_WORLD);

    pthread_attr_destroy(&attr);
    if (!rank)
        printf( " No Errors\n" );

    MPI_Comm_free(&communicator);
    MPI_Buffer_detach(&ptr_dt, &bs);
    free(buffer);
    MPI_Finalize();
    return 0;
}
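
/* A minimal sketch (assumed; not the original test's thread routines) of the
 * sender/receiver functions referenced above.  rank, size and NUMSENDS are
 * file-scope as in the test; MSGSIZE is a value invented for this sketch.
 * The buffer attached with MPI_Buffer_attach() must be large enough for
 * NUMSENDS messages plus MPI_BSEND_OVERHEAD each for the buffered variant. */
#define MSGSIZE 64

void *sender_bsend(void *arg)
{
    char buf[MSGSIZE] = { 0 };
    /* Buffered send to the next rank; it completes locally out of the
     * attached buffer, so it cannot deadlock against the receiver. */
    MPI_Bsend(buf, MSGSIZE, MPI_CHAR, (rank + 1) % size, 0, MPI_COMM_WORLD);
    return NULL;
}

void *receiver(void *arg)
{
    char buf[MSGSIZE];
    int k;
    /* Expect one message from each sender thread on the previous rank. */
    for (k = 0; k < NUMSENDS; k++)
        MPI_Recv(buf, MSGSIZE, MPI_CHAR, (rank - 1 + size) % size, 0,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    return NULL;
}
/* sender_ibsend, sender_isend and sender_send would follow the same pattern
 * with MPI_Ibsend/MPI_Isend (plus MPI_Wait) and MPI_Send respectively. */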
Example #4
0
int main( int argc, char* argv[] ) {
	MPI_Comm CommWorld;
	int rank;
	int numProcessors;
	int procToWatch;
	
	/* Initialise MPI, get world info */
	MPI_Init( &argc, &argv );
	MPI_Comm_dup( MPI_COMM_WORLD, &CommWorld );
	MPI_Comm_size( CommWorld, &numProcessors );
	MPI_Comm_rank( CommWorld, &rank );
	
	BaseFoundation_Init( &argc, &argv );
	BaseIO_Init( &argc, &argv );
	
	if( argc >= 2 ) {
		procToWatch = atoi( argv[1] );
	}
	else {
		procToWatch = 0;
	}
	if( rank == procToWatch )
	{
		Stream* myInfo;
		Stream* myDebug;		
		Stream* myDump;
		Stream* myError;
		
		Stream* allNew;

		myInfo = Journal_Register( Info_Type, "MyInfo" );
		myDebug = Journal_Register( Debug_Type, "MyDebug" );
		myDump = Journal_Register( Dump_Type, "MyDump" );
		myError = Journal_Register( Error_Type, "MyError" );
		
		allNew = Journal_Register( "My own stream", "allNew" );
		
		printf( "TEST: \"HELLO\" should appear\n" );
		Journal_Printf( myInfo, "%s\n", "HELLO" );
		printf( "TEST: \"WORLD\" should NOT appear\n" );
		Journal_Printf( myDebug, "%s\n", "HELLO" );
		printf( "TEST: \"HELLO\" should NOT appear\n" );
		Journal_Printf( myDump, "%s\n", "HELLO" );
		printf( "TEST: \"WORLD\" should NOT appear\n" );
		Journal_Printf( myError, "%s\n", "HELLO" );

		printf( "Turning off myInfo\n" );
		Journal_Enable_NamedStream( Info_Type, "MyInfo" , False );
		
		printf( "TEST: \"HELLO\" should NOT appear\n" );
		Journal_Printf( myInfo, "%s\n", "HELLO" );

		printf( "Turning on Dump\n"  );
		Journal_Enable_TypedStream( Dump_Type, True );
		Journal_Enable_NamedStream( Dump_Type, "MyDump", True );
		
		printf( "TEST: \"HELLO\" should appear\n" );
		Journal_Printf( myDump, "%s\n", "HELLO" );
		
		printf( "Turning off Journal\n" );
		stJournal->enable = False;

		printf( "TEST: \"HELLO\" should NOT appear\n" );		
		Journal_Printf( myDump, "%s\n", "HELLO" );
		
		stJournal->enable = True;
		
		Journal_Enable_NamedStream( Info_Type, "MyInfo", True );
		
		printf( "TEST: DPrintf\n" );
		Journal_DPrintf( myInfo, "DPrintf\n" );	
	}

	Memory_Print();
	
	BaseIO_Finalise();

	BaseFoundation_Finalise();

	/* Close off MPI */
	MPI_Comm_free( &CommWorld );
	MPI_Finalize();
	
	return EXIT_SUCCESS;
}
Example #5
0
int main(int narg, char* arg[])
{
  sint transpose=0;
  sint id=0,np=1;
  sint i,handle,maxv=3;
  real *u;
  slong *glindex;
#ifndef MPI
  int comm;
#else
  MPI_Comm comm;
  MPI_Init(&narg,&arg);
  MPI_Comm_dup(MPI_COMM_WORLD,&comm);
  { int i;
    MPI_Comm_rank(comm,&i); id=i;
    MPI_Comm_size(comm,&i); np=i;
  }
#endif

  glindex = malloc(np*2*sizeof(slong));
  for(i=0;i<np;++i) glindex[2*i+1] = glindex[2*i] = i+1;
  i=np*2;
  fgs_setup(&handle,glindex,&i,&comm,&np);
  free(glindex);
  
  u = malloc(np*2*sizeof(real));
  for(i=0;i<np;++i) u[2*i  ] = (real)( 2*np*id + 2*i ),
                    u[2*i+1] = (real)( 2*np*id + 2*i+1 );
  /*for(i=0;i<np;++i) printf(" (%g %g)", u[2*i], u[2*i+1]); printf("\n");*/
  i=1, fgs_op(&handle,u,&datatype,&i,&transpose);
  /*for(i=0;i<np;++i) printf(" (%g %g)", u[2*i], u[2*i+1]); printf("\n");*/
  for(i=0;i<np;++i) assert_is_zero( np*(2*np*(np-1)+4*i+1) - u[2*i] ),
                    assert_is_zero( np*(2*np*(np-1)+4*i+1) - u[2*i+1]  );
  free(u);

  u = malloc(np*2*3*sizeof(real));
  for(i=0;i<np;++i)
    u[3*(2*i  )+0] = (real)( 3*(2*np*id + 2*i  ) + 0 ),
    u[3*(2*i  )+1] = (real)( 3*(2*np*id + 2*i  ) + 1 ),
    u[3*(2*i  )+2] = (real)( 3*(2*np*id + 2*i  ) + 2 ),
    u[3*(2*i+1)+0] = (real)( 3*(2*np*id + 2*i+1) + 0 ),
    u[3*(2*i+1)+1] = (real)( 3*(2*np*id + 2*i+1) + 1 ),
    u[3*(2*i+1)+2] = (real)( 3*(2*np*id + 2*i+1) + 2 );
  /*for(i=0;i<np;++i) {
    int j;
    printf("%d: ( ", id);
    for(j=3*(2*i);j<=3*(2*i+1)+2;++j) printf("%g ",u[j]);
    printf(")\n");
  }*/
  i=1, maxv=3, fgs_op_vec(&handle,u,&maxv,&datatype,&i,&transpose);
  /*for(i=0;i<np;++i) {
    int j;
    printf("%d: ( ", id);
    for(j=3*(2*i);j<=3*(2*i+1)+2;++j) printf("%g ",u[j]);
    printf(")\n");
  }*/
  for(i=0;i<np;++i)
    assert_is_zero( np*(6*np*(np-1)+12*i+3+2*0) - u[3*(2*i  )+0] ),
    assert_is_zero( np*(6*np*(np-1)+12*i+3+2*1) - u[3*(2*i  )+1] ),
    assert_is_zero( np*(6*np*(np-1)+12*i+3+2*2) - u[3*(2*i  )+2] ),
    assert_is_zero( np*(6*np*(np-1)+12*i+3+2*0) - u[3*(2*i+1)+0] ),
    assert_is_zero( np*(6*np*(np-1)+12*i+3+2*1) - u[3*(2*i+1)+1] ),
    assert_is_zero( np*(6*np*(np-1)+12*i+3+2*2) - u[3*(2*i+1)+2] );
  free(u);

  u = malloc(np*2*3*sizeof(real));
  for(i=0;i<np;++i)
    u[2*np*0+(2*i  )] = (real)( 3*(2*np*id + 2*i  ) + 0 ),
    u[2*np*1+(2*i  )] = (real)( 3*(2*np*id + 2*i  ) + 1 ),
    u[2*np*2+(2*i  )] = (real)( 3*(2*np*id + 2*i  ) + 2 ),
    u[2*np*0+(2*i+1)] = (real)( 3*(2*np*id + 2*i+1) + 0 ),
    u[2*np*1+(2*i+1)] = (real)( 3*(2*np*id + 2*i+1) + 1 ),
    u[2*np*2+(2*i+1)] = (real)( 3*(2*np*id + 2*i+1) + 2 );
  i=1, maxv=3, fgs_op_many(&handle,u,u+2*np,u+4*np,0,0,0,&maxv,
                           &datatype,&i,&transpose);
  for(i=0;i<np;++i)
    assert_is_zero( np*(6*np*(np-1)+12*i+3+2*0) - u[2*np*0+(2*i  )] ),
    assert_is_zero( np*(6*np*(np-1)+12*i+3+2*1) - u[2*np*1+(2*i  )] ),
    assert_is_zero( np*(6*np*(np-1)+12*i+3+2*2) - u[2*np*2+(2*i  )] ),
    assert_is_zero( np*(6*np*(np-1)+12*i+3+2*0) - u[2*np*0+(2*i+1)] ),
    assert_is_zero( np*(6*np*(np-1)+12*i+3+2*1) - u[2*np*1+(2*i+1)] ),
    assert_is_zero( np*(6*np*(np-1)+12*i+3+2*2) - u[2*np*2+(2*i+1)] );
  free(u);
  
  fgs_free(&handle);
  printf("test on node %d/%d succeeded\n", (int)id+1, (int)np);
#ifdef MPI  
  MPI_Comm_free(&comm);
  MPI_Finalize();
#endif
  return 0;
}
Example #6
0
/*@C
   PetscFinalize - Processes the finalization options and cleans up PETSc at
   the conclusion of the program. MPI_Finalize() is called only if the user
   had not called MPI_Init() before calling PetscInitialize().

   Collective on PETSC_COMM_WORLD

   Options Database Keys:
+  -options_table - Calls PetscOptionsView()
.  -options_left - Prints unused options that remain in the database
.  -objects_dump [all] - Prints list of objects allocated by the user that have not been freed; the option all causes all outstanding objects to be listed
.  -mpidump - Calls PetscMPIDump()
.  -malloc_dump - Calls PetscMallocDump()
.  -malloc_info - Prints total memory usage
-  -malloc_log - Prints summary of memory usage

   Level: beginner

   Note:
   See PetscInitialize() for more general runtime options.

.seealso: PetscInitialize(), PetscOptionsView(), PetscMallocDump(), PetscMPIDump(), PetscEnd()
@*/
PetscErrorCode  PetscFinalize(void)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  PetscInt       nopt;
  PetscBool      flg1 = PETSC_FALSE,flg2 = PETSC_FALSE,flg3 = PETSC_FALSE;
#if defined(PETSC_HAVE_AMS)
  PetscBool      flg = PETSC_FALSE;
#endif
#if defined(PETSC_USE_LOG)
  char           mname[PETSC_MAX_PATH_LEN];
#endif

  PetscFunctionBegin;
  if (!PetscInitializeCalled) {
    printf("PetscInitialize() must be called before PetscFinalize()\n");
    PetscFunctionReturn(PETSC_ERR_ARG_WRONGSTATE);
  }
  ierr = PetscInfo(NULL,"PetscFinalize() called\n");CHKERRQ(ierr);

#if defined(PETSC_SERIALIZE_FUNCTIONS)
  ierr = PetscFPTDestroy();CHKERRQ(ierr);
#endif


#if defined(PETSC_HAVE_AMS)
  ierr = PetscOptionsGetBool(NULL,"-options_gui",&flg,NULL);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscOptionsAMSDestroy();CHKERRQ(ierr);
  }
#endif

#if defined(PETSC_HAVE_SERVER)
  flg1 = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,"-server",&flg1,NULL);CHKERRQ(ierr);
  if (flg1) {
    /*  this is a crude hack, but better than nothing */
    ierr = PetscPOpen(PETSC_COMM_WORLD,NULL,"pkill -9 petscwebserver","r",NULL);CHKERRQ(ierr);
  }
#endif

  ierr = PetscHMPIFinalize();CHKERRQ(ierr);

  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = PetscOptionsGetBool(NULL,"-malloc_info",&flg2,NULL);CHKERRQ(ierr);
  if (!flg2) {
    flg2 = PETSC_FALSE;
    ierr = PetscOptionsGetBool(NULL,"-memory_info",&flg2,NULL);CHKERRQ(ierr);
  }
  if (flg2) {
    ierr = PetscMemoryShowUsage(PETSC_VIEWER_STDOUT_WORLD,"Summary of Memory Usage in PETSc\n");CHKERRQ(ierr);
  }

#if defined(PETSC_USE_LOG)
  flg1 = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,"-get_total_flops",&flg1,NULL);CHKERRQ(ierr);
  if (flg1) {
    PetscLogDouble flops = 0;
    ierr = MPI_Reduce(&petsc_TotalFlops,&flops,1,MPI_DOUBLE,MPI_SUM,0,PETSC_COMM_WORLD);CHKERRQ(ierr);
    ierr = PetscPrintf(PETSC_COMM_WORLD,"Total flops over all processors %g\n",flops);CHKERRQ(ierr);
  }
#endif


#if defined(PETSC_USE_LOG)
#if defined(PETSC_HAVE_MPE)
  mname[0] = 0;

  ierr = PetscOptionsGetString(NULL,"-log_mpe",mname,PETSC_MAX_PATH_LEN,&flg1);CHKERRQ(ierr);
  if (flg1) {
    if (mname[0]) {ierr = PetscLogMPEDump(mname);CHKERRQ(ierr);}
    else          {ierr = PetscLogMPEDump(0);CHKERRQ(ierr);}
  }
#endif
  mname[0] = 0;

  ierr = PetscOptionsGetString(NULL,"-log_summary",mname,PETSC_MAX_PATH_LEN,&flg1);CHKERRQ(ierr);
  if (flg1) {
    PetscViewer viewer;
    if (mname[0]) {
      ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD,mname,&viewer);CHKERRQ(ierr);
      ierr = PetscLogView(viewer);CHKERRQ(ierr);
      ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
    } else {
      viewer = PETSC_VIEWER_STDOUT_WORLD;
      ierr   = PetscLogView(viewer);CHKERRQ(ierr);
    }
  }

  mname[0] = 0;

  ierr = PetscOptionsGetString(NULL,"-log_summary_python",mname,PETSC_MAX_PATH_LEN,&flg1);CHKERRQ(ierr);
  if (flg1) {
    PetscViewer viewer;
    if (mname[0]) {
      ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD,mname,&viewer);CHKERRQ(ierr);
      ierr = PetscLogViewPython(viewer);CHKERRQ(ierr);
      ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
    } else {
      viewer = PETSC_VIEWER_STDOUT_WORLD;
      ierr   = PetscLogViewPython(viewer);CHKERRQ(ierr);
    }
  }

  ierr = PetscOptionsGetString(NULL,"-log_detailed",mname,PETSC_MAX_PATH_LEN,&flg1);CHKERRQ(ierr);
  if (flg1) {
    if (mname[0])  {ierr = PetscLogPrintDetailed(PETSC_COMM_WORLD,mname);CHKERRQ(ierr);}
    else           {ierr = PetscLogPrintDetailed(PETSC_COMM_WORLD,0);CHKERRQ(ierr);}
  }

  mname[0] = 0;

  ierr = PetscOptionsGetString(NULL,"-log_all",mname,PETSC_MAX_PATH_LEN,&flg1);CHKERRQ(ierr);
  ierr = PetscOptionsGetString(NULL,"-log",mname,PETSC_MAX_PATH_LEN,&flg2);CHKERRQ(ierr);
  if (flg1 || flg2) {
    if (mname[0]) PetscLogDump(mname);
    else          PetscLogDump(0);
  }
#endif

  /*
     Free all objects registered with PetscObjectRegisterDestroy() such as PETSC_VIEWER_XXX_().
  */
  ierr = PetscObjectRegisterDestroyAll();CHKERRQ(ierr);

  ierr = PetscStackDestroy();CHKERRQ(ierr);

  flg1 = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,"-no_signal_handler",&flg1,NULL);CHKERRQ(ierr);
  if (!flg1) { ierr = PetscPopSignalHandler();CHKERRQ(ierr);}
  flg1 = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,"-mpidump",&flg1,NULL);CHKERRQ(ierr);
  if (flg1) {
    ierr = PetscMPIDump(stdout);CHKERRQ(ierr);
  }
  flg1 = PETSC_FALSE;
  flg2 = PETSC_FALSE;
  /* preemptive call to avoid listing this option in options table as unused */
  ierr = PetscOptionsHasName(NULL,"-malloc_dump",&flg1);CHKERRQ(ierr);
  ierr = PetscOptionsHasName(NULL,"-objects_dump",&flg1);CHKERRQ(ierr);
  ierr = PetscOptionsGetBool(NULL,"-options_table",&flg2,NULL);CHKERRQ(ierr);

  if (flg2) {
    PetscViewer viewer;
    ierr = PetscViewerASCIIGetStdout(PETSC_COMM_WORLD,&viewer);CHKERRQ(ierr);
    ierr = PetscOptionsView(viewer);CHKERRQ(ierr);
    ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
  }

  /* to prevent PETSc -options_left from warning */
  ierr = PetscOptionsHasName(NULL,"-nox",&flg1);CHKERRQ(ierr);
  ierr = PetscOptionsHasName(NULL,"-nox_warning",&flg1);CHKERRQ(ierr);

  if (!PetscHMPIWorker) { /* worker processes skip this because they do not usually process options */
    flg3 = PETSC_FALSE; /* default value is required */
    ierr = PetscOptionsGetBool(NULL,"-options_left",&flg3,&flg1);CHKERRQ(ierr);
    ierr = PetscOptionsAllUsed(&nopt);CHKERRQ(ierr);
    if (flg3) {
      if (!flg2) { /* have not yet printed the options */
        PetscViewer viewer;
        ierr = PetscViewerASCIIGetStdout(PETSC_COMM_WORLD,&viewer);CHKERRQ(ierr);
        ierr = PetscOptionsView(viewer);CHKERRQ(ierr);
        ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
      }
      if (!nopt) {
        ierr = PetscPrintf(PETSC_COMM_WORLD,"There are no unused options.\n");CHKERRQ(ierr);
      } else if (nopt == 1) {
        ierr = PetscPrintf(PETSC_COMM_WORLD,"There is one unused database option. It is:\n");CHKERRQ(ierr);
      } else {
        ierr = PetscPrintf(PETSC_COMM_WORLD,"There are %D unused database options. They are:\n",nopt);CHKERRQ(ierr);
      }
    }
#if defined(PETSC_USE_DEBUG)
    if (nopt && !flg3 && !flg1) {
      ierr = PetscPrintf(PETSC_COMM_WORLD,"WARNING! There are options you set that were not used!\n");CHKERRQ(ierr);
      ierr = PetscPrintf(PETSC_COMM_WORLD,"WARNING! could be spelling mistake, etc!\n");CHKERRQ(ierr);
      ierr = PetscOptionsLeft();CHKERRQ(ierr);
    } else if (nopt && flg3) {
#else
    if (nopt && flg3) {
#endif
      ierr = PetscOptionsLeft();CHKERRQ(ierr);
    }
  }

  {
    PetscThreadComm tcomm_world;
    ierr = PetscGetThreadCommWorld(&tcomm_world);CHKERRQ(ierr);
    /* Free global thread communicator */
    ierr = PetscThreadCommDestroy(&tcomm_world);CHKERRQ(ierr);
  }

  /*
       List all objects the user may have forgot to free
  */
  ierr = PetscOptionsHasName(NULL,"-objects_dump",&flg1);CHKERRQ(ierr);
  if (flg1) {
    MPI_Comm local_comm;
    char     string[64];

    ierr = PetscOptionsGetString(NULL,"-objects_dump",string,64,NULL);CHKERRQ(ierr);
    ierr = MPI_Comm_dup(MPI_COMM_WORLD,&local_comm);CHKERRQ(ierr);
    ierr = PetscSequentialPhaseBegin_Private(local_comm,1);CHKERRQ(ierr);
    ierr = PetscObjectsDump(stdout,(string[0] == 'a') ? PETSC_TRUE : PETSC_FALSE);CHKERRQ(ierr);
    ierr = PetscSequentialPhaseEnd_Private(local_comm,1);CHKERRQ(ierr);
    ierr = MPI_Comm_free(&local_comm);CHKERRQ(ierr);
  }
  PetscObjectsCounts    = 0;
  PetscObjectsMaxCounts = 0;

  ierr = PetscFree(PetscObjects);CHKERRQ(ierr);

#if defined(PETSC_USE_LOG)
  ierr = PetscLogDestroy();CHKERRQ(ierr);
#endif

  /*
     Destroy any packages that registered a finalize
  */
  ierr = PetscRegisterFinalizeAll();CHKERRQ(ierr);

  /*
     Destroy all the function registration lists created
  */
  ierr = PetscFinalize_DynamicLibraries();CHKERRQ(ierr);

  /*
     Print PetscFunctionLists that have not been properly freed

  ierr = PetscFunctionListPrintAll();CHKERRQ(ierr);
  */

  if (petsc_history) {
    ierr = PetscCloseHistoryFile(&petsc_history);CHKERRQ(ierr);
    petsc_history = 0;
  }

  ierr = PetscInfoAllow(PETSC_FALSE,NULL);CHKERRQ(ierr);

  {
    char fname[PETSC_MAX_PATH_LEN];
    FILE *fd;
    int  err;

    fname[0] = 0;

    ierr = PetscOptionsGetString(NULL,"-malloc_dump",fname,250,&flg1);CHKERRQ(ierr);
    flg2 = PETSC_FALSE;
    ierr = PetscOptionsGetBool(NULL,"-malloc_test",&flg2,NULL);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
    if (PETSC_RUNNING_ON_VALGRIND) flg2 = PETSC_FALSE;
#else
    flg2 = PETSC_FALSE;         /* Skip reporting for optimized builds regardless of -malloc_test */
#endif
    if (flg1 && fname[0]) {
      char sname[PETSC_MAX_PATH_LEN];

      sprintf(sname,"%s_%d",fname,rank);
      fd   = fopen(sname,"w"); if (!fd) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_FILE_OPEN,"Cannot open log file: %s",sname);
      ierr = PetscMallocDump(fd);CHKERRQ(ierr);
      err  = fclose(fd);
      if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fclose() failed on file");
    } else if (flg1 || flg2) {
      MPI_Comm local_comm;

      ierr = MPI_Comm_dup(MPI_COMM_WORLD,&local_comm);CHKERRQ(ierr);
      ierr = PetscSequentialPhaseBegin_Private(local_comm,1);CHKERRQ(ierr);
      ierr = PetscMallocDump(stdout);CHKERRQ(ierr);
      ierr = PetscSequentialPhaseEnd_Private(local_comm,1);CHKERRQ(ierr);
      ierr = MPI_Comm_free(&local_comm);CHKERRQ(ierr);
    }
  }

  {
    char fname[PETSC_MAX_PATH_LEN];
    FILE *fd = NULL;

    fname[0] = 0;

    ierr = PetscOptionsGetString(NULL,"-malloc_log",fname,250,&flg1);CHKERRQ(ierr);
    ierr = PetscOptionsHasName(NULL,"-malloc_log_threshold",&flg2);CHKERRQ(ierr);
    if (flg1 && fname[0]) {
      int err;

      if (!rank) {
        fd = fopen(fname,"w");
        if (!fd) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_FILE_OPEN,"Cannot open log file: %s",fname);
      }
      ierr = PetscMallocDumpLog(fd);CHKERRQ(ierr);
      if (fd) {
        err = fclose(fd);
        if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fclose() failed on file");
      }
    } else if (flg1 || flg2) {
      ierr = PetscMallocDumpLog(stdout);CHKERRQ(ierr);
    }
  }
  /* Can be destroyed only after all the options are used */
  ierr = PetscOptionsDestroy();CHKERRQ(ierr);

  PetscGlobalArgc = 0;
  PetscGlobalArgs = 0;

#if defined(PETSC_USE_REAL___FLOAT128)
  ierr = MPI_Type_free(&MPIU___FLOAT128);CHKERRQ(ierr);
#if defined(PETSC_HAVE_COMPLEX)
  ierr = MPI_Type_free(&MPIU___COMPLEX128);CHKERRQ(ierr);
#endif
  ierr = MPI_Op_free(&MPIU_MAX);CHKERRQ(ierr);
  ierr = MPI_Op_free(&MPIU_MIN);CHKERRQ(ierr);
#endif

#if defined(PETSC_HAVE_COMPLEX)
#if !defined(PETSC_HAVE_MPI_C_DOUBLE_COMPLEX)
  ierr = MPI_Type_free(&MPIU_C_DOUBLE_COMPLEX);CHKERRQ(ierr);
  ierr = MPI_Type_free(&MPIU_C_COMPLEX);CHKERRQ(ierr);
#endif
#endif

#if (defined(PETSC_HAVE_COMPLEX) && !defined(PETSC_HAVE_MPI_C_DOUBLE_COMPLEX)) || defined(PETSC_USE_REAL___FLOAT128)
  ierr = MPI_Op_free(&MPIU_SUM);CHKERRQ(ierr);
#endif

  ierr = MPI_Type_free(&MPIU_2SCALAR);CHKERRQ(ierr);
#if defined(PETSC_USE_64BIT_INDICES) || !defined(MPI_2INT)
  ierr = MPI_Type_free(&MPIU_2INT);CHKERRQ(ierr);
#endif
  ierr = MPI_Op_free(&PetscMaxSum_Op);CHKERRQ(ierr);
  ierr = MPI_Op_free(&PetscADMax_Op);CHKERRQ(ierr);
  ierr = MPI_Op_free(&PetscADMin_Op);CHKERRQ(ierr);

  /*
     Destroy any known inner MPI_Comm's and attributes pointing to them
     Note this will not destroy any new communicators the user has created.

     If all PETSc objects were not destroyed those left over objects will have hanging references to
     the MPI_Comms that were freed; but that is ok because those PETSc objects will never be used again
 */
  {
    PetscCommCounter *counter;
    PetscMPIInt      flg;
    MPI_Comm         icomm;
    union {MPI_Comm comm; void *ptr;} ucomm;
    ierr = MPI_Attr_get(PETSC_COMM_SELF,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (flg) {
      icomm = ucomm.comm;
      ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

      ierr = MPI_Attr_delete(PETSC_COMM_SELF,Petsc_InnerComm_keyval);CHKERRQ(ierr);
      ierr = MPI_Attr_delete(icomm,Petsc_Counter_keyval);CHKERRQ(ierr);
      ierr = MPI_Comm_free(&icomm);CHKERRQ(ierr);
    }
    ierr = MPI_Attr_get(PETSC_COMM_WORLD,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (flg) {
      icomm = ucomm.comm;
      ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_WORLD,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

      ierr = MPI_Attr_delete(PETSC_COMM_WORLD,Petsc_InnerComm_keyval);CHKERRQ(ierr);
      ierr = MPI_Attr_delete(icomm,Petsc_Counter_keyval);CHKERRQ(ierr);
      ierr = MPI_Comm_free(&icomm);CHKERRQ(ierr);
    }
  }

  ierr = MPI_Keyval_free(&Petsc_Counter_keyval);CHKERRQ(ierr);
  ierr = MPI_Keyval_free(&Petsc_InnerComm_keyval);CHKERRQ(ierr);
  ierr = MPI_Keyval_free(&Petsc_OuterComm_keyval);CHKERRQ(ierr);

#if defined(PETSC_HAVE_CUDA)
  {
    PetscInt p;
    for (p = 0; p < PetscGlobalSize; ++p) {
      if (p == PetscGlobalRank) cublasShutdown();
      ierr = MPI_Barrier(PETSC_COMM_WORLD);CHKERRQ(ierr);
    }
  }
#endif

  if (PetscBeganMPI) {
#if defined(PETSC_HAVE_MPI_FINALIZED)
    PetscMPIInt flag;
    ierr = MPI_Finalized(&flag);CHKERRQ(ierr);
    if (flag) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI_Finalize() has already been called, even though MPI_Init() was called by PetscInitialize()");
#endif
    ierr = MPI_Finalize();CHKERRQ(ierr);
  }
/*

     Note: In certain cases PETSC_COMM_WORLD is never MPI_Comm_free()ed because
   the communicator has some outstanding requests on it. Specifically if the
   flag PETSC_HAVE_BROKEN_REQUEST_FREE is set (for IBM MPI implementation). See
   src/vec/utils/vpscat.c. Due to this the memory allocated in PetscCommDuplicate()
   is never freed as it should be. Thus one may obtain messages of the form
   [ 1] 8 bytes PetscCommDuplicate() line 645 in src/sys/mpiu.c indicating the
   memory was not freed.

*/
  ierr = PetscMallocClear();CHKERRQ(ierr);

  PetscInitializeCalled = PETSC_FALSE;
  PetscFinalizeCalled   = PETSC_TRUE;
  PetscFunctionReturn(ierr);
}
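
/* Usage sketch (assumed, not part of the PETSc source above): the
 * PetscInitialize()/PetscFinalize() pairing that makes the options listed in
 * the header comment (-malloc_dump, -options_left, ...) take effect here. */
#include <petscsys.h>

int main(int argc, char **argv)
{
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, "PetscFinalize() usage sketch");
  if (ierr) return ierr;
  ierr = PetscPrintf(PETSC_COMM_WORLD, "Hello from PETSc\n");CHKERRQ(ierr);
  /* MPI_Finalize() is called inside PetscFinalize() only when PETSc itself
   * called MPI_Init() during PetscInitialize(). */
  ierr = PetscFinalize();
  return ierr;
}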

#if defined(PETSC_MISSING_LAPACK_lsame_)
PETSC_EXTERN int lsame_(char *a,char *b)
{
  if (*a == *b) return 1;
  if (*a + 32 == *b) return 1;
  if (*a - 32 == *b) return 1;
  return 0;
}
#endif

#if defined(PETSC_MISSING_LAPACK_lsame)
PETSC_EXTERN int lsame(char *a,char *b)
{
  if (*a == *b) return 1;
  if (*a + 32 == *b) return 1;
  if (*a - 32 == *b) return 1;
  return 0;
}
#endif
Example #7
0
int main( int argc, char **argv )
{
  int size, rank, key, his_key, lrank, result;
  MPI_Comm myComm;
  MPI_Comm myFirstComm;
  MPI_Comm mySecondComm;
  int errors = 0, sum_errors;
  MPI_Status status;
  
  /* Initialization */
  MPI_Init ( &argc, &argv );
  MPI_Comm_rank ( MPI_COMM_WORLD, &rank);
  if (verbose) printf("[%d] MPI_Init complete!\n",rank);fflush(stdout);
  MPI_Comm_size ( MPI_COMM_WORLD, &size);

  /* Only works for 2 or more processes */
  if (size >= 2) {
    MPI_Comm merge1, merge2, merge3, merge4;

    /* Generate membership key in the range [0,1] */
    key = rank % 2;
    
    MPI_Comm_split ( MPI_COMM_WORLD, key, rank, &myComm );
    /* This creates an intercomm that is the size of comm world 
       but has processes grouped by even and odd */
    MPI_Intercomm_create (myComm, 0, MPI_COMM_WORLD, (key+1)%2, 1, 
                          &myFirstComm );
    /* Dup an intercomm */
    MPI_Comm_dup ( myFirstComm, &mySecondComm );
    MPI_Comm_rank( mySecondComm, &lrank );
    his_key = -1;

	if (verbose) printf("[%d] Communicators created!\n",rank);fflush(stdout);
    
    /* Leaders communicate with each other */
    if (lrank == 0) {
      MPI_Sendrecv (&key,     1, MPI_INT, 0, 0,
                    &his_key, 1, MPI_INT, 0, 0, mySecondComm, &status);
      if (key != (his_key+1)%2) {
	  printf( "Received %d but expected %d\n", his_key, (his_key+1)%2 );
	  errors++;
      }
    }
    if (verbose) printf("[%d] MPI_Sendrecv completed!\n",rank);fflush(stdout);
    if (errors)
      printf("[%d] Failed!\n",rank);

	
    if (verbose) printf( "About to merge intercommunicators\n" );fflush(stdout);
    MPI_Intercomm_merge ( mySecondComm, key, &merge1 );
	if (verbose) printf( "merge1 done\n" );fflush(stdout);
    MPI_Intercomm_merge ( mySecondComm, (key+1)%2, &merge2 );
	if (verbose) printf( "merge2 done\n" );fflush(stdout);
    MPI_Intercomm_merge ( mySecondComm, 0, &merge3 );
	if (verbose) printf( "merge3 done\n" );fflush(stdout);
    MPI_Intercomm_merge ( mySecondComm, 1, &merge4 );
	if (verbose) printf( "merge4 done\n" );fflush(stdout);

	if (verbose) printf("[%d] MPI_Intercomm_merge completed!\n",rank);fflush(stdout);

    /* We should check that these are correct!  An easy test is that
       the merged comms are all MPI_SIMILAR (unless 2 processes are used,
       in which case MPI_CONGRUENT is ok). */
    MPI_Comm_compare( merge1, MPI_COMM_WORLD, &result );
    if ((size > 2 && result != MPI_SIMILAR) ||
	(size == 2 && result != MPI_CONGRUENT)) {
	errors ++;
	printf( "merge1 is not the same size as comm world\n" );
    }
    /* merge 2 isn't ordered the same way as the others, even for 2 processes */
    MPI_Comm_compare( merge2, MPI_COMM_WORLD, &result );
    if (result != MPI_SIMILAR) {
	errors ++;
	printf( "merge2 is not the same size as comm world\n" );
    }
    MPI_Comm_compare( merge3, MPI_COMM_WORLD, &result );
    if ((size > 2 && result != MPI_SIMILAR) ||
	(size == 2 && result != MPI_CONGRUENT)) {
	errors ++;
	printf( "merge3 is not the same size as comm world\n" );
    }
    MPI_Comm_compare( merge4, MPI_COMM_WORLD, &result );
    if ((size > 2 && result != MPI_SIMILAR) ||
	(size == 2 && result != MPI_CONGRUENT)) {
	errors ++;
	printf( "merge4 is not the same size as comm world\n" );
    }

	if (verbose) printf("[%d] MPI_Comm_compare completed!\n",rank);fflush(stdout);

    /* Free communicators */
    if (verbose) printf( "About to free communicators\n" );
    MPI_Comm_free( &myComm );
    MPI_Comm_free( &myFirstComm );
    MPI_Comm_free( &mySecondComm );
    MPI_Comm_free( &merge1 );
    MPI_Comm_free( &merge2 );
    MPI_Comm_free( &merge3 );
    MPI_Comm_free( &merge4 );

	if (verbose) printf("[%d] MPI_Comm_free completed!\n",rank);fflush(stdout);

  }
  else {
      errors ++;
      printf("[%d] Failed - at least 2 nodes must be used\n",rank);
  }

  MPI_Barrier( MPI_COMM_WORLD );
  MPI_Allreduce( &errors, &sum_errors, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
  if (sum_errors > 0) {
      printf( "%d errors on process %d\n", errors, rank );
      }
  else if (rank == 0) {
      printf( " No Errors\n" );
      }
  /* Finalize and end! */

  MPI_Finalize();
  return 0;
}
Example #8
0
/* 
 * Return an intercomm; set isLeftGroup to 1 if the calling process is 
 * a member of the "left" group.
 */
int MTestGetIntercomm( MPI_Comm *comm, int *isLeftGroup, int min_size )
{
    int size, rank, remsize, merr;
    int done=0;
    MPI_Comm mcomm  = MPI_COMM_NULL;
    MPI_Comm mcomm2 = MPI_COMM_NULL;
    int rleader;

    /* The while loop allows us to skip communicators that are too small.
       MPI_COMM_NULL is always considered large enough.  The size is
       the sum of the sizes of the local and remote groups */
    while (!done) {
        *comm = MPI_COMM_NULL;
        *isLeftGroup = 0;
        interCommName = "MPI_COMM_NULL";

	switch (interCommIdx) {
	case 0:
	    /* Split comm world in half */
	    merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	    if (merr) MTestPrintError( merr );
	    merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
	    if (merr) MTestPrintError( merr );
	    if (size > 1) {
		merr = MPI_Comm_split( MPI_COMM_WORLD, (rank < size/2), rank, 
				       &mcomm );
		if (merr) MTestPrintError( merr );
		if (rank == 0) {
		    rleader = size/2;
		}
		else if (rank == size/2) {
		    rleader = 0;
		}
		else {
		    /* Remote leader is significant only for the processes
		       designated local leaders */
		    rleader = -1;
		}
		*isLeftGroup = rank < size/2;
		merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, rleader,
					     12345, comm );
		if (merr) MTestPrintError( merr );
		interCommName = "Intercomm by splitting MPI_COMM_WORLD";
	    }
	    else 
		*comm = MPI_COMM_NULL;
	    break;
	case 1:
	    /* Split comm world in to 1 and the rest */
	    merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	    if (merr) MTestPrintError( merr );
	    merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
	    if (merr) MTestPrintError( merr );
	    if (size > 1) {
		merr = MPI_Comm_split( MPI_COMM_WORLD, rank == 0, rank, 
				       &mcomm );
		if (merr) MTestPrintError( merr );
		if (rank == 0) {
		    rleader = 1;
		}
		else if (rank == 1) {
		    rleader = 0;
		}
		else {
		    /* Remote leader is significant only for the processes
		       designated local leaders */
		    rleader = -1;
		}
		*isLeftGroup = rank == 0;
		merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, 
					     rleader, 12346, comm );
		if (merr) MTestPrintError( merr );
		interCommName = "Intercomm by splitting MPI_COMM_WORLD into 1, rest";
	    }
	    else
		*comm = MPI_COMM_NULL;
	    break;

	case 2:
	    /* Split comm world in to 2 and the rest */
	    merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	    if (merr) MTestPrintError( merr );
	    merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
	    if (merr) MTestPrintError( merr );
	    if (size > 3) {
		merr = MPI_Comm_split( MPI_COMM_WORLD, rank < 2, rank, 
				       &mcomm );
		if (merr) MTestPrintError( merr );
		if (rank == 0) {
		    rleader = 2;
		}
		else if (rank == 2) {
		    rleader = 0;
		}
		else {
		    /* Remote leader is significant only for the processes
		       designated local leaders */
		    rleader = -1;
		}
		*isLeftGroup = rank < 2;
		merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, 
					     rleader, 12347, comm );
		if (merr) MTestPrintError( merr );
		interCommName = "Intercomm by splitting MPI_COMM_WORLD into 2, rest";
	    }
	    else 
		*comm = MPI_COMM_NULL;
	    break;

	case 3:
	    /* Split comm world in half, then dup */
	    merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	    if (merr) MTestPrintError( merr );
	    merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
	    if (merr) MTestPrintError( merr );
	    if (size > 1) {
		merr = MPI_Comm_split( MPI_COMM_WORLD, (rank < size/2), rank, 
				       &mcomm );
		if (merr) MTestPrintError( merr );
		if (rank == 0) {
		    rleader = size/2;
		}
		else if (rank == size/2) {
		    rleader = 0;
		}
		else {
		    /* Remote leader is significant only for the processes
		       designated local leaders */
		    rleader = -1;
		}
		*isLeftGroup = rank < size/2;
		merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, rleader,
					     12345, comm );
		if (merr) MTestPrintError( merr );
                /* avoid leaking after assignment below */
		merr = MPI_Comm_free( &mcomm );
		if (merr) MTestPrintError( merr );

		/* now dup, some bugs only occur for dup's of intercomms */
		mcomm = *comm;
		merr = MPI_Comm_dup(mcomm, comm);
		if (merr) MTestPrintError( merr );
		interCommName = "Intercomm by splitting MPI_COMM_WORLD then dup'ing";
	    }
	    else 
		*comm = MPI_COMM_NULL;
	    break;

	case 4:
	    /* Split comm world in half, form intercomm, then split that intercomm */
	    merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	    if (merr) MTestPrintError( merr );
	    merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
	    if (merr) MTestPrintError( merr );
	    if (size > 1) {
		merr = MPI_Comm_split( MPI_COMM_WORLD, (rank < size/2), rank, 
				       &mcomm );
		if (merr) MTestPrintError( merr );
		if (rank == 0) {
		    rleader = size/2;
		}
		else if (rank == size/2) {
		    rleader = 0;
		}
		else {
		    /* Remote leader is significant only for the processes
		       designated local leaders */
		    rleader = -1;
		}
		*isLeftGroup = rank < size/2;
		merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, rleader,
					     12345, comm );
		if (merr) MTestPrintError( merr );
                /* avoid leaking after assignment below */
		merr = MPI_Comm_free( &mcomm );
		if (merr) MTestPrintError( merr );

		/* now split, some bugs only occur for splits of intercomms */
		mcomm = *comm;
		merr = MPI_Comm_rank(mcomm, &rank);
		if (merr) MTestPrintError( merr );
		/* this split is effectively a dup but tests the split code paths */
		merr = MPI_Comm_split(mcomm, 0, rank, comm);
		if (merr) MTestPrintError( merr );
		interCommName = "Intercomm by splitting MPI_COMM_WORLD then then splitting again";
	    }
	    else
		*comm = MPI_COMM_NULL;
	    break;

	case 5:
            /* split comm world in half discarding rank 0 on the "left"
             * communicator, then form them into an intercommunicator */
	    merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	    if (merr) MTestPrintError( merr );
	    merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
	    if (merr) MTestPrintError( merr );
	    if (size >= 4) {
                int color = (rank < size/2 ? 0 : 1);
                if (rank == 0)
                    color = MPI_UNDEFINED;

		merr = MPI_Comm_split( MPI_COMM_WORLD, color, rank, &mcomm );
		if (merr) MTestPrintError( merr );

		if (rank == 1) {
		    rleader = size/2;
		}
		else if (rank == (size/2)) {
		    rleader = 1;
		}
		else {
		    /* Remote leader is significant only for the processes
		       designated local leaders */
		    rleader = -1;
		}
		*isLeftGroup = rank < size/2;
                if (rank != 0) { /* 0's mcomm is MPI_COMM_NULL */
                    merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm );
                    if (merr) MTestPrintError( merr );
                }
                interCommName = "Intercomm by splitting MPI_COMM_WORLD (discarding rank 0 in the left group) then MPI_Intercomm_create'ing";
            }
            else {
                *comm = MPI_COMM_NULL;
            }
            break;

        case 6:
            /* Split comm world in half then form them into an
             * intercommunicator.  Then discard rank 0 from each group of the
             * intercomm via MPI_Comm_create. */
	    merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	    if (merr) MTestPrintError( merr );
	    merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
	    if (merr) MTestPrintError( merr );
	    if (size >= 4) {
                MPI_Group oldgroup, newgroup;
                int ranks[1];
                int color = (rank < size/2 ? 0 : 1);

		merr = MPI_Comm_split( MPI_COMM_WORLD, color, rank, &mcomm );
		if (merr) MTestPrintError( merr );

		if (rank == 0) {
		    rleader = size/2;
		}
		else if (rank == (size/2)) {
		    rleader = 0;
		}
		else {
		    /* Remote leader is significant only for the processes
		       designated local leaders */
		    rleader = -1;
		}
		*isLeftGroup = rank < size/2;
                merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, rleader, 12345, &mcomm2 );
                if (merr) MTestPrintError( merr );

                /* We have an intercomm between the two halves of comm world. Now create
                 * a new intercomm that removes rank 0 on each side. */
                merr = MPI_Comm_group(mcomm2, &oldgroup);
                if (merr) MTestPrintError( merr );
                ranks[0] = 0;
                merr = MPI_Group_excl(oldgroup, 1, ranks, &newgroup);
                if (merr) MTestPrintError( merr );
                merr = MPI_Comm_create(mcomm2, newgroup, comm);
                if (merr) MTestPrintError( merr );

                merr = MPI_Group_free(&oldgroup);
                if (merr) MTestPrintError( merr );
                merr = MPI_Group_free(&newgroup);
                if (merr) MTestPrintError( merr );

                interCommName = "Intercomm by splitting MPI_COMM_WORLD then discarding 0 ranks with MPI_Comm_create";
            }
            else {
                *comm = MPI_COMM_NULL;
            }
            break;

	default:
	    *comm = MPI_COMM_NULL;
	    interCommIdx = -1;
	    break;
	}

	if (*comm != MPI_COMM_NULL) {
	    merr = MPI_Comm_size( *comm, &size );
	    if (merr) MTestPrintError( merr );
	    merr = MPI_Comm_remote_size( *comm, &remsize );
	    if (merr) MTestPrintError( merr );
	    if (size + remsize >= min_size) done = 1;
	}
	else {
	    interCommName = "MPI_COMM_NULL";
	    done = 1;
        }

        /* we are only done if all processes are done */
        MPI_Allreduce(MPI_IN_PLACE, &done, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);

        /* Advance the comm index whether we are done or not, otherwise we could
         * spin forever trying to allocate a too-small communicator over and
         * over again. */
        interCommIdx++;

        if (!done && *comm != MPI_COMM_NULL) {
            /* avoid leaking communicators */
            merr = MPI_Comm_free(comm);
            if (merr) MTestPrintError(merr);
        }

        /* cleanup for common temp objects */
        if (mcomm != MPI_COMM_NULL) {
            merr = MPI_Comm_free(&mcomm);
            if (merr) MTestPrintError( merr );
        }
        if (mcomm2 != MPI_COMM_NULL) {
            merr = MPI_Comm_free(&mcomm2);
            if (merr) MTestPrintError( merr );
        }
    }

    return interCommIdx;
}
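
/* Usage sketch in the style of the MPICH test suite (assumed; mpitest.h and
 * MTestFreeComm() are the companion header/helper normally used with this
 * generator): iterate over every intercommunicator whose combined size is at
 * least 2, run a test body on it, and free it again. */
#include "mpitest.h"

static int run_over_all_intercomms(void)
{
    MPI_Comm comm;
    int isLeft, errs = 0;

    while (MTestGetIntercomm(&comm, &isLeft, 2)) {
        if (comm == MPI_COMM_NULL)
            continue;   /* this shape was unavailable at the current size */
        /* ... exercise comm here, e.g. MPI_Comm_remote_size() checks ... */
        MTestFreeComm(&comm);
    }
    return errs;
}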
Example #9
0
/* 
 * Get an intracommunicator with at least min_size members.  If "allowSmaller"
 * is true, allow the communicator to be smaller than MPI_COMM_WORLD and
 * for this routine to return MPI_COMM_NULL for some values.  Returns 0 if
 * no more communicators are available.
 */
int MTestGetIntracommGeneral( MPI_Comm *comm, int min_size, int allowSmaller )
{
    int size, rank, merr;
    int done2, done=0;
    int isBasic = 0;

    /* The while loop allows us to skip communicators that are too small.
       MPI_COMM_NULL is always considered large enough */
    while (!done) {
	isBasic = 0;
	intraCommName = "";
	switch (intraCommIdx) {
	case 0:
	    *comm = MPI_COMM_WORLD;
	    isBasic = 1;
	    intraCommName = "MPI_COMM_WORLD";
	    break;
	case 1:
	    /* dup of world */
	    merr = MPI_Comm_dup(MPI_COMM_WORLD, comm );
	    if (merr) MTestPrintError( merr );
	    intraCommName = "Dup of MPI_COMM_WORLD";
	    break;
	case 2:
	    /* reverse ranks */
	    merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
	    if (merr) MTestPrintError( merr );
	    merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	    if (merr) MTestPrintError( merr );
	    merr = MPI_Comm_split( MPI_COMM_WORLD, 0, size-rank, comm );
	    if (merr) MTestPrintError( merr );
	    intraCommName = "Rank reverse of MPI_COMM_WORLD";
	    break;
	case 3:
	    /* subset of world, with reversed ranks */
	    merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
	    if (merr) MTestPrintError( merr );
	    merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	    if (merr) MTestPrintError( merr );
	    merr = MPI_Comm_split( MPI_COMM_WORLD, ((rank < size/2) ? 1 : MPI_UNDEFINED),
				   size-rank, comm );
	    if (merr) MTestPrintError( merr );
	    intraCommName = "Rank reverse of half of MPI_COMM_WORLD";
	    break;
	case 4:
	    *comm = MPI_COMM_SELF;
	    isBasic = 1;
	    intraCommName = "MPI_COMM_SELF";
	    break;

	    /* These next cases are communicators that include some
	       but not all of the processes */
	case 5:
	case 6:
	case 7:
	case 8:
	{
	    int newsize;
	    merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
	    if (merr) MTestPrintError( merr );
	    newsize = size - (intraCommIdx - 4);
	    
	    if (allowSmaller && newsize >= min_size) {
		merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
		if (merr) MTestPrintError( merr );
		merr = MPI_Comm_split( MPI_COMM_WORLD, rank < newsize, rank, 
				       comm );
		if (merr) MTestPrintError( merr );
		if (rank >= newsize) {
		    merr = MPI_Comm_free( comm );
		    if (merr) MTestPrintError( merr );
		    *comm = MPI_COMM_NULL;
		}
		else {
		    intraCommName = "Split of WORLD";
		}
	    }
	    else {
		/* Act like default */
		*comm = MPI_COMM_NULL;
		intraCommIdx = -1;
	    }
	}
	break;
	    
	    /* Other ideas: dup of self, cart comm, graph comm */
	default:
	    *comm = MPI_COMM_NULL;
	    intraCommIdx = -1;
	    break;
	}

	if (*comm != MPI_COMM_NULL) {
	    merr = MPI_Comm_size( *comm, &size );
	    if (merr) MTestPrintError( merr );
	    if (size >= min_size)
		done = 1;
	}
        else {
            intraCommName = "MPI_COMM_NULL";
            isBasic = 1;
            done = 1;
        }
        done2 = done;
        /* we are only done if all processes are done */
        MPI_Allreduce(&done2, &done, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);

        /* Advance the comm index whether we are done or not, otherwise we could
         * spin forever trying to allocate a too-small communicator over and
         * over again. */
        intraCommIdx++;

        if (!done && !isBasic && *comm != MPI_COMM_NULL) {
            /* avoid leaking communicators */
            merr = MPI_Comm_free(comm);
            if (merr) MTestPrintError(merr);
        }
    }

    return intraCommIdx;
}
Example #10
0
int main(int argc, char **argv)
{
    int i;
    char *env = NULL;
    simulation_data sim;
    simulation_data_ctor(&sim);

#ifdef PARALLEL
    /* Initialize MPI */
    MPI_Init(&argc, &argv);

    /* Create a new communicator. */
    if (MPI_Comm_dup(MPI_COMM_WORLD, &sim.par_comm) != MPI_SUCCESS)
        sim.par_comm = MPI_COMM_WORLD;

    MPI_Comm_rank (sim.par_comm, &sim.par_rank);
    MPI_Comm_size (sim.par_comm, &sim.par_size);
#endif

    /* Process command line arguments. */
    SimulationArguments(argc, argv);
    for(i = 1; i < argc; ++i)
    {
        if(strcmp(argv[i], "-nobounds") == 0)
            sim.doDomainBoundaries = 0;
    }

#ifdef PARALLEL
    /* Install callback functions for global communication. */
    VisItSetBroadcastIntFunction2(visit_broadcast_int_callback, (void*)&sim);
    VisItSetBroadcastStringFunction2(visit_broadcast_string_callback, (void*)&sim);

    /* Tell libsim whether the simulation is parallel. */
    VisItSetParallel(sim.par_size > 1);
    VisItSetParallelRank(sim.par_rank);

    /* Tell libsim which communicator to use. You must pass the address of
     * an MPI_Comm object.
     */
    VisItSetMPICommunicator((void *)&sim.par_comm);
#endif

    /* Only read the environment on rank 0. This could be done before MPI_Init
     * if we were using an MPI that does not let us spawn processes, but then
     * we would not know our processor rank.
     */
    if(sim.par_rank == 0)
        env = VisItGetEnvironment();

    /* Pass the environment to all other processors collectively. */
    VisItSetupEnvironment2(env);
    if(env != NULL)
        free(env);

    /* Write out .sim file that VisIt uses to connect. Only do it
     * on processor 0.
     */
    /* CHANGE 3 */
    if(sim.par_rank == 0)
    {
        /* Write out .sim file that VisIt uses to connect. */
        VisItInitializeSocketAndDumpSimFile(
#ifdef PARALLEL
            "domainbounds_par",
#else
            "domainbounds",
#endif
            "Demonstrates domain boundaries",
            "/path/to/where/sim/was/started",
            NULL, NULL, SimulationFilename());
    }

    simulation_data_create_domains(&sim);

    /* Call the main loop. */
    mainloop(&sim);

    simulation_data_dtor(&sim);
#ifdef PARALLEL
    MPI_Finalize();
#endif

    return 0;
}
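
#ifdef PARALLEL
/* A sketch (assumed; the exact parameter lists follow libsim's *Function2
 * callback style and should be checked against VisItControlInterface_V2.h)
 * of the broadcast callbacks registered above.  In the real source they are
 * defined before main(); they let libsim broadcast control data over the
 * simulation's own communicator, sim->par_comm. */
static int visit_broadcast_int_callback(int *value, int sender, void *cbdata)
{
    simulation_data *sim = (simulation_data *)cbdata;
    return MPI_Bcast(value, 1, MPI_INT, sender, sim->par_comm);
}

static int visit_broadcast_string_callback(char *str, int len, int sender,
                                           void *cbdata)
{
    simulation_data *sim = (simulation_data *)cbdata;
    return MPI_Bcast(str, len, MPI_CHAR, sender, sim->par_comm);
}
#endif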
Example #11
0
MTEST_THREAD_RETURN_TYPE test_idup(void *arg)
{
    int i;
    int size, rank;
    int ranges[1][3];
    int rleader, isLeft;
    int *excl = NULL;
    int tid = *(int *) arg;

    MPI_Group ingroup, high_group, even_group;
    MPI_Comm local_comm, inter_comm;
    MPI_Comm idupcomms[NUM_IDUPS];
    MPI_Request reqs[NUM_IDUPS];

    MPI_Comm outcomm;
    MPI_Comm incomm = comms[tid];

    MPI_Comm_size(incomm, &size);
    MPI_Comm_rank(incomm, &rank);
    MPI_Comm_group(incomm, &ingroup);

    /* Idup incomm multiple times */
    for (i = 0; i < NUM_IDUPS; i++) {
        MPI_Comm_idup(incomm, &idupcomms[i], &reqs[i]);
    }

    /* Overlap pending idups with various comm generation functions */
    /* Comm_dup */
    MPI_Comm_dup(incomm, &outcomm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_split */
    MPI_Comm_split(incomm, rank % 2, size - rank, &outcomm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create, high half of incomm */
    ranges[0][0] = size / 2;
    ranges[0][1] = size - 1;
    ranges[0][2] = 1;
    MPI_Group_range_incl(ingroup, 1, ranges, &high_group);
    MPI_Comm_create(incomm, high_group, &outcomm);
    MPI_Group_free(&high_group);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create_group, even ranks of incomm */
    /* exclude the odd ranks */
    excl = malloc((size / 2) * sizeof(int));
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    MPI_Group_excl(ingroup, size / 2, excl, &even_group);
    free(excl);

    if (rank % 2 == 0) {
        MPI_Comm_create_group(incomm, even_group, 0, &outcomm);
    }
    else {
        outcomm = MPI_COMM_NULL;
    }
    MPI_Group_free(&even_group);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Intercomm_create & Intercomm_merge */
    MPI_Comm_split(incomm, (rank < size / 2), rank, &local_comm);
    if (rank == 0) {
        rleader = size / 2;
    }
    else if (rank == size / 2) {
        rleader = 0;
    }
    else {
        rleader = -1;
    }
    isLeft = rank < size / 2;

    MPI_Intercomm_create(local_comm, 0, incomm, rleader, 99, &inter_comm);
    MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
    MPI_Comm_free(&local_comm);

    errs[tid] += MTestTestComm(inter_comm);
    MTestFreeComm(&inter_comm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
    for (i = 0; i < NUM_IDUPS; i++) {
        errs[tid] += MTestTestComm(idupcomms[i]);
        MPI_Comm_free(&idupcomms[i]);
    }
    MPI_Group_free(&ingroup);
    return NULL;
}
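
/* A launcher sketch (assumed; the original test drives test_idup() through
 * the suite's own thread helpers).  NUM_THREADS and the file-scope comms[]
 * and errs[] arrays are assumed to be declared next to test_idup(),
 * <pthread.h> to be included, and MTEST_THREAD_RETURN_TYPE to be void *
 * (the pthread configuration). */
int main(int argc, char **argv)
{
    int i, provided, tids[NUM_THREADS], toterrs = 0;
    pthread_t threads[NUM_THREADS];

    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided != MPI_THREAD_MULTIPLE)
        MPI_Abort(MPI_COMM_WORLD, 1);

    /* Each thread idups from its own duplicate of MPI_COMM_WORLD. */
    for (i = 0; i < NUM_THREADS; i++) {
        MPI_Comm_dup(MPI_COMM_WORLD, &comms[i]);
        tids[i] = i;
        pthread_create(&threads[i], NULL, test_idup, &tids[i]);
    }
    for (i = 0; i < NUM_THREADS; i++) {
        pthread_join(threads[i], NULL);
        toterrs += errs[i];
        MPI_Comm_free(&comms[i]);
    }
    MPI_Finalize();
    return toterrs != 0;
}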
Example #12
0
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  int i, j;
  int *granks;
  char processor_name[128];
  int namelen = 128;
  int buf[buf_size];
  MPI_Status status;
  MPI_Comm temp;
  MPI_Comm intercomm = MPI_COMM_NULL;
  MPI_Comm dcomms[DCOMM_CALL_COUNT];
  MPI_Group world_group, dgroup;
  int intersize, dnprocs[DCOMM_CALL_COUNT], drank[DCOMM_CALL_COUNT];
  int dims[TWOD], periods[TWOD], remain_dims[TWOD];
  int graph_index[] = { 2, 3, 4, 6 };
  int graph_edges[] = { 1, 3, 0, 3, 0, 2 };

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  MPI_Barrier (MPI_COMM_WORLD);

  /* probably want number to be higher... */
  if (nprocs < 4) {
      printf ("not enough tasks\n");
  }
  else {
    if (DCOMM_CALL_COUNT > 0) {
#ifdef RUN_COMM_DUP
      /* create all of the derived communicators... */
      /* simplest is created by MPI_Comm_dup... */
      MPI_Comm_dup (MPI_COMM_WORLD, &dcomms[0]);
#else
      dcomms[0] = MPI_COMM_NULL;
#endif
    }

    if (DCOMM_CALL_COUNT > 1) {
#ifdef RUN_COMM_CREATE
      /* use subset of MPI_COMM_WORLD group for MPI_Comm_create... */
      MPI_Comm_group (MPI_COMM_WORLD, &world_group);
      granks = (int *) malloc (sizeof(int) * (nprocs/2));
      for (i = 0; i < nprocs/2; i++)
	granks [i] = 2 * i;
      MPI_Group_incl (world_group, nprocs/2, granks, &dgroup);
      MPI_Comm_create (MPI_COMM_WORLD, dgroup, &dcomms[1]);
      MPI_Group_free (&world_group);
      MPI_Group_free (&dgroup);
      free (granks);
#else
      dcomms[1] = MPI_COMM_NULL;
#endif
    }

    if (DCOMM_CALL_COUNT > 2) {
#ifdef RUN_COMM_SPLIT
      /* split into thirds with inverted ranks... */
      MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &dcomms[2]);
#else
      dcomms[2] = MPI_COMM_NULL;
#endif
    }

#ifdef RUN_INTERCOMM_CREATE
    if ((DCOMM_CALL_COUNT < 3) || (dcomms[2] == MPI_COMM_NULL)) {  /* dcomms[2] exists only if DCOMM_CALL_COUNT > 2 */
      MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &temp);
    }
    else {
      temp = dcomms[2];
    }
    if (rank % 3) {
      MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD,
			    (((nprocs % 3) == 2) && ((rank % 3) == 2)) ?
			    nprocs - 1 : nprocs - (rank % 3) - (nprocs % 3),
			    INTERCOMM_CREATE_TAG, &intercomm);
    }
    if ((DCOMM_CALL_COUNT < 3) || (dcomms[2] == MPI_COMM_NULL)) {
      MPI_Comm_free (&temp);
    }
#endif

    if (DCOMM_CALL_COUNT > 3) {
#ifdef RUN_CART_CREATE
      /* create a 2 X nprocs/2 torus topology, allow reordering */
      dims[0] = 2;
      dims[1] = nprocs/2;
      periods[0] = periods[1] = 1;
      MPI_Cart_create (MPI_COMM_WORLD, TWOD, dims, periods, 1, &dcomms[3]);
#else
      dcomms[3] = MPI_COMM_NULL;
#endif
    }

    if (DCOMM_CALL_COUNT > 4) {
#ifdef RUN_GRAPH_CREATE
      /* create the graph on p.268 MPI: The Complete Reference... */
      MPI_Graph_create (MPI_COMM_WORLD, GRAPH_SZ,
			graph_index, graph_edges, 1, &dcomms[4]);
#else
      dcomms[4] = MPI_COMM_NULL;
#endif
    }

    if (DCOMM_CALL_COUNT > 5) {
#ifdef RUN_CART_SUB
#ifndef RUN_CART_CREATE
      /* need to make cartesian communicator temporarily... */
      /* create a 2 X nprocs/2 torus topology, allow reordering */
      dims[0] = 2;
      dims[1] = nprocs/2;
      periods[0] = periods[1] = 1;
      MPI_Cart_create (MPI_COMM_WORLD, TWOD, dims, periods, 1, &dcomms[3]);
#endif
      if (dcomms[3] != MPI_COMM_NULL) {
	/* create two 1 X nprocs/2 topologies... */
	remain_dims[0] = 0;
	remain_dims[1] = 1;
	MPI_Cart_sub (dcomms[3], remain_dims, &dcomms[5]);
#ifndef RUN_CART_CREATE
	/* free up temporarily created cartesian communicator... */
	MPI_Comm_free (&dcomms[3]);
#endif
      }
      else {
	dcomms[5] = MPI_COMM_NULL;
      }
#else
      dcomms[5] = MPI_COMM_NULL;
#endif
    }

    if (DCOMM_CALL_COUNT > 6) {
#ifdef RUN_INTERCOMM_MERGE
#ifndef RUN_INTERCOMM_CREATE
#ifndef RUN_COMM_SPLIT
      /* need to make split communicator temporarily... */
      /* split into thirds with inverted ranks... */
      MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &dcomms[2]);
#endif
#endif
      /* create an intercommunicator and merge it... */
      if (rank % 3) {
#ifndef RUN_INTERCOMM_CREATE
	MPI_Intercomm_create (dcomms[2], 0, MPI_COMM_WORLD,
			      (((nprocs % 3) == 2) && ((rank % 3) == 2)) ?
			      nprocs - 1 : nprocs - (rank % 3) - (nprocs % 3),
			      INTERCOMM_CREATE_TAG, &intercomm);
#endif

	MPI_Intercomm_merge (intercomm, ((rank % 3) == 1), &dcomms[6]);

#ifndef RUN_INTERCOMM_CREATE
	/* we are done with intercomm... */
	MPI_Comm_free (&intercomm);
#endif
      }
      else {
	dcomms[6] = MPI_COMM_NULL;
      }
#ifndef RUN_INTERCOMM_CREATE
#ifndef RUN_COMM_SPLIT
      if (dcomms[2] != MPI_COMM_NULL)
	/* free up temporarily created split communicator... */
	MPI_Comm_free (&dcomms[2]);
#endif
#endif
#else
      dcomms[6] = MPI_COMM_NULL;
#endif
    }

    /* get all of the sizes and ranks... */
    for (i = 0; i < DCOMM_CALL_COUNT; i++) {
      if (dcomms[i] != MPI_COMM_NULL) {
	MPI_Comm_size (dcomms[i], &dnprocs[i]);
	MPI_Comm_rank (dcomms[i], &drank[i]);
      }
      else {
	dnprocs[i] = 0;
	drank[i] = -1;
      }
    }

#ifdef RUN_INTERCOMM_CREATE
    /* get the intercomm remote size... */
    if (rank % 3) {
      MPI_Comm_remote_size (intercomm, &intersize);
    }
#endif

    /* do some point to point on all of the dcomms... */
    for (i = 0; i < DCOMM_CALL_COUNT; i++) {
      if (dnprocs[i] > 1) {
	if (drank[i] == 0) {
	  for (j = 1; j < dnprocs[i]; j++) {
	    MPI_Recv (buf, buf_size, MPI_INT, j, 0, dcomms[i], &status);
	  }
	}
	else {
	  memset (buf, 1, buf_size*sizeof(int));

	  MPI_Send (buf, buf_size, MPI_INT, 0, 0, dcomms[i]);
	}
      }
    }

#ifdef RUN_INTERCOMM_CREATE
    /* do some point to point on the intercomm... */
    if ((rank % 3) == 1) {
      for (j = 0; j < intersize; j++) {
	MPI_Recv (buf, buf_size, MPI_INT, j, 0, intercomm, &status);
      }
    }
    else if ((rank % 3) == 2) {
      for (j = 0; j < intersize; j++) {
	memset (buf, 1, buf_size*sizeof(int));

	MPI_Send (buf, buf_size, MPI_INT, j, 0, intercomm);
      }
    }
#endif

    /* do a bcast on all of the dcomms... */
    for (i = 0; i < DCOMM_CALL_COUNT; i++) {
      /* IBM's implementation gets an error with communication over MPI_COMM_NULL... */
      if (dnprocs[i] > 0)
	MPI_Bcast (buf, buf_size, MPI_INT, 0, dcomms[i]);
    }

    /* use any source receives... */
    for (i = 0; i < DCOMM_CALL_COUNT; i++) {
      if (dnprocs[i] > 1) {
	if (drank[i] == 0) {
	  for (j = 1; j < dnprocs[i]; j++) {
	    MPI_Recv (buf, buf_size, MPI_INT,
		      MPI_ANY_SOURCE, 0, dcomms[i], &status);
	  }
	}
	else {
	  memset (buf, 1, buf_size*sizeof(int));

	  MPI_Send (buf, buf_size, MPI_INT, 0, 0, dcomms[i]);
	}
      }
    }

#ifdef RUN_INTERCOMM_CREATE
    /* do any source receives on the intercomm... */
    if ((rank % 3) == 1) {
      for (j = 0; j < intersize; j++) {
	MPI_Recv (buf, buf_size, MPI_INT,
		  MPI_ANY_SOURCE, 0, intercomm, &status);
      }
    }
    else if ((rank % 3) == 2) {
      for (j = 0; j < intersize; j++) {
	memset (buf, 1, buf_size*sizeof(int));

	MPI_Send (buf, buf_size, MPI_INT, j, 0, intercomm);
      }
    }
#endif

    /* do a barrier on all of the dcomms... */
    for (i = 0; i < DCOMM_CALL_COUNT; i++) {
      /* IBM's implementation gets an error with communication over MPI_COMM_NULL... */
      if (dnprocs[i] > 0)
	MPI_Barrier (dcomms[i]);
    }

    /* free all of the derived communicators... */
    for (i = 0; i < DCOMM_CALL_COUNT; i++) {
      /* freeing MPI_COMM_NULL is explicitly defined as erroneous... */
      if (dnprocs[i] > 0)
	MPI_Comm_free (&dcomms[i]);
    }

#ifdef RUN_INTERCOMM_CREATE
    if (rank % 3)
      /* we are done with intercomm... */
      MPI_Comm_free (&intercomm);
#endif
  }

  MPI_Barrier (MPI_COMM_WORLD);

  MPI_Finalize ();
  printf ("(%d) Finished normally\n", rank);
  return 0;
}
int main( int argc, char* argv[] ) {
	MPI_Comm CommWorld;
	int rank;
	int numProcessors;
	int procToWatch;
	Dictionary* dictionary;
	AbstractContext* abstractContext;
	Stream* stream;
	
	/* Initialise MPI, get world info */
	MPI_Init( &argc, &argv );
	MPI_Comm_dup( MPI_COMM_WORLD, &CommWorld );
	MPI_Comm_size( CommWorld, &numProcessors );
	MPI_Comm_rank( CommWorld, &rank );
	
	BaseFoundation_Init( &argc, &argv );
	BaseIO_Init( &argc, &argv );
	BaseContainer_Init( &argc, &argv );
	BaseAutomation_Init( &argc, &argv );
	BaseExtensibility_Init( &argc, &argv );
	BaseContext_Init( &argc, &argv );
	stream = Journal_Register (Info_Type, "myStream");

	/* Redirect the error stream to stdout, so we can check warnings
	appear correctly */
	Stream_SetFileBranch( Journal_GetTypedStream( ErrorStream_Type ), stJournal->stdOut );

	if( argc >= 2 ) {
		procToWatch = atoi( argv[1] );
	}
	else {
		procToWatch = 0;
	}
	if( rank == procToWatch ) Journal_Printf( (void*) stream, "Watching rank: %i\n", rank );

	/* Read input */
	dictionary = Dictionary_New();
	dictionary->add( dictionary, "rank", Dictionary_Entry_Value_FromUnsignedInt( rank ) );
	dictionary->add( dictionary, "numProcessors", Dictionary_Entry_Value_FromUnsignedInt( numProcessors ) );
	
	/* Build the context */
	abstractContext = _AbstractContext_New( 
		sizeof(AbstractContext), 
		"TestContext", 
		MyDelete, 
		MyPrint, 
		NULL,
		NULL, 
		NULL, 
		_AbstractContext_Build, 
		_AbstractContext_Initialise, 
		_AbstractContext_Execute, 
		_AbstractContext_Destroy, 
		"context", 
		True, 
		MySetDt, 
		0, 
		10, 
		CommWorld, 
		dictionary );

	/* add hooks to existing entry points */
	ContextEP_Append( abstractContext, AbstractContext_EP_Dt, MyDt );

	if( rank == procToWatch ) {
		Stream* stream = Journal_Register( InfoStream_Type, AbstractContext_Type );
		Stg_Component_Build( abstractContext, 0 /* dummy */, False );
		Stg_Component_Initialise( abstractContext, 0 /* dummy */, False );
		Context_PrintConcise( abstractContext, stream );
		Stg_Component_Execute( abstractContext, 0 /* dummy */, False );
		Stg_Component_Destroy( abstractContext, 0 /* dummy */, False );
	}
	
	/* Stg_Class_Delete stuff */
	Stg_Class_Delete( abstractContext );
	Stg_Class_Delete( dictionary );
	
	BaseContext_Finalise();
	BaseExtensibility_Finalise();
	BaseAutomation_Finalise();
	BaseContainer_Finalise();
	BaseIO_Finalise();
	BaseFoundation_Finalise();
	
	/* Close off MPI */
	MPI_Finalize();
	
	return 0; /* success */
}
Example #14
0
int main(int argc, char *argv[])
{
    int errs = 0;
    int attrval;
    int i, key[32], keyval, saveKeyval;
    MPI_Comm comm, dupcomm;
    MTest_Init(&argc, &argv);

    while (MTestGetIntracomm(&comm, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;

        MPI_Comm_create_keyval(copy_fn, delete_fn, &keyval, (void *) 0);
        saveKeyval = keyval;    /* in case we need to free explicitly */
        attrval = 1;
        MPI_Comm_set_attr(comm, keyval, (void *) &attrval);
        /* See MPI-1, 5.7.1.  Freeing the keyval does not remove it if it
         * is in use in an attribute */
        MPI_Comm_free_keyval(&keyval);

        /* We create some dummy keyvals here in case the same keyval
         * is reused */
        for (i = 0; i < 32; i++) {
            MPI_Comm_create_keyval(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN, &key[i], (void *) 0);
        }

        MPI_Comm_dup(comm, &dupcomm);
        /* Check that the attribute was copied */
        if (attrval != 2) {
            errs++;
            printf("Attribute not incremented when comm dup'ed (%s)\n", MTestGetIntracommName());
        }
        MPI_Comm_free(&dupcomm);
        if (attrval != 1) {
            errs++;
            printf("Attribute not decremented when dupcomm %s freed\n", MTestGetIntracommName());
        }
        /* Check that the attribute was freed in the dupcomm */

        if (comm != MPI_COMM_WORLD && comm != MPI_COMM_SELF) {
            MPI_Comm_free(&comm);
            /* Check that the original attribute was freed */
            if (attrval != 0) {
                errs++;
                printf("Attribute not decremented when comm %s freed\n", MTestGetIntracommName());
            }
        }
        else {
            /* Explicitly delete the attributes from world and self */
            MPI_Comm_delete_attr(comm, saveKeyval);
        }
        /* Free those other keyvals */
        for (i = 0; i < 32; i++) {
            MPI_Comm_free_keyval(&key[i]);
        }
    }
    MTest_Finalize(errs);
    MPI_Finalize();

    /* The attributes on comm self and world were deleted by finalize
     * (see separate test) */

    return 0;

}
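/* The test above relies on copy_fn and delete_fn callbacks defined elsewhere
 * in the test source.  The following is only a sketch of callbacks that are
 * consistent with what the test checks (the integer attribute is incremented
 * on every dup and decremented on every free/delete); the real test may
 * differ in detail. */
static int copy_fn(MPI_Comm oldcomm, int keyval, void *extra_state,
                   void *attribute_val_in, void *attribute_val_out, int *flag)
{
    /* One more communicator now references the shared counter. */
    *(int *) attribute_val_in += 1;
    /* Both communicators share the same int, so pass the pointer through. */
    *(void **) attribute_val_out = attribute_val_in;
    *flag = 1;
    return MPI_SUCCESS;
}

static int delete_fn(MPI_Comm comm, int keyval, void *attribute_val,
                     void *extra_state)
{
    /* A communicator (or the attribute on it) is going away. */
    *(int *) attribute_val -= 1;
    return MPI_SUCCESS;
}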
int main( int argc, char* argv[] ) {
	MPI_Comm		CommWorld;
	int			rank;
	int			numProcessors;
	int			procToWatch;
	Dictionary*		dictionary;
	Snac_Context*		snacContext;
	Tetrahedra_Index	tetraIndex;
	Element_Index		elementIndex;
	double			minLengthScale;
	
	/* Initialise MPI, get world info */
	MPI_Init( &argc, &argv );
	Snac_Init( &argc, &argv );
	MPI_Comm_dup( MPI_COMM_WORLD, &CommWorld );
	MPI_Comm_size( CommWorld, &numProcessors );
	MPI_Comm_rank( CommWorld, &rank );
	if( argc >= 2 ) {
		procToWatch = atoi( argv[1] );
	}
	else {
		procToWatch = 0;
	}
	if( rank == procToWatch ) printf( "Watching rank: %i\n", rank );
	
	/* Read input */
	dictionary = Dictionary_New();
	dictionary->add( dictionary, "rank", Dictionary_Entry_Value_FromUnsignedInt( rank ) );
	dictionary->add( dictionary, "numProcessors", Dictionary_Entry_Value_FromUnsignedInt( numProcessors ) );
	dictionary->add( dictionary, "meshSizeI", Dictionary_Entry_Value_FromUnsignedInt( 4 ) );
	dictionary->add( dictionary, "meshSizeJ", Dictionary_Entry_Value_FromUnsignedInt( 4 ) );
	dictionary->add( dictionary, "meshSizeK", Dictionary_Entry_Value_FromUnsignedInt( 4 ) );
	dictionary->add( dictionary, "minX", Dictionary_Entry_Value_FromDouble( 0.0f ) );
	dictionary->add( dictionary, "minY", Dictionary_Entry_Value_FromDouble( -300.0f ) );
	dictionary->add( dictionary, "minZ", Dictionary_Entry_Value_FromDouble( 0.0f ) );
	dictionary->add( dictionary, "maxX", Dictionary_Entry_Value_FromDouble( 300.0f ) );
	dictionary->add( dictionary, "maxY", Dictionary_Entry_Value_FromDouble( 0.0f ) );
	dictionary->add( dictionary, "maxZ", Dictionary_Entry_Value_FromDouble( 300.0f ) );

	/* Build the context */
	snacContext = Snac_Context_New( 0.0f, 10.0f, sizeof(Snac_Node), sizeof(Snac_Element), CommWorld, dictionary );
	
	/* Construction phase -----------------------------------------------------------------------------------------------*/
	Stg_Component_Construct( snacContext, 0 /* dummy */, &snacContext, True );
	
	/* Building phase ---------------------------------------------------------------------------------------------------*/
	Stg_Component_Build( snacContext, 0 /* dummy */, False );
	
	/* Initialisaton phase ----------------------------------------------------------------------------------------------*/
	Stg_Component_Initialise( snacContext, 0 /* dummy */, False );

	/* Work out the first element's tetrahedra values, and print them. */
	printf( "Element: 0, Coords: (%g %g %g), (%g %g %g), (%g %g %g), (%g %g %g), (%g %g %g) (%g %g %g) (%g %g %g) (%g %g %g)\n",
		Snac_Element_NodeCoord( snacContext, 0, 0 )[0],
		Snac_Element_NodeCoord( snacContext, 0, 0 )[1],
		Snac_Element_NodeCoord( snacContext, 0, 0 )[2],
		Snac_Element_NodeCoord( snacContext, 0, 1 )[0],
		Snac_Element_NodeCoord( snacContext, 0, 1 )[1],
		Snac_Element_NodeCoord( snacContext, 0, 1 )[2],
		Snac_Element_NodeCoord( snacContext, 0, 3 )[0],
		Snac_Element_NodeCoord( snacContext, 0, 3 )[1],
		Snac_Element_NodeCoord( snacContext, 0, 3 )[2],
		Snac_Element_NodeCoord( snacContext, 0, 2 )[0],
		Snac_Element_NodeCoord( snacContext, 0, 2 )[1],
		Snac_Element_NodeCoord( snacContext, 0, 2 )[2],
		Snac_Element_NodeCoord( snacContext, 0, 4 )[0],
		Snac_Element_NodeCoord( snacContext, 0, 4 )[1],
		Snac_Element_NodeCoord( snacContext, 0, 4 )[2],
		Snac_Element_NodeCoord( snacContext, 0, 5 )[0],
		Snac_Element_NodeCoord( snacContext, 0, 5 )[1],
		Snac_Element_NodeCoord( snacContext, 0, 5 )[2],
		Snac_Element_NodeCoord( snacContext, 0, 7 )[0],
		Snac_Element_NodeCoord( snacContext, 0, 7 )[1],
		Snac_Element_NodeCoord( snacContext, 0, 7 )[2],
		Snac_Element_NodeCoord( snacContext, 0, 6 )[0],
		Snac_Element_NodeCoord( snacContext, 0, 6 )[1],
		Snac_Element_NodeCoord( snacContext, 0, 6 )[2] );

	/* For each element, compare to the first element's */
	for( elementIndex = 0; elementIndex < snacContext->mesh->elementLocalCount; elementIndex++ ) {
		Bool error;
		
		Snac_UpdateElementMomentum( (Context*)snacContext, elementIndex, &minLengthScale );
		
		if( elementIndex == 0 ) {
			for( tetraIndex = 0; tetraIndex < Tetrahedra_Count; tetraIndex++ ) {
				Tetrahedra_Surface_Index faceIndex;

				for( faceIndex = 0; faceIndex < Tetrahedra_Surface_Count; faceIndex++ ) {
					printf( "Element: 0, Tetrahedra: %u, Face: %u, Normal: %g %g %g\n", 
						tetraIndex, 
						faceIndex, 
						Snac_Element_At( snacContext, elementIndex )->tetra[tetraIndex].surface[faceIndex].normal[0],
						Snac_Element_At( snacContext, elementIndex )->tetra[tetraIndex].surface[faceIndex].normal[1],
						Snac_Element_At( snacContext, elementIndex )->tetra[tetraIndex].surface[faceIndex].normal[2] );
				}
			}
		}
		else {
			for( tetraIndex = 0, error = False; tetraIndex < Tetrahedra_Count; tetraIndex++ ) {
				Tetrahedra_Surface_Index faceIndex;

				for( faceIndex = 0; faceIndex < Tetrahedra_Surface_Count; faceIndex++ ) {
					if( Snac_Element_At( snacContext, elementIndex )->tetra[tetraIndex].surface[faceIndex].normal[0] !=
						Snac_Element_At( snacContext, 0 )->tetra[tetraIndex].surface[faceIndex].normal[0] || 
						Snac_Element_At( snacContext, elementIndex )->tetra[tetraIndex].surface[faceIndex].normal[1] !=
						Snac_Element_At( snacContext, 0 )->tetra[tetraIndex].surface[faceIndex].normal[1] ||
						Snac_Element_At( snacContext, elementIndex )->tetra[tetraIndex].surface[faceIndex].normal[2] !=
						Snac_Element_At( snacContext, 0 )->tetra[tetraIndex].surface[faceIndex].normal[2] )
					{
						printf( "Element: %u, Tetrahedra: %u, Face: %u, Normal: %g %g %g\n", 
							elementIndex, 
							tetraIndex, 
							faceIndex, 
							Snac_Element_At( snacContext, elementIndex )->tetra[tetraIndex].surface[faceIndex].normal[0],
							Snac_Element_At( snacContext, elementIndex )->tetra[tetraIndex].surface[faceIndex].normal[1],
							Snac_Element_At( snacContext, elementIndex )->tetra[tetraIndex].surface[faceIndex].normal[2] );
						error = True;
					}
				}
			}
			if( error ) {
				printf( "Element: %u, Coords: (%g %g %g), (%g %g %g), (%g %g %g), (%g %g %g), (%g %g %g) (%g %g %g) (%g %g %g) (%g %g %g)\n",
					elementIndex,
					Snac_Element_NodeCoord( snacContext, elementIndex, 0 )[0],
					Snac_Element_NodeCoord( snacContext, elementIndex, 0 )[1],
					Snac_Element_NodeCoord( snacContext, elementIndex, 0 )[2],
					Snac_Element_NodeCoord( snacContext, elementIndex, 1 )[0],
					Snac_Element_NodeCoord( snacContext, elementIndex, 1 )[1],
					Snac_Element_NodeCoord( snacContext, elementIndex, 1 )[2],
					Snac_Element_NodeCoord( snacContext, elementIndex, 2 )[0],
					Snac_Element_NodeCoord( snacContext, elementIndex, 2 )[1],
					Snac_Element_NodeCoord( snacContext, elementIndex, 2 )[2],
					Snac_Element_NodeCoord( snacContext, elementIndex, 3 )[0],
					Snac_Element_NodeCoord( snacContext, elementIndex, 3 )[1],
					Snac_Element_NodeCoord( snacContext, elementIndex, 3 )[2],
					Snac_Element_NodeCoord( snacContext, elementIndex, 4 )[0],
					Snac_Element_NodeCoord( snacContext, elementIndex, 4 )[1],
					Snac_Element_NodeCoord( snacContext, elementIndex, 4 )[2],
					Snac_Element_NodeCoord( snacContext, elementIndex, 5 )[0],
					Snac_Element_NodeCoord( snacContext, elementIndex, 5 )[1],
					Snac_Element_NodeCoord( snacContext, elementIndex, 5 )[2],
					Snac_Element_NodeCoord( snacContext, elementIndex, 6 )[0],
					Snac_Element_NodeCoord( snacContext, elementIndex, 6 )[1],
					Snac_Element_NodeCoord( snacContext, elementIndex, 6 )[2],
					Snac_Element_NodeCoord( snacContext, elementIndex, 7 )[0],
					Snac_Element_NodeCoord( snacContext, elementIndex, 7 )[1],
					Snac_Element_NodeCoord( snacContext, elementIndex, 7 )[2] );
			}
			else {
				printf( "Element %u: has same values as element 0.\n", elementIndex );
			}		
		}
	}
	
	/* Stg_Class_Delete stuff */
	Stg_Class_Delete( snacContext );
	Stg_Class_Delete( dictionary );

	/* Close off MPI */
	MPI_Finalize();

	return 0; /* success */
}
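/* The coordinate dumps above pass 24 arguments to a single printf.  A
 * hypothetical helper like the one below (PrintElementCoords is not part of
 * the original test) prints the same information with a loop over the eight
 * nodes; note that it uses node order 0..7 rather than the 0,1,3,2,...
 * ordering of the element-0 printout. */
static void PrintElementCoords( Snac_Context* snacContext, Element_Index elementIndex ) {
	unsigned int node;

	printf( "Element: %u, Coords:", elementIndex );
	for( node = 0; node < 8; node++ ) {
		printf( " (%g %g %g)",
			Snac_Element_NodeCoord( snacContext, elementIndex, node )[0],
			Snac_Element_NodeCoord( snacContext, elementIndex, node )[1],
			Snac_Element_NodeCoord( snacContext, elementIndex, node )[2] );
	}
	printf( "\n" );
}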
Example #16
0
 /*@@
   @routine    Setup_nProcs
   @date       Tue Apr 18 15:21:42 2000
   @author     Tom Goodale
   @desc
               Set up the PUGH_COMM_WORLD communicator and make sure that CCTK_INT
               and CCTK_REAL sizes are the same on all processors.
               Define MPI datatypes for CCTK_REAL and CCTK_COMPLEX variables.
   @enddesc

   @var        pughGH
   @vdesc      Pointer to PUGH grid hierarchy
   @vtype      pGH *
   @vio        in
   @endvar
   @var        dim
   @vdesc      dimension of processor topology to set up
   @vtype      int
   @vio        in
   @endvar

   @returntype int
   @returndesc
               0 for success
   @endreturndesc
@@*/
static int Setup_nProcs (pGH *pughGH, int dim)
{
#ifdef CCTK_MPI
  CCTK_REAL4 sum_sizes [2], compiled_sizes [2];


  /* Set up my communicator. This would allow us to run pugh on
     a subset of processors at a later date if, for instance, we
     were using panda or what not.
   */
  CACTUS_MPI_ERROR (MPI_Comm_dup (MPI_COMM_WORLD, &pughGH->PUGH_COMM_WORLD));

  CACTUS_MPI_ERROR (MPI_Comm_size (pughGH->PUGH_COMM_WORLD, &pughGH->nprocs));
  CACTUS_MPI_ERROR (MPI_Comm_rank (pughGH->PUGH_COMM_WORLD, &pughGH->myproc));

  /* check that all executables use the same integer and fp precision
     within a metacomputing environment
     NOTE: We cannot use CCTK_INT4s in MPI_Allreduce() since
           (although they have the same size) they might correspond to
           different MPI datatypes.
           CCTK_REAL4 should always refer to MPI_FLOAT. */
  compiled_sizes [0] = sizeof (CCTK_INT);
  compiled_sizes [1] = sizeof (CCTK_REAL);
  CACTUS_MPI_ERROR (MPI_Allreduce (compiled_sizes, sum_sizes, 2, PUGH_MPI_REAL4,
                                   MPI_SUM, pughGH->PUGH_COMM_WORLD));
  if (compiled_sizes [0] * pughGH->nprocs != sum_sizes [0])
  {
    CCTK_WARN (0, "Cannot run executables with different precision for "
                  "CCTK_INTs within a metacomputing environment !\n"
                  "Please configure with unique CCTK_INTEGER_PRECISION !");
  }

  if (compiled_sizes [1] * pughGH->nprocs != sum_sizes [1])
  {
    CCTK_WARN (0, "Cannot run executables with different precision for "
                  "CCTK_REALs within a metacomputing environment !\n"
                  "Please configure with unique CCTK_REAL_PRECISION !");
  }

  /* define the complex datatype as a concatenation of 2 PUGH_MPI_REALs */
  CACTUS_MPI_ERROR (MPI_Type_contiguous (2, PUGH_MPI_REAL,
                                         &pughGH->PUGH_mpi_complex));
  CACTUS_MPI_ERROR (MPI_Type_commit (&pughGH->PUGH_mpi_complex));

  /* ditto for fixed-precision reals */
#ifdef CCTK_REAL4
  CACTUS_MPI_ERROR (MPI_Type_contiguous (2, PUGH_MPI_REAL4,
                                         &pughGH->PUGH_mpi_complex8));
  CACTUS_MPI_ERROR (MPI_Type_commit (&pughGH->PUGH_mpi_complex8));
#endif
#ifdef CCTK_REAL8
  CACTUS_MPI_ERROR (MPI_Type_contiguous (2, PUGH_MPI_REAL8,
                                         &pughGH->PUGH_mpi_complex16));
  CACTUS_MPI_ERROR (MPI_Type_commit (&pughGH->PUGH_mpi_complex16));
#endif
#ifdef CCTK_REAL16
  CACTUS_MPI_ERROR (MPI_Type_contiguous (2, PUGH_MPI_REAL16,
                                         &pughGH->PUGH_mpi_complex32));
  CACTUS_MPI_ERROR (MPI_Type_commit (&pughGH->PUGH_mpi_complex32));
#endif

#else
  pughGH->nprocs = 1;
  pughGH->myproc = 0;
#endif

  pughGH->dim = dim;

  return (0);
}
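/* Setup_nProcs checks type-size agreement across ranks by summing the sizes
 * with MPI_Allreduce and comparing against size * nprocs.  A standalone
 * sketch of the same idea (using min/max instead of a sum, and a plain MPI
 * type rather than the CCTK ones) might look like this; it is an
 * illustration only, not Cactus code. */
static int check_real_size (MPI_Comm comm)
{
  int size = (int) sizeof (double);
  int min_size, max_size;

  MPI_Allreduce (&size, &min_size, 1, MPI_INT, MPI_MIN, comm);
  MPI_Allreduce (&size, &max_size, 1, MPI_INT, MPI_MAX, comm);

  /* If the sizes differ anywhere, every rank sees min != max. */
  return (min_size == max_size) ? 0 : -1;
}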
Example #17
0
int main(int argc, char *argv[])
{
  PetscErrorCode ierr;
  PetscBool      flg;
  PetscMPIInt    rank, size;
  int            i, status;
  idx_t          ni,isize,*vtxdist, *xadj, *adjncy, *vwgt, *part;
  idx_t          wgtflag=0, numflag=0, ncon=1, ndims=3, edgecut=0;
  idx_t          options[5];
  real_t         *xyz, *tpwgts, ubvec[1];
  MPI_Comm       comm;
  FILE           *fp;
  char           fname[PETSC_MAX_PATH_LEN],prefix[PETSC_MAX_PATH_LEN] = "";

  PetscInitialize(&argc,&argv,NULL,help);
#if defined(PETSC_USE_64BIT_INDICES)
  ierr = PetscPrintf(PETSC_COMM_WORLD,"This example only works with 32 bit indices\n");
  PetscFinalize();
  return 0;
#endif
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  MPI_Comm_size(PETSC_COMM_WORLD,&size);

  ierr = PetscOptionsBegin(PETSC_COMM_WORLD,NULL,"Parmetis test options","");CHKERRQ(ierr);
  ierr = PetscOptionsString("-prefix","Path and prefix of test file","",prefix,prefix,sizeof(prefix),&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_WORLD,PETSC_ERR_USER,"Must specify -prefix");CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = PetscMalloc1(size+1,&vtxdist);CHKERRQ(ierr);

  ierr = PetscSNPrintf(fname,sizeof(fname),"%s.%d.graph",prefix,rank);CHKERRQ(ierr);

  ierr = PetscFOpen(PETSC_COMM_SELF,fname,"r",&fp);CHKERRQ(ierr);

  fread(vtxdist, sizeof(idx_t), size+1, fp);

  ni = vtxdist[rank+1]-vtxdist[rank];

  ierr = PetscMalloc1(ni+1,&xadj);CHKERRQ(ierr);

  fread(xadj, sizeof(idx_t), ni+1, fp);

  ierr = PetscMalloc1(xadj[ni],&adjncy);CHKERRQ(ierr);

  for (i=0; i<ni; i++) fread(&adjncy[xadj[i]], sizeof(idx_t), xadj[i+1]-xadj[i], fp);

  ierr = PetscFClose(PETSC_COMM_SELF,fp);CHKERRQ(ierr);

  ierr = PetscSNPrintf(fname,sizeof(fname),"%s.%d.graph.xyz",prefix,rank);CHKERRQ(ierr);
  ierr = PetscFOpen(PETSC_COMM_SELF,fname,"r",&fp);CHKERRQ(ierr);

  ierr = PetscMalloc3(ni*ndims,&xyz,ni,&part,size,&tpwgts);CHKERRQ(ierr);

  fread(xyz, sizeof(real_t), ndims*ni, fp);
  ierr = PetscFClose(PETSC_COMM_SELF,fp);CHKERRQ(ierr);

  vwgt = NULL;

  for (i = 0; i < size; i++) tpwgts[i] = 1. / size;
  isize = size;

  ubvec[0]   = 1.05;
  options[0] = 1;
  options[1] = 2;
  options[2] = 15;
  options[3] = 0;
  options[4] = 0;

  ierr   = MPI_Comm_dup(MPI_COMM_WORLD, &comm);CHKERRQ(ierr);
  status = ParMETIS_V3_PartGeomKway(vtxdist, xadj, adjncy, vwgt,
                                    NULL, &wgtflag, &numflag, &ndims, xyz, &ncon, &isize, tpwgts, ubvec,
                                    options, &edgecut, part, &comm);CHKERRQPARMETIS(status);
  ierr = MPI_Comm_free(&comm);CHKERRQ(ierr);

  ierr = PetscFree(vtxdist);CHKERRQ(ierr);
  ierr = PetscFree(xadj);CHKERRQ(ierr);
  ierr = PetscFree(adjncy);CHKERRQ(ierr);
  ierr = PetscFree3(xyz,part,tpwgts);CHKERRQ(ierr);
  PetscFinalize();
  return 0;
}
Example #18
0
int OSPU_Comm_split_node(MPI_Comm oldcomm, MPI_Comm * newcomm)
{
    int rc = MPI_SUCCESS;

#if MPI_VERSION >= 3

    rc = MPI_Comm_split_type(oldcomm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, newcomm);
    if (rc!=MPI_SUCCESS) return rc;

#elif defined(MPICH2) && (MPICH2_NUMVERSION>10500000)

    rc = MPIX_Comm_split_type(oldcomm, MPIX_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, newcomm);
    if (rc!=MPI_SUCCESS) return rc;

#else

    /* This code was authored by Jim Dinan */

    char my_name[MPI_MAX_PROCESSOR_NAME];
    MPI_Comm node_comm = MPI_COMM_NULL;
    MPI_Comm parent_comm;
    int len;

    /* Dup so we don't leak communicators */
    rc = MPI_Comm_dup(oldcomm, &parent_comm);
    if (rc!=MPI_SUCCESS) return rc;

    rc = MPI_Get_processor_name(my_name, &len);
    if (rc!=MPI_SUCCESS) return rc;

    while (node_comm == MPI_COMM_NULL)
    {
        char root_name[MPI_MAX_PROCESSOR_NAME];
        int  rank;
        MPI_Comm old_parent;

        rc = MPI_Comm_rank(parent_comm, &rank);
        if (rc!=MPI_SUCCESS) return rc;

        if (rank == 0)
        {
            rc = MPI_Bcast(my_name, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, parent_comm);
            if (rc!=MPI_SUCCESS) return rc;
            strncpy(root_name, my_name, MPI_MAX_PROCESSOR_NAME);
        } 
        else 
        {
            rc = MPI_Bcast(root_name, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, parent_comm);
            if (rc!=MPI_SUCCESS) return rc;
        }

        old_parent = parent_comm;

        if (strncmp(my_name, root_name, MPI_MAX_PROCESSOR_NAME) == 0)
        {
            /* My group splits off, I'm done after this */
            rc = MPI_Comm_split(parent_comm, 1, rank, &node_comm);
            if (rc!=MPI_SUCCESS) return rc;
        }
        else
        {
            /* My group keeps going, separate from the others */
            rc = MPI_Comm_split(parent_comm, 0, rank, &parent_comm);
            if (rc!=MPI_SUCCESS) return rc;
        }

        /* Old parent is no longer needed */
        rc = MPI_Comm_free(&old_parent);
        if (rc!=MPI_SUCCESS) return rc;
    }

    *newcomm = node_comm;

#endif

    return MPI_SUCCESS;
}
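/* Hedged usage sketch for the routine above (not part of the original
 * source): obtain a per-node communicator and report each process's
 * node-local rank. */
#include <mpi.h>
#include <stdio.h>

int OSPU_Comm_split_node(MPI_Comm oldcomm, MPI_Comm * newcomm);

int main(int argc, char* argv[])
{
    int rank_world, rank_node;
    MPI_Comm node_comm = MPI_COMM_NULL;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);

    if (OSPU_Comm_split_node(MPI_COMM_WORLD, &node_comm) == MPI_SUCCESS) {
        MPI_Comm_rank(node_comm, &rank_node);
        printf("world rank %d is node-local rank %d\n", rank_world, rank_node);
        MPI_Comm_free(&node_comm);
    }

    MPI_Finalize();
    return 0;
}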
Example #19
0
int Zoltan_PHG_Initialize_Params(
  ZZ *zz,   /* the Zoltan data structure */
  float *part_sizes,
  PHGPartParams *hgp
)
{
  int err = ZOLTAN_OK;
  char *yo = "Zoltan_PHG_Initialize_Params";
  int nProc;
  int usePrimeComm;
  MPI_Comm communicator;
  char add_obj_weight[MAX_PARAM_STRING_LEN];
  char edge_weight_op[MAX_PARAM_STRING_LEN];
  char cut_objective[MAX_PARAM_STRING_LEN];
  char *package = hgp->hgraph_pkg; 
  char *method = hgp->hgraph_method;
  char buf[1024];

  memset(hgp, 0, sizeof(*hgp)); /* in the future if we forget to initialize
                                   another param at least it will be 0 */
  
  Zoltan_Bind_Param(PHG_params, "HYPERGRAPH_PACKAGE", &hgp->hgraph_pkg);
  Zoltan_Bind_Param(PHG_params, "PHG_MULTILEVEL", &hgp->useMultilevel);
  Zoltan_Bind_Param(PHG_params, "PHG_FROM_GRAPH_METHOD", hgp->convert_str);  
  Zoltan_Bind_Param(PHG_params, "PHG_OUTPUT_LEVEL", &hgp->output_level);
  Zoltan_Bind_Param(PHG_params, "FINAL_OUTPUT", &hgp->final_output); 
  Zoltan_Bind_Param(PHG_params, "CHECK_GRAPH", &hgp->check_graph);   
  Zoltan_Bind_Param(PHG_params, "CHECK_HYPERGRAPH", &hgp->check_graph);   
  Zoltan_Bind_Param(PHG_params, "PHG_NPROC_VERTEX", &hgp->nProc_x_req);
  Zoltan_Bind_Param(PHG_params, "PHG_NPROC_EDGE", &hgp->nProc_y_req);
  Zoltan_Bind_Param(PHG_params, "PHG_COARSENING_LIMIT", &hgp->redl);
  Zoltan_Bind_Param(PHG_params, "PHG_COARSENING_NCANDIDATE", &hgp->nCand);
  Zoltan_Bind_Param(PHG_params, "PHG_COARSENING_METHOD", hgp->redm_str);
  Zoltan_Bind_Param(PHG_params, "PHG_COARSENING_METHOD_FAST", hgp->redm_fast);
  Zoltan_Bind_Param(PHG_params, "PHG_VERTEX_VISIT_ORDER", &hgp->visit_order);
  Zoltan_Bind_Param(PHG_params, "PHG_EDGE_SCALING", &hgp->edge_scaling);
  Zoltan_Bind_Param(PHG_params, "PHG_VERTEX_SCALING", &hgp->vtx_scaling);
  Zoltan_Bind_Param(PHG_params, "PHG_REFINEMENT_METHOD", hgp->refinement_str);
  Zoltan_Bind_Param(PHG_params, "PHG_DIRECT_KWAY", &hgp->kway);
  Zoltan_Bind_Param(PHG_params, "PHG_REFINEMENT_LOOP_LIMIT", 
                                &hgp->fm_loop_limit);
  Zoltan_Bind_Param(PHG_params, "PHG_REFINEMENT_MAX_NEG_MOVE", 
                                &hgp->fm_max_neg_move);  
  Zoltan_Bind_Param(PHG_params, "PHG_REFINEMENT_QUALITY", 
                                &hgp->refinement_quality);  
  Zoltan_Bind_Param(PHG_params, "PHG_COARSEPARTITION_METHOD", 
                                 hgp->coarsepartition_str);
  Zoltan_Bind_Param(PHG_params, "PHG_USE_TIMERS",
                                 (void*) &hgp->use_timers);  
  Zoltan_Bind_Param(PHG_params, "USE_TIMERS",
                                 (void*) &hgp->use_timers);  
  Zoltan_Bind_Param(PHG_params, "PHG_EDGE_SIZE_THRESHOLD",
                                 (void*) &hgp->EdgeSizeThreshold);  
  Zoltan_Bind_Param(PHG_params, "PHG_MATCH_EDGE_SIZE_THRESHOLD",
                                 (void*) &hgp->MatchEdgeSizeThreshold);  
  Zoltan_Bind_Param(PHG_params, "PHG_BAL_TOL_ADJUSTMENT",
                                 (void*) &hgp->bal_tol_adjustment);  
  Zoltan_Bind_Param(PHG_params, "PARKWAY_SERPART",
                                 (void *) hgp->parkway_serpart);
  Zoltan_Bind_Param(PHG_params, "PHG_CUT_OBJECTIVE",
                                 (void *) &cut_objective);
  Zoltan_Bind_Param(PHG_params, "ADD_OBJ_WEIGHT",
                                 (void *) add_obj_weight);
  Zoltan_Bind_Param(PHG_params, "PHG_EDGE_WEIGHT_OPERATION",
                                 (void *) edge_weight_op);
  Zoltan_Bind_Param(PHG_params, "PHG_RANDOMIZE_INPUT",
                                 (void*) &hgp->RandomizeInitDist);  
  Zoltan_Bind_Param(PHG_params, "PHG_PROCESSOR_REDUCTION_LIMIT",
		                 (void*) &hgp->ProRedL);
  Zoltan_Bind_Param(PHG_params, "PHG_REPART_MULTIPLIER",
		                 (void*) &hgp->RepartMultiplier);
  Zoltan_Bind_Param(PHG_params, "PATOH_ALLOC_POOL0",
                                 (void*) &hgp->patoh_alloc_pool0);
  Zoltan_Bind_Param(PHG_params, "PATOH_ALLOC_POOL1",
                                 (void*) &hgp->patoh_alloc_pool1);
  
  
  /* Set default values */
  strncpy(hgp->hgraph_pkg,           "phg", MAX_PARAM_STRING_LEN);
  strncpy(hgp->convert_str,    "neighbors", MAX_PARAM_STRING_LEN);
  strncpy(hgp->redm_str,             "agg", MAX_PARAM_STRING_LEN);
  hgp->match_array_type = 0;
  strncpy(hgp->redm_fast,          "l-ipm", MAX_PARAM_STRING_LEN);
  strncpy(hgp->coarsepartition_str, "auto", MAX_PARAM_STRING_LEN);
  strncpy(hgp->refinement_str,       "fm2", MAX_PARAM_STRING_LEN);
  strncpy(hgp->parkway_serpart,    "patoh", MAX_PARAM_STRING_LEN);
  strncpy(cut_objective,    "connectivity", MAX_PARAM_STRING_LEN);
  strncpy(add_obj_weight,           "none", MAX_PARAM_STRING_LEN);
  strncpy(edge_weight_op,            "max", MAX_PARAM_STRING_LEN);
  /* LB.Approach is initialized to "REPARTITION", and set in Set_Key_Params  */
  strncpy(hgp->hgraph_method,  zz->LB.Approach, MAX_PARAM_STRING_LEN);
  if (!strcasecmp(zz->LB.Approach,"REFINE")) 
    hgp->useMultilevel = 0;
  else
    hgp->useMultilevel = 1;

  hgp->use_timers = 0;
  hgp->LocalCoarsePartition = 0;
  hgp->edge_scaling = 0;
  hgp->vtx_scaling = 0;
  hgp->vtx_scal_size = 0;
  hgp->vtx_scal = NULL;  /* Array for storing vertex degree scale vector. 
                            Should perhaps go in hg structure, not the
                            param struct? */
  hgp->connectivity_cut = 1; 
  hgp->visit_order = 0;  /* Random */
  hgp->check_graph = 0;
  hgp->bal_tol = zz->LB.Imbalance_Tol[0]; /* Make vector for multiconstraint */
  hgp->bal_tol_adjustment = 0.7;
  hgp->nCand = 100;
  hgp->redl = MAX(2*zz->LB.Num_Global_Parts, 100);
  hgp->output_level = PHG_DEBUG_NONE;
  hgp->final_output = 0;
  hgp->nProc_x_req = -1;
  hgp->nProc_y_req = -1;
  hgp->kway = 0;
  hgp->fm_loop_limit = 10;
  hgp->fm_max_neg_move = 250;  
  hgp->refinement_quality = 1;
  hgp->RandomizeInitDist = 0;
  hgp->EdgeSizeThreshold = 0.25;
  hgp->MatchEdgeSizeThreshold = 500;  
  hgp->hybrid_keep_factor = 0.;
  hgp->ProRedL = 0.0; /* UVCUVC: CHECK default set to 0 until we run more experiments */
  hgp->RepartMultiplier = 100.;
  hgp->patoh_alloc_pool0 = 0;
  hgp->patoh_alloc_pool1 = 0;
  hgp->UseFixedVtx = 0;
  hgp->UsePrefPart = 0;
  
  /* Get application values of parameters. */
  err = Zoltan_Assign_Param_Vals(zz->Params, PHG_params, zz->Debug_Level, 
          zz->Proc, zz->Debug_Proc);
  
  nProc = zz->Num_Proc;
  usePrimeComm = 0;

  /* Parse add_obj_weight parameter */

  if (!strcasecmp(add_obj_weight, "none")) {
    hgp->add_obj_weight = PHG_ADD_NO_WEIGHT;
    hgp->part_sizes = part_sizes;
  }
  else if (zz->Obj_Weight_Dim > 0) {
    /* Do not add_obj_weight until multiconstraint PHG is implemented */
    ZOLTAN_PRINT_WARN(zz->Proc, yo,
     "Both application supplied *and* ADD_OBJ_WEIGHT "
     "calculated vertex weights were provided.");
    ZOLTAN_PRINT_WARN(zz->Proc, yo,
      "Only the first application supplied weight per vertex will be used.");
    hgp->add_obj_weight = PHG_ADD_NO_WEIGHT;
    hgp->part_sizes = part_sizes;
  } 
  else {
    if (!strcasecmp(add_obj_weight, "vertices")){
      hgp->add_obj_weight = PHG_ADD_UNIT_WEIGHT;
    } else if (!strcasecmp(add_obj_weight, "unit")){
      hgp->add_obj_weight = PHG_ADD_UNIT_WEIGHT;
    } else if (!strcasecmp(add_obj_weight, "vertex degree")){
      hgp->add_obj_weight = PHG_ADD_PINS_WEIGHT;
    } else if (!strcasecmp(add_obj_weight, "nonzeros")){
      hgp->add_obj_weight = PHG_ADD_PINS_WEIGHT;
    } else if (!strcasecmp(add_obj_weight, "pins")){
      hgp->add_obj_weight = PHG_ADD_PINS_WEIGHT;
    } else{
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Invalid ADD_OBJ_WEIGHT parameter.\n");
      err = ZOLTAN_WARN;
    }
    /* Set hgp->part_sizes to new array of part_sizes with added obj weight. */
    if (part_sizes)
      err = Zoltan_LB_Add_Part_Sizes_Weight(zz, 
                          (zz->Obj_Weight_Dim ? zz->Obj_Weight_Dim : 1), 
                          zz->Obj_Weight_Dim+1, 
                          part_sizes, &(hgp->part_sizes));
  }

  if ((zz->Obj_Weight_Dim==0) &&      /* no application supplied weights */
      (hgp->add_obj_weight==PHG_ADD_NO_WEIGHT)){ /* no calculated weight */

    hgp->add_obj_weight = PHG_ADD_UNIT_WEIGHT; /* default object weight */
  }

  if (!strcasecmp(cut_objective, "default")
      || !strcasecmp(cut_objective, "connectivity"))
      hgp->connectivity_cut = 1;
  else if (!strcasecmp(cut_objective, "hyperedges"))
      hgp->connectivity_cut = 0;
  else {
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Invalid PHG_CUT_OBJECTIVE parameter.\n");
      goto End;
  }

  if (!strcasecmp(edge_weight_op, "max")){
    hgp->edge_weight_op = PHG_MAX_EDGE_WEIGHTS;
  } else if (!strcasecmp(edge_weight_op, "add")){
    hgp->edge_weight_op = PHG_ADD_EDGE_WEIGHTS;
  } else if (!strcasecmp(edge_weight_op, "error")){
    hgp->edge_weight_op = PHG_FLAG_ERROR_EDGE_WEIGHTS;
  } else{
    ZOLTAN_PRINT_ERROR(zz->Proc, yo,
      "Invalid PHG_EDGE_WEIGHT_OPERATION parameter.\n");
    err = ZOLTAN_WARN;
  }

  if ((strcasecmp(method, "PARTITION")) &&
      (strcasecmp(method, "REPARTITION")) &&
      (strcasecmp(method, "REFINE"))) {
    sprintf(buf,"%s is not a valid hypergraph method\n",method);
    ZOLTAN_PRINT_ERROR (zz->Proc, yo, buf);
    err = ZOLTAN_FATAL;
    goto End;
  }

  /* Adjust refinement parameters using hgp->refinement_quality */
  if (hgp->refinement_quality < 0.5/hgp->fm_loop_limit) 
    /* No refinement */
    strncpy(hgp->refinement_str,      "no",   MAX_PARAM_STRING_LEN);
  else {
    /* Scale FM parameters */
    hgp->fm_loop_limit   *= hgp->refinement_quality;
    hgp->fm_max_neg_move *= hgp->refinement_quality;
  }

  if (!strcasecmp(package, "PHG")){
    /* Test to determine whether we should change the number of processors
       used for partitioning to make more efficient 2D decomposition */

    if (hgp->nProc_x_req != 1 && hgp->nProc_y_req != 1)  /* Want 2D decomp */
      if (zz->Num_Proc > SMALL_PRIME && Zoltan_PHG_isPrime(zz->Num_Proc)) 
        /* 2D data decomposition is requested but we have a prime 
         * number of processors. */
        usePrimeComm = 1;

    if ((!strcasecmp(method, "REPARTITION"))){
        zz->LB.Remap_Flag = 0;
    }

    if ((!strcasecmp(method, "REPARTITION")) ||
        (!strcasecmp(method, "REFINE"))) {
        hgp->fm_loop_limit = 4; /* experimental evaluation showed that for
                                repartitioning/refinement a small number of passes
                                is "good enough". These are all heuristics, hence
                                it is possible to construct pathological cases;
                                but in general this seems to be sufficient */
    }
    
    if (!hgp->useMultilevel) {
        /* don't do coarsening */
        strncpy(hgp->redm_str, "no", MAX_PARAM_STRING_LEN);

        /* we have modified all coarse partitioners to handle a preferred part;
           if the user wants to choose one she can. Otherwise the default
           partitioner (greedy growing) works better than the previous default
           partitioning for phg_refine ("no"). */
        hgp->UsePrefPart = 1;

    }
    if (!strcasecmp(method, "REFINE") && hgp->useMultilevel){
        /* UVCUVC: as a heuristic we prefer local matching;
           in our experiments for IPDPS'07 and WileyChapter multilevel_refine
           didn't prove itself useful; it is too costly even with local matching
           hence it will not be released yet (i.e. not in v3). */
        strncpy(hgp->redm_str, "l-ipm", MAX_PARAM_STRING_LEN);                
        hgp->UsePrefPart = 1;
    }    
  }
  else if (!strcasecmp(package, "PARKWAY")){
    if (hgp->nProc_x_req>1) {
      err = ZOLTAN_FATAL;
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "ParKway requires nProc_x=1 or -1.");
      goto End;
    }
    hgp->nProc_x_req = 1;
  } 
  else if (!strcasecmp(package, "PATOH")){
    if (zz->Num_Proc>1) {
      err = ZOLTAN_FATAL;
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "PaToH only works with Num_Proc=1.");
      goto End;
    }
  }

  if (!usePrimeComm)
    MPI_Comm_dup(zz->Communicator, &communicator);
  else {
    MPI_Group newgrp, zzgrp;
    nProc--;
    MPI_Comm_group(zz->Communicator, &zzgrp);
    MPI_Group_excl(zzgrp, 1, &nProc, &newgrp);
    MPI_Comm_create(zz->Communicator, newgrp, &communicator);
    MPI_Group_free(&newgrp);
    MPI_Group_free(&zzgrp);
  }

  err = Zoltan_PHG_Set_2D_Proc_Distrib(zz, communicator, zz->Proc, 
                                       nProc, hgp->nProc_x_req, 
                                       hgp->nProc_y_req, 
                                       &hgp->globalcomm);
  if (err != ZOLTAN_OK) 
    goto End;

  /* Convert strings to function pointers. */
  err = Zoltan_PHG_Set_Part_Options (zz, hgp);
  
End:
  return err;
}
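/* When the process count is prime, the code above drops the highest rank so
 * that a 2D processor grid can still be formed.  A standalone sketch of that
 * workaround (the name drop_last_rank is illustrative, not Zoltan's): */
static int drop_last_rank(MPI_Comm in, MPI_Comm *out)
{
  int nproc, last;
  MPI_Group in_grp, out_grp;

  MPI_Comm_size(in, &nproc);
  last = nproc - 1;

  MPI_Comm_group(in, &in_grp);
  MPI_Group_excl(in_grp, 1, &last, &out_grp);
  /* Collective over "in"; the excluded rank receives MPI_COMM_NULL. */
  MPI_Comm_create(in, out_grp, out);

  MPI_Group_free(&in_grp);
  MPI_Group_free(&out_grp);
  return 0;
}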
static plan *mkplan(const solver *ego_, const problem *p_, planner *plnr)
{
     const S *ego = (const S *) ego_;
     const problem_mpi_transpose *p;
     P *pln;
     plan *cld1 = 0, *cld2 = 0, *cld2rest = 0, *cld3 = 0;
     INT b, bt, vn, rest_Ioff, rest_Ooff;
     INT *sbs, *sbo, *rbs, *rbo;
     int pe, my_pe, n_pes, sort_pe = -1, ascending = 1;
     R *I, *O;
     static const plan_adt padt = {
          XM(transpose_solve), awake, print, destroy
     };

     UNUSED(ego);

     if (!applicable(ego, p_, plnr))
          return (plan *) 0;

     p = (const problem_mpi_transpose *) p_;
     vn = p->vn;
     I = p->I; O = p->O;

     MPI_Comm_rank(p->comm, &my_pe);
     MPI_Comm_size(p->comm, &n_pes);

     b = XM(block)(p->nx, p->block, my_pe);
     
     if (!(p->flags & TRANSPOSED_IN)) { /* b x ny x vn -> ny x b x vn */
	  cld1 = X(mkplan_f_d)(plnr, 
			       X(mkproblem_rdft_0_d)(X(mktensor_3d)
						     (b, p->ny * vn, vn,
						      p->ny, vn, b * vn,
						      vn, 1, 1),
						     I, O),
			       0, 0, NO_SLOW);
	  if (XM(any_true)(!cld1, p->comm)) goto nada;
     }
     if (ego->preserve_input || NO_DESTROY_INPUTP(plnr)) I = O;

     if (XM(any_true)(!XM(mkplans_posttranspose)(p, plnr, I, O, my_pe,
						 &cld2, &cld2rest, &cld3,
						 &rest_Ioff, &rest_Ooff),
		      p->comm)) goto nada;

     pln = MKPLAN_MPI_TRANSPOSE(P, &padt, apply);

     pln->cld1 = cld1;
     pln->cld2 = cld2;
     pln->cld2rest = cld2rest;
     pln->rest_Ioff = rest_Ioff;
     pln->rest_Ooff = rest_Ooff;
     pln->cld3 = cld3;
     pln->preserve_input = ego->preserve_input ? 2 : NO_DESTROY_INPUTP(plnr);

     MPI_Comm_dup(p->comm, &pln->comm);

     n_pes = (int) X(imax)(XM(num_blocks)(p->nx, p->block),
			   XM(num_blocks)(p->ny, p->tblock));

     /* Compute sizes/offsets of blocks to exchange between processors */
     sbs = (INT *) MALLOC(4 * n_pes * sizeof(INT), PLANS);
     sbo = sbs + n_pes;
     rbs = sbo + n_pes;
     rbo = rbs + n_pes;
     b = XM(block)(p->nx, p->block, my_pe);
     bt = XM(block)(p->ny, p->tblock, my_pe);
     for (pe = 0; pe < n_pes; ++pe) {
	  INT db, dbt; /* destination block sizes */
	  db = XM(block)(p->nx, p->block, pe);
	  dbt = XM(block)(p->ny, p->tblock, pe);

	  sbs[pe] = b * dbt * vn;
	  sbo[pe] = pe * (b * p->tblock) * vn;
	  rbs[pe] = db * bt * vn;
	  rbo[pe] = pe * (p->block * bt) * vn;

	  if (db * dbt > 0 && db * p->tblock != p->block * dbt) {
	       A(sort_pe == -1); /* only one process should need sorting */
	       sort_pe = pe;
	       ascending = db * p->tblock > p->block * dbt;
	  }
     }
     pln->n_pes = n_pes;
     pln->my_pe = my_pe;
     pln->send_block_sizes = sbs;
     pln->send_block_offsets = sbo;
     pln->recv_block_sizes = rbs;
     pln->recv_block_offsets = rbo;

     if (my_pe >= n_pes) {
	  pln->sched = 0; /* this process is not doing anything */
     }
     else {
	  pln->sched = (int *) MALLOC(n_pes * sizeof(int), PLANS);
	  fill1_comm_sched(pln->sched, my_pe, n_pes);
	  if (sort_pe >= 0)
	       sort1_comm_sched(pln->sched, n_pes, sort_pe, ascending);
     }

     X(ops_zero)(&pln->super.super.ops);
     if (cld1) X(ops_add2)(&cld1->ops, &pln->super.super.ops);
     if (cld2) X(ops_add2)(&cld2->ops, &pln->super.super.ops);
     if (cld2rest) X(ops_add2)(&cld2rest->ops, &pln->super.super.ops);
     if (cld3) X(ops_add2)(&cld3->ops, &pln->super.super.ops);
     /* FIXME: should MPI operations be counted in "other" somehow? */

     return &(pln->super.super);

 nada:
     X(plan_destroy_internal)(cld3);
     X(plan_destroy_internal)(cld2rest);
     X(plan_destroy_internal)(cld2);
     X(plan_destroy_internal)(cld1);
     return (plan *) 0;
}
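/* The sbs/sbo/rbs/rbo arrays built above describe a personalized all-to-all
 * exchange.  FFTW drives the exchange with its own schedule, but arrays of
 * that shape are exactly what MPI_Alltoallv expects; the following is an
 * illustration only (int counts/displacements, double payload), not FFTW
 * code. */
static void exchange_blocks(double *in, double *out,
                            int *send_sizes, int *send_offs,
                            int *recv_sizes, int *recv_offs,
                            MPI_Comm comm)
{
     MPI_Alltoallv(in, send_sizes, send_offs, MPI_DOUBLE,
                   out, recv_sizes, recv_offs, MPI_DOUBLE, comm);
}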
Example #21
0
/**
 * Create a child group of the given group.
 *
 * @param[in] n #procs in this group (<= that in group_parent)
 * @param[in] pid_list The list of proc ids (w.r.t. group_parent)
 * @param[out] id_child Handle to store the created group
 * @param[in] id_parent Parent group 
 */
void ARMCI_Group_create_child(
        int n, int *pid_list, ARMCI_Group *id_child, ARMCI_Group *id_parent)
{
    int status;
    int grp_me;
    ARMCI_iGroup *igroup_child = NULL;
    MPI_Group    *group_child = NULL;
    MPI_Comm     *comm_child = NULL;
    ARMCI_iGroup *igroup_parent = NULL;
    MPI_Group    *group_parent = NULL;
    MPI_Comm     *comm_parent = NULL;

    /* create the node in the linked list of groups and */
    /* get the child's MPI_Group and MPI_Comm, to be populated shortly */
    armci_create_group_and_igroup(id_child, &igroup_child);
    group_child = &(igroup_child->group);
    comm_child  = &(igroup_child->comm);

    /* get the parent's MPI_Group and MPI_Comm */
    igroup_parent = armci_get_igroup_from_group(id_parent);
    group_parent = &(igroup_parent->group);
    comm_parent  = &(igroup_parent->comm);

    status = MPI_Group_incl(*group_parent, n, pid_list, group_child);
    if (status != MPI_SUCCESS) {
        armci_die("MPI_Group_incl: Failed ", status);
    }

    {
        MPI_Comm comm, comm1, comm2;
        int lvl=1, local_ldr_pos;
        MPI_Group_rank(*group_child, &grp_me);
        if (grp_me == MPI_UNDEFINED) {
            *comm_child = MPI_COMM_NULL;
            /* FIXME: keeping the group around for now */
            return;
        }
        /* SK: sanity check for the following bitwise operations */
        assert(grp_me>=0);
        MPI_Comm_dup(MPI_COMM_SELF, &comm); /* FIXME: can be optimized away */
        local_ldr_pos = grp_me;
        while(n>lvl) {
            int tag=0;
            int remote_ldr_pos = local_ldr_pos^lvl;
            if (remote_ldr_pos < n) {
                int remote_leader = pid_list[remote_ldr_pos];
                MPI_Comm peer_comm = *comm_parent;
                int high = (local_ldr_pos<remote_ldr_pos)?0:1;
                MPI_Intercomm_create(
                        comm, 0, peer_comm, remote_leader, tag, &comm1);
                MPI_Comm_free(&comm);
                MPI_Intercomm_merge(comm1, high, &comm2);
                MPI_Comm_free(&comm1);
                comm = comm2;
            }
            local_ldr_pos &= ((~0)^lvl);
            lvl<<=1;
        }
        *comm_child = comm;
        /* cleanup temporary group (from MPI_Group_incl above) */
        MPI_Group_free(group_child);
        /* get the actual group associated with comm */
        MPI_Comm_group(*comm_child, group_child);
    }
}
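/* The loop above assembles the child's communicator pairwise with
 * MPI_Intercomm_create / MPI_Intercomm_merge, using only the parent
 * communicator for leader-to-leader traffic.  On an MPI-3 implementation the
 * same result can be obtained in one call; this is an alternative sketch
 * (names are illustrative), not what ARMCI does. */
#include <mpi.h>

static int group_comm_from_list(MPI_Comm parent, int n, int *pid_list,
                                MPI_Comm *newcomm)
{
    MPI_Group parent_grp, sub_grp;
    int grp_rank, rc = MPI_SUCCESS;

    MPI_Comm_group(parent, &parent_grp);
    MPI_Group_incl(parent_grp, n, pid_list, &sub_grp);

    /* MPI_Comm_create_group is collective over sub_grp only, so ranks that
     * are not in pid_list skip the call and simply get MPI_COMM_NULL. */
    MPI_Group_rank(sub_grp, &grp_rank);
    if (grp_rank != MPI_UNDEFINED)
        rc = MPI_Comm_create_group(parent, sub_grp, 0 /* tag */, newcomm);
    else
        *newcomm = MPI_COMM_NULL;

    MPI_Group_free(&sub_grp);
    MPI_Group_free(&parent_grp);
    return rc;
}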
Example #22
0
void comm_actor_duplication(void) {
    MPI_Comm comm_actor = MPI_COMM_NULL;
    MPI_Comm comm_actor_dup = MPI_COMM_NULL;
    MPI_Comm comm_actor_dup_dup = MPI_COMM_NULL;

    MPI_Datatype test_actor_type = gen_test_actor_type();
    MPI_Datatype test_receptionist_type = gen_test_receptionist_type();

    int num_actor_types = 1;
    MPI_Datatype actor_type_list[] = { test_actor_type };

    int test_num_actor_types = 0;
    MPI_Datatype test_actor_type_list[num_actor_types];
    MPI_Datatype test_test_receptionist_type = MPI_DATATYPE_NULL;

    int err;


    /* Create the actor communicator */
    MPI_Actor_create(
        MPI_COMM_WORLD, num_actor_types,
        actor_type_list, test_receptionist_type,
        &comm_actor
    );


    /* Create a duplicate of the original */
    err = MPI_Comm_dup(comm_actor, &comm_actor_dup);
    require_true(
        "MPI_Comm_dup should return MPI_SUCCESS for comm_actor_dup",
        MPI_COMM_WORLD,
        err == MPI_SUCCESS
    );


    test_num_actor_types = 0;
    err = MPI_Actor_get_num(comm_actor_dup, &test_num_actor_types);
    require_true(
        "MPI_Actor_get_num return value should be MPI_SUCCESS"
        " for comm_actor_dup",
        MPI_COMM_WORLD,
        err == MPI_SUCCESS
    );
    require_true(
        "MPI_Actor_get_num should return num_actor_types"
        " for comm_actor_dup",
        MPI_COMM_WORLD,
        test_num_actor_types == num_actor_types
    );

    test_actor_type_list[0] = MPI_DATATYPE_NULL;
    test_test_receptionist_type = MPI_DATATYPE_NULL;
    err = MPI_Actor_get(
        comm_actor_dup, num_actor_types, test_actor_type_list,
        &test_test_receptionist_type
    );
    require_true(
        "MPI_Actor_get should return MPI_SUCCESS for comm_actor_dup",
        MPI_COMM_WORLD,
        err == MPI_SUCCESS
    );

    require_true(
        "MPI_Actor_get should return the test_actor_type for comm_actor_dup",
        MPI_COMM_WORLD,
        test_actor_type_list[0] == test_actor_type
    );

    require_true(
        "MPI_Actor_get should return the test_receptionist_type"
        " for comm_actor_dup",
        MPI_COMM_WORLD,
        test_test_receptionist_type == test_receptionist_type
    );


    /* Free the original communicator, and tests should still pass */
    MPI_Comm_free(&comm_actor);


    test_num_actor_types = 0;
    err = MPI_Actor_get_num(comm_actor_dup, &test_num_actor_types);
    require_true(
        "MPI_Actor_get_num return value should be MPI_SUCCESS"
        " for comm_actor_dup after free",
        MPI_COMM_WORLD,
        err == MPI_SUCCESS
    );
    require_true(
        "MPI_Actor_get_num should return num_actor_types for comm_actor_dup"
        " after free",
        MPI_COMM_WORLD,
        test_num_actor_types == num_actor_types
    );

    test_actor_type_list[0] = MPI_DATATYPE_NULL;
    test_test_receptionist_type = MPI_DATATYPE_NULL;
    err = MPI_Actor_get(
        comm_actor_dup, num_actor_types, test_actor_type_list,
        &test_test_receptionist_type
    );
    require_true(
        "MPI_Actor_get should return MPI_SUCCESS"
        " for comm_actor_dup after free",
        MPI_COMM_WORLD,
        err == MPI_SUCCESS
    );

    require_true(
        "MPI_Actor_get should return the test_actor_type for comm_actor_dup"
        " after free",
        MPI_COMM_WORLD,
        test_actor_type_list[0] == test_actor_type
    );

    require_true(
        "MPI_Actor_get should return the test_receptionist_type"
        " for comm_actor_dup after free",
        MPI_COMM_WORLD,
        test_test_receptionist_type == test_receptionist_type
    );


    /* Create a duplicate of the duplicate */
    err = MPI_Comm_dup(comm_actor_dup, &comm_actor_dup_dup);
    require_true(
        "MPI_Comm_dup should return MPI_SUCCESS for comm_actor_dup_dup",
        MPI_COMM_WORLD,
        err == MPI_SUCCESS
    );

    /* All previous tests should still pass */

    test_num_actor_types = 0;
    err = MPI_Actor_get_num(comm_actor_dup_dup, &test_num_actor_types);
    require_true(
        "MPI_Actor_get_num return value should be MPI_SUCCESS"
        " for comm_actor_dup_dup",
        MPI_COMM_WORLD,
        err == MPI_SUCCESS
    );
    require_true(
        "MPI_Actor_get_num should return num_actor_types"
        " for comm_actor_dup_dup",
        MPI_COMM_WORLD,
        test_num_actor_types == num_actor_types
    );

    test_actor_type_list[0] = MPI_DATATYPE_NULL;
    test_test_receptionist_type = MPI_DATATYPE_NULL;
    err = MPI_Actor_get(
        comm_actor_dup_dup, num_actor_types, test_actor_type_list,
        &test_test_receptionist_type
    );
    require_true(
        "MPI_Actor_get should return MPI_SUCCESS for comm_actor_dup_dup",
        MPI_COMM_WORLD,
        err == MPI_SUCCESS
    );

    require_true(
        "MPI_Actor_get should return the test_actor_type for"
        " comm_actor_dup_dup",
        MPI_COMM_WORLD,
        test_actor_type_list[0] == test_actor_type
    );

    require_true(
        "MPI_Actor_get should return the test_receptionist_type for"
        " comm_actor_dup_dup",
        MPI_COMM_WORLD,
        test_test_receptionist_type == test_receptionist_type
    );


    /* Free the first duplicate communicator, and tests should still pass */
    MPI_Comm_free(&comm_actor_dup);

    test_num_actor_types = 0;
    err = MPI_Actor_get_num(comm_actor_dup_dup, &test_num_actor_types);
    require_true(
        "MPI_Actor_get_num return value should be MPI_SUCCESS"
        " for comm_actor_dup_dup after free",
        MPI_COMM_WORLD,
        err == MPI_SUCCESS
    );
    require_true(
        "MPI_Actor_get_num should return num_actor_types"
        " for comm_actor_dup_dup after free",
        MPI_COMM_WORLD,
        test_num_actor_types == num_actor_types
    );

    test_actor_type_list[0] = MPI_DATATYPE_NULL;
    test_test_receptionist_type = MPI_DATATYPE_NULL;
    err = MPI_Actor_get(
        comm_actor_dup_dup, num_actor_types, test_actor_type_list,
        &test_test_receptionist_type
    );
    require_true(
        "MPI_Actor_get should return MPI_SUCCESS for comm_actor_dup_dup"
        " after free",
        MPI_COMM_WORLD,
        err == MPI_SUCCESS
    );

    require_true(
        "MPI_Actor_get should return the test_actor_type for"
        " comm_actor_dup_dup after free",
        MPI_COMM_WORLD,
        test_actor_type_list[0] == test_actor_type
    );

    require_true(
        "MPI_Actor_get should return the test_receptionist_type for"
        " comm_actor_dup_dup after free",
        MPI_COMM_WORLD,
        test_test_receptionist_type == test_receptionist_type
    );

    /* Free the dup dup communicator */
    MPI_Comm_free(&comm_actor_dup_dup);

}
Example #23
0
/*MC
      TSSUNDIALS - ODE solver using the LLNL CVODE/SUNDIALS package (now called SUNDIALS)

   Options Database:
+    -ts_sundials_type <bdf,adams>
.    -ts_sundials_gramschmidt_type <modified, classical> - type of orthogonalization inside GMRES
.    -ts_sundials_atol <tol> - Absolute tolerance for convergence
.    -ts_sundials_rtol <tol> - Relative tolerance for convergence
.    -ts_sundials_linear_tolerance <tol> 
.    -ts_sundials_gmres_restart <restart> - Number of GMRES orthogonalization directions
.    -ts_sundials_exact_final_time - Interpolate output to stop exactly at the final time
-    -ts_sundials_monitor_steps - Monitor SUNDIALS internal steps


    Notes: This uses its own nonlinear solver and Krylov method, so PETSc SNES and KSP options do not apply;
           only PETSc PC options are used.

    Level: beginner

.seealso:  TSCreate(), TS, TSSetType(), TSSundialsSetType(), TSSundialsSetGMRESRestart(), TSSundialsSetLinearTolerance(),
           TSSundialsSetGramSchmidtType(), TSSundialsSetTolerance(), TSSundialsGetPC(), TSSundialsGetIterations(), TSSundialsSetExactFinalTime()

M*/
EXTERN_C_BEGIN
#undef __FUNCT__  
#define __FUNCT__ "TSCreate_Sundials"
PetscErrorCode PETSCTS_DLLEXPORT TSCreate_Sundials(TS ts)
{
  TS_Sundials *cvode;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (ts->problem_type != TS_NONLINEAR) {
    SETERRQ(PETSC_ERR_SUP,"Only support for nonlinear problems");
  }
  ts->ops->destroy        = TSDestroy_Sundials;
  ts->ops->view           = TSView_Sundials;
  ts->ops->setup          = TSSetUp_Sundials_Nonlinear;  
  ts->ops->step           = TSStep_Sundials_Nonlinear;
  ts->ops->setfromoptions = TSSetFromOptions_Sundials_Nonlinear;

  ierr = PetscNewLog(ts,TS_Sundials,&cvode);CHKERRQ(ierr);
  ierr = PCCreate(((PetscObject)ts)->comm,&cvode->pc);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(ts,cvode->pc);CHKERRQ(ierr);
  ts->data          = (void*)cvode;
  cvode->cvode_type = SUNDIALS_BDF;
  cvode->gtype      = SUNDIALS_CLASSICAL_GS;
  cvode->restart    = 5;
  cvode->linear_tol = .05;

  cvode->exact_final_time = PETSC_TRUE;
  cvode->monitorstep      = PETSC_FALSE;

  ierr = MPI_Comm_dup(((PetscObject)ts)->comm,&(cvode->comm_sundials));CHKERRQ(ierr);
  /* set tolerance for Sundials */
  cvode->reltol = 1e-6;
  cvode->abstol = 1e-6;

  ierr = PetscObjectComposeFunctionDynamic((PetscObject)ts,"TSSundialsSetType_C","TSSundialsSetType_Sundials",
                    TSSundialsSetType_Sundials);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)ts,"TSSundialsSetGMRESRestart_C",
                    "TSSundialsSetGMRESRestart_Sundials",
                    TSSundialsSetGMRESRestart_Sundials);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)ts,"TSSundialsSetLinearTolerance_C",
                    "TSSundialsSetLinearTolerance_Sundials",
                     TSSundialsSetLinearTolerance_Sundials);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)ts,"TSSundialsSetGramSchmidtType_C",
                    "TSSundialsSetGramSchmidtType_Sundials",
                     TSSundialsSetGramSchmidtType_Sundials);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)ts,"TSSundialsSetTolerance_C",
                    "TSSundialsSetTolerance_Sundials",
                     TSSundialsSetTolerance_Sundials);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)ts,"TSSundialsGetPC_C",
                    "TSSundialsGetPC_Sundials",
                     TSSundialsGetPC_Sundials);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)ts,"TSSundialsGetIterations_C",
                    "TSSundialsGetIterations_Sundials",
                     TSSundialsGetIterations_Sundials);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)ts,"TSSundialsSetExactFinalTime_C",
                    "TSSundialsSetExactFinalTime_Sundials",
                     TSSundialsSetExactFinalTime_Sundials);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)ts,"TSSundialsMonitorInternalSteps_C",
                    "TSSundialsMonitorInternalSteps_Sundials",
                     TSSundialsMonitorInternalSteps_Sundials);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}
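/* Hedged usage sketch (not part of the source above, and MakeSundialsTS is a
 * hypothetical name): create a TS that uses the SUNDIALS integrator; the
 * -ts_sundials_* options listed in the manual page are then picked up by
 * TSSetFromOptions(). */
#include <petscts.h>

PetscErrorCode MakeSundialsTS(TS *ts)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = TSCreate(PETSC_COMM_WORLD,ts);CHKERRQ(ierr);
  ierr = TSSetType(*ts,TSSUNDIALS);CHKERRQ(ierr);
  ierr = TSSetProblemType(*ts,TS_NONLINEAR);CHKERRQ(ierr);   /* TSSUNDIALS supports only nonlinear problems */
  ierr = TSSetFromOptions(*ts);CHKERRQ(ierr);                /* reads -ts_sundials_type, -ts_sundials_atol, ... */
  PetscFunctionReturn(0);
}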
Example #24
0
/************************************************************************* 
 * method: initpympi
 * This is called right after python has been initialized.  MPI has already
 * been initialized here 
 * ************************************************************************/
void initpympi() 
{
    PyObject* mpiName = 0;
    char versionString[32];
    PyObject* lastWish = 0;
    int version;
    int subversion;
    PyObject* pickleModule = 0;
    PyObject* pickleDict = 0;
    PyObject* docString = 0;
    PyObject* pyWorld = 0;
    PyObject* member = 0;
    PyMethodDef* methodPtr = 0;
    char* docExtra = 0;
    int myRank = 0;
    int result = MPI_Comm_rank(MPI_COMM_WORLD, &myRank );

    /* ----------------------------------------------- */
    /* The IBM poe environment is brain dead           */
    /* ----------------------------------------------- */
#ifdef _AIX
    Py_InteractiveFlag++;
#endif

    /* ----------------------------------------------- */
    /* Cover our butts on assumptions                  */
    /* ----------------------------------------------- */
    Assert( sizeof(MPI_Comm) <= sizeof(long) );

    /* ----------------------------------------------- */
    /* We subvert the input stream to handle broadcast */
    /* ----------------------------------------------- */
    Original_ReadlineFunctionPointer = PyOS_ReadlineFunctionPointer;
    if ( !Original_ReadlineFunctionPointer ) {
        Original_ReadlineFunctionPointer = PyOS_StdioReadline;
    }

    PyOS_ReadlineFunctionPointer = MPI_ReadlineFunctionPointer;

    /* ----------------------------------------------- */
    /* Setup the initial mpi module                    */
    /* ----------------------------------------------- */
    module = Py_InitModule("mpi",MPI_methods);
    Assert(module);
    PyMPI_dictionary = PyModule_GetDict(module);
    Assert(PyMPI_dictionary);

    /* ----------------------------------------------- */
    /* Set up a docstring for the mpi module itself    */
    /* ----------------------------------------------- */
    docExtra = DocStringFromMethods(MPI_methods,"mpi\n\nBasic mpi calls\n\n");
    Assert(docExtra);
    docString = PyString_FromString(docExtra);
    free(docExtra);

    /* ----------------------------------------------- */
    /* We start off with errors handled with flag      */
    /* ----------------------------------------------- */
    if ( MPI_Errhandler_set(MPI_COMM_WORLD,MPI_ERRORS_RETURN)
            != MPI_SUCCESS ) {
        PYCHECK( PyErr_SetString(PyExc_SystemError,"MPI Failure -- MPI_Errhandler_set()") );
    }

    /* ----------------------------------------------- */
    /* See if we conform!                              */
    /* ----------------------------------------------- */
    MPICHECKCOMMLESS( MPI_Get_version(&version,&subversion) );
    Assert(version == MPI_VERSION && subversion == MPI_SUBVERSION);

    /* ----------------------------------------------- */
    /* We have some cleanup work to do on exit         */
    /* ----------------------------------------------- */
    PYCHECK( lastWish = PyCFunction_New(&lastwishMethods,module) );
    Assert(lastWish);
    sysExitfunc = PySys_GetObject("exitfunc");
    PyErr_Clear();
    PYCHECK( PySys_SetObject("exitfunc",lastWish) );

    /* ----------------------------------------------- */
    /* Set common attributes                           */
    /* ----------------------------------------------- */
    PYCHECK( PyDict_SetItemString(PyMPI_dictionary,"stdout",Py_None) );
    Py_INCREF(Py_None);

    PYCHECK( PyString_ConcatFromString(&docString,"name: Name of MPI model (MPICH, LAM, mpi)") );
#ifdef MPICH_NAME
    PYCHECK( mpiName = PyString_FromString("MPICH") );
#else
# ifdef LAM_MPI
    PYCHECK( mpiName = PyString_FromString("LAM") );
# else
    PYCHECK( mpiName = PyString_FromString("mpi") );
# endif
#endif
    PYCHECK( PyDict_SetItemString(PyMPI_dictionary,"name",mpiName) );

    PYCHECK( PyString_ConcatFromString(&docString,"rank: Rank of MPI_COMM_WORLD communicator\n") );
    MPICHECK( MPI_COMM_WORLD,
            MPI_Comm_rank(MPI_COMM_WORLD,&worldRank));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"rank",
                PyInt_FromLong((long)worldRank)));

    PYCHECK(PyString_ConcatFromString(&docString,"procs: Size of MPI_COMM_WORLD communicator\n"));
    MPICHECK( MPI_COMM_WORLD,
            MPI_Comm_size(MPI_COMM_WORLD,&worldProcs));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"procs",
                PyInt_FromLong((long)worldProcs)));

    PYCHECK(PyString_ConcatFromString(&docString,"tick: Tick size of high-resolution timer\n"));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"tick",
                PyFloat_FromDouble(MPI_Wtick())));

    PYCHECK(PyString_ConcatFromString(&docString,"version: String showing mpi version\n"));
    sprintf(versionString,"%d.%d",MPI_VERSION,MPI_SUBVERSION);
#if defined(MPICH_NAME) && MPI_VERSION == 1 && MPI_SUBVERSION == 1 && MPI_STATUS_SIZE == 5
    strcat(versionString,".2"); /* MPICH 1.1.2 is evil */
#endif
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"version",
                PyString_FromString(versionString)));

    PYCHECK(PyString_ConcatFromString(&docString,"COMM_WORLD: MPI_COMM_WORLD communicator\n"));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"COMM_WORLD",
                PyMPI_Comm(MPI_COMM_WORLD)));

    PYCHECK(PyString_ConcatFromString(&docString,"COMM_NULL: MPI_COMM_NULL communicator (non-functional)\n"));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"COMM_NULL",
                PyMPI_Comm(MPI_COMM_NULL)));

    PYCHECK(PyString_ConcatFromString(&docString,"MAX: MPI_MAX\n"));
    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MAX",
      PyInt_FromLong((long)MPI_MAX)));*/
    reduceOpLookup[eMaxOp] = MPI_MAX;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MAX",
                PyInt_FromLong((long)eMaxOp)));

    PYCHECK(PyString_ConcatFromString(&docString,"MIN: MPI_MIN\n"));

    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MIN",
      PyInt_FromLong((long)MPI_MIN)));*/
    reduceOpLookup[eMinOp] = MPI_MIN;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MIN",
                PyInt_FromLong((long)eMinOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"SUM: MPI_SUM\n"));
    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"SUM",
      PyInt_FromLong((long)MPI_SUM)));*/
    reduceOpLookup[eSumOp] = MPI_SUM;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"SUM",
                PyInt_FromLong((long)eSumOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"PROD: MPI_PROD\n"));
    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"PROD",
      PyInt_FromLong((long)MPI_PROD)));*/
    reduceOpLookup[eProdOp] = MPI_PROD;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"PROD",
                PyInt_FromLong((long)eProdOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"LAND: MPI_LAND\n"));
    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"LAND",
      PyInt_FromLong((long)MPI_LAND)));*/
    reduceOpLookup[eLandOp] = MPI_LAND;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"LAND",
                PyInt_FromLong((long)eLandOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"BAND: MPI_BAND\n"));
    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"BAND",
      PyInt_FromLong((long)MPI_BAND)));*/
    reduceOpLookup[eBandOp] = MPI_BAND;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"BAND",
                PyInt_FromLong((long)eBandOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"LOR: MPI_LOR\n"));
    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"LOR",
      PyInt_FromLong((long)MPI_LOR)));*/
    reduceOpLookup[eLorOp] = MPI_LOR;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"LOR",
                PyInt_FromLong((long)eLorOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"BOR: MPI_BOR\n"));
    /*   PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"BOR",
         PyInt_FromLong((long)MPI_BOR)));*/
    reduceOpLookup[eBorOp] = MPI_BOR;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"BOR",
                PyInt_FromLong((long)eBorOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"LXOR: MPI_LXOR\n"));
    /*   PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"LXOR",
         PyInt_FromLong((long)MPI_LXOR)));*/
    reduceOpLookup[eLxorOp] = MPI_LXOR;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"LXOR",
                PyInt_FromLong((long)eLxorOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"BXOR: MPI_BXOR\n"));
    /*   PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"BXOR",
         PyInt_FromLong((long)MPI_BXOR)));*/
    reduceOpLookup[eBxorOp] = MPI_BXOR;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"BXOR",
                PyInt_FromLong((long)eBxorOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"MINLOC: MPI_MINLOC\n"));
    /*   PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MINLOC",
         PyInt_FromLong((long)MPI_MINLOC)));*/
    reduceOpLookup[eMinlocOp] = MPI_MINLOC;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MINLOC",
                PyInt_FromLong((long)eMinlocOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"MAXLOC: MPI_MAXLOC\n"));
    /*   PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MAXLOC",
         PyInt_FromLong((long)MPI_MAXLOC)));*/
    reduceOpLookup[eMaxlocOp] = MPI_MAXLOC;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MAXLOC",
                PyInt_FromLong((long)eMaxlocOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"ANY_SOURCE: MPI_ANY_SOURCE (used for untargeted recv)\n"));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"ANY_SOURCE",
                PyInt_FromLong((long)MPI_ANY_SOURCE)));

    PYCHECK(   PyString_ConcatFromString(&docString,"ANY_TAG: MPI_ANY_TAG (used for untargeted recv)\n"));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"ANY_TAG",
                PyInt_FromLong((long)MPI_ANY_TAG)));

    /* ----------------------------------------------- */
    /* We set up an internal communicator for PYTHON   */
    /* messaging and another for Python I/O            */
    /* ----------------------------------------------- */
    MPICHECK( MPI_COMM_WORLD,
            MPI_Comm_dup(MPI_COMM_WORLD,&PyMPI_COMM_WORLD));
    MPICHECK( MPI_COMM_WORLD,
            MPI_Comm_dup(MPI_COMM_WORLD,&PyMPI_COMM_INPUT));
    PYCHECK( PyString_ConcatFromString(&docString,"WORLD: Internal Python communicator\n") );
    PYCHECK( pyWorld = PyMPI_Comm(PyMPI_COMM_WORLD) );
    PYCHECK( PyDict_SetItemString(PyMPI_dictionary,"WORLD",pyWorld) );

    /* ----------------------------------------------- */
    /* Fetch member functions to appear as mpi.xxxx    */
    /* Make pyWorld immortal to avoid dealloc issues   */
    /* Magic Rating: 10/10                             */
    /* ----------------------------------------------- */
    Py_INCREF(pyWorld);
    for(methodPtr = PyMPIMethods_Comm; methodPtr->ml_name; ++methodPtr) {
        PYCHECK( member = PyObject_GetAttrString(pyWorld,methodPtr->ml_name) );
        Py_INCREF(member);
        PYCHECK( PyDict_SetItemString(PyMPI_dictionary,methodPtr->ml_name,member) );
    }

    /* ----------------------------------------------- */
    /* Set up the overloaded input                     */
    /* ----------------------------------------------- */
    PYCHECK( overloadedInput = PyMPI_File(worldRank != 0,PySys_GetObject("stdin"),PyMPI_COMM_INPUT) );
    Py_INCREF(overloadedInput);
    PyDict_SetItemString(PyMPI_dictionary,"stdin",overloadedInput);
    PySys_SetObject("stdin",overloadedInput);

    /* ----------------------------------------------- */
    /* Initial model is no output throttle             */
    /* ----------------------------------------------- */
    PYCHECK( PyMPI_UnrestrictedOutput(3) );

    /* ----------------------------------------------- */
    /* Have to set up some stuff for communicating     */
    /* arbitrary python objects                        */
    /* ----------------------------------------------- */

    /* -- set up PyMPI_pythonPickleType  -- */
    MPICHECKCOMMLESS( MPI_Type_contiguous(1,MPI_CHAR,&PyMPI_pythonPickleType) );
    MPICHECKCOMMLESS( MPI_Type_commit(&PyMPI_pythonPickleType) );
    Assert( PyMPI_pythonPickleType != MPI_CHAR );
    /* -- set up PyMPI_pythonFuncPickleType  -- */
    MPICHECKCOMMLESS( MPI_Type_contiguous(1,MPI_CHAR,&PyMPI_pythonFuncPickleType) );
    MPICHECKCOMMLESS( MPI_Type_commit(&PyMPI_pythonFuncPickleType) );
    Assert( PyMPI_pythonFuncPickleType != MPI_CHAR );

    /* -- set up PyMPI_pythonFuncPickleType  -- */
    pickleModule = PyImport_ImportModule("cPickle");
    if ( !pickleModule || PyErr_Occurred() ) {
        PyErr_Clear();
    } else {
        PYCHECK( pickleDict = PyModule_GetDict(pickleModule) );
        PYCHECK( PyMPI_pickleDumperFunction = PyDict_GetItemString(pickleDict,"dumps") );
        PYCHECK( PyMPI_pickleLoaderFunction = PyDict_GetItemString(pickleDict,"loads") );
    }

    /* ----------------------------------------------- */
    /* Set up the __doc__ string of the communicator   */
    /* type with more info (based on list)             */
    /* ----------------------------------------------- */
    PyMPIObject_Communicator_Type.tp_doc =
        DocStringFromMethods(PyMPIMethods_Comm,
                PyMPIObject_Communicator_Type.tp_doc);

    /* ----------------------------------------------- */
    /* Set up same info in module doc                  */
    /* ----------------------------------------------- */
    docExtra = 
        DocStringFromMethods(PyMPIMethods_Comm,
                "\nAnd these communicator methods map to the Python world communicator (not MPI_COMM_WORLD)\n\n");
    Assert(docExtra);
    PYCHECK( PyString_ConcatFromString(&docString,docExtra) );
    free(docExtra);

    /* ----------------------------------------------- */
    /* Stick in the doc string and we're ready to go   */
    /* ----------------------------------------------- */
    PYCHECK( PyDict_SetItemString(PyMPI_dictionary, "__doc__", docString) );

    return;

pythonError:
    Assert( PyErr_Occurred() );
    return; /* We have set a Python exception, let Python handle it */
}
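The communicator handling in initpympi is worth noting: the module never posts Python-level traffic on MPI_COMM_WORLD itself, but on private duplicates (PyMPI_COMM_WORLD for messaging, PyMPI_COMM_INPUT for input), so library messages can never match user sends and receives that happen to use the same ranks and tags. A minimal, generic sketch of that isolation idiom (the names below are illustrative, not taken from pyMPI):

/* Sketch: a library keeps its own duplicate of the caller's communicator so
   its internal point-to-point traffic cannot collide with user traffic, even
   when source, destination, and tag coincide. */
#include <mpi.h>

typedef struct {
    MPI_Comm private_comm;   /* hypothetical library state */
} lib_state_t;

int lib_init(MPI_Comm user_comm, lib_state_t *st)
{
    /* Collective over user_comm; every member process must call it. */
    return MPI_Comm_dup(user_comm, &st->private_comm);
}

int lib_finalize(lib_state_t *st)
{
    return MPI_Comm_free(&st->private_comm);
}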
Example #25
int Zoltan_DD_Create (
 Zoltan_DD_Directory **dd,    /* contains directory state and pointers */
 MPI_Comm comm,               /* Dup'ed and saved for future use       */
 int num_gid,                 /* Number of entries in a global ID.     */
 int num_lid,                 /* Number of entries in a local ID.      
                                 If zero, ignore LIDs                  */
 int user_length,             /* Optional user data length, 0 ignore   */
 int table_length,            /* sizeof hash table, use default if 0   */
 int debug_level)             /* control actions to errors, normally 0 */
   {
   int size;
   int my_proc;
   int array[3], max_array[3], min_array[3];
   char *yo = "Zoltan_DD_Create";

   if (MPI_Comm_rank(comm, &my_proc) != MPI_SUCCESS)  {
      ZOLTAN_PRINT_ERROR (-1, yo, "MPI_Comm_rank failed");
      return ZOLTAN_FATAL;
   }

   /* input sanity check */
   if (dd == NULL || num_gid < 1 || table_length < 0 || num_lid < 0)  {
      ZOLTAN_PRINT_ERROR (my_proc, yo, "Invalid input argument");
      return ZOLTAN_FATAL;
   }
   if (debug_level > 4)
      ZOLTAN_TRACE_IN (my_proc, yo, NULL);

   /* insure all processors are using the same GID, LID, USER lengths */
   array[0] = num_gid;
   array[1] = num_lid;
   array[2] = user_length;
   MPI_Allreduce (array, max_array, 3, MPI_INT, MPI_MAX, comm);
   MPI_Allreduce (array, min_array, 3, MPI_INT, MPI_MIN, comm);
   if (max_array[0] != min_array[0] || max_array[1] != min_array[1]
    || max_array[2] != min_array[2])  {
       ZOLTAN_PRINT_ERROR(-1,yo,"LID, GID, USER data lengths differ globally");
       return ZOLTAN_FATAL;
   }

   /* malloc memory for the directory structure + hash table */
   size = (table_length) ? table_length: ZOLTAN_DD_HASH_TABLE_COUNT;
   *dd  = (Zoltan_DD_Directory*) ZOLTAN_MALLOC (sizeof (Zoltan_DD_Directory)
        + size * sizeof(DD_Node*));
   if (*dd == NULL)  {
      ZOLTAN_PRINT_ERROR (my_proc, yo, "Can not malloc hash table");
      if (debug_level > 4)
        ZOLTAN_TRACE_OUT(my_proc, yo, NULL);
      return ZOLTAN_MEMERR;
   }

   /* NULL heads of link list in hash table */
   memset ((char*) (*dd)->table, '\0', size * sizeof(DD_Node*));

   /* save useful constants into directory for convenience */
   (*dd)->debug_level      = debug_level;  /* [0,3], default 0          */
   (*dd)->gid_length       = num_gid;      /* saved input Num_GID       */
   (*dd)->lid_length       = num_lid;      /* saved input Num_LID       */
   (*dd)->table_length     = size;         /* # of linked list heads    */
   (*dd)->user_data_length = user_length;  /* optional user data length */
   (*dd)->hash             = Zoltan_DD_Hash2;/* default hash algorithm   */
   (*dd)->cleanup          = NULL;         /* user registered cleanup   */
   (*dd)->max_id_length    = (num_gid > num_lid) ? num_gid : num_lid;

   /* frequently used dynamic allocation computed sizes */
   size = (num_gid + num_lid + user_length) * sizeof(ZOLTAN_ID_TYPE);
   (*dd)->node_size       = size + sizeof(DD_Node);
   (*dd)->update_msg_size = size + sizeof(DD_Update_Msg);

   size = num_gid * sizeof(ZOLTAN_ID_TYPE);
   (*dd)->remove_msg_size = size + sizeof(DD_Remove_Msg);

   size = (user_length + (*dd)->max_id_length) * sizeof(ZOLTAN_ID_TYPE);
   (*dd)->find_msg_size   = size + sizeof(DD_Find_Msg);

   /* force alignment */
   (*dd)->update_msg_size = Zoltan_Align((*dd)->update_msg_size);
   (*dd)->remove_msg_size = Zoltan_Align((*dd)->remove_msg_size);
   (*dd)->find_msg_size   = Zoltan_Align((*dd)->find_msg_size);

   /* duplicate MPI comm to prevent future comm changes from disrupting  */
   /* directory communications & save the associated comm size & rank    */
   if (MPI_Comm_dup  (comm,  &((*dd)->comm))    != MPI_SUCCESS
    || MPI_Comm_size (comm,  &((*dd)->nproc))   != MPI_SUCCESS
    || MPI_Comm_rank (comm,  &((*dd)->my_proc)) != MPI_SUCCESS)  {
         ZOLTAN_PRINT_ERROR (my_proc, yo, "MPI Problem, unable to continue");
         return ZOLTAN_FATAL;
   }

   if (debug_level > 4)
      ZOLTAN_TRACE_OUT (my_proc, yo, NULL);
   return ZOLTAN_OK;
   }
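Because Zoltan_DD_Create duplicates comm into (*dd)->comm, the matching destructor has to release that duplicate with MPI_Comm_free before the directory structure itself is freed. The following is only a sketch of the teardown shape implied by the allocations above, not the actual Zoltan_DD_Destroy:

/* Sketch: release what Zoltan_DD_Create acquired.  Assumes the hash-table
   nodes have already been freed elsewhere, and that ZOLTAN_FREE pairs with
   the ZOLTAN_MALLOC used above (an assumption, not verified here). */
void DD_Destroy_sketch (Zoltan_DD_Directory **dd)
{
   if (dd == NULL || *dd == NULL) return;
   MPI_Comm_free (&((*dd)->comm));   /* free the dup'ed communicator */
   ZOLTAN_FREE (dd);                 /* frees the directory + hash table block */
}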
Example #26
int test_communicators(void)
{
    MPI_Comm dup_comm_world, d2;
    int world_rank, world_size, key_1;
    int err, errs = 0;
    MPI_Aint value;

    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
#ifdef DEBUG
    if (world_rank == 0) {
        printf("*** Attribute copy/delete return codes ***\n");
    }
#endif

    MPI_Comm_dup(MPI_COMM_WORLD, &dup_comm_world);
    MPI_Barrier(dup_comm_world);

    MPI_Errhandler_set(dup_comm_world, MPI_ERRORS_RETURN);

    value = -11;
    if ((err = MPI_Comm_create_keyval(copybomb_fn, deletebomb_fn, &key_1, &value)))
        abort_msg("Keyval_create", err);

    err = MPI_Comm_set_attr(dup_comm_world, key_1, (void *) (MPI_Aint) world_rank);
    if (err) {
        errs++;
        printf("Error with first put\n");
    }

    err = MPI_Comm_set_attr(dup_comm_world, key_1, (void *) (MPI_Aint) (2 * world_rank));
    if (err == MPI_SUCCESS) {
        errs++;
        printf("delete function return code was MPI_SUCCESS in put\n");
    }

    /* Because the attribute delete function should fail, the attribute
     * should *not be removed* */
    err = MPI_Comm_delete_attr(dup_comm_world, key_1);
    if (err == MPI_SUCCESS) {
        errs++;
        printf("delete function return code was MPI_SUCCESS in delete\n");
    }

    err = MPI_Comm_dup(dup_comm_world, &d2);
    if (err == MPI_SUCCESS) {
        errs++;
        printf("copy function return code was MPI_SUCCESS in dup\n");
    }
    if (err != MPI_ERR_OTHER) {
        int lerrclass;
        MPI_Error_class(err, &lerrclass);
        if (lerrclass != MPI_ERR_OTHER) {
            errs++;
            printf("dup did not return an error code of class ERR_OTHER; ");
            printf("err = %d, class = %d\n", err, lerrclass);
        }
    }
#ifndef USE_STRICT_MPI
    /* Another interpretation is to leave d2 unchanged on error */
    if (err && d2 != MPI_COMM_NULL) {
        errs++;
        printf("dup did not return MPI_COMM_NULL on error\n");
    }
#endif

    delete_flag = 1;
    MPI_Comm_free(&dup_comm_world);

    MPI_Comm_free_keyval(&key_1);

    return errs;
}
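The test above relies on copybomb_fn and deletebomb_fn, which are not shown. From the checks it performs, one plausible shape is a copy callback that always returns an error (so MPI_Comm_dup must fail) and a delete callback that fails unless a global delete_flag has been set. A hedged sketch, using the standard MPI attribute-callback signatures; the behavior is inferred from the test, not copied from the real test suite:

/* Sketch of the attribute callbacks the test above appears to assume. */
#include <mpi.h>

static int delete_flag = 0;   /* the test sets this just before MPI_Comm_free */

static int copybomb_fn(MPI_Comm oldcomm, int keyval, void *extra_state,
                       void *attribute_val_in, void *attribute_val_out, int *flag)
{
    /* Refuse to copy: any MPI_Comm_dup of a communicator carrying this
       attribute must return an error; *flag marks the attribute as not copied. */
    *flag = 0;
    return MPI_ERR_OTHER;
}

static int deletebomb_fn(MPI_Comm comm, int keyval, void *attribute_val, void *extra_state)
{
    /* Fail unless delete_flag is set, so the earlier set/delete calls in the
       test observe an error return from the delete callback. */
    return delete_flag ? MPI_SUCCESS : MPI_ERR_OTHER;
}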
Example #27
int main(int argc, char *argv[])
{
    int np=1, rank=0;
    int splitrank, splitsize;
    int rc = 0;
    nssi_service injection_svc;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &np);

    MPI_Barrier(MPI_COMM_WORLD);

    Teuchos::oblackholestream blackhole;
    std::ostream &out = ( rank == 0 ? std::cout : blackhole );

    struct injection_args args;

    const int num_io_methods = 2;
    const int io_method_vals[] = {
            INJECTION_EMPTY_REQUEST_SYNC, INJECTION_EMPTY_REQUEST_ASYNC};
    const char * io_method_names[] = {
            "empty-request-sync", "empty-request-async"};

    const int num_nssi_transports = 4;
    const int nssi_transport_vals[] = {
            NSSI_RPC_PTL,
            NSSI_RPC_IB,
            NSSI_RPC_GEMINI,
            NSSI_RPC_MPI};
    const char * nssi_transport_names[] = {
            "ptl",
            "ib",
            "gni",
            "mpi"
    };


    // Initialize arguments
    args.transport=NSSI_DEFAULT_TRANSPORT;
    args.delay = 1;
    args.io_method = INJECTION_EMPTY_REQUEST_SYNC;
    args.debug_level = LOG_WARN;
    args.num_trials = 1;
    args.num_reqs = 1;
    args.result_file_mode = "a";
    args.result_file = "";
    args.url_file = "";
    args.logfile = "";
    args.client_flag = true;
    args.server_flag = true;
    args.timeout = 500;
    args.num_retries = 5;
    args.server_url = "";

    bool success = true;

    /**
     * We make extensive use of the \ref Teuchos::CommandLineProcessor for command-line
     * options to control the behavior of the test code.   To evaluate performance,
     * the "num-trials", "num-reqs", and "len" options control the amount of data transferred
     * between client and server.  The "io-method" selects the type of data transfer.  The
     * server-url specifies the URL of the server.  If running as a server, the server-url
     * provides a recommended URL when initializing the network transport.
     */
    try {

        //out << Teuchos::Teuchos_Version() << std::endl << std::endl;

        // Creating an empty command line processor looks like:
        Teuchos::CommandLineProcessor parser;
        parser.setDocString(
                "This example program demonstrates a simple data-transfer service "
                "built using the NEtwork Scalable Service Interface (Nessie)."
        );

        /* To set an option, it must be given a name and default value.  Additionally,
           each option can be given a help std::string.  Although it is not necessary, a help
           std::string aids a user's comprehension of the acceptable command line arguments.
           Some examples of setting command line options are:
         */

        parser.setOption("delay", &args.delay, "time(s) for client to wait for server to start" );
        parser.setOption("timeout", &args.timeout, "time(ms) to wait for server to respond" );
        parser.setOption("server", "no-server", &args.server_flag, "Run the server" );
        parser.setOption("client", "no-client", &args.client_flag, "Run the client");
        parser.setOption("debug",(int*)(&args.debug_level), "Debug level");
        parser.setOption("logfile", &args.logfile, "log file");
        parser.setOption("num-trials", &args.num_trials, "Number of trials (experiments)");
        parser.setOption("num-reqs", &args.num_reqs, "Number of reqs/trial");
        parser.setOption("result-file", &args.result_file, "Where to store results");
        parser.setOption("result-file-mode", &args.result_file_mode, "Write mode for the result");
        parser.setOption("server-url", &args.server_url, "URL client uses to find the server");
        parser.setOption("server-url-file", &args.url_file, "File that has URL client uses to find server");

        // Set an enumeration command line option for the io_method

        parser.setOption("io-method", &args.io_method, num_io_methods, io_method_vals, io_method_names,
                "I/O Methods for the example: \n"
                "\t\t\tempty-request-sync : Send an empty request - synchronous\n"
                "\t\t\tempty-request-async: Send an empty request - asynchronous");

        // Set an enumeration command line option for the io_method
        parser.setOption("transport", &args.transport, num_nssi_transports, nssi_transport_vals, nssi_transport_names,
                "NSSI transports (not all are available on every platform): \n"
                "\t\t\tportals : Cray or Schutt\n"
                "\t\t\tinfiniband : libibverbs\n"
                "\t\t\tgemini : Cray\n"
                "\t\t\tmpi : isend/irecv implementation\n"
                );



        /* There are also two methods that control the behavior of the
           command line processor.  First, for the command line processor to
           allow an unrecognized command line option to be ignored (and
           only have a warning printed), use:
         */
        parser.recogniseAllOptions(true);

        /* Second, by default, if the parser finds a command line option it
           doesn't recognize or finds the --help option, it will throw an
           std::exception.  If you want to prevent a command line processor from
           throwing an std::exception (which is important in this program since
           we don't have a try/catch around this) when it encounters an
           unrecognized option or help is printed, use:
         */
        parser.throwExceptions(false);

        /* We now parse the command line where argc and argv are passed to
           the parse method.  Note that since we have turned off std::exception
           throwing above we had better grab the return argument so that
           we can see what happened and act accordingly.
         */
        Teuchos::CommandLineProcessor::EParseCommandLineReturn parseReturn= parser.parse( argc, argv );

        if( parseReturn == Teuchos::CommandLineProcessor::PARSE_HELP_PRINTED ) {
            return 0;
        }

        if( parseReturn != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL   ) {
            return 1; // Error!

        }

        // Here is where you would use these command line arguments but for this example program
        // we will just print the help message with the new values of the command-line arguments.
        //if (rank == 0)
        //    out << "\nPrinting help message with new values of command-line arguments ...\n\n";

        //parser.printHelpMessage(argv[0],out);

    }

    TEUCHOS_STANDARD_CATCH_STATEMENTS(true,std::cerr,success);

    log_debug(args.debug_level, "%d: Finished processing arguments", rank);


    if (!success) {
        MPI_Abort(MPI_COMM_WORLD, 1);
    }


    if (!args.server_flag && args.client_flag) {
        /* initialize logger */
        if (args.logfile.empty()) {
            logger_init(args.debug_level, NULL);
        } else {
            char fn[1024];
            sprintf(fn, "%s.client.%03d.log", args.logfile.c_str(), rank);
            logger_init(args.debug_level, fn);
        }
    } else if (args.server_flag && !args.client_flag) {
        /* initialize logger */
        if (args.logfile.empty()) {
            logger_init(args.debug_level, NULL);
        } else {
            char fn[1024];
            sprintf(fn, "%s.server.%03d.log", args.logfile.c_str(), rank);
            logger_init(args.debug_level, fn);
        }
    } else if (args.server_flag && args.client_flag) {
        /* initialize logger */
        if (args.logfile.empty()) {
            logger_init(args.debug_level, NULL);
        } else {
            char fn[1024];
            sprintf(fn, "%s.%03d.log", args.logfile.c_str(), rank);
            logger_init(args.debug_level, fn);
        }
    }

    log_level debug_level = args.debug_level;

    // Communicator used for both client and server (may split if using client and server)
    MPI_Comm comm;

    log_debug(debug_level, "%d: Starting injection-service test", rank);

    /**
     * Since this test can be run as a server, client, or both, we need to play some fancy
     * MPI games to get the communicators working correctly.  If we're executing as both
     * a client and a server, we split the communicator so that the client thinks it's
     * running by itself.
     */
    if (args.client_flag && args.server_flag) {
        if (np < 2) {
            log_error(debug_level, "Must use at least 2 MPI processes for client and server mode");
            MPI_Abort(MPI_COMM_WORLD, -1);
        }

        // Split the communicators. Processors with color=0 are servers.

        int color = (rank == 0) ? 0 : 1; // only one server
        MPI_Comm_split(MPI_COMM_WORLD, color, rank, &comm);

        MPI_Comm_rank(comm, &splitrank);
        MPI_Comm_size(comm, &splitsize);

        //    std::cout << "rank=" << rank << "/" << np << ", color=" << color <<
        //            ", new_rank=" << newrank << "/" << newsize << std::endl << std::endl;
        //
        //    std::cout << "my_url=" << my_url <<  ", server_url=" << args.server_url << std::endl;
    }
    else {
        MPI_Comm_dup(MPI_COMM_WORLD, &comm);
    }

    /**
     * Initialize the Nessie interface by specifying a transport, encoding scheme, and a
     * recommended URL.  \ref NSSI_DEFAULT_TRANSPORT is usually the best choice, since it
     * is often the case that only one type of transport exists on a particular platform.
     * Currently supported transports are \ref NSSI_RPC_PTL, \ref NSSI_RPC_GNI, and
     * \ref NSSI_RPC_IB.  We only support one type of encoding scheme so NSSI_DEFAULT_ENCODE
     * should always be used for the second argument.   The URL can be specified (as we did for
     * the server), or NULL (as we did for the client).  This is a recommended value.  Use the
     * \ref nssi_get_url function to find the actual value.
     */
    if (args.server_flag && !args.server_url.empty()) {
        // use the server URL as suggested URL
        nssi_rpc_init((nssi_rpc_transport)args.transport, NSSI_DEFAULT_ENCODE, args.server_url.c_str());
    }
    else {
        nssi_rpc_init((nssi_rpc_transport)args.transport, NSSI_DEFAULT_ENCODE, NULL);
    }

    // Get the Server URL
    std::string my_url(NSSI_URL_LEN, '\0');
    nssi_get_url((nssi_rpc_transport)args.transport, &my_url[0], NSSI_URL_LEN);

    // Broadcast the server URL to all the clients
    args.server_url.resize(NSSI_URL_LEN, '\0');
    if (args.server_flag && args.client_flag) {
        args.server_url = my_url;
        MPI_Bcast(&args.server_url[0], args.server_url.size(), MPI_CHAR, 0, MPI_COMM_WORLD);
    }

    else if (!args.server_flag && args.client_flag){
        if (args.server_url.empty()) {

            // check to see if we're supposed to get the URL from a file
            if (!args.url_file.empty()) {
                // Fetch the server URL from a file
                sleep(1);
                log_debug(debug_level, "Reading from file %s", args.url_file.c_str());
                std::ifstream urlfile (args.url_file.c_str());
                if (urlfile.is_open()) {
                    if (urlfile.good())
                        getline(urlfile, args.server_url);
                }
                else {
                    log_error(debug_level, "Failed to open server_url_file=%s", args.url_file.c_str());
                    exit(1);
                }
                urlfile.close();
                log_debug(debug_level, "URL = %s", args.server_url.c_str());
            }
            else {
                log_error(debug_level, "Need to set --server-url=[ADDR] or --server-url-file=[PATH]");
            }
        }
    }

    else if (args.server_flag && !args.client_flag) {
        args.server_url = my_url;

        // If the url_file value is set, write the url to a file
        if (!args.url_file.empty()) {
            std::ofstream urlfile (args.url_file.c_str());
            if (urlfile.is_open()) {
                urlfile << args.server_url.c_str() << std::endl;
            }
            urlfile.close();
            log_debug(debug_level, "Wrote url to file %s", args.url_file.c_str());
        }
    }



    // Set the debug level for the injection service.
    injection_debug_level = args.debug_level;

    // Print the arguments after they've all been set.
    args.io_method_name = io_method_names[args.io_method];
    args.transport_name = nssi_transport_names[args.transport];
    print_args(out, args, "%");


    //------------------------------------------------------------------------------
    /** If we're running this job with a server, the server always executes on node 0.
     *  In this example, the server is a single process.
     */
    if (args.server_flag && (rank == 0)) {
        rc = injection_server_main(args, comm);
        log_debug(debug_level, "Server is finished");
    }

    // ------------------------------------------------------------------------------
     /**  The parallel client will execute this branch.  The root node, node 0, of the client
      *   connects with the server, using the \ref nssi_get_service function.  Then the root
      *   broadcasts the service description to the other clients before starting the main
      *   loop of the client code by calling \ref injection_client_main.
      */
    else {
        int i;
        int client_rank;

        // get rank within the client communicator
        MPI_Comm_rank(comm, &client_rank);

        nssi_init((nssi_rpc_transport)args.transport);

        // Only one process needs to connect to the service
        // TODO: Make get_service a collective call (some transports do not need a connection)
        //if (client_rank == 0) {
        {

            sleep(args.delay);  // give server time to get started

            // connect to remote server
            for (i=0; i < args.num_retries; i++) {
                log_debug(debug_level, "Try to connect to server: attempt #%d", i);
                rc=nssi_get_service((nssi_rpc_transport)args.transport, args.server_url.c_str(), args.timeout, &injection_svc);
                if (rc == NSSI_OK)
                    break;
                else if (rc != NSSI_ETIMEDOUT) {
                    log_error(injection_debug_level, "could not get svc description: %s",
                            nssi_err_str(rc));
                    break;
                }
            }
        }

        //MPI_Bcast(&rc, 1, MPI_INT, 0, comm);

        if (rc == NSSI_OK) {
            if (client_rank == 0) log_debug(debug_level, "Connected to service on attempt %d\n", i);

            // Broadcast the service description to the other clients
            //log_debug(injection_debug_level, "Bcasting svc to other clients");
            //MPI_Bcast(&injection_svc, sizeof(nssi_service), MPI_BYTE, 0, comm);

            log_debug(debug_level, "Starting client main");
            // Start the client code
            injection_client_main(args, injection_svc, comm);


            MPI_Barrier(comm);

            // Tell one of the clients to kill the server
            if (client_rank == 0) {
                log_debug(debug_level, "%d: Halting injection service", rank);
                rc = nssi_kill(&injection_svc, 0, 5000);
            }
        }

        else {
            if (client_rank == 0)
                log_error(debug_level, "Failed to connect to service after %d attempts: ABORTING", i);
            success = false;
            //MPI_Abort(MPI_COMM_WORLD, -1);
        }

        nssi_fini((nssi_rpc_transport)args.transport);

    }

    log_debug(debug_level, "%d: clean up nssi", rank);
    MPI_Barrier(MPI_COMM_WORLD);

    // Clean up nssi_rpc
    rc = nssi_rpc_fini((nssi_rpc_transport)args.transport);
    if (rc != NSSI_OK)
        log_error(debug_level, "Error in nssi_rpc_fini");

    log_debug(debug_level, "%d: MPI_Finalize()", rank);
    MPI_Finalize();

    logger_fini();

    if(success && (rc == NSSI_OK))
      out << "\nEnd Result: TEST PASSED" << std::endl;
    else
        out << "\nEnd Result: TEST FAILED" << std::endl;

    return ((success && (rc==NSSI_OK)) ? 0 : 1 );
}
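Stripped of the Nessie specifics, the communicator handling in the example above boils down to: if the job hosts both a server and clients, split MPI_COMM_WORLD by color so the clients get a communicator of their own; otherwise just duplicate the world. A condensed sketch of that pattern (illustrative names only):

/* Sketch: carve a client communicator out of MPI_COMM_WORLD when one rank
   doubles as the server, mirroring the color logic in the example above. */
#include <mpi.h>

static int setup_role_comm(int run_server, int run_client, MPI_Comm *comm)
{
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (run_server && run_client) {
        int color = (rank == 0) ? 0 : 1;          /* rank 0 is the lone server */
        return MPI_Comm_split(MPI_COMM_WORLD, color, rank, comm);
    }
    /* single-role job: everyone shares one duplicate of the world */
    return MPI_Comm_dup(MPI_COMM_WORLD, comm);
}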
Example #28
int main(int argc, char* argv[])
{
//
	//MPI_Comm    comm;
	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
	MPI_Comm_dup(MPI_COMM_WORLD, &comm);

	int p,t,n; // processor_count, thread_count, element_count
	int *input;
	int * inputList = NULL;   // allocated/assigned on the root once n is known
	
	int * localList;
	// Time calculation - average of wtime() execution
	double startTime,endTime;
	double wtime_overhead = 0.0;
    for (int i = 0; i < 200; i++) {
        startTime = MPI_Wtime();
        endTime = MPI_Wtime();
        wtime_overhead = wtime_overhead + (endTime - startTime);
    }
    wtime_overhead = wtime_overhead/200.0;
	//
	//printf("i m started\n");
	
	if(my_rank == 0)
	{
		input = ReceiveInput(&t,&n);
		inputList = input;
		p = size;		//uncomment then
		printf("Value of p n t %d %d %d \n",p,n,t);
		
		//Start timer..
		startTime = MPI_Wtime();
		
		//Here we received all inputList array, processors and threadCount.
		/*for(int i=0;i<n;i++)
		{
			printf("%d ",*(inputList+i));
		}*/
		for(int i=1;i<p;i++)
		{
			MPI_Send(&n,1,MPI_INT,i,2,comm);
		}
		localList = (int *) malloc((n/p) * sizeof(int));
		for(int i=0;i<n/p;i++)
		{
			*(localList + i) = *(inputList + i);
		}
		
		distribute(inputList,p,t,n);
	}
	else
	{
		//receive n
		MPI_Recv(&n, 1, MPI_INT, 0, 2, comm,&status);
		p=size;
		
		localList = (int *) malloc((n/p) * sizeof(int));
		//recieve localList for all slave nodes.
		MPI_Recv(localList, (n/p), MPI_INT, 0, 1, comm,&status);
	}
	//Display received local copies..
	sort(localList,n/p);
	//MPI_Barrier(comm);
	//printf("\nlocalcopy sorted at %d \n",my_rank);
	//for(int i=0;i<(n/p);i++)
	//{
	//		printf("rank %d %d ",my_rank,*(localList+i));
	//}
	//printf("\n");
	//Find spliters at each node.
	bool isPsplitter = true;
	int * splitterArray = localSplitter(localList,(n/p),p,isPsplitter);  // result array is allocated by localSplitter
	//MPI_Barrier(comm);
	//printf("\nSplitters for %d are : \n",my_rank);
	//for(int i=0;i<p;i++)
	//{
	//		printf("rank %d %d ",my_rank,*(splitterArray+i));
	//}
	//All nodes should send splitterArray to root 0 for merging.
	int * sampleArray = (int *) malloc(p*p*sizeof(int));
MPI_Barrier(comm);	
	MPI_Allgather(splitterArray,p,MPI_INT,sampleArray,p,MPI_INT,comm);
MPI_Barrier(comm);
	//printf("\nSampleArray at %d :\n",my_rank);
	//for(int i=0;i<(p*p);i++)
	//{
	//	printf("%d ",*(sampleArray+i));
	//}
	//printf("\n");
	//Sort sample array.
	sort(sampleArray,(p*p));
	//Find final (p-1) splitter array for entire range of numbers.
	isPsplitter = false;
	int * FinalsplitterArray = localSplitter(sampleArray,(p*p),p,isPsplitter);  // (p-1) final splitters, allocated by localSplitter
	//MPI_Barrier(comm);
	//printf("FinalsplitterArray at %d :\n",my_rank);
	//for(int i=0;i<(p-1);i++)
	//{
	//	printf("%d ",*(FinalsplitterArray+i));
	//}
	//printf("\n");
	//
	//**********************************************************
	//This will prepare scounts, sdispls, rcounts, rdispls for each process..
	int * scounts = (int *) malloc(p*sizeof(int));
	int * sdispls = (int *) malloc(p*sizeof(int));
	//Initialization
	for(int i=0;i<p;i++)
	{
		*(scounts + i) = 0;
		*(sdispls + i) = 0;
	}
	
	allToAllCalculation(localList,FinalsplitterArray,scounts,sdispls,n/p);
	//creating rcounts & rdispls with max n length
	int * rcounts = (int *)malloc(p*sizeof(int));
	int * rdispls = (int *)malloc(p*sizeof(int));
	//Initialization
	for(int i=0;i<p;i++)
	{
		*(rcounts + i) = 0;
		*(rdispls + i) = 0;
	}
	//This will move scount from all to all processed and each process will 
	// get rcounts updated.
MPI_Barrier(comm);	
	MPI_Alltoall(scounts, 1, MPI_INT, rcounts, 1, MPI_INT, comm);
MPI_Barrier(comm);
	//printf("\n");
	//for(int i=0;i<p;i++)
	//{
	//	printf("rcounts rank %d :  %d",my_rank,rcounts[i]);
	//}
	//printf("\n");
	//Generate rdispl list.
	rdispls[0] = 0;
	for(int i=1;i<p;i++)
	{
		//if(rcounts[i] != 0)
		{
			rdispls[i] = rdispls[i-1] + rcounts[i-1];
		}	
	}
	//for(int i=0;i<p;i++)
	//{
	//	printf("rdisp rank %d : %d ",my_rank,rdispls[i]);
	//}
	//printf("\n");
	//***********************************************************************
	//This will do MPI_Alltoallv communication with vectors
	int bucketSize = 0;
	for(int i=0;i<p;i++)
	{
		bucketSize += rcounts[i];
	}
	//printf("bucketSize rank %d : %d \n",my_rank,bucketSize);
	int * localBucket = (int *)malloc(bucketSize*sizeof(int));
MPI_Barrier(comm);	
	MPI_Alltoallv(localList,scounts,sdispls,MPI_INT,localBucket,rcounts,rdispls,MPI_INT,comm);
MPI_Barrier(comm);
	//printf("\n");
	//for(int i=0;i<bucketSize;i++)
	//{
	//	printf("bucket @ rank %d :  %d , ",my_rank,localBucket[i]);
	//}
	//printf("\n");
	//Sort bucket on every node. (Merge algorithm is used)
	int * sortedBucket = (int *) malloc(bucketSize * sizeof(int));
	merge(localBucket,rdispls,bucketSize,p,sortedBucket);
	//for(int i=0;i<bucketSize;i++)
	//{
	//	printf("sortedBucket %d: %d ",my_rank,sortedBucket[i]);
	//}
	//printf("\n");
	//***********************************************************************
	//Final result formation
	//send sortedBucket to root node 0.
	int * finalRcvCounts = (int *) malloc(p * sizeof(int));
	int * finalRcvDispls = (int *) malloc(p * sizeof(int));
	int * finalResults   = (int *) malloc(n * sizeof(int));
	//Gather counts of every process bucket.
MPI_Barrier(comm);	
	MPI_Gather(&bucketSize,1,MPI_INT,finalRcvCounts,1,MPI_INT,0,comm);
MPI_Barrier(comm);	
	
	if(my_rank == 0)
	{
		/*for(int i=0;i<p;i++)
		{
			printf("finalRcvCounts %d ",finalRcvCounts[i]);
		}
		printf("\n");
		finalRcvDispls[0] = 0;
		for(int i=1;i<p;i++)
		{
			if(finalRcvDispls[i] != 0)
			{
				finalRcvDispls[i] = finalRcvDispls[i-1] + finalRcvCounts[i-1];
			}	
		}
		for(int i=0;i<p;i++)
		{
			printf("finalRcvDispls %d ",finalRcvDispls[i]);
		}
		printf("\n");*/
		prepareGatherv(finalRcvCounts,finalRcvDispls,p);
	}
	//receive all to one gather vectors at root node 0.
MPI_Barrier(comm);	
	MPI_Gatherv(sortedBucket,bucketSize,MPI_INT,finalResults,finalRcvCounts,finalRcvDispls,MPI_INT,0,comm);
MPI_Barrier(comm);	
	
	if(my_rank == 0)
	{
		//Stop timer.
		endTime = MPI_Wtime();
		
		printf("\nTHE SORTED OUTPUT IS - \n");
		for(int i=0;i<n;i++)
		{
			printf(" %d ",finalResults[i]);
		}
		printf("\n");
		printf("Approx. Time for execution : %f ",(endTime - startTime - wtime_overhead));
	}
	
	MPI_Finalize();
	return 0;
}
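prepareGatherv is called above but not shown; from the commented-out code at rank 0 it evidently turns the gathered bucket sizes into displacements for MPI_Gatherv. A hedged sketch of what such a helper looks like (the names come from the call site; the body is an assumption, namely an exclusive prefix sum of the counts):

/* Sketch: build MPI_Gatherv displacements as the exclusive prefix sum of the
   receive counts, matching the call prepareGatherv(finalRcvCounts, finalRcvDispls, p). */
static void prepareGatherv(int *finalRcvCounts, int *finalRcvDispls, int p)
{
    finalRcvDispls[0] = 0;
    for (int i = 1; i < p; i++)
        finalRcvDispls[i] = finalRcvDispls[i-1] + finalRcvCounts[i-1];
}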
Example #29
int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank;
    int size;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    MPI_Comm dup;

    MPI_Comm_dup(MPI_COMM_WORLD, &dup);

    int dest_rank = (rank + 1) % size;
    int src_rank = (rank + size - 1) % size;

    WARNING("%d dest %d src %d", rank, dest_rank, src_rank);
    //Send a message with the same proc/tag on each comm at the same time.
    // Give them different data.
    MPI_Request sreqs[2];

    for(int i = 0; i < 10; i++) {
        int sbuf[2] = {17, 29};
        int rbuf[2] = {-1, -1};

        MPI_Isend(&sbuf[0], 1, MPI_INT, dest_rank, 15, MPI_COMM_WORLD, &sreqs[0]);
        MPI_Isend(&sbuf[1], 1, MPI_INT, dest_rank, 15, dup, &sreqs[1]);

        MPI_Recv(&rbuf[0], 1, MPI_INT, src_rank, 15, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Recv(&rbuf[1], 1, MPI_INT, src_rank, 15, dup, MPI_STATUS_IGNORE);

        MPI_Waitall(2, sreqs, MPI_STATUSES_IGNORE);

        if(rbuf[0] != 17) {
            ERROR("rbuf[0] = %d should be 17", rbuf[0]);
        }

        if(rbuf[1] != 29) {
            ERROR("rbuf[1] = %d should be 17", rbuf[0]);
        }
    }

    //Send in a different order from receiving.
    for(int i = 0; i < 10; i++) {
        int sbuf[2] = {17, 29};
        int rbuf[2] = {-1, -1};

        MPI_Isend(&sbuf[1], 1, MPI_INT, dest_rank, 15, dup, &sreqs[1]);
        MPI_Isend(&sbuf[0], 1, MPI_INT, dest_rank, 15, MPI_COMM_WORLD, &sreqs[0]);

        MPI_Recv(&rbuf[0], 1, MPI_INT, src_rank, 15, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Recv(&rbuf[1], 1, MPI_INT, src_rank, 15, dup, MPI_STATUS_IGNORE);

        MPI_Waitall(2, sreqs, MPI_STATUSES_IGNORE);

        if(rbuf[0] != 17) {
            ERROR("rbuf[0] = %d should be 17", rbuf[0]);
        }

        if(rbuf[1] != 29) {
            ERROR("rbuf[1] = %d should be 17", rbuf[0]);
        }
    }

    MPI_Comm_free(&dup);

    //Make sure COMM_WORLD still works.
    for(int i = 0; i < 10; i++) {
        int sbuf[2] = {17, 29};
        int rbuf[2] = {-1, -1};

        MPI_Isend(&sbuf[0], 1, MPI_INT, dest_rank, 14, MPI_COMM_WORLD, &sreqs[0]);
        MPI_Isend(&sbuf[1], 1, MPI_INT, dest_rank, 15, MPI_COMM_WORLD, &sreqs[1]);

        MPI_Recv(&rbuf[0], 1, MPI_INT, src_rank, 14, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Recv(&rbuf[1], 1, MPI_INT, src_rank, 15, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

        MPI_Waitall(2, sreqs, MPI_STATUSES_IGNORE);

        if(rbuf[0] != 17) {
            ERROR("rbuf[0] = %d should be 17", rbuf[0]);
        }

        if(rbuf[1] != 29) {
            ERROR("rbuf[1] = %d should be 17", rbuf[0]);
        }
    }

    //if(rank == 0) {
        WARNING("PASS!");
    //}
    MPI_Finalize();
    return 0;
}
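Example #29 works because MPI_Comm_dup yields a congruent but distinct communication context: the duplicate has the same group and ranks as MPI_COMM_WORLD, yet messages posted on one can never match receives posted on the other. A quick check of that property:

/* Sketch: verify that a dup is congruent (same group and order) but not
   identical (different context) to the original communicator. */
#include <assert.h>
#include <mpi.h>

static void check_dup_is_congruent(void)
{
    MPI_Comm dup;
    int cmp;

    MPI_Comm_dup(MPI_COMM_WORLD, &dup);
    MPI_Comm_compare(MPI_COMM_WORLD, dup, &cmp);
    assert(cmp == MPI_CONGRUENT);   /* same members and order, separate context */
    MPI_Comm_free(&dup);
}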
Example #30
int main(int argc, char *argv[])
{
    int provided, wrank, wsize, nmsg, i, tag;
    int *(buf[MAX_TARGETS]), bufsize[MAX_TARGETS];
    MPI_Request r[MAX_TARGETS];
    MPI_Comm commDup, commEven;

    MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
    MPI_Comm_size(MPI_COMM_WORLD, &wsize);

    if (wsize < 4) {
        fprintf(stderr, "This test requires at least 4 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Create several communicators */
    MPI_Comm_dup(MPI_COMM_WORLD, &commDup);
    MPI_Comm_set_name(commDup, "User dup of comm world");

    MPI_Comm_split(MPI_COMM_WORLD, wrank & 0x1, wrank, &commEven);
    if (wrank & 0x1)
        MPI_Comm_free(&commEven);
    else
        MPI_Comm_set_name(commEven, "User split to even ranks");

    /* Create a collection of pending sends and receives
     * We use tags on the sends and receives (when ANY_TAG isn't used)
     * to provide an easy way to check that the proper requests are present.
     * TAG values use fields, in decimal (for easy reading):
     * 0-99: send/recv type:
     * 0 - other
     * 1 - irecv
     * 2 - isend
     * 3 - issend
     * 4 - ibsend
     * 5 - irsend
     * 6 - persistent recv
     * 7 - persistent send
     * 8 - persistent ssend
     * 9 - persistent rsend
     * 10 - persistent bsend
     * 100-999: destination (for send) or source, if receive.  999 = any-source
     * (rank is value/100)
     * 1000-2G: other values
     */
    /* Create the send/receive buffers */
    nmsg = 10;
    for (i = 0; i < nmsg; i++) {
        bufsize[i] = i;
        if (i) {
            buf[i] = (int *) calloc(bufsize[i], sizeof(int));
            if (!buf[i]) {
                fprintf(stderr, "Unable to allocate %d words\n", bufsize[i]);
                MPI_Abort(MPI_COMM_WORLD, 2);
            }
        } else
            buf[i] = 0;
    }

    /* Partial implementation */
    if (wrank == 0) {
        nmsg = 0;
        tag = 2 + 1 * 100;
        MPI_Isend(buf[0], bufsize[0], MPI_INT, 1, tag, MPI_COMM_WORLD, &r[nmsg++]);
        tag = 3 + 2 * 100;
        MPI_Issend(buf[1], bufsize[1], MPI_INT, 2, tag, MPI_COMM_WORLD, &r[nmsg++]);
        tag = 1 + 3 * 100;
        MPI_Irecv(buf[2], bufsize[2], MPI_INT, 3, tag, MPI_COMM_WORLD, &r[nmsg++]);
    } else if (wrank == 1) {
    } else if (wrank == 2) {
    } else if (wrank == 3) {
    }

    /* provide a convenient place to wait */
    MPI_Barrier(MPI_COMM_WORLD);
    printf("Barrier 1 finished\n");

    /* Match up (or cancel) the requests */
    if (wrank == 0) {
        MPI_Waitall(nmsg, r, MPI_STATUSES_IGNORE);
    } else if (wrank == 1) {
        tag = 2 + 1 * 100;
        MPI_Recv(buf[0], bufsize[0], MPI_INT, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    } else if (wrank == 2) {
        tag = 3 + 2 * 100;
        MPI_Recv(buf[1], bufsize[1], MPI_INT, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    } else if (wrank == 3) {
        tag = 1 + 3 * 100;
        MPI_Send(buf[2], bufsize[2], MPI_INT, 0, tag, MPI_COMM_WORLD);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    printf("Barrier 2 finished\n");

    MPI_Comm_free(&commDup);
    if (commEven != MPI_COMM_NULL)
        MPI_Comm_free(&commEven);

    MPI_Finalize();
    return 0;
}
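The tag scheme described in the long comment of Example #30 (send/recv type in the low two decimal digits, peer rank times 100) can be restated as two tiny helpers; this is just that comment expressed as code, with hypothetical names:

/* Sketch of the tag convention documented in Example #30:
   tag = type + 100 * peer_rank, type in 0..99, peer rank = tag / 100. */
static int make_tag(int type, int peer_rank) { return type + 100 * peer_rank; }
static int tag_type(int tag)                 { return tag % 100; }
static int tag_peer(int tag)                 { return tag / 100; }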