Example #1
/**
 * @brief Send a hint to the MPI-IO library.
 *
 * @param iosysid the ID of the IO system.
 * @param hint the name of the MPI-IO hint to set.
 * @param hintval the value of the hint.
 * @return PIO_NOERR on success, PIO_EBADID if iosysid is not valid.
 */
int PIOc_set_hint(const int iosysid, char hint[], const char hintval[])
{
  iosystem_desc_t *ios;

  ios = pio_get_iosystem_from_id(iosysid);
  if(ios == NULL)
    return PIO_EBADID;
  if(ios->ioproc)
    CheckMPIReturn( MPI_Info_set(ios->info, hint, hintval), __FILE__,__LINE__);

  return PIO_NOERR;

}
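A minimal caller-side sketch of how this might be used, assuming iosysid was obtained from PIOc_Init_Intracomm; "romio_cb_write" is a common ROMIO hint name, but which hints are honored depends on the underlying MPI-IO implementation:

/* Sketch: forward an MPI-IO hint through PIO.  The hint argument is
 * non-const, so use a writable buffer rather than a string literal. */
char hintname[] = "romio_cb_write";
if (PIOc_set_hint(iosysid, hintname, "enable") != PIO_NOERR)
    fprintf(stderr, "PIOc_set_hint failed\n");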
Example #2
int PIOc_finalize(const int iosysid)
{
  iosystem_desc_t *ios, *nios;
  int msg;
  int mpierr;

  ios = pio_get_iosystem_from_id(iosysid);
  if(ios == NULL)
    return PIO_EBADID;
  
  /* If async I/O is in use, send the PIO_MSG_EXIT message from the
   * comp master to the IO processes. */
  if (ios->async_interface && !ios->comp_rank)
  {
    msg = PIO_MSG_EXIT;
    mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
    CheckMPIReturn(mpierr, __FILE__, __LINE__);		      
  }

  /* Free this memory that was allocated in init_intracomm. */
  if (ios->ioranks)
      free(ios->ioranks);

  /* Free the buffer pool. */
  free_cn_buffer_pool(*ios);

  /* Free the MPI groups. */
  if (ios->compgroup != MPI_GROUP_NULL)
    MPI_Group_free(&ios->compgroup);

  if (ios->iogroup != MPI_GROUP_NULL)
    MPI_Group_free(&(ios->iogroup));

  /* Free the MPI communicators. my_comm is just a copy (but not an
   * MPI copy), so it does not need an MPI_Comm_free() call. */
  if(ios->intercomm != MPI_COMM_NULL){
    MPI_Comm_free(&(ios->intercomm));
  }
  if(ios->io_comm != MPI_COMM_NULL){
    MPI_Comm_free(&(ios->io_comm));
  }
  if(ios->comp_comm != MPI_COMM_NULL){
    MPI_Comm_free(&(ios->comp_comm));
  }
  if(ios->union_comm != MPI_COMM_NULL){
    MPI_Comm_free(&(ios->union_comm));
  }

  /* Delete the iosystem_desc_t data associated with this id. */
  return pio_delete_iosystem_from_list(iosysid);
}
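A sketch of the usual shutdown order, assuming iosysid came from one of the init calls below; the iosystem is released before MPI itself is finalized:

/* Sketch: free all PIO resources tied to this iosystem, then shut down MPI. */
if (PIOc_finalize(iosysid) != PIO_NOERR)
    fprintf(stderr, "PIOc_finalize failed\n");
MPI_Finalize();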
Example #3
int PIOc_InitDecomp(const int iosysid, const int basetype, const int ndims, const int dims[],
		    const int maplen, const PIO_Offset *compmap, int *ioidp, const int *rearranger,
		    const PIO_Offset *iostart, const PIO_Offset *iocount)
{
  iosystem_desc_t *ios;
  io_desc_t *iodesc;
  int ierr;

  for(int i=0;i<ndims;i++){
    if(dims[i]<=0){
      piodie("Invalid dims argument",__FILE__,__LINE__);
    }
  }
  ios = pio_get_iosystem_from_id(iosysid);
  if(ios == NULL)
    return PIO_EBADID;
  

  /* Optionally dump the decomposition map to a file for debugging. */
  if(PIO_Save_Decomps){
    char filename[64];
    if(ios->num_comptasks < 100) {
      snprintf(filename, sizeof(filename), "piodecomp%2.2dtasks%2.2ddims%2.2d.dat",ios->num_comptasks,ndims,counter);
    }else if(ios->num_comptasks < 10000) {
      snprintf(filename, sizeof(filename), "piodecomp%4.4dtasks%2.2ddims%2.2d.dat",ios->num_comptasks,ndims,counter);
    }else{
      snprintf(filename, sizeof(filename), "piodecomp%6.6dtasks%2.2ddims%2.2d.dat",ios->num_comptasks,ndims,counter);
    }
    PIOc_writemap(filename,ndims,dims,maplen,compmap,ios->comp_comm);
    counter++;
  }


  iodesc = malloc_iodesc(basetype, ndims);
  if(rearranger == NULL)
    iodesc->rearranger = ios->default_rearranger;
  else
    iodesc->rearranger = *rearranger;
    
  if(iodesc->rearranger == PIO_REARR_SUBSET){
    if((iostart != NULL) && (iocount != NULL)){
      fprintf(stderr,"%s %s\n","Iostart and iocount arguments to PIOc_InitDecomp",
              "are incompatible with the subset rearrange method and will be ignored");
    }
    iodesc->num_aiotasks = ios->num_iotasks;
    ierr = subset_rearrange_create(*ios, maplen, compmap, dims, ndims, iodesc);
  }else{
    if(ios->ioproc){
      // Unless the user specifies the start and count for each IO task, compute it.
      if((iostart != NULL) && (iocount != NULL)){
        iodesc->maxiobuflen = 1;
        for(int i=0; i<ndims; i++){
          iodesc->firstregion->start[i] = iostart[i];
          iodesc->firstregion->count[i] = iocount[i];
        }
        iodesc->num_aiotasks = ios->num_iotasks;
      }else{
        iodesc->num_aiotasks = CalcStartandCount(basetype, ndims, dims,
                                                 ios->num_iotasks, ios->io_rank,
                                                 iodesc->firstregion->start, iodesc->firstregion->count);
      }
      compute_maxIObuffersize(ios->io_comm, iodesc);
    }
    // Depending on array size and io-blocksize the actual number of io tasks used may vary
    CheckMPIReturn(MPI_Bcast(&(iodesc->num_aiotasks), 1, MPI_INT, ios->ioroot,
			     ios->my_comm),__FILE__,__LINE__);
    // Compute the communications pattern for this decomposition
    if(iodesc->rearranger==PIO_REARR_BOX){   
      ierr = box_rearrange_create( *ios, maplen, compmap, dims, ndims, iodesc);
    }
    /*
    if(ios->ioproc){
      io_region *ioregion = iodesc->firstregion;
      while(ioregion != NULL){
	for(int i=0;i<ndims;i++)
	  printf("%s %d i %d dim %d start %ld count %ld\n",__FILE__,__LINE__,i,dims[i],ioregion->start[i],ioregion->count[i]);
	ioregion = ioregion->next;
      }
    }
    */
  }

  *ioidp = pio_add_to_iodesc_list(iodesc);

  performance_tune_rearranger(*ios, iodesc);
  
  return PIO_NOERR;
}
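A caller-side sketch of a simple 1-D block decomposition, assuming iosysid from PIOc_Init_Intracomm and hypothetical GLOBAL_LEN, my_rank, and num_comptasks values; PIO_DOUBLE is the usual basetype constant, and the 1-based compmap convention (0 meaning "no data on this task") follows common PIO usage and should be checked against the library documentation:

/* Sketch: each rank owns a contiguous block of a 1-D global array. */
int dims[1] = {GLOBAL_LEN};
int maplen = GLOBAL_LEN / num_comptasks;               /* assume it divides evenly */
PIO_Offset *compmap = malloc(maplen * sizeof(PIO_Offset));
for (int i = 0; i < maplen; i++)
    compmap[i] = (PIO_Offset)my_rank * maplen + i + 1; /* 1-based global offset */

int ioid;
int ret = PIOc_InitDecomp(iosysid, PIO_DOUBLE, 1, dims, maplen, compmap,
                          &ioid, NULL, NULL, NULL);    /* default rearranger, computed start/count */
if (ret != PIO_NOERR)
    fprintf(stderr, "PIOc_InitDecomp failed: %d\n", ret);
free(compmap);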
Example #4
int PIOc_Init_Intracomm(const MPI_Comm comp_comm, 
			const int num_iotasks, const int stride, 
			const int base,const int rearr, int *iosysidp)
{
  iosystem_desc_t *iosys;
  int ierr = PIO_NOERR;
  int ustride;
  int mpierr;

  iosys = (iosystem_desc_t *) malloc(sizeof(iosystem_desc_t));

  /* Copy the computation communicator into union_comm. */
  mpierr = MPI_Comm_dup(comp_comm, &iosys->union_comm);
  CheckMPIReturn(mpierr, __FILE__, __LINE__);		
  if (mpierr)
      ierr = PIO_EIO;

  /* Copy the computation communicator into comp_comm. */
  if (!ierr)
  {
      mpierr = MPI_Comm_dup(comp_comm, &iosys->comp_comm);
      CheckMPIReturn(mpierr, __FILE__, __LINE__);		
      if (mpierr)
	  ierr = PIO_EIO;
  }

  if (!ierr)
  {
      iosys->my_comm = iosys->comp_comm;
      iosys->io_comm = MPI_COMM_NULL;
      iosys->intercomm = MPI_COMM_NULL;
      iosys->error_handler = PIO_INTERNAL_ERROR;
      iosys->async_interface= false;
      iosys->compmaster = 0;
      iosys->iomaster = 0;
      iosys->ioproc = false;
      iosys->default_rearranger = rearr;
      iosys->num_iotasks = num_iotasks;

      ustride = stride;

      /* Find MPI rank and number of tasks in comp_comm communicator. */
      CheckMPIReturn(MPI_Comm_rank(iosys->comp_comm, &(iosys->comp_rank)),__FILE__,__LINE__);
      CheckMPIReturn(MPI_Comm_size(iosys->comp_comm, &(iosys->num_comptasks)),__FILE__,__LINE__);
      if(iosys->comp_rank==0)
	  iosys->compmaster = MPI_ROOT;  

      /* Ensure that settings for number of computation tasks, number
       * of IO tasks, and the stride are reasonable. */
      if((iosys->num_comptasks == 1) && (num_iotasks*ustride > 1)) {
	  // This is a serial run with a bad configuration. Set up a single task.
	  fprintf(stderr, "PIO_TP PIOc_Init_Intracomm reset stride and tasks.\n");
	  iosys->num_iotasks = 1;
	  ustride = 1;
      }
      if((iosys->num_iotasks < 1) || ((iosys->num_iotasks*ustride) > iosys->num_comptasks)){
	  fprintf(stderr, "PIO_TP PIOc_Init_Intracomm error\n");
	  fprintf(stderr, "num_iotasks=%d, ustride=%d, num_comptasks=%d\n", num_iotasks, ustride, iosys->num_comptasks);
	  return PIO_EBADID;
      }

      /* Create an array that holds the ranks of the tasks to be used for IO. */
      iosys->ioranks = (int *) calloc(sizeof(int), iosys->num_iotasks);
      for(int i=0;i< iosys->num_iotasks; i++){
	  iosys->ioranks[i] = (base + i*ustride) % iosys->num_comptasks;
	  if(iosys->ioranks[i] == iosys->comp_rank)
	      iosys->ioproc = true;
      }
      iosys->ioroot = iosys->ioranks[0];

      /* Create an MPI info object; PIOc_set_hint() can add hints to it later. */
      CheckMPIReturn(MPI_Info_create(&(iosys->info)),__FILE__,__LINE__);

      if(iosys->comp_rank == iosys->ioranks[0])
	  iosys->iomaster = MPI_ROOT;

      /* Create a group for the computation tasks. */
      CheckMPIReturn(MPI_Comm_group(iosys->comp_comm, &(iosys->compgroup)),__FILE__,__LINE__);
			
      /* Create a group for the IO tasks. */
      CheckMPIReturn(MPI_Group_incl(iosys->compgroup, iosys->num_iotasks, iosys->ioranks,
				    &(iosys->iogroup)),__FILE__,__LINE__);

      /* Create an MPI communicator for the IO tasks. */
      CheckMPIReturn(MPI_Comm_create(iosys->comp_comm, iosys->iogroup, &(iosys->io_comm)),__FILE__,__LINE__);

      /* For the tasks that are doing IO, get their rank. */
      if(iosys->ioproc)
	  CheckMPIReturn(MPI_Comm_rank(iosys->io_comm, &(iosys->io_rank)),__FILE__,__LINE__);
      else
	  iosys->io_rank = -1;

      iosys->union_rank = iosys->comp_rank;

      /* Add this iosys struct to the list in the PIO library. */
      *iosysidp = pio_add_to_iosystem_list(iosys);

      pio_get_env();

      /* allocate buffer space for compute nodes */
      compute_buffer_init(*iosys);
  }
  
  return ierr;
}
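A minimal driver sketch, assuming pio.h and an MPI environment; the task counts are only illustrative, and PIO_REARR_BOX is the standard box-rearranger constant:

#include <stdio.h>
#include <mpi.h>
#include <pio.h>

int main(int argc, char **argv)
{
    int iosysid;

    MPI_Init(&argc, &argv);

    /* One iosystem on MPI_COMM_WORLD: 4 IO tasks, stride 4, base rank 0,
     * box rearranger. */
    if (PIOc_Init_Intracomm(MPI_COMM_WORLD, 4, 4, 0, PIO_REARR_BOX, &iosysid) != PIO_NOERR)
        fprintf(stderr, "PIOc_Init_Intracomm failed\n");

    /* ... open files, define decompositions, read/write ... */

    PIOc_finalize(iosysid);
    MPI_Finalize();
    return 0;
}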
Example #5
File: pioc.c Project: mt5555/cime
int PIOc_Init_Intracomm(const MPI_Comm comp_comm, 
			const int num_iotasks, const int stride, 
			const int base,const int rearr, int *iosysidp)
{
  iosystem_desc_t *iosys;
  int ierr;
  int ustride;
  int lbase;
  iosys = (iosystem_desc_t *) malloc(sizeof(iosystem_desc_t));

  iosys->union_comm = comp_comm;
  iosys->comp_comm = comp_comm;
  iosys->my_comm = comp_comm;
  iosys->io_comm = MPI_COMM_NULL;
  iosys->intercomm = MPI_COMM_NULL;
  iosys->error_handler = PIO_INTERNAL_ERROR;
  iosys->async_interface= false;
  iosys->compmaster = false;
  iosys->iomaster = false;
  iosys->ioproc = false;
  iosys->default_rearranger = rearr;
  iosys->num_iotasks = num_iotasks;

  ustride = stride;
 
  CheckMPIReturn(MPI_Comm_rank(comp_comm, &(iosys->comp_rank)),__FILE__,__LINE__);
  CheckMPIReturn(MPI_Comm_size(comp_comm, &(iosys->num_comptasks)),__FILE__,__LINE__);
  if(iosys->comp_rank==0)
    iosys->compmaster = true;  

#ifdef BGQxxx
  lbase = base;
  determineiotasks(comp_comm, &(iosys->num_iotasks), &lbase, &stride, &rearr, &(iosys->ioproc));
  if(iosys->comp_rank==0)
    printf("%s %d %d\n",__FILE__,__LINE__,iosys->num_iotasks);
  if(iosys->ioproc)
    printf("%s %d %d\n",__FILE__,__LINE__,iosys->comp_rank);
    
#else
  if((iosys->num_comptasks == 1) && (num_iotasks*ustride > 1)) {
    // This is a serial run with a bad configuration. Set up a single task.
    fprintf(stderr, "PIO_TP PIOc_Init_Intracomm reset stride and tasks.\n");
    iosys->num_iotasks = 1;
    ustride = 1;
  }
  if((iosys->num_iotasks < 1) || ((iosys->num_iotasks*ustride) > iosys->num_comptasks)){
    fprintf(stderr, "PIO_TP PIOc_Init_Intracomm error\n");
    fprintf(stderr, "num_iotasks=%d, ustride=%d, num_comptasks=%d\n", num_iotasks, ustride, iosys->num_comptasks);
    return PIO_EBADID;
  }
  iosys->ioranks = (int *) calloc(sizeof(int), iosys->num_iotasks);
  for(int i=0;i< iosys->num_iotasks; i++){
    iosys->ioranks[i] = (base + i*ustride) % iosys->num_comptasks;
    if(iosys->ioranks[i] == iosys->comp_rank)
      iosys->ioproc = true;
  }
  iosys->ioroot = iosys->ioranks[0];
#endif

  /* Create an MPI info object; PIOc_set_hint() can add hints to it later. */
  CheckMPIReturn(MPI_Info_create(&(iosys->info)),__FILE__,__LINE__);

  if(iosys->comp_rank == iosys->ioranks[0])
    iosys->iomaster = true;

  CheckMPIReturn(MPI_Comm_group(comp_comm, &(iosys->compgroup)),__FILE__,__LINE__);
			
  CheckMPIReturn(MPI_Group_incl(iosys->compgroup, iosys->num_iotasks, iosys->ioranks,
				&(iosys->iogroup)),__FILE__,__LINE__);

  CheckMPIReturn(MPI_Comm_create(comp_comm, iosys->iogroup, &(iosys->io_comm)),__FILE__,__LINE__);
  if(iosys->ioproc)
    CheckMPIReturn(MPI_Comm_rank(iosys->io_comm, &(iosys->io_rank)),__FILE__,__LINE__);
  else
    iosys->io_rank = -1;

  iosys->union_rank = iosys->comp_rank;

  *iosysidp = pio_add_to_iosystem_list(iosys);

  pio_get_env();

  /* allocate buffer space for compute nodes */
  compute_buffer_init(*iosys);

  return PIO_NOERR;
}
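Compared with Example #4, this earlier variant stores the caller's comp_comm directly in union_comm, comp_comm, and my_comm instead of duplicating it with MPI_Comm_dup, and it always returns PIO_NOERR rather than propagating an error code. Duplicating the communicator, as Example #4 does, keeps the library's internal MPI traffic separate from the caller's own messages on the same communicator and gives PIOc_finalize handles it can safely free.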