//--------------------------------------------------------------------------
//
// Reads a decomposition from a file
//
// filename: input filename
// swap_bytes: whether to swap bytes for endian conversion
// glo_num_blocks: total number of blocks in the global domain (output)
// loc_num_blocks: local number of blocks on this process (output)
//
// returns: id of this domain (< 0 if error)
//
int DIY_Read_decomposed(char *filename, int swap_bytes,
			int *glo_num_blocks, int *loc_num_blocks) {

  int tot_blocks, nblocks; // global and local number of blocks
  int given[3] = {0, 0, 0}; // no constraints on decomposition in {x, y, z}
  int ghost[6] = {0, 0, 0, 0, 0, 0}; // -x, +x, -y, +y, -z, +z ghost
  MPI_File fd; // file descriptor

  int retval = MPI_File_open(comm, (char *)filename, MPI_MODE_RDONLY,
			     MPI_INFO_NULL, &fd);
  assert(retval == MPI_SUCCESS);
  // honor the documented "< 0 if error" contract even when asserts are
  // compiled out (NDEBUG): don't read from an invalid file handle
  if (retval != MPI_SUCCESS)
    return -1;

  // reads global and local block counts from the file header
  IO::ReadInfo(fd, swap_bytes, comm, tot_blocks, nblocks);

  MPI_File_close(&fd);

  // only works for regular grids with share_face = 1
  // todo: need a complete solution where everything needed to create a
  // decomposition is included in the file
  int did = DIY_Decompose(CONTIGUOUS_ORDER, tot_blocks, &nblocks, 1,
			  ghost, given);
  *glo_num_blocks = tot_blocks;
  *loc_num_blocks = nblocks;

  return did;

}
// Beispiel #2
// 0
//-----------------------------------------------------------------------
//
// Init
//
// inits the app: partitions the 4D (space + time) domain with DIY,
// creates one OSUFlow object per local block, and constructs the
// Blocks / ParFlow helper objects used for particle tracing
//
void Init() {

  bool track_seed_ids = false;
#ifdef TRACK_SEED_ID
  track_seed_ids = true;
#endif
  // NOTE(review): track_seed_ids is set but never read in this function --
  // presumably consumed elsewhere or vestigial; confirm before removing
  (void)track_seed_ids; // silence unused-variable warning

  int myproc, nproc; // usual MPI
  int i;

  MPI_Comm_rank(MPI_COMM_WORLD, &myproc);
  MPI_Comm_size(MPI_COMM_WORLD, &nproc);
  (void)myproc; // rank queried but currently unused here

  // sanity-check the requested space x time partitioning
  assert(nspart * ntpart >= nproc); // need at least one block per process
  assert(ntpart <= tsize); // no more time partitions than time steps
  if(seed_file[0] == '!') // '!' appears to mean "no seed file" (see below)
    assert(tf > 0);

  // partition domain
  // todo: don't create partition if there is a part file?
  int data_size[4] = {size[0], size[1], size[2], tsize};
  int given[4] = {0, 0, 0, ntpart}; // constraints in x, y, z, t
  int ghost[8] = {1, 1, 1, 1, 1, 1, 0, 0}; // -x, +x, -y, +y, -z, +z, -t, +t
  if (tsize > 1)
    ghost[7] = 1; // time-varying data: add one layer of +t ghost
  DIY_Init(4, data_size, 1, MPI_COMM_WORLD);
  DIY_Decompose(ROUND_ROBIN_ORDER, nspart * ntpart, &nblocks, 1, ghost, given);

  // create osuflow object for each block
  // todo: switch to vectors and get rid of memory management
  // bug fix: the array holds OSUFlow* pointers, so the element size is
  // sizeof(OSUFlow *); the old sizeof(OSUFlow) over-allocated per block
  osuflow = (OSUFlow**)malloc(nblocks * sizeof(OSUFlow *));
  assert(osuflow != NULL);
  for (i = 0; i < nblocks; i++)
    osuflow[i] = new OSUFlow;

  // Seeds and fieldline list, one entry per local block
  Seeds.resize(nblocks);
  sl_list = new list<vtListTimeSeedTrace*>[nblocks];

  // create remaining classes
  // todo: rename

  // edited TP 10/12/12
//   blocks = new Blocks(blocking, assign, (void *)osuflow, OSUFLOW, 
// 		      dataset_files, num_dataset_files, data_mode, ghost);
//   parflow = new ParFlow(blocking, assign, blocks, osuflow, sl_list, 
// 			&pt, &npt, &tot_ntrace, nblocks, 0);
  blocks = new Blocks(nblocks, (void *)osuflow, OSUFLOW, 
		      dataset_files, num_dataset_files, data_mode);

  parflow = new ParFlow(blocks, osuflow, sl_list, 
			&pt, &npt, &tot_ntrace, nblocks, 0);
  // end TP

  // forward the integration parameters (globals set elsewhere) to parflow
  parflow->SetMaxError(maxError);
  parflow->SetInitialStepSize(initialStepSize);
  parflow->SetMinStepSize(minStepSize);
  parflow->SetMaxStepSize(maxStepSize);
  parflow->SetLowerAngleAccuracy(lowerAngleAccuracy);
  parflow->SetUpperAngleAccuracy(upperAngleAccuracy);
  parflow->SetIntegrationOrder(integrationOrder);
  parflow->SetUseAdaptiveStepSize(useAdaptiveStepSize);

  // default particle count; overridden below when a seed file is given
  TotParticles = nspart * tf;

  if(seed_file[0] != '!') { // a real seed file name was supplied
    LoadSeedsFromFile();
    TotParticles = seed_file_num;
    assert(seed_file_num > 0);
  }

}