Example #1
void createTargetMesh(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,CellID cellID,int dim,
                      bool isRemoteCell) {
   const size_t popID = 0;

   // Get the immediate spatial face neighbors of this cell 
   // in the direction of propagation
   CellID cells[3];
   switch (dim) {
    case 0:
      cells[0] = get_spatial_neighbor(mpiGrid,cellID,true,-1,0,0);
      cells[1] = cellID;
      cells[2] = get_spatial_neighbor(mpiGrid,cellID,true,+1,0,0);
      break;
    case 1:
      cells[0] = get_spatial_neighbor(mpiGrid,cellID,true,0,-1,0);
      cells[1] = cellID;
      cells[2] = get_spatial_neighbor(mpiGrid,cellID,true,0,+1,0);
      break;
    case 2:
      cells[0] = get_spatial_neighbor(mpiGrid,cellID,true,0,0,-1);
      cells[1] = cellID;
      cells[2] = get_spatial_neighbor(mpiGrid,cellID,true,0,0,+1);
      break;
    default:
      std::cerr << "createTargetMesh ERROR: invalid dimension " << dim << std::endl;
      exit(1);
      break;
   }
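   // cells[1] is the propagated cell itself; cells[0] and cells[2] are its -1/+1
   // face neighbors in direction dim (an offset-table version of this lookup is
   // sketched after this function).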

   // Remote (buffered) cells do not consider other remote cells as source cells,
   // i.e., only cells local to this process are translated
   if (isRemoteCell == true) {
      if (mpiGrid.is_local(cells[0]) == false) cells[0] = INVALID_CELLID;
      if (mpiGrid.is_local(cells[2]) == false) cells[2] = INVALID_CELLID;
   }

   SpatialCell* spatial_cell = mpiGrid[cellID];
   vmesh::VelocityMesh<vmesh::GlobalID,vmesh::LocalID>& vmesh    = spatial_cell->get_velocity_mesh_temporary();
   vmesh::VelocityBlockContainer<vmesh::LocalID>& blockContainer = spatial_cell->get_velocity_blocks_temporary();

   // At minimum the target mesh will be an identical copy of the existing mesh;
   // remote cells instead start from an empty mesh and only receive upstream blocks
   if (isRemoteCell == false) vmesh = spatial_cell->get_velocity_mesh(popID);
   else vmesh.clear();
   
   // Add or refine blocks arriving from upstream
   addUpstreamBlocks<-1>(mpiGrid,cells[0],dim,vmesh);
   addUpstreamBlocks<+1>(mpiGrid,cells[2],dim,vmesh);

   // Target mesh generated, set block parameters
   blockContainer.setSize(vmesh.size());
   for (size_t b=0; b<vmesh.size(); ++b) {
      vmesh::GlobalID blockGID = vmesh.getGlobalID(b);
      Real* blockParams = blockContainer.getParameters(b);
      blockParams[BlockParams::VXCRD] = spatial_cell->get_velocity_block_vx_min(blockGID);
      blockParams[BlockParams::VYCRD] = spatial_cell->get_velocity_block_vy_min(blockGID);
      blockParams[BlockParams::VZCRD] = spatial_cell->get_velocity_block_vz_min(blockGID);
      vmesh.getCellSize(blockGID,&(blockParams[BlockParams::DVX]));
   }
}
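
The switch over dim above only selects a ±1 offset along a single axis. Below is a minimal, self-contained sketch of the same lookup driven by an offset table; get_spatial_neighbor_stub is a hypothetical stand-in for Vlasiator's get_spatial_neighbor, which actually queries the dccrg grid.

#include <array>
#include <cstdint>
#include <iostream>

// Hypothetical stand-ins: the real CellID and get_spatial_neighbor come from
// Vlasiator; this stub only reports which offset would be requested.
using CellID = std::uint64_t;

CellID get_spatial_neighbor_stub(CellID cellID, int di, int dj, int dk) {
   std::cout << "neighbor of " << cellID << " at (" << di << ',' << dj << ',' << dk << ")\n";
   return cellID; // placeholder; the real call returns the neighbor's CellID
}

// Same result as the switch in createTargetMesh: for dim (0=x, 1=y, 2=z),
// fetch the -1 and +1 face neighbors along that axis.
std::array<CellID,3> faceNeighbors(CellID cellID, int dim) {
   std::array<int,3> offset = {0, 0, 0};
   offset[dim] = 1;
   return {
      get_spatial_neighbor_stub(cellID, -offset[0], -offset[1], -offset[2]),
      cellID,
      get_spatial_neighbor_stub(cellID, +offset[0], +offset[1], +offset[2])
   };
}

int main() {
   faceNeighbors(42, 1); // requests neighbors at (0,-1,0) and (0,+1,0)
}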
Example #2
/*!
 \brief Read in state from a vlsv file in order to restart simulations
 \param mpiGrid Vlasiator's grid
 \param perBGrid FsGrid for the perturbed magnetic field
 \param EGrid FsGrid for the electric field
 \param technicalGrid FsGrid for the technical (per-cell metadata) values
 \param name Name of the restart file, e.g. "restart.00052.vlsv"
 \return Returns true if the operation was successful
 \sa readGrid
 */
bool exec_readGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
                   FsGrid< std::array<Real, fsgrids::bfield::N_BFIELD>, 2>& perBGrid,
                   FsGrid< std::array<Real, fsgrids::efield::N_EFIELD>, 2>& EGrid,
                   FsGrid< fsgrids::technical, 2>& technicalGrid,
                   const std::string& name) {
   vector<CellID> fileCells;   /**< CellIDs for all cells in file */
   vector<size_t> nBlocks;     /**< Number of blocks for all cells in file */
   bool success=true;
   int myRank,processes;

#warning Spatial grid name hard-coded here
   const string meshName = "SpatialGrid";
   
   MPI_Comm_rank(MPI_COMM_WORLD,&myRank);
   MPI_Comm_size(MPI_COMM_WORLD,&processes);

   phiprof::start("readGrid");

   // Attempt to open the VLSV file for reading:
   vlsv::ParallelReader file;
   MPI_Info mpiInfo = MPI_INFO_NULL;

   if (file.open(name,MPI_COMM_WORLD,MASTER_RANK,mpiInfo) == false) {
      success=false;
   }
   exitOnError(success,"(RESTART) Could not open file",MPI_COMM_WORLD);

   // Around May 2015 the time parameter was renamed from "t" to "time";
   // we try to read both, new name first
   if (readScalarParameter(file,"time",P::t,MASTER_RANK,MPI_COMM_WORLD) == false)
     if (readScalarParameter(file,"t",P::t,MASTER_RANK,MPI_COMM_WORLD) == false)
       success = false;
   P::t_min=P::t;

   // Around May 2015 the timestep parameter was renamed from "tstep" to "timestep";
   // we try to read both, new name first
   if (readScalarParameter(file,"timestep",P::tstep,MASTER_RANK,MPI_COMM_WORLD) == false)
     if (readScalarParameter(file,"tstep",P::tstep,MASTER_RANK,MPI_COMM_WORLD) == false)
       success = false;
   P::tstep_min=P::tstep;

   if(readScalarParameter(file,"dt",P::dt,MASTER_RANK,MPI_COMM_WORLD) ==false) success=false;

   if(readScalarParameter(file,"fieldSolverSubcycles",P::fieldSolverSubcycles,MASTER_RANK,MPI_COMM_WORLD) ==false) {
      // Legacy restarts do not have this field, it "should" be safe for one or two steps...
      P::fieldSolverSubcycles = 1.0;
      cout << " No P::fieldSolverSubcycles found in restart, setting 1." << endl;
   }
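   // Broadcast so that every rank agrees on the value read (or defaulted) above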
   MPI_Bcast(&(P::fieldSolverSubcycles),1,MPI_Type<Real>(),MASTER_RANK,MPI_COMM_WORLD);

   checkScalarParameter(file,"xmin",P::xmin,MASTER_RANK,MPI_COMM_WORLD);
   checkScalarParameter(file,"ymin",P::ymin,MASTER_RANK,MPI_COMM_WORLD);
   checkScalarParameter(file,"zmin",P::zmin,MASTER_RANK,MPI_COMM_WORLD);
   checkScalarParameter(file,"xmax",P::xmax,MASTER_RANK,MPI_COMM_WORLD);
   checkScalarParameter(file,"ymax",P::ymax,MASTER_RANK,MPI_COMM_WORLD);
   checkScalarParameter(file,"zmax",P::zmax,MASTER_RANK,MPI_COMM_WORLD);
   checkScalarParameter(file,"xcells_ini",P::xcells_ini,MASTER_RANK,MPI_COMM_WORLD);
   checkScalarParameter(file,"ycells_ini",P::ycells_ini,MASTER_RANK,MPI_COMM_WORLD);
   checkScalarParameter(file,"zcells_ini",P::zcells_ini,MASTER_RANK,MPI_COMM_WORLD);

   phiprof::start("readDatalayout");
   if (success == true) success = readCellIds(file,fileCells,MASTER_RANK,MPI_COMM_WORLD);

   // Check that the number of cells in the file matches the grid (verified on master rank only)
   if (myRank == 0) {
      vector<CellID> allGridCells=mpiGrid.get_all_cells();
      if (fileCells.size() != allGridCells.size()){
         success=false;
      }
   }
   
   exitOnError(success,"(RESTART) Wrong number of cells in restart file",MPI_COMM_WORLD);

   // Read the total number of velocity blocks in each spatial cell.
   // Note that this is a sum over all existing particle species.
   if (success == true) {
      success = readNBlocks(file,meshName,nBlocks,MASTER_RANK,MPI_COMM_WORLD);
   }

   // Make sure all cells are empty; we will overwrite everything anyway,
   // and in that case moving cells is easier...
   {
      const vector<CellID>& gridCells = getLocalCells();
      for (size_t i=0; i<gridCells.size(); i++) {
         for (uint popID=0; popID<getObjectWrapper().particleSpecies.size(); ++popID)
            mpiGrid[gridCells[i]]->clear(popID);
      }
   }

   uint64_t totalNumberOfBlocks = 0;
   for (uint i=0; i<nBlocks.size(); ++i) {
      totalNumberOfBlocks += nBlocks[i];
   }
   // Round up so that the running block count can never map a cell to a rank >= processes
   const uint64_t numberOfBlocksPerProcess = 1 + totalNumberOfBlocks/processes;

   uint64_t localCellStartOffset=0; // This is where local cells start in file-list after migration.
   uint64_t localCells=0;
   uint64_t numberOfBlocksCount=0;
   
   // Pin each cell to its new owner process; owners are chosen so that the number
   // of blocks per process is roughly balanced (see the sketch after this function).
   for (size_t i=0; i<fileCells.size(); ++i) {
      numberOfBlocksCount += nBlocks[i];
      int newCellProcess = numberOfBlocksCount/numberOfBlocksPerProcess;
      if (newCellProcess == myRank) {
         if (localCells == 0) localCellStartOffset = i; // here the local cells start
         ++localCells;
      }
      if (mpiGrid.is_local(fileCells[i])) {
         mpiGrid.pin(fileCells[i],newCellProcess);
      }
   }

   SpatialCell::set_mpi_transfer_type(Transfer::ALL_SPATIAL_DATA);

   // Do initial load balance based on the pins. Need to transfer at least sysboundaryflags.
   mpiGrid.balance_load(false);

   // Update the cached list of local grid cells
   recalculateLocalCellsCache();

   // Get the new list of local grid cells
   const vector<CellID>& gridCells = getLocalCells();

   // Unpin the cells, otherwise later load balancing can never improve on this initial distribution
   for (size_t i=0; i<gridCells.size(); ++i) {
      mpiGrid.unpin(gridCells[i]);
   }

   // Check that migration succeeded: the number of local cells must match what was assigned
   if (localCells != gridCells.size() ) {
      success=false;
   } 

   if (success == true) {
      for (uint64_t i=localCellStartOffset; i<localCellStartOffset+localCells; ++i) {
         if(mpiGrid.is_local(fileCells[i]) == false) {
            success = false;
         }
      }
   }

   exitOnError(success,"(RESTART) Cell migration failed",MPI_COMM_WORLD);

   // Set cell coordinates based on the cfg (mpiGrid geometry) information
   for (size_t i=0; i<gridCells.size(); ++i) {
      array<double, 3> cell_min = mpiGrid.geometry.get_min(gridCells[i]);
      array<double, 3> cell_length = mpiGrid.geometry.get_length(gridCells[i]);

      mpiGrid[gridCells[i]]->parameters[CellParams::XCRD] = cell_min[0];
      mpiGrid[gridCells[i]]->parameters[CellParams::YCRD] = cell_min[1];
      mpiGrid[gridCells[i]]->parameters[CellParams::ZCRD] = cell_min[2];
      mpiGrid[gridCells[i]]->parameters[CellParams::DX  ] = cell_length[0];
      mpiGrid[gridCells[i]]->parameters[CellParams::DY  ] = cell_length[1];
      mpiGrid[gridCells[i]]->parameters[CellParams::DZ  ] = cell_length[2];
   }

   phiprof::stop("readDatalayout");

   // TODO: check the file datatype instead of assuming double
   phiprof::start("readCellParameters");
   if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"moments",CellParams::RHOM,5,mpiGrid); }
   if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"moments_dt2",CellParams::RHOM_DT2,5,mpiGrid); }
   if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"moments_r",CellParams::RHOM_R,5,mpiGrid); }
   if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"moments_v",CellParams::RHOM_V,5,mpiGrid); }
   if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"pressure",CellParams::P_11,3,mpiGrid); }
   if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"pressure_dt2",CellParams::P_11_DT2,3,mpiGrid); }
   if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"pressure_r",CellParams::P_11_R,3,mpiGrid); }
   if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"pressure_v",CellParams::P_11_V,3,mpiGrid); }
   if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"LB_weight",CellParams::LBWEIGHTCOUNTER,1,mpiGrid); }
   if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"max_v_dt",CellParams::MAXVDT,1,mpiGrid); }
   if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"max_r_dt",CellParams::MAXRDT,1,mpiGrid); }
   if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"max_fields_dt",CellParams::MAXFDT,1,mpiGrid); }
   // Background B has to be set; there are also the derivatives that should be
   // written/read if we wanted to only read in the background field
   phiprof::stop("readCellParameters");

   phiprof::start("readBlockData");
   if (success == true) {
      success = readBlockData(file,meshName,fileCells,localCellStartOffset,localCells,mpiGrid); 
   }
   phiprof::stop("readBlockData");

   // Refresh the ghost copies of remote neighbor cells now that local data has been read
   mpiGrid.update_copies_of_remote_neighbors(FULL_NEIGHBORHOOD_ID);
   
   // Read fsgrid data back in
   int fsgridInputRanks=0;
   if(readScalarParameter(file,"numWritingRanks",fsgridInputRanks, MASTER_RANK, MPI_COMM_WORLD) == false) {
      exitOnError(false, "(RESTART) FSGrid writing rank number not found in restart file", MPI_COMM_WORLD);
   }
   
   success = readFsGridVariable(file, "fg_PERB", fsgridInputRanks, perBGrid);
   success = readFsGridVariable(file, "fg_E", fsgridInputRanks, EGrid);
   
   success = file.close();
   phiprof::stop("readGrid");

   exitOnError(success,"(RESTART) Other failure",MPI_COMM_WORLD);
   return success;
}
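
The pinning loop above implements a simple partitioning rule: walk the cells in file order, keep a running block count, and assign each cell to rank count/blocksPerProcess, cutting the file-ordered cell list into contiguous chunks of roughly equal total block count. Below is a minimal, self-contained sketch of that rule; the names (ownerRanks, blocksPerProcess) are illustrative stand-ins, not Vlasiator API, and processes is assumed positive.

#include <cstdint>
#include <iostream>
#include <vector>

// Assign each cell (given its block count) to an owner rank so that
// consecutive cells form chunks of roughly equal total block count.
std::vector<int> ownerRanks(const std::vector<std::uint64_t>& nBlocks, int processes) {
   std::uint64_t total = 0;
   for (std::uint64_t n : nBlocks) total += n;

   // Round up so the running count can never map a cell to a rank >= processes
   const std::uint64_t blocksPerProcess = 1 + total/processes;

   std::vector<int> owner(nBlocks.size());
   std::uint64_t count = 0;
   for (std::size_t i=0; i<nBlocks.size(); ++i) {
      count += nBlocks[i];
      owner[i] = static_cast<int>(count/blocksPerProcess);
   }
   return owner;
}

int main() {
   // Four cells with uneven block counts split across two processes
   for (int r : ownerRanks({100, 5, 50, 60}, 2)) std::cout << r << ' ';
   std::cout << '\n'; // prints "0 0 1 1"
}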