template <typename T, int stencil> void computeCoupling(
   dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
   const std::vector<CellID>& cells,
   FsGrid< T, stencil>& momentsGrid,
   std::map<int, std::set<CellID> >& onDccrgMapRemoteProcess,
   std::map<int, std::set<CellID> >& onFsgridMapRemoteProcess,
   std::map<CellID, std::vector<int64_t> >& onFsgridMapCells
) {
   // Sorted list of dccrg cells. cells is typically already sorted, but just to make sure....
   std::vector<CellID> dccrgCells = cells;
   std::sort(dccrgCells.begin(), dccrgCells.end());

   // Make sure the data structures are clean
   onDccrgMapRemoteProcess.clear();
   onFsgridMapRemoteProcess.clear();
   onFsgridMapCells.clear();

   // Size of the local fsgrid domain
   const std::array<int, 3> gridDims(momentsGrid.getLocalSize());

   // Compute what we will receive, and where it should be stored
   for (int k=0; k<gridDims[2]; k++) {
      for (int j=0; j<gridDims[1]; j++) {
         for (int i=0; i<gridDims[0]; i++) {
            const std::array<int, 3> globalIndices = momentsGrid.getGlobalIndices(i,j,k);
            const dccrg::Types<3>::indices_t indices = {{(uint64_t)globalIndices[0],
                                                         (uint64_t)globalIndices[1],
                                                         (uint64_t)globalIndices[2]}}; // cast to avoid warnings
            CellID dccrgCell = mpiGrid.get_existing_cell(indices, 0, mpiGrid.mapping.get_maximum_refinement_level());
            int process = mpiGrid.get_process(dccrgCell);
            int64_t fsgridLid = momentsGrid.LocalIDForCoords(i,j,k);
            int64_t fsgridGid = momentsGrid.GlobalIDForCoords(i,j,k);
            onFsgridMapRemoteProcess[process].insert(dccrgCell); // Cells are ordered (sorted) in the set
            onFsgridMapCells[dccrgCell].push_back(fsgridLid);
         }
      }
   }

   // Compute where to send data, and what to send
   for (size_t i=0; i<dccrgCells.size(); i++) {
      // Compute the processes this cell maps to
      std::vector<CellID> fsCells = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgCells[i]);
      // Loop over the fsgrid cells this dccrg cell maps to
      for (auto const &fsCellID : fsCells) {
         int process = momentsGrid.getTaskForGlobalID(fsCellID).first; // Process on fsgrid
         onDccrgMapRemoteProcess[process].insert(dccrgCells[i]);      // Add to map
      }
   }
}
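// Illustrative sketch (an assumption, not part of the original source): one way
// the coupling maps built by computeCoupling could drive the actual exchange.
// Because both sides iterate sorted std::set contents, sender and receiver agree
// on the ordering of cells inside each buffer. The payload here is a placeholder;
// the real code would pack the moments of each cell.
#include <mpi.h>
#include <cstdint>
#include <map>
#include <set>
#include <vector>
void exchangeSketch(const std::map<int, std::set<uint64_t> >& sendMap,
                    const std::map<int, std::set<uint64_t> >& recvMap,
                    MPI_Comm comm) {
   std::vector<MPI_Request> requests;
   std::map<int, std::vector<double> > sendBuffers, recvBuffers;

   // Post one receive per remote process that owns dccrg cells we need
   for (const auto& kv : recvMap) {
      const int proc = kv.first;
      recvBuffers[proc].resize(kv.second.size());
      requests.emplace_back();
      MPI_Irecv(recvBuffers[proc].data(), (int)kv.second.size(), MPI_DOUBLE,
                proc, 0, comm, &requests.back());
   }
   // Post one send per remote process that needs our cells
   for (const auto& kv : sendMap) {
      const int proc = kv.first;
      std::vector<double>& buf = sendBuffers[proc];
      for (uint64_t cell : kv.second) buf.push_back((double)cell); // placeholder payload
      requests.emplace_back();
      MPI_Isend(buf.data(), (int)buf.size(), MPI_DOUBLE, proc, 0, comm, &requests.back());
   }
   MPI_Waitall((int)requests.size(), requests.data(), MPI_STATUSES_IGNORE);
}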
template <class Cell_T> void solve(
   const std::vector<uint64_t>& cell_ids,
   dccrg::Dccrg<Cell_T, dccrg::Cartesian_Geometry>& game_grid
) {
   for (auto cell_id: cell_ids) {
      Cell_T* current_data = game_grid[cell_id];
      if (current_data == NULL) {
         std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
         abort();
      }

      const std::vector<uint64_t>* const neighbors = game_grid.get_neighbors_of(cell_id);
      for (auto neighbor_id: *neighbors) {
         if (neighbor_id == dccrg::error_cell) {
            continue;
         }

         Cell_T* neighbor_data = game_grid[neighbor_id];
         if (neighbor_data == NULL) {
            std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
            abort();
         }

         if ((*neighbor_data)[Is_Alive_T()]) {
            (*current_data)[Live_Neighbors_T()]++;
         }
      }
   }
}
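// Illustrative sketch (an assumption, not part of the original source): the
// neighbor counts accumulated by solve() are typically consumed by a second
// pass that applies Conway's rules and resets the counters. The record access
// via Is_Alive_T/Live_Neighbors_T mirrors the snippet above.
template <class Cell_T> void apply_rules(
   const std::vector<uint64_t>& cell_ids,
   dccrg::Dccrg<Cell_T, dccrg::Cartesian_Geometry>& game_grid
) {
   for (auto cell_id: cell_ids) {
      Cell_T* cell_data = game_grid[cell_id];
      if (cell_data == NULL) { continue; }
      const auto n = (*cell_data)[Live_Neighbors_T()];
      if ((*cell_data)[Is_Alive_T()]) {
         // A live cell survives with 2 or 3 live neighbors
         (*cell_data)[Is_Alive_T()] = (n == 2 or n == 3);
      } else {
         // A dead cell is born with exactly 3 live neighbors
         (*cell_data)[Is_Alive_T()] = (n == 3);
      }
      (*cell_data)[Live_Neighbors_T()] = 0; // reset for the next generation
   }
}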
void createTargetMesh(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
                      CellID cellID, int dim, bool isRemoteCell) {
   const size_t popID = 0;

   // Get the immediate spatial face neighbors of this cell
   // in the direction of propagation
   CellID cells[3];
   switch (dim) {
      case 0:
         cells[0] = get_spatial_neighbor(mpiGrid,cellID,true,-1,0,0);
         cells[1] = cellID;
         cells[2] = get_spatial_neighbor(mpiGrid,cellID,true,+1,0,0);
         break;
      case 1:
         cells[0] = get_spatial_neighbor(mpiGrid,cellID,true,0,-1,0);
         cells[1] = cellID;
         cells[2] = get_spatial_neighbor(mpiGrid,cellID,true,0,+1,0);
         break;
      case 2:
         cells[0] = get_spatial_neighbor(mpiGrid,cellID,true,0,0,-1);
         cells[1] = cellID;
         cells[2] = get_spatial_neighbor(mpiGrid,cellID,true,0,0,+1);
         break;
      default:
         std::cerr << "createTargetMesh error: invalid dimension " << dim << std::endl;
         exit(1);
         break;
   }

   // Remote (buffered) cells do not consider other remote cells as source cells,
   // i.e., only cells local to this process are translated
   if (isRemoteCell == true) {
      if (mpiGrid.is_local(cells[0]) == false) cells[0] = INVALID_CELLID;
      if (mpiGrid.is_local(cells[2]) == false) cells[2] = INVALID_CELLID;
   }

   SpatialCell* spatial_cell = mpiGrid[cellID];
   vmesh::VelocityMesh<vmesh::GlobalID,vmesh::LocalID>& vmesh = spatial_cell->get_velocity_mesh_temporary();
   vmesh::VelocityBlockContainer<vmesh::LocalID>& blockContainer = spatial_cell->get_velocity_blocks_temporary();

   // At minimum the target mesh will be an identical copy of the existing mesh
   if (isRemoteCell == false) vmesh = spatial_cell->get_velocity_mesh(popID);
   else vmesh.clear();

   // Add or refine blocks arriving from the upstream
   addUpstreamBlocks<-1>(mpiGrid,cells[0],dim,vmesh);
   addUpstreamBlocks<+1>(mpiGrid,cells[2],dim,vmesh);

   // Target mesh generated, set block parameters
   blockContainer.setSize(vmesh.size());
   for (size_t b=0; b<vmesh.size(); ++b) {
      vmesh::GlobalID blockGID = vmesh.getGlobalID(b);
      Real* blockParams = blockContainer.getParameters(b);
      blockParams[BlockParams::VXCRD] = spatial_cell->get_velocity_block_vx_min(blockGID);
      blockParams[BlockParams::VYCRD] = spatial_cell->get_velocity_block_vy_min(blockGID);
      blockParams[BlockParams::VZCRD] = spatial_cell->get_velocity_block_vz_min(blockGID);
      vmesh.getCellSize(blockGID,&(blockParams[BlockParams::DVX]));
   }
}
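// Illustrative note (an assumption, not part of the original source): the
// switch above only builds the three-cell stencil {upstream, center, downstream}
// along the chosen dimension. The same selection can be written once with an
// offset table, which is the design alternative to per-dimension cases:
static CellID faceNeighborSketch(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
                                 CellID cellID, int dim, int dir /* -1 or +1 */) {
   int off[3] = {0, 0, 0};
   off[dim] = dir;
   return get_spatial_neighbor(mpiGrid, cellID, true, off[0], off[1], off[2]);
}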
/* Calculate the number of cells on the maximum refinement level overlapping
   the list of dccrg cells in cells. */
int getNumberOfCellsOnMaxRefLvl(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
                                const std::vector<CellID>& cells) {
   int nCells = 0;
   auto maxRefLvl = mpiGrid.mapping.get_maximum_refinement_level();
   for (auto cellid : cells) {
      auto refLvl = mpiGrid.get_refinement_level(cellid);
      nCells += pow(pow(2,maxRefLvl-refLvl),3);
   }
   return nCells;
}
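// Worked example (illustrative, not part of the original source): a dccrg cell
// at refinement level refLvl overlaps 2^(maxRefLvl-refLvl) finest-level cells
// per dimension, i.e. 8^(maxRefLvl-refLvl) in total. With maxRefLvl = 2 a
// level-0 cell contributes 4^3 = 64 cells and a level-2 cell exactly 1.
// Integer shifts express the same count without the floating-point pow():
static int cellsOnMaxRefLvlSketch(int maxRefLvl, int refLvl) {
   const int perDim = 1 << (maxRefLvl - refLvl); // 2^(maxRefLvl-refLvl)
   return perDim * perDim * perDim;              // == 8^(maxRefLvl-refLvl)
}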
std::vector<CellID> mapDccrgIdToFsGridGlobalID(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
                                               CellID dccrgID) {
   const auto maxRefLvl = mpiGrid.get_maximum_refinement_level();
   const auto refLvl = mpiGrid.get_refinement_level(dccrgID);
   const auto cellLength = pow(2,maxRefLvl-refLvl);
   const auto topLeftIndices = mpiGrid.mapping.get_indices(dccrgID);

   std::array<int,3> fsgridDims;
   fsgridDims[0] = P::xcells_ini * pow(2,maxRefLvl);
   fsgridDims[1] = P::ycells_ini * pow(2,maxRefLvl);
   fsgridDims[2] = P::zcells_ini * pow(2,maxRefLvl);

   std::vector<CellID> fsgridIDs(cellLength * cellLength * cellLength);
   for (uint k = 0; k < cellLength; ++k) {
      for (uint j = 0; j < cellLength; ++j) {
         for (uint i = 0; i < cellLength; ++i) {
            const std::array<uint64_t,3> indices = {{topLeftIndices[0] + i,
                                                     topLeftIndices[1] + j,
                                                     topLeftIndices[2] + k}};
            fsgridIDs[k*cellLength*cellLength + j*cellLength + i] =
               indices[0] + indices[1] * fsgridDims[0] + indices[2] * fsgridDims[1] * fsgridDims[0];
         }
      }
   }
   return fsgridIDs;
}
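// Illustrative check (not part of the original source): the ID computed above
// is the usual row-major linearization over the finest-level grid,
//    gid = i + j*Nx + k*Nx*Ny.
// For example, on a 4 x 4 x 4 finest grid the cell at indices (1,2,3) gets
// gid = 1 + 2*4 + 3*4*4 = 57.
static uint64_t fsgridGlobalIDSketch(uint64_t i, uint64_t j, uint64_t k,
                                     uint64_t Nx, uint64_t Ny) {
   return i + j*Nx + k*Nx*Ny;
}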
/*! \brief Read in state from a vlsv file in order to restart simulations
 \param mpiGrid Vlasiator's grid
 \param name Name of the restart file e.g. "restart.00052.vlsv"
 \return Returns true if the operation was successful
 \sa readGrid
 */
bool exec_readGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
                   FsGrid< std::array<Real, fsgrids::bfield::N_BFIELD>, 2>& perBGrid,
                   FsGrid< std::array<Real, fsgrids::efield::N_EFIELD>, 2>& EGrid,
                   FsGrid< fsgrids::technical, 2>& technicalGrid,
                   const std::string& name) {
   vector<CellID> fileCells; /*< CellIDs for all cells in file */
   vector<size_t> nBlocks;   /*< Number of blocks for all cells in file */
   bool success = true;
   int myRank, processes;

#warning Spatial grid name hard-coded here
   const string meshName = "SpatialGrid";

   // Attempt to open VLSV file for reading:
   MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
   MPI_Comm_size(MPI_COMM_WORLD, &processes);

   phiprof::start("readGrid");

   vlsv::ParallelReader file;
   MPI_Info mpiInfo = MPI_INFO_NULL;

   if (file.open(name, MPI_COMM_WORLD, MASTER_RANK, mpiInfo) == false) {
      success = false;
   }
   exitOnError(success, "(RESTART) Could not open file", MPI_COMM_WORLD);

   // Around May 2015 time was renamed from "t" to "time"; we try to read both,
   // the new name first.
   if (readScalarParameter(file, "time", P::t, MASTER_RANK, MPI_COMM_WORLD) == false)
      if (readScalarParameter(file, "t", P::t, MASTER_RANK, MPI_COMM_WORLD) == false)
         success = false;
   P::t_min = P::t;

   // Around May 2015 timestep was renamed from "tstep" to "timestep"; we try to
   // read both, the new name first.
   if (readScalarParameter(file, "timestep", P::tstep, MASTER_RANK, MPI_COMM_WORLD) == false)
      if (readScalarParameter(file, "tstep", P::tstep, MASTER_RANK, MPI_COMM_WORLD) == false)
         success = false;
   P::tstep_min = P::tstep;

   if (readScalarParameter(file, "dt", P::dt, MASTER_RANK, MPI_COMM_WORLD) == false) success = false;

   if (readScalarParameter(file, "fieldSolverSubcycles", P::fieldSolverSubcycles, MASTER_RANK, MPI_COMM_WORLD) == false) {
      // Legacy restarts do not have this field; it "should" be safe for one or two steps...
      P::fieldSolverSubcycles = 1.0;
      cout << " No P::fieldSolverSubcycles found in restart, setting 1." << endl;
   }
   MPI_Bcast(&(P::fieldSolverSubcycles), 1, MPI_Type<Real>(), MASTER_RANK, MPI_COMM_WORLD);

   checkScalarParameter(file, "xmin", P::xmin, MASTER_RANK, MPI_COMM_WORLD);
   checkScalarParameter(file, "ymin", P::ymin, MASTER_RANK, MPI_COMM_WORLD);
   checkScalarParameter(file, "zmin", P::zmin, MASTER_RANK, MPI_COMM_WORLD);
   checkScalarParameter(file, "xmax", P::xmax, MASTER_RANK, MPI_COMM_WORLD);
   checkScalarParameter(file, "ymax", P::ymax, MASTER_RANK, MPI_COMM_WORLD);
   checkScalarParameter(file, "zmax", P::zmax, MASTER_RANK, MPI_COMM_WORLD);
   checkScalarParameter(file, "xcells_ini", P::xcells_ini, MASTER_RANK, MPI_COMM_WORLD);
   checkScalarParameter(file, "ycells_ini", P::ycells_ini, MASTER_RANK, MPI_COMM_WORLD);
   checkScalarParameter(file, "zcells_ini", P::zcells_ini, MASTER_RANK, MPI_COMM_WORLD);

   phiprof::start("readDatalayout");
   if (success == true) success = readCellIds(file, fileCells, MASTER_RANK, MPI_COMM_WORLD);

   // Check that the cellID lists are identical in file and grid
   if (myRank == 0) {
      vector<CellID> allGridCells = mpiGrid.get_all_cells();
      if (fileCells.size() != allGridCells.size()) {
         success = false;
      }
   }
   exitOnError(success, "(RESTART) Wrong number of cells in restart file", MPI_COMM_WORLD);

   // Read the total number of velocity blocks in each spatial cell.
   // Note that this is a sum over all existing particle species.
   if (success == true) {
      success = readNBlocks(file, meshName, nBlocks, MASTER_RANK, MPI_COMM_WORLD);
   }

   // Make sure all cells are empty; we will overwrite everything anyway, and
   // in that case moving cells is easier...
   {
      const vector<CellID>& gridCells = getLocalCells();
      for (size_t i=0; i<gridCells.size(); i++) {
         for (uint popID=0; popID<getObjectWrapper().particleSpecies.size(); ++popID)
            mpiGrid[gridCells[i]]->clear(popID);
      }
   }

   uint64_t totalNumberOfBlocks = 0;
   unsigned int numberOfBlocksPerProcess;

   for (uint i=0; i<nBlocks.size(); ++i) {
      totalNumberOfBlocks += nBlocks[i];
   }
   numberOfBlocksPerProcess = 1 + totalNumberOfBlocks/processes;

   uint64_t localCellStartOffset = 0; // This is where local cells start in the file list after migration.
   uint64_t localCells = 0;
   uint64_t numberOfBlocksCount = 0;

   // Pin local cells to remote processes: we try to balance the number of blocks
   // so that each process gets more or less the same amount.
   for (size_t i=0; i<fileCells.size(); ++i) {
      numberOfBlocksCount += nBlocks[i];
      int newCellProcess = numberOfBlocksCount/numberOfBlocksPerProcess;
      if (newCellProcess == myRank) {
         if (localCells == 0) localCellStartOffset = i; // Here local cells start
         ++localCells;
      }
      if (mpiGrid.is_local(fileCells[i])) {
         mpiGrid.pin(fileCells[i], newCellProcess);
      }
   }

   SpatialCell::set_mpi_transfer_type(Transfer::ALL_SPATIAL_DATA);

   // Do the initial load balance based on the pins. We need to transfer at least
   // the sysboundary flags.
   mpiGrid.balance_load(false);

   // Update the list of local grid cells
   recalculateLocalCellsCache();

   // Get the new list of local grid cells
   const vector<CellID>& gridCells = getLocalCells();

   // Unpin the cells, otherwise we would never change this initial, bad balance
   for (size_t i=0; i<gridCells.size(); ++i) {
      mpiGrid.unpin(gridCells[i]);
   }

   // Check for errors: did the migration succeed?
   if (localCells != gridCells.size()) {
      success = false;
   }
   if (success == true) {
      for (uint64_t i=localCellStartOffset; i<localCellStartOffset+localCells; ++i) {
         if (mpiGrid.is_local(fileCells[i]) == false) {
            success = false;
         }
      }
   }
   exitOnError(success, "(RESTART) Cell migration failed", MPI_COMM_WORLD);

   // Set cell coordinates based on cfg (mpiGrid) information
   for (size_t i=0; i<gridCells.size(); ++i) {
      array<double, 3> cell_min = mpiGrid.geometry.get_min(gridCells[i]);
      array<double, 3> cell_length = mpiGrid.geometry.get_length(gridCells[i]);

      mpiGrid[gridCells[i]]->parameters[CellParams::XCRD] = cell_min[0];
      mpiGrid[gridCells[i]]->parameters[CellParams::YCRD] = cell_min[1];
      mpiGrid[gridCells[i]]->parameters[CellParams::ZCRD] = cell_min[2];
      mpiGrid[gridCells[i]]->parameters[CellParams::DX  ] = cell_length[0];
      mpiGrid[gridCells[i]]->parameters[CellParams::DY  ] = cell_length[1];
      mpiGrid[gridCells[i]]->parameters[CellParams::DZ  ] = cell_length[2];
   }

   // Where the local data start in the block lists
   //uint64_t localBlocks = 0;
   //for (uint64_t i=localCellStartOffset; i<localCellStartOffset+localCells; ++i) {
   //   localBlocks += nBlocks[i];
   //}
   phiprof::stop("readDatalayout");

   // TODO: check the file datatype, and do not just use double
   phiprof::start("readCellParameters");
   if (success) { success = readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"moments",CellParams::RHOM,5,mpiGrid); }
   if (success) { success = readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"moments_dt2",CellParams::RHOM_DT2,5,mpiGrid); }
   if (success) { success = readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"moments_r",CellParams::RHOM_R,5,mpiGrid); }
   if (success) { success = readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"moments_v",CellParams::RHOM_V,5,mpiGrid); }
   if (success) { success = readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"pressure",CellParams::P_11,3,mpiGrid); }
   if (success) { success = readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"pressure_dt2",CellParams::P_11_DT2,3,mpiGrid); }
   if (success) { success = readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"pressure_r",CellParams::P_11_R,3,mpiGrid); }
   if (success) { success = readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"pressure_v",CellParams::P_11_V,3,mpiGrid); }
   if (success) { success = readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"LB_weight",CellParams::LBWEIGHTCOUNTER,1,mpiGrid); }
   if (success) { success = readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"max_v_dt",CellParams::MAXVDT,1,mpiGrid); }
   if (success) { success = readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"max_r_dt",CellParams::MAXRDT,1,mpiGrid); }
   if (success) { success = readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"max_fields_dt",CellParams::MAXFDT,1,mpiGrid); }
   // Background B has to be set; there are also the derivatives that should be
   // written/read if we wanted to read in only the background field.
   phiprof::stop("readCellParameters");

   phiprof::start("readBlockData");
   if (success == true) {
      success = readBlockData(file,meshName,fileCells,localCellStartOffset,localCells,mpiGrid);
   }
   phiprof::stop("readBlockData");

   mpiGrid.update_copies_of_remote_neighbors(FULL_NEIGHBORHOOD_ID);

   // Read the fsgrid data back in
   int fsgridInputRanks = 0;
   if (readScalarParameter(file,"numWritingRanks",fsgridInputRanks,MASTER_RANK,MPI_COMM_WORLD) == false) {
      exitOnError(false, "(RESTART) FSGrid writing rank number not found in restart file", MPI_COMM_WORLD);
   }
   if (success) { success = readFsGridVariable(file, "fg_PERB", fsgridInputRanks, perBGrid); }
   if (success) { success = readFsGridVariable(file, "fg_E", fsgridInputRanks, EGrid); }

   success = file.close();
   phiprof::stop("readGrid");

   exitOnError(success, "(RESTART) Other failure", MPI_COMM_WORLD);
   return success;
}
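// Illustrative sketch (an assumption, not part of the original source): the
// pinning loop in exec_readGrid assigns the i:th file cell to rank
// floor(prefixBlockCount(i) / ceil(totalBlocks/processes)), which cuts the
// file-ordered cell list into contiguous chunks with roughly equal block
// counts. The same arithmetic in isolation:
#include <cstdint>
#include <vector>
std::vector<int> balanceByBlocksSketch(const std::vector<uint64_t>& nBlocks, int processes) {
   uint64_t total = 0;
   for (uint64_t n : nBlocks) total += n;
   const uint64_t perProcess = 1 + total/processes;

   std::vector<int> owner(nBlocks.size());
   uint64_t count = 0;
   for (size_t i=0; i<nBlocks.size(); ++i) {
      count += nBlocks[i];
      owner[i] = (int)(count/perProcess); // rank that should own file cell i
   }
   return owner;
}
// e.g. nBlocks = {4,4,4,4} on 2 processes: perProcess = 9, owners = {0,0,1,1}.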
/** Read velocity block data of all existing particle species.
 * @param file VLSV reader.
 * @param meshName Name of the spatial mesh.
 * @param fileCells Vector containing spatial cell IDs.
 * @param localCellStartOffset Offset into fileCells, determines where the cells belonging
 * to this process start.
 * @param localCells Number of spatial cells assigned to this process.
 * @param mpiGrid Parallel grid library.
 * @return If true, velocity block data was read successfully.*/
bool readBlockData(
   vlsv::ParallelReader& file,
   const string& meshName,
   const vector<CellID>& fileCells,
   const uint64_t localCellStartOffset,
   const uint64_t localCells,
   dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid
) {
   bool success = true;
   const uint64_t bytesReadStart = file.getBytesRead();
   int N_processes;
   MPI_Comm_size(MPI_COMM_WORLD, &N_processes);

   uint64_t arraySize;
   uint64_t vectorSize;
   vlsv::datatype::type dataType;
   uint64_t byteSize;
   uint64_t* offsetArray = new uint64_t[N_processes];

   for (uint popID=0; popID<getObjectWrapper().particleSpecies.size(); ++popID) {
      const string& popName = getObjectWrapper().particleSpecies[popID].name;

      // Create a cellID remapping lambda that can renumber our velocity space,
      // should its size have changed. By default this is a no-op that keeps the
      // block IDs untouched.
      std::function<vmesh::GlobalID(vmesh::GlobalID)> blockIDremapper =
         [](vmesh::GlobalID oldID) -> vmesh::GlobalID { return oldID; };

      // Check that the velocity space extents and DV match the grids we have created
      list<pair<string,string> > attribs;
      attribs.push_back(make_pair("mesh", popName));

      std::array<unsigned int, 6> fileMeshBBox;
      unsigned int* bufferpointer = &fileMeshBBox[0];
      if (file.read("MESH_BBOX", attribs, 0, 6, bufferpointer, false) == false) {
         logFile << "(RESTART) ERROR: Failed to read MESH_BBOX at " << __FILE__ << ":" << __LINE__ << endl << write;
         success = false;
      }

      const size_t meshID = getObjectWrapper().particleSpecies[popID].velocityMesh;
      const vmesh::MeshParameters& ourMeshParams = getObjectWrapper().velocityMeshes[meshID];
      if (fileMeshBBox[0] != ourMeshParams.gridLength[0] ||
          fileMeshBBox[1] != ourMeshParams.gridLength[1] ||
          fileMeshBBox[2] != ourMeshParams.gridLength[2]) {

         logFile << "(RESTART) INFO: velocity mesh sizes don't match:" << endl
                 << "    restart file has " << fileMeshBBox[0] << " x " << fileMeshBBox[1] << " x " << fileMeshBBox[2] << "," << endl
                 << "    config specifies " << ourMeshParams.gridLength[0] << " x " << ourMeshParams.gridLength[1] << " x " << ourMeshParams.gridLength[2] << endl << write;

         if (ourMeshParams.gridLength[0] < fileMeshBBox[0] ||
             ourMeshParams.gridLength[1] < fileMeshBBox[1] ||
             ourMeshParams.gridLength[2] < fileMeshBBox[2]) {
            logFile << "(RESTART) ERROR: trying to shrink velocity space." << endl << write;
            abort();
         }

         // If we are mismatched, we have to iterate through the velocity coords
         // to see if we have a chance at renumbering.
         std::vector<Real> fileVelCoordsX(fileMeshBBox[0]*fileMeshBBox[3]+1);
         std::vector<Real> fileVelCoordsY(fileMeshBBox[1]*fileMeshBBox[4]+1);
         std::vector<Real> fileVelCoordsZ(fileMeshBBox[2]*fileMeshBBox[5]+1);

         Real* tempPointer = fileVelCoordsX.data();
         if (file.read("MESH_NODE_CRDS_X", attribs, 0, fileMeshBBox[0]*fileMeshBBox[3]+1, tempPointer, false) == false) {
            logFile << "(RESTART) ERROR: Failed to read MESH_NODE_CRDS_X at " << __FILE__ << ":" << __LINE__ << endl << write;
            success = false;
         }
         tempPointer = fileVelCoordsY.data();
         if (file.read("MESH_NODE_CRDS_Y", attribs, 0, fileMeshBBox[1]*fileMeshBBox[4]+1, tempPointer, false) == false) {
            logFile << "(RESTART) ERROR: Failed to read MESH_NODE_CRDS_Y at " << __FILE__ << ":" << __LINE__ << endl << write;
            success = false;
         }
         tempPointer = fileVelCoordsZ.data();
         if (file.read("MESH_NODE_CRDS_Z", attribs, 0, fileMeshBBox[2]*fileMeshBBox[5]+1, tempPointer, false) == false) {
            logFile << "(RESTART) ERROR: Failed to read MESH_NODE_CRDS_Z at " << __FILE__ << ":" << __LINE__ << endl << write;
            success = false;
         }

         const Real dVx = getObjectWrapper().velocityMeshes[meshID].cellSize[0];
         for (const auto& c : fileVelCoordsX) {
            Real cellindex = (c - getObjectWrapper().velocityMeshes[meshID].meshMinLimits[0]) / dVx;
            if (fabs(nearbyint(cellindex) - cellindex) > 1./10000.) {
               logFile << "(RESTART) ERROR: Can't resize velocity space as cell coordinates don't match." << endl
                       << "    (X coordinate " << c << " = " << cellindex << " * " << dVx << " + "
                       << getObjectWrapper().velocityMeshes[meshID].meshMinLimits[0] << endl
                       << "     coordinate = cellindex * dV + meshMinLimits)" << endl << write;
               abort();
            }
         }

         const Real dVy = getObjectWrapper().velocityMeshes[meshID].cellSize[1];
         for (const auto& c : fileVelCoordsY) {
            Real cellindex = (c - getObjectWrapper().velocityMeshes[meshID].meshMinLimits[1]) / dVy;
            if (fabs(nearbyint(cellindex) - cellindex) > 1./10000.) {
               logFile << "(RESTART) ERROR: Can't resize velocity space as cell coordinates don't match." << endl
                       << "    (Y coordinate " << c << " = " << cellindex << " * " << dVy << " + "
                       << getObjectWrapper().velocityMeshes[meshID].meshMinLimits[1] << endl
                       << "     coordinate = cellindex * dV + meshMinLimits)" << endl << write;
               abort();
            }
         }

         const Real dVz = getObjectWrapper().velocityMeshes[meshID].cellSize[2];
         for (const auto& c : fileVelCoordsZ) { // NOTE: fixed, was iterating the Y coordinates
            Real cellindex = (c - getObjectWrapper().velocityMeshes[meshID].meshMinLimits[2]) / dVz;
            if (fabs(nearbyint(cellindex) - cellindex) > 1./10000.) {
               logFile << "(RESTART) ERROR: Can't resize velocity space as cell coordinates don't match." << endl
                       << "    (Z coordinate " << c << " = " << cellindex << " * " << dVz << " + "
                       << getObjectWrapper().velocityMeshes[meshID].meshMinLimits[2] << endl
                       << "     coordinate = cellindex * dV + meshMinLimits)" << endl << write;
               abort();
            }
         }

         // If we haven't aborted above, we can apparently renumber our cell IDs.
         // Build an appropriate blockIDremapper lambda for this purpose.
         std::array<int, 3> velGridOffset;
         velGridOffset[0] = (fileVelCoordsX[0] - getObjectWrapper().velocityMeshes[meshID].meshMinLimits[0]) / dVx;
         velGridOffset[1] = (fileVelCoordsY[0] - getObjectWrapper().velocityMeshes[meshID].meshMinLimits[1]) / dVy;
         velGridOffset[2] = (fileVelCoordsZ[0] - getObjectWrapper().velocityMeshes[meshID].meshMinLimits[2]) / dVz;

         if ((velGridOffset[0] % ourMeshParams.blockLength[0] != 0) ||
             (velGridOffset[1] % ourMeshParams.blockLength[1] != 0) ||
             (velGridOffset[2] % ourMeshParams.blockLength[2] != 0)) {
            logFile << "(RESTART) ERROR: resizing velocity space on restart must end up with the old velocity space" << endl
                    << "                 at a block boundary of the new space!" << endl
                    << "                 (It now starts at cell [" << velGridOffset[0] << ", " << velGridOffset[1] << ", " << velGridOffset[2] << "])" << endl << write;
            abort();
         }

         velGridOffset[0] /= ourMeshParams.blockLength[0];
         velGridOffset[1] /= ourMeshParams.blockLength[1];
         velGridOffset[2] /= ourMeshParams.blockLength[2];

         blockIDremapper = [fileMeshBBox,velGridOffset,ourMeshParams](vmesh::GlobalID oldID) -> vmesh::GlobalID {
            unsigned int x,y,z;
            x = oldID % fileMeshBBox[0];
            y = (oldID / fileMeshBBox[0]) % fileMeshBBox[1];
            z = oldID / (fileMeshBBox[0] * fileMeshBBox[1]);
            x += velGridOffset[0];
            y += velGridOffset[1];
            z += velGridOffset[2];
            //logFile << "Remapping " << oldID << " (" << x << "," << y << "," << z << ") to "
            //        << x + y*ourMeshParams.gridLength[0] + z*ourMeshParams.gridLength[0]*ourMeshParams.gridLength[1] << endl << write;
            return x + y*ourMeshParams.gridLength[0] + z*ourMeshParams.gridLength[0]*ourMeshParams.gridLength[1];
         };

         logFile << " => Resizing velocity space by renumbering GlobalIDs." << endl << endl << write;
      }

      // In restart files each spatial cell has an entry in CELLSWITHBLOCKS.
      // Each process calculates how many velocity blocks it has for this species.
      attribs.clear();
      attribs.push_back(make_pair("mesh", meshName));
      attribs.push_back(make_pair("name", popName));

      vmesh::LocalID* blocksPerCell = NULL;
      if (file.read("BLOCKSPERCELL", attribs, localCellStartOffset, localCells, blocksPerCell, true) == false) {
         logFile << "(RESTART) ERROR: Failed to read BLOCKSPERCELL at " << __FILE__ << ":" << __LINE__ << endl << write;
         success = false;
      }

      // Count how many velocity blocks this process gets
      uint64_t blockSum = 0;
      for (uint64_t i=0; i<localCells; ++i) {
         blockSum += blocksPerCell[i];
      }

      // Gather the per-process block sums on every process, so that each one can
      // calculate the offset from which it starts reading block data.
      MPI_Allgather(&blockSum, 1, MPI_Type<uint64_t>(), offsetArray, 1, MPI_Type<uint64_t>(), MPI_COMM_WORLD);
      uint64_t myOffset = 0;
      for (int64_t i=0; i<mpiGrid.get_rank(); ++i) myOffset += offsetArray[i];

      if (file.getArrayInfo("BLOCKVARIABLE", attribs, arraySize, vectorSize, dataType, byteSize) == false) {
         logFile << "(RESTART) ERROR: Failed to read BLOCKVARIABLE INFO" << endl << write;
         return false;
      }

      // Call _readBlockData with the scalar type used in the file
      if (dataType == vlsv::datatype::type::FLOAT) {
         switch (byteSize) {
            case sizeof(double):
               if (_readBlockData<double>(file,meshName,fileCells,localCellStartOffset,localCells,blocksPerCell,
                                          myOffset,blockSum,mpiGrid,blockIDremapper,popID) == false) success = false;
               break;
            case sizeof(float):
               if (_readBlockData<float>(file,meshName,fileCells,localCellStartOffset,localCells,blocksPerCell,
                                         myOffset,blockSum,mpiGrid,blockIDremapper,popID) == false) success = false;
               break;
         }
      } else if (dataType == vlsv::datatype::type::UINT) {
         switch (byteSize) {
            case sizeof(uint32_t):
               if (_readBlockData<uint32_t>(file,meshName,fileCells,localCellStartOffset,localCells,blocksPerCell,
                                            myOffset,blockSum,mpiGrid,blockIDremapper,popID) == false) success = false;
               break;
            case sizeof(uint64_t):
               if (_readBlockData<uint64_t>(file,meshName,fileCells,localCellStartOffset,localCells,blocksPerCell,
                                            myOffset,blockSum,mpiGrid,blockIDremapper,popID) == false) success = false;
               break;
         }
      } else if (dataType == vlsv::datatype::type::INT) {
         switch (byteSize) {
            case sizeof(int32_t):
               if (_readBlockData<int32_t>(file,meshName,fileCells,localCellStartOffset,localCells,blocksPerCell,
                                           myOffset,blockSum,mpiGrid,blockIDremapper,popID) == false) success = false;
               break;
            case sizeof(int64_t):
               if (_readBlockData<int64_t>(file,meshName,fileCells,localCellStartOffset,localCells,blocksPerCell,
                                           myOffset,blockSum,mpiGrid,blockIDremapper,popID) == false) success = false;
               break;
         }
      } else {
         logFile << "(RESTART) ERROR: Failed to read data type in readBlockData" << endl << write;
         success = false;
      }
      delete [] blocksPerCell; blocksPerCell = NULL;
   } // for-loop over particle species

   delete [] offsetArray; offsetArray = NULL;

   const uint64_t bytesReadEnd = file.getBytesRead() - bytesReadStart;
   logFile << "Velocity meshes and data read, approximate data rate is ";
   logFile << vlsv::printDataRate(bytesReadEnd, file.getReadTime()) << endl << write;

   return success;
}
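// Illustrative alternative (an assumption, not part of the original source):
// the Allgather-plus-partial-sum in readBlockData computes an exclusive prefix
// sum of the per-rank block counts. MPI_Exscan expresses the same read offset
// directly:
#include <mpi.h>
#include <cstdint>
uint64_t readOffsetSketch(uint64_t myBlockCount, MPI_Comm comm) {
   uint64_t myOffset = 0;
   // Exclusive scan: rank r receives the sum of the counts on ranks 0..r-1.
   MPI_Exscan(&myBlockCount, &myOffset, 1, MPI_UINT64_T, MPI_SUM, comm);
   int rank;
   MPI_Comm_rank(comm, &rank);
   if (rank == 0) myOffset = 0; // MPI_Exscan leaves rank 0's output undefined
   return myOffset;
}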
setOfPencils buildPencils( dccrg::Dccrg<grid_data> grid,
                           setOfPencils &pencils, vector<CellID> idsOut,
                           vector<CellID> idsIn, int dimension,
                           vector<pair<bool,bool>> path) {

   // Not necessary since C++ passes a copy by default.
   // Copy the input ids to a working set of ids
   // vector<int> ids( idsIn );
   // Copy the already computed pencil to the output list
   // vector<int> idsOut( idsInPencil );

   uint i = 0;
   uint length = idsIn.size();

   // Walk along the input pencil. Initially length is equal to the length of the
   // unrefined pencil. When refined cells are encountered, the length is increased
   // accordingly to go through the entire pencil.
   while (i < length) {

      uint i1 = i + 1;
      CellID id = idsIn[i];
      vector<CellID> children = grid.get_all_children(id);
      bool hasChildren = ( grid.get_parent(children[0]) == id );

      // Check if the current cell contains refined cells
      if (hasChildren) {

         // Check if we have encountered this refinement level before and stored
         // the path this builder followed
         if (path.size() > grid.get_refinement_level(id)) {

            // Get children using the stored path
            vector<CellID> myChildren = getMyChildren(children, dimension,
                                                      path[grid.get_refinement_level(id)].first,
                                                      path[grid.get_refinement_level(id)].second);

            // Add the children to the working set at index i1
            insertVectorIntoVector(idsIn, myChildren, i1);
            length += myChildren.size();

         } else {

            // Spawn new builders to construct pencils at the new refinement level
            for (bool left : { true, false }) {
               for (bool up : { true, false }) {

                  // Store the path this builder has chosen
                  vector< pair<bool,bool> > myPath = path;
                  myPath.push_back(pair<bool,bool>(up, left));

                  // Get children along my path.
                  vector<CellID> myChildren = getMyChildren(children, dimension, up, left);

                  // Get the ids that have not been processed yet.
                  vector<CellID> remainingIds(idsIn.begin() + i1, idsIn.end());

                  // The current builder continues along the bottom-right path.
                  // Other paths will spawn a new builder.
                  if (!up && !left) {

                     // Add the children to the working set at index i1. The next
                     // iteration of the main loop (over idsIn) will start on the
                     // first child.
                     insertVectorIntoVector(idsIn, myChildren, i1);
                     length += myChildren.size();
                     path = myPath;

                  } else {

                     // Create a new working set by adding the remainder of the old
                     // working set to the end of the current children list
                     myChildren.insert(myChildren.end(), remainingIds.begin(), remainingIds.end());
                     buildPencils(grid, pencils, idsOut, myChildren, dimension, myPath);

                  }
               }
            }
         }

      } else {

         // Add unrefined cells to the pencil directly
         idsOut.push_back(id);

      } // closes if (hasChildren)

      // Move to the next cell
      i++;

   } // closes loop over ids

   pencils.addPencil(idsOut, 0.0, 0.0);
   return pencils;

} // closes function
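// Hypothetical sketch (not part of the original source; getMyChildren is not
// shown in this excerpt): in the plane perpendicular to the pencil dimension a
// refined cell has a 2x2 arrangement of child columns, so one (up,left) flag
// pair selects one of four sub-pencils. Assuming dccrg's z-order child
// numbering (index = x + 2y + 4z), the selection could look like this:
static std::vector<uint64_t> getMyChildrenSketch(
   const std::vector<uint64_t>& children, // the 8 children of a refined cell
   int dimension, bool up, bool left
) {
   const int a = left ? 0 : 1; // first perpendicular direction
   const int b = up   ? 0 : 1; // second perpendicular direction
   int first, stride;
   switch (dimension) {
      case 0:  first = 2*a + 4*b; stride = 1; break; // pencil along x
      case 1:  first = a + 4*b;   stride = 2; break; // pencil along y
      default: first = a + 2*b;   stride = 4; break; // pencil along z
   }
   // The two children that continue the pencil, in propagation order
   return { children[first], children[first + stride] };
}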
template <
   class Geometries, class Init_Cond, class Background_Magnetic_Field,
   class Cell, class Geometry,
   class Mass_Density_Getter, class Momentum_Density_Getter,
   class Total_Energy_Density_Getter, class Magnetic_Field_Getter,
   class Background_Magnetic_Field_Pos_X_Getter,
   class Background_Magnetic_Field_Pos_Y_Getter,
   class Background_Magnetic_Field_Pos_Z_Getter,
   class Mass_Density_Flux_Getter, class Momentum_Density_Flux_Getter,
   class Total_Energy_Density_Flux_Getter, class Magnetic_Field_Flux_Getter
> void initialize(
   const Geometries& geometries,
   Init_Cond& initial_conditions,
   const Background_Magnetic_Field& bg_B,
   dccrg::Dccrg<Cell, Geometry>& grid,
   const std::vector<uint64_t>& cells,
   const double time,
   const double adiabatic_index,
   const double vacuum_permeability,
   const double proton_mass,
   const bool verbose,
   const Mass_Density_Getter Mas,
   const Momentum_Density_Getter Mom,
   const Total_Energy_Density_Getter Nrj,
   const Magnetic_Field_Getter Mag,
   const Background_Magnetic_Field_Pos_X_Getter Bg_B_Pos_X,
   const Background_Magnetic_Field_Pos_Y_Getter Bg_B_Pos_Y,
   const Background_Magnetic_Field_Pos_Z_Getter Bg_B_Pos_Z,
   const Mass_Density_Flux_Getter Mas_f,
   const Momentum_Density_Flux_Getter Mom_f,
   const Total_Energy_Density_Flux_Getter Nrj_f,
   const Magnetic_Field_Flux_Getter Mag_f
) {
   if (verbose and grid.get_rank() == 0) {
      std::cout << "Setting default MHD state... ";
      std::cout.flush();
   }

   // set default state
   for (const auto cell_id: cells) {
      auto* const cell_data = grid[cell_id];
      if (cell_data == nullptr) {
         std::cerr << __FILE__ << "(" << __LINE__ << ") No data for cell: " << cell_id << std::endl;
         abort();
      }

      // zero fluxes and background fields
      Mas_f(*cell_data)    =
      Nrj_f(*cell_data)    =
      Mom_f(*cell_data)[0] =
      Mom_f(*cell_data)[1] =
      Mom_f(*cell_data)[2] =
      Mag_f(*cell_data)[0] =
      Mag_f(*cell_data)[1] =
      Mag_f(*cell_data)[2] = 0;

      const auto c = grid.geometry.get_center(cell_id);
      const auto r = sqrt(c[0]*c[0] + c[1]*c[1] + c[2]*c[2]);
      const auto lat = asin(c[2] / r), lon = atan2(c[1], c[0]);

      const auto mass_density
         = proton_mass
         * initial_conditions.get_default_data(Number_Density(), time, c[0], c[1], c[2], r, lat, lon);
      const auto velocity
         = initial_conditions.get_default_data(Velocity(), time, c[0], c[1], c[2], r, lat, lon);
      const auto pressure
         = initial_conditions.get_default_data(Pressure(), time, c[0], c[1], c[2], r, lat, lon);
      const auto magnetic_field
         = initial_conditions.get_default_data(Magnetic_Field(), time, c[0], c[1], c[2], r, lat, lon);

      Mas(*cell_data) = mass_density;
      Mom(*cell_data) = mass_density * velocity;
      Mag(*cell_data) = magnetic_field;
      Nrj(*cell_data) = get_total_energy_density(
         mass_density, velocity, pressure, magnetic_field,
         adiabatic_index, vacuum_permeability
      );

      const auto cell_end = grid.geometry.get_max(cell_id);
      Bg_B_Pos_X(*cell_data) = bg_B.get_background_field({cell_end[0], c[1], c[2]}, vacuum_permeability);
      Bg_B_Pos_Y(*cell_data) = bg_B.get_background_field({c[0], cell_end[1], c[2]}, vacuum_permeability);
      Bg_B_Pos_Z(*cell_data) = bg_B.get_background_field({c[0], c[1], cell_end[2]}, vacuum_permeability);
   }

   // set non-default initial conditions
   if (verbose and grid.get_rank() == 0) {
      std::cout << "done\nSetting non-default initial MHD state... ";
      std::cout.flush();
   }

   // mass density
   for (size_t i = 0; i < initial_conditions.get_number_of_regions(Number_Density()); i++) {
      const auto& init_cond = initial_conditions.get_initial_condition(Number_Density(), i);
      const auto& geometry_id = init_cond.get_geometry_id();
      const auto& cells = geometries.get_cells(geometry_id);
      for (const auto& cell: cells) {
         const auto c = grid.geometry.get_center(cell);
         const auto r = sqrt(c[0]*c[0] + c[1]*c[1] + c[2]*c[2]);
         const auto lat = asin(c[2] / r), lon = atan2(c[1], c[0]);

         const auto mass_density
            = proton_mass
            * initial_conditions.get_data(Number_Density(), geometry_id, time, c[0], c[1], c[2], r, lat, lon);

         auto* const cell_data = grid[cell];
         if (cell_data == NULL) {
            std::cerr << __FILE__ << "(" << __LINE__ << ") No data for cell: " << cell << std::endl;
            abort();
         }
         Mas(*cell_data) = mass_density;
      }
   }

   // velocity
   for (size_t i = 0; i < initial_conditions.get_number_of_regions(Velocity()); i++) {
      const auto& init_cond = initial_conditions.get_initial_condition(Velocity(), i);
      const auto& geometry_id = init_cond.get_geometry_id();
      const auto& cells = geometries.get_cells(geometry_id);
      for (const auto& cell: cells) {
         const auto c = grid.geometry.get_center(cell);
         const auto r = sqrt(c[0]*c[0] + c[1]*c[1] + c[2]*c[2]);
         const auto lat = asin(c[2] / r), lon = atan2(c[1], c[0]);

         const auto velocity
            = initial_conditions.get_data(Velocity(), geometry_id, time, c[0], c[1], c[2], r, lat, lon);

         auto* const cell_data = grid[cell];
         if (cell_data == NULL) {
            std::cerr << __FILE__ << "(" << __LINE__ << ") No data for cell: " << cell << std::endl;
            abort();
         }
         Mom(*cell_data) = Mas(*cell_data) * velocity;
      }
   }

   // magnetic field
   for (size_t i = 0; i < initial_conditions.get_number_of_regions(Magnetic_Field()); i++) {
      const auto& init_cond = initial_conditions.get_initial_condition(Magnetic_Field(), i);
      const auto& geometry_id = init_cond.get_geometry_id();
      const auto& cells = geometries.get_cells(geometry_id);
      for (const auto& cell: cells) {
         const auto c = grid.geometry.get_center(cell);
         const auto r = sqrt(c[0]*c[0] + c[1]*c[1] + c[2]*c[2]);
         const auto lat = asin(c[2] / r), lon = atan2(c[1], c[0]);

         const auto magnetic_field
            = initial_conditions.get_data(Magnetic_Field(), geometry_id, time, c[0], c[1], c[2], r, lat, lon);

         auto* const cell_data = grid[cell];
         if (cell_data == NULL) {
            std::cerr << __FILE__ << "(" << __LINE__ << ") No data for cell: " << cell << std::endl;
            abort();
         }
         Mag(*cell_data) = magnetic_field;
      }
   }

   // pressure
   for (size_t i = 0; i < initial_conditions.get_number_of_regions(Pressure()); i++) {
      const auto& init_cond = initial_conditions.get_initial_condition(Pressure(), i);
      const auto& geometry_id = init_cond.get_geometry_id();
      const auto& cells = geometries.get_cells(geometry_id);
      for (const auto& cell: cells) {
         const auto c = grid.geometry.get_center(cell);
         const auto r = sqrt(c[0]*c[0] + c[1]*c[1] + c[2]*c[2]);
         const auto lat = asin(c[2] / r), lon = atan2(c[1], c[0]);

         const auto pressure
            = initial_conditions.get_data(Pressure(), geometry_id, time, c[0], c[1], c[2], r, lat, lon);

         auto* const cell_data = grid[cell];
         if (cell_data == NULL) {
            std::cerr << __FILE__ << "(" << __LINE__ << ") No data for cell: " << cell << std::endl;
            abort();
         }
         Nrj(*cell_data) = get_total_energy_density(
            Mas(*cell_data),
            Mom(*cell_data) / Mas(*cell_data),
            pressure,
            Mag(*cell_data),
            adiabatic_index,
            vacuum_permeability
         );
      }
   }

   if (verbose and grid.get_rank() == 0) {
      std::cout << "done" << std::endl;
   }
}
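// Illustrative sketch (an assumption, not part of the original source):
// get_total_energy_density is not shown in this excerpt. For ideal MHD the
// conventional definition is
//    E = p/(gamma - 1) + rho*|v|^2/2 + |B|^2/(2*mu0),
// which matches the argument list used above:
#include <array>
double total_energy_density_sketch(
   double mass_density, const std::array<double,3>& velocity,
   double pressure, const std::array<double,3>& magnetic_field,
   double adiabatic_index, double vacuum_permeability
) {
   double v2 = 0, B2 = 0;
   for (int i = 0; i < 3; i++) {
      v2 += velocity[i]*velocity[i];
      B2 += magnetic_field[i]*magnetic_field[i];
   }
   return pressure / (adiabatic_index - 1)
        + mass_density * v2 / 2
        + B2 / (2 * vacuum_permeability);
}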