void peanoclaw::runners::PeanoClawLibraryRunner::initializePeano(
  tarch::la::Vector<DIMENSIONS, double> domainOffset,
  tarch::la::Vector<DIMENSIONS, double> domainSize
) {
  //Initialize heap data
  CellDescriptionHeap::getInstance().setName("CellDescription");
  DataHeap::getInstance().setName("Data");
  LevelStatisticsHeap::getInstance().setName("LevelStatistics");

  assertionEquals(CellDescriptionHeap::getInstance().getNumberOfAllocatedEntries(), 0);
  assertionEquals(DataHeap::getInstance().getNumberOfAllocatedEntries(), 0);
}
void peanoclaw::parallel::MasterWorkerAndForkJoinCommunicator::receivePatch(int localCellDescriptionIndex) {
  logTraceInWith3Arguments("receivePatch", localCellDescriptionIndex, _position, _level);
  #ifdef Parallel

  std::vector<CellDescription> remoteCellDescriptionVector = CellDescriptionHeap::getInstance().receiveData(_remoteRank, _position, _level, _messageType);
  assertionEquals2(remoteCellDescriptionVector.size(), 1, _position, _level);
  CellDescription remoteCellDescription = remoteCellDescriptionVector[0];

  assertion3(localCellDescriptionIndex >= 0, localCellDescriptionIndex, _position, _level);
  CellDescription localCellDescription = CellDescriptionHeap::getInstance().getData(localCellDescriptionIndex).at(0);
  #ifdef Asserts
  assertionNumericalEquals2(remoteCellDescription.getPosition(), localCellDescription.getPosition(), localCellDescription.toString(), remoteCellDescription.toString());
  assertionNumericalEquals2(remoteCellDescription.getSize(), localCellDescription.getSize(), localCellDescription.toString(), remoteCellDescription.toString());
  assertionEquals2(remoteCellDescription.getLevel(), localCellDescription.getLevel(), localCellDescription.toString(), remoteCellDescription.toString());
  #endif

  //Receive the data arrays and store the resulting indices in the cell description
  if(remoteCellDescription.getUIndex() != -1) {
    remoteCellDescription.setUIndex(_subgridCommunicator.receiveDataArray());
  }

  //Reset undesired values
  remoteCellDescription.setNumberOfTransfersToBeSkipped(0);

  //Copy remote cell description to local cell description
  deleteArraysFromPatch(localCellDescriptionIndex);
  remoteCellDescription.setCellDescriptionIndex(localCellDescriptionIndex);
  CellDescriptionHeap::getInstance().getData(localCellDescriptionIndex).at(0) = remoteCellDescription;
  assertionEquals(CellDescriptionHeap::getInstance().getData(localCellDescriptionIndex).size(), 1);

  Patch subgrid(localCellDescriptionIndex);
  subgrid.initializeNonParallelFields();

  //TODO unterweg debug
//  std::cout << "Received cell description on rank " << tarch::parallel::Node::getInstance().getRank()
//      << " from rank " << _remoteRank << ": " << remoteCellDescription.toString() << std::endl << subgrid.toStringUNew() << std::endl;

  #if defined(AssertForPositiveValues) && defined(Asserts)
  if(subgrid.isLeaf() || subgrid.isVirtual()) {
    assertion4(!subgrid.containsNonPositiveNumberInUnknownInUNew(0),
                tarch::parallel::Node::getInstance().getRank(),
                _remoteRank,
                subgrid,
                subgrid.toStringUNew());
  }
  #endif

  assertionEquals(CellDescriptionHeap::getInstance().getData(localCellDescriptionIndex).at(0).getCellDescriptionIndex(), localCellDescriptionIndex);
  #endif
  logTraceOut("receivePatch");
}
void tarch::plotter::griddata::unstructured::vtk::VTKTextFileWriter::VertexDataWriter::close() {
  assertionEquals( _lastWriteCommandVertexNumber, _myWriter._numberOfVertices-1 );
  assertionMsg( _myWriter.isOpen(), "Maybe you forgot to call close() on a data writer before you destroy your writer?" );

  if (_lastWriteCommandVertexNumber>=-1) {
    _out << std::endl;
    _myWriter._vertexDataDescription += _out.str();
  }
  _lastWriteCommandVertexNumber = -2;
}
scenario::diffusionequation::CornerPointField::CornerPointField(
  const tarch::la::Vector<DIMENSIONS,double>&     fieldBoundingBox,
  const tarch::la::Vector<DIMENSIONS,double>&     fieldOffset,
  tarch::la::Vector<2,int>                        pillars,
  const std::vector<CornerPointPillar>&           entries
):
  _hexahedron(false, fieldBoundingBox, fieldOffset),
  _pillars(pillars),
  _entries(entries) {
  assertionEquals( tarch::la::volume(_pillars), static_cast<int>(_entries.size()) );
}
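//Illustrative sketch (not part of the original sources): the constructor
//above ties the pillar grid to the entry list via tarch::la::volume, which
//for a 2-component integer vector is the product of its components. The
//stand-alone analogue below uses std::array in place of the tarch types;
//all names are made up for illustration.
#include <array>
#include <cassert>
#include <vector>

struct CornerPointPillar {};  //stand-in for the real pillar record

//Analogue of tarch::la::volume for a 2-component vector
int volume(const std::array<int,2>& pillars) {
  return pillars[0] * pillars[1];
}

int main() {
  std::array<int,2> pillars = {4, 3};
  std::vector<CornerPointPillar> entries(12);  //4 x 3 pillars
  assert(volume(pillars) == static_cast<int>(entries.size()));
  return 0;
}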
void peanoclaw::Patch::fillCaches() {
  assertionEquals(sizeof(Data), sizeof(double));

  int ghostlayerWidth = _cellDescription->getGhostlayerWidth();
  tarch::la::Vector<DIMENSIONS, double> subdivisionFactor =
      _cellDescription->getSubdivisionFactor().convertScalar<double>();

  //UOld
  int stride = 1;
  for (int d = DIMENSIONS; d > 0; d--) {
    _uOldStrideCache[d] = stride;
    stride *= subdivisionFactor(d - 1) + 2 * ghostlayerWidth;
  }
  _uOldStrideCache[0] = stride;
  _uOldWithGhostlayerArrayIndex = tarch::la::volume(subdivisionFactor) * _cellDescription->getUnknownsPerSubcell();

  //UNew
  stride = 1;
  for (int d = DIMENSIONS; d > 0; d--) {
    _uNewStrideCache[d] = stride;
    stride *= subdivisionFactor(d - 1);
  }
  _uNewStrideCache[0] = stride;

  //Parameter without ghostlayer
  tarch::la::Vector<DIMENSIONS, int> ghostlayer = tarch::la::Vector<DIMENSIONS, int>(2*ghostlayerWidth);
  _parameterWithoutGhostlayerArrayIndex = _uOldWithGhostlayerArrayIndex
      + tarch::la::volume(_cellDescription->getSubdivisionFactor() + ghostlayer) * _cellDescription->getUnknownsPerSubcell();

  //Parameter with ghostlayer
  _parameterWithGhostlayerArrayIndex = _parameterWithoutGhostlayerArrayIndex
      + tarch::la::volume(_cellDescription->getSubdivisionFactor()) * _cellDescription->getNumberOfParametersWithoutGhostlayerPerSubcell();

  //Precompute subcell size
  tarch::la::Vector<DIMENSIONS,double> size = _cellDescription->getSize();
  for (int d = 0; d < DIMENSIONS; d++) {
    _subcellSize[d] = size[d] / subdivisionFactor[d];
  }
}
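//Illustrative sketch (not part of the original sources): fillCaches() above
//precomputes row-major strides, with the last dimension contiguous (stride 1),
//each outer dimension's stride equal to the product of the extents behind it,
//and slot 0 holding the total cell count, i.e. the offset between unknowns.
//This stand-alone analogue uses std::array instead of tarch::la::Vector;
//names are made up for illustration.
#include <array>
#include <cassert>

constexpr int kDim = 2;  //stand-in for DIMENSIONS

std::array<int, kDim + 1> computeStrides(const std::array<int, kDim>& cells) {
  std::array<int, kDim + 1> strides{};
  int stride = 1;
  for (int d = kDim; d > 0; d--) {
    strides[d] = stride;
    stride *= cells[d - 1];
  }
  strides[0] = stride;  //total number of cells per unknown
  return strides;
}

int main() {
  //A 4x3 subgrid: innermost stride 1, next dimension 3, 12 cells in total
  const std::array<int, kDim + 1> strides = computeStrides({4, 3});
  assert(strides[2] == 1 && strides[1] == 3 && strides[0] == 12);
  return 0;
}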
void peanoclaw::mappings::ValidateGrid::beginIteration(
  peanoclaw::State&  solverState
) {
  logTraceInWith1Argument( "beginIteration(State)", solverState );

  #ifdef Parallel
  PatchDescriptionHeap::getInstance().startToSendSynchronousData();
  PatchDescriptionHeap::getInstance().startToSendBoundaryData(solverState.isTraversalInverted());
  #endif

  _validator = peanoclaw::statistics::ParallelGridValidator(
    solverState.getDomainOffset(),
    solverState.getDomainSize(),
    solverState.useDimensionalSplittingExtrapolation()
  );
  assertionEquals(_validator.getAllPatches().size(), 0);
  _state = solverState;
  _domainOffset = solverState.getDomainOffset();
  _domainSize = solverState.getDomainSize();
  PatchDescriptionHeap::getInstance().getData(_patchDescriptionsIndex).clear();

  logTraceOutWith1Argument( "beginIteration(State)", solverState);
}
void peano::integration::dataqueries::CartesianGridWriterProxy::receiveQueryData(
    int dataTag,
    int source,
    tarch::plotter::griddata::regular::CartesianGridWriter::VertexDataWriter& vertexDataWriter,
    int recordsPerEntry
) {
  int        flag   = 0;
  MPI_Status status;
  while(!flag){

      int        result = MPI_Iprobe(
          source,
          dataTag,
          tarch::parallel::Node::getInstance().getCommunicator(),
          &flag, &status
      );

      if (result!=MPI_SUCCESS) {
          logError("receiveQueryData()", "probing for messages from node " << source << " failed");
      }
  }
  int messages = 0;
  int datasets = 0;

  MPI_Get_count(&status, MPI_DOUBLE, &messages);

  assertionEquals( messages % 3, 0 );
  datasets = messages/3;

  std::vector<double> positionList(messages);
  double* data = new double[datasets * recordsPerEntry];

  MPI_Recv(&positionList[0], messages, MPI_DOUBLE, source, dataTag, tarch::parallel::Node::getInstance().getCommunicator(), &status);
  MPI_Recv(data, datasets * recordsPerEntry, MPI_DOUBLE, source, dataTag, tarch::parallel::Node::getInstance().getCommunicator(), &status);

  for (int i=0; i<datasets; i++) {
    //Each dataset carries a 3D vertex position followed by its records
    tarch::la::Vector<3,double> v(0.0);
    v[0] = positionList[i*3];
    v[1] = positionList[i*3+1];
    v[2] = positionList[i*3+2];

    if (recordsPerEntry==2) {
      tarch::la::Vector<2,double> dataLocal(0.0);
      for (int k=0; k<recordsPerEntry; k++) {
        dataLocal(k) = data[i*recordsPerEntry+k];
      }
      vertexDataWriter.plotVertex(vertexDataWriter.getVertexIndex(v), dataLocal);
    }
    else if (recordsPerEntry==3) {
      tarch::la::Vector<3,double> dataLocal(0.0);
      for (int k=0; k<recordsPerEntry; k++) {
        dataLocal(k) = data[i*recordsPerEntry+k];
      }
      vertexDataWriter.plotVertex(vertexDataWriter.getVertexIndex(v), dataLocal);
    }
    else {
      //Scalar case (recordsPerEntry==1); with more records only the last one
      //would survive this loop, which matches the original behavior
      double dataLocal = 0.0;
      for (int k=0; k<recordsPerEntry; k++) {
        dataLocal = data[i*recordsPerEntry+k];
      }
      vertexDataWriter.plotVertex(vertexDataWriter.getVertexIndex(v), dataLocal);
    }
  }

  delete[] data;
}
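//Illustrative sketch (not part of the original sources): receiveQueryData()
//above implies a two-message protocol, first the vertex positions (three
//doubles per dataset, hence the "messages % 3" assertion), then the records,
//matched by source and tag. Only the MPI calls below are real API; the
//function name and its role in the query pipeline are assumptions.
#include <mpi.h>
#include <vector>

void sendQueryData(
    int dataTag,
    int destination,
    MPI_Comm communicator,
    const std::vector<double>& positions,  //3 doubles per dataset
    const std::vector<double>& records     //recordsPerEntry doubles per dataset
) {
  //First message: vertex positions; the receiver derives the dataset count
  //from this message's length
  MPI_Send(positions.data(), static_cast<int>(positions.size()),
           MPI_DOUBLE, destination, dataTag, communicator);
  //Second message: the actual records
  MPI_Send(records.data(), static_cast<int>(records.size()),
           MPI_DOUBLE, destination, dataTag, communicator);
}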
void peanoclaw::interSubgridCommunication::GridLevelTransfer::finalizeVirtualSubgrid(
  Patch&                               subgrid,
  peanoclaw::Vertex * const            fineGridVertices,
  const peano::grid::VertexEnumerator& fineGridVerticesEnumerator,
  bool                                 isPeanoCellLeaf
) {
  tarch::multicore::Lock lock(_virtualPatchListSemaphore);
  assertion1(_virtualPatchDescriptionIndices.size() >= 0, subgrid.toString());

  tarch::la::Vector<DIMENSIONS_PLUS_ONE, double> virtualSubgridKey = createVirtualSubgridKey(subgrid.getPosition(), subgrid.getLevel());
  int virtualPatchDescriptionIndex = _virtualPatchDescriptionIndices[virtualSubgridKey];
  _virtualPatchDescriptionIndices.erase(virtualSubgridKey);
//  _virtualPatchTimeConstraints.erase(virtualSubgridKey);
  CellDescription& virtualPatchDescription = CellDescriptionHeap::getInstance().getData(virtualPatchDescriptionIndex).at(0);
  Patch virtualPatch(virtualPatchDescription);

  //Assert that we're working on the correct virtual patch
  assertionEquals3(subgrid.getCellDescriptionIndex(), virtualPatchDescriptionIndex, subgrid, virtualPatch, _virtualPatchDescriptionIndices.size());
  assertionNumericalEquals(subgrid.getPosition(), virtualPatch.getPosition());
  assertionNumericalEquals(subgrid.getSize(), virtualPatch.getSize());
  assertionEquals(subgrid.getLevel(), virtualPatch.getLevel());
  assertionEquals(subgrid.getUIndex(), virtualPatch.getUIndex());
//    assertionEquals(finePatch.getUOldIndex(), virtualPatch.getUOldIndex());

  #ifndef PEANOCLAW_USE_ASCEND_FOR_RESTRICTION
  _numerics.postProcessRestriction(subgrid, !subgrid.willCoarsen());
  #endif

  //Fill ghostlayer
  for(int i = 0; i < TWO_POWER_D; i++) {
    fineGridVertices[fineGridVerticesEnumerator(i)].fillAdjacentGhostLayers(
      subgrid.getLevel(),
      _useDimensionalSplitting,
      _numerics,
      #ifdef PEANOCLAW_USE_ASCEND_FOR_RESTRICTION
      tarch::la::multiplyComponents(peano::utils::dDelinearised(i, 2).convertScalar<double>(), subgrid.getSize()) + subgrid.getPosition(),
      #else
      fineGridVerticesEnumerator.getVertexPosition(i),
      #endif
      _subgridStatistics
    );
  }

  //Switch to leaf or non-virtual
  if(isPeanoCellLeaf) {
    assertion1(tarch::la::greaterEquals(subgrid.getTimeIntervals().getTimestepSize(), 0.0), subgrid);
    subgrid.switchToLeaf();
    _numerics.update(subgrid);
    ParallelSubgrid parallelSubgrid(subgrid);
    parallelSubgrid.markCurrentStateAsSent(false);
  } else {
    if(!isPatchAdjacentToRemoteRank(
      fineGridVertices,
      fineGridVerticesEnumerator
    )) {
      subgrid.switchToNonVirtual();
    }
  }

  assertion1(!subgrid.isVirtual()
    || isPatchAdjacentToRemoteRank(
        fineGridVertices,
        fineGridVerticesEnumerator),
    subgrid);
}
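//Illustrative sketch (not part of the original sources): finalizeVirtualSubgrid()
//above looks its virtual patch up via createVirtualSubgridKey, which packs the
//subgrid position plus its level into one DIMENSIONS_PLUS_ONE vector so that
//overlapping subgrids on different levels map to distinct keys. Simplified
//stand-alone analogue with std::array/std::map; names are made up.
#include <array>
#include <map>

constexpr int kDim = 2;  //stand-in for DIMENSIONS
typedef std::array<double, kDim + 1> VirtualSubgridKey;

VirtualSubgridKey createVirtualSubgridKey(const std::array<double, kDim>& position, int level) {
  VirtualSubgridKey key{};
  for (int d = 0; d < kDim; d++) {
    key[d] = position[d];
  }
  key[kDim] = level;  //disambiguates subgrids that share a position
  return key;
}

int main() {
  std::map<VirtualSubgridKey, int> virtualPatchDescriptionIndices;
  virtualPatchDescriptionIndices[createVirtualSubgridKey({0.0, 1.0/3.0}, 2)] = 42;
  return virtualPatchDescriptionIndices.count(createVirtualSubgridKey({0.0, 1.0/3.0}, 2)) == 1 ? 0 : 1;
}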
void peanoclaw::repositories::RepositoryArrayStack::iterate(int numberOfIterations) {
  tarch::timing::Watch watch( "peanoclaw::repositories::RepositoryArrayStack", "iterate(bool)", false);
  
  #ifdef Parallel
  if (tarch::parallel::Node::getInstance().isGlobalMaster()) {
    _repositoryState.setNumberOfIterations(numberOfIterations);
    tarch::parallel::NodePool::getInstance().broadcastToWorkingNodes(
      _repositoryState,
      peano::parallel::SendReceiveBufferPool::getInstance().getIterationManagementTag()
    );
  }
  else {
    assertionEquals( numberOfIterations, 1 );
    numberOfIterations = _repositoryState.getNumberOfIterations();
  }

  if ( numberOfIterations > 1 && ( peano::parallel::loadbalancing::Oracle::getInstance().isLoadBalancingActivated() || _solverState.isInvolvedInJoinOrFork() )) {
    logWarning( "iterate()", "iterate invoked for multiple traversals though load balancing is switched on or grid is not balanced globally. Use activateLoadBalancing(false) to deactivate the load balancing before" );
  }

  peano::datatraversal::autotuning::Oracle::getInstance().switchToOracle(_repositoryState.getAction());

  peano::parallel::loadbalancing::Oracle::getInstance().switchToOracle(_repositoryState.getAction());
  peano::parallel::loadbalancing::Oracle::getInstance().activateLoadBalancing(_repositoryState.getNumberOfIterations()==1);  
  
  _solverState.currentlyRunsMultipleIterations(_repositoryState.getNumberOfIterations()>1);
  #else
  peano::datatraversal::autotuning::Oracle::getInstance().switchToOracle(_repositoryState.getAction());
  #endif
  
  for (int i=0; i<numberOfIterations; i++) {
    switch ( _repositoryState.getAction()) {
      case peanoclaw::records::RepositoryState::UseAdapterInitialiseGrid: watch.startTimer(); _gridWithInitialiseGrid.iterate(); watch.stopTimer(); _measureInitialiseGridCPUTime.setValue( watch.getCPUTime() ); _measureInitialiseGridCalendarTime.setValue( watch.getCalendarTime() ); break;
      case peanoclaw::records::RepositoryState::UseAdapterInitialiseAndValidateGrid: watch.startTimer(); _gridWithInitialiseAndValidateGrid.iterate(); watch.stopTimer(); _measureInitialiseAndValidateGridCPUTime.setValue( watch.getCPUTime() ); _measureInitialiseAndValidateGridCalendarTime.setValue( watch.getCalendarTime() ); break;
      case peanoclaw::records::RepositoryState::UseAdapterPlot: watch.startTimer(); _gridWithPlot.iterate(); watch.stopTimer(); _measurePlotCPUTime.setValue( watch.getCPUTime() ); _measurePlotCalendarTime.setValue( watch.getCalendarTime() ); break;
      case peanoclaw::records::RepositoryState::UseAdapterPlotAndValidateGrid: watch.startTimer(); _gridWithPlotAndValidateGrid.iterate(); watch.stopTimer(); _measurePlotAndValidateGridCPUTime.setValue( watch.getCPUTime() ); _measurePlotAndValidateGridCalendarTime.setValue( watch.getCalendarTime() ); break;
      case peanoclaw::records::RepositoryState::UseAdapterRemesh: watch.startTimer(); _gridWithRemesh.iterate(); watch.stopTimer(); _measureRemeshCPUTime.setValue( watch.getCPUTime() ); _measureRemeshCalendarTime.setValue( watch.getCalendarTime() ); break;
      case peanoclaw::records::RepositoryState::UseAdapterSolveTimestep: watch.startTimer(); _gridWithSolveTimestep.iterate(); watch.stopTimer(); _measureSolveTimestepCPUTime.setValue( watch.getCPUTime() ); _measureSolveTimestepCalendarTime.setValue( watch.getCalendarTime() ); break;
      case peanoclaw::records::RepositoryState::UseAdapterSolveTimestepAndValidateGrid: watch.startTimer(); _gridWithSolveTimestepAndValidateGrid.iterate(); watch.stopTimer(); _measureSolveTimestepAndValidateGridCPUTime.setValue( watch.getCPUTime() ); _measureSolveTimestepAndValidateGridCalendarTime.setValue( watch.getCalendarTime() ); break;
      case peanoclaw::records::RepositoryState::UseAdapterSolveTimestepAndPlot: watch.startTimer(); _gridWithSolveTimestepAndPlot.iterate(); watch.stopTimer(); _measureSolveTimestepAndPlotCPUTime.setValue( watch.getCPUTime() ); _measureSolveTimestepAndPlotCalendarTime.setValue( watch.getCalendarTime() ); break;
      case peanoclaw::records::RepositoryState::UseAdapterSolveTimestepAndPlotAndValidateGrid: watch.startTimer(); _gridWithSolveTimestepAndPlotAndValidateGrid.iterate(); watch.stopTimer(); _measureSolveTimestepAndPlotAndValidateGridCPUTime.setValue( watch.getCPUTime() ); _measureSolveTimestepAndPlotAndValidateGridCalendarTime.setValue( watch.getCalendarTime() ); break;
      case peanoclaw::records::RepositoryState::UseAdapterGatherCurrentSolution: watch.startTimer(); _gridWithGatherCurrentSolution.iterate(); watch.stopTimer(); _measureGatherCurrentSolutionCPUTime.setValue( watch.getCPUTime() ); _measureGatherCurrentSolutionCalendarTime.setValue( watch.getCalendarTime() ); break;
      case peanoclaw::records::RepositoryState::UseAdapterGatherCurrentSolutionAndValidateGrid: watch.startTimer(); _gridWithGatherCurrentSolutionAndValidateGrid.iterate(); watch.stopTimer(); _measureGatherCurrentSolutionAndValidateGridCPUTime.setValue( watch.getCPUTime() ); _measureGatherCurrentSolutionAndValidateGridCalendarTime.setValue( watch.getCalendarTime() ); break;
      case peanoclaw::records::RepositoryState::UseAdapterCleanup: watch.startTimer(); _gridWithCleanup.iterate(); watch.stopTimer(); _measureCleanupCPUTime.setValue( watch.getCPUTime() ); _measureCleanupCalendarTime.setValue( watch.getCalendarTime() ); break;

      case peanoclaw::records::RepositoryState::Terminate:
        assertionMsg( false, "this branch/state should never be reached" ); 
        break;
      case peanoclaw::records::RepositoryState::NumberOfAdapters:
        assertionMsg( false, "this branch/state should never be reached" ); 
        break;
      case peanoclaw::records::RepositoryState::RunOnAllNodes:
        assertionMsg( false, "this branch/state should never be reached" ); 
        break;
      case peanoclaw::records::RepositoryState::ReadCheckpoint:
        assertionMsg( false, "not implemented yet" );
        break;
      case peanoclaw::records::RepositoryState::WriteCheckpoint:
        assertionMsg( false, "not implemented yet" );
        break;
    }
  }
    
  #ifdef Parallel
  if (_solverState.isJoiningWithMaster()) {
    _repositoryState.setAction( peanoclaw::records::RepositoryState::Terminate );
  }
  #endif
}
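//Illustrative sketch (not part of the original sources): both repository
//iterate() implementations follow the same handshake, where the global master
//stamps the iteration count into the repository state and broadcasts it while
//workers (always invoked with numberOfIterations == 1) adopt the broadcast
//value. Plain-MPI analogue of that negotiation; the Peano NodePool machinery
//is replaced by MPI_Bcast and the function name is made up.
#include <mpi.h>

int negotiateNumberOfIterations(int requestedIterations, MPI_Comm communicator) {
  //Rank 0 plays the global master; all other ranks overwrite their local
  //value with the broadcast one
  int iterations = requestedIterations;
  MPI_Bcast(&iterations, 1, MPI_INT, /*root=*/0, communicator);
  return iterations;
}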
void dem::repositories::RepositorySTDStack::iterate(int numberOfIterations, bool exchangeBoundaryVertices) {
  tarch::timing::Watch watch( "dem::repositories::RepositorySTDStack", "iterate(bool)", false);
  
  #ifdef Parallel
  if (tarch::parallel::Node::getInstance().isGlobalMaster()) {
    _repositoryState.setNumberOfIterations(numberOfIterations);
    _repositoryState.setExchangeBoundaryVertices(exchangeBoundaryVertices);
    tarch::parallel::NodePool::getInstance().broadcastToWorkingNodes(
      _repositoryState,
      peano::parallel::SendReceiveBufferPool::getInstance().getIterationManagementTag()
    );
  }
  else {
    assertionEquals( numberOfIterations, 1 );
    numberOfIterations = _repositoryState.getNumberOfIterations();
  }
  
  peano::parallel::SendReceiveBufferPool::getInstance().exchangeBoundaryVertices(_repositoryState.getExchangeBoundaryVertices());

  if ( numberOfIterations > 1 && _solverState.isInvolvedInJoinOrFork() ) {
    logWarning( "iterate()", "iterate invoked for multiple traversals though load balancing still does redistribute data" );
  }
  bool switchedLoadBalancingTemporarilyOff = false;
  if ( numberOfIterations > 1 && peano::parallel::loadbalancing::Oracle::getInstance().isLoadBalancingActivated() ) {
    switchedLoadBalancingTemporarilyOff = true;
    peano::parallel::loadbalancing::Oracle::getInstance().activateLoadBalancing(false);
  }

  peano::datatraversal::autotuning::Oracle::getInstance().switchToOracle(_repositoryState.getAction());

  peano::parallel::loadbalancing::Oracle::getInstance().switchToOracle(_repositoryState.getAction());
  
  _solverState.currentlyRunsMultipleIterations(_repositoryState.getNumberOfIterations()>1);
  #else
  peano::datatraversal::autotuning::Oracle::getInstance().switchToOracle(_repositoryState.getAction());
  #endif
  
  for (int i=0; i<numberOfIterations; i++) {
    switch ( _repositoryState.getAction()) {
      case dem::records::RepositoryState::UseAdapterCreateGrid: watch.startTimer(); _gridWithCreateGrid.iterate(); watch.stopTimer(); _measureCreateGridCPUTime.setValue( watch.getCPUTime() ); _measureCreateGridCalendarTime.setValue( watch.getCalendarTime() ); break;
      case dem::records::RepositoryState::UseAdapterCreateGridAndPlot: watch.startTimer(); _gridWithCreateGridAndPlot.iterate(); watch.stopTimer(); _measureCreateGridAndPlotCPUTime.setValue( watch.getCPUTime() ); _measureCreateGridAndPlotCalendarTime.setValue( watch.getCalendarTime() ); break;
      case dem::records::RepositoryState::UseAdapterTimeStep: watch.startTimer(); _gridWithTimeStep.iterate(); watch.stopTimer(); _measureTimeStepCPUTime.setValue( watch.getCPUTime() ); _measureTimeStepCalendarTime.setValue( watch.getCalendarTime() ); break;
      case dem::records::RepositoryState::UseAdapterTimeStepAndPlot: watch.startTimer(); _gridWithTimeStepAndPlot.iterate(); watch.stopTimer(); _measureTimeStepAndPlotCPUTime.setValue( watch.getCPUTime() ); _measureTimeStepAndPlotCalendarTime.setValue( watch.getCalendarTime() ); break;
      case dem::records::RepositoryState::UseAdapterTimeStepOnDynamicGrid: watch.startTimer(); _gridWithTimeStepOnDynamicGrid.iterate(); watch.stopTimer(); _measureTimeStepOnDynamicGridCPUTime.setValue( watch.getCPUTime() ); _measureTimeStepOnDynamicGridCalendarTime.setValue( watch.getCalendarTime() ); break;
      case dem::records::RepositoryState::UseAdapterTimeStepAndPlotOnDynamicGrid: watch.startTimer(); _gridWithTimeStepAndPlotOnDynamicGrid.iterate(); watch.stopTimer(); _measureTimeStepAndPlotOnDynamicGridCPUTime.setValue( watch.getCPUTime() ); _measureTimeStepAndPlotOnDynamicGridCalendarTime.setValue( watch.getCalendarTime() ); break;
      case dem::records::RepositoryState::UseAdapterTimeStepOnReluctantDynamicGrid: watch.startTimer(); _gridWithTimeStepOnReluctantDynamicGrid.iterate(); watch.stopTimer(); _measureTimeStepOnReluctantDynamicGridCPUTime.setValue( watch.getCPUTime() ); _measureTimeStepOnReluctantDynamicGridCalendarTime.setValue( watch.getCalendarTime() ); break;
      case dem::records::RepositoryState::UseAdapterTimeStepAndPlotOnReluctantDynamicGrid: watch.startTimer(); _gridWithTimeStepAndPlotOnReluctantDynamicGrid.iterate(); watch.stopTimer(); _measureTimeStepAndPlotOnReluctantDynamicGridCPUTime.setValue( watch.getCPUTime() ); _measureTimeStepAndPlotOnReluctantDynamicGridCalendarTime.setValue( watch.getCalendarTime() ); break;

      case dem::records::RepositoryState::Terminate:
        assertionMsg( false, "this branch/state should never be reached" ); 
        break;
      case dem::records::RepositoryState::NumberOfAdapters:
        assertionMsg( false, "this branch/state should never be reached" ); 
        break;
      case dem::records::RepositoryState::RunOnAllNodes:
        assertionMsg( false, "this branch/state should never be reached" ); 
        break;
      case dem::records::RepositoryState::ReadCheckpoint:
        assertionMsg( false, "not implemented yet" );
        break;
      case dem::records::RepositoryState::WriteCheckpoint:
        assertionMsg( false, "not implemented yet" );
        break;
    }
    #ifdef Parallel
    if ( switchedLoadBalancingTemporarilyOff && i==numberOfIterations-1) {
      peano::parallel::loadbalancing::Oracle::getInstance().activateLoadBalancing(true);
    }
    #endif
  }
  
  #ifdef Parallel
  if (_solverState.isJoiningWithMaster()) {
    _repositoryState.setAction( dem::records::RepositoryState::Terminate );
  }
  #endif
}
peanoclaw::native::SWECommandLineParser::SWECommandLineParser(
  int argc,
  char** argv
) : _finestSubgridTopology(0),
    _coarsestSubgridTopology(0),
    _subdivisionFactor(0),
    _endTime(0),
    _globalTimestepSize(0),
    _usePeanoClaw(false) {
  const int requiredNumberOfArguments = 6;
  const int numberOfOptionalArguments = 1;
  if(argc != requiredNumberOfArguments && argc != requiredNumberOfArguments + numberOfOptionalArguments) {
    std::stringstream s;
    s << "There have to be " << requiredNumberOfArguments << " or " << (requiredNumberOfArguments + numberOfOptionalArguments)
      << " arguments instead of " << argc;
    s << "\nParameters: finestSubgridTopology coarsesSubgridTopology subdivisionFactor endTime globalTimestepSize [--usePeano]";
    throw std::invalid_argument(s.str());
  }

  //Finest subgrid topology
  {
    std::istringstream s(argv[1]);
    int subgridsPerDimension;
    s >> subgridsPerDimension;
    _finestSubgridTopology = tarch::la::Vector<DIMENSIONS,int>(subgridsPerDimension);
  }

  //Coarsest subgrid topology
  {
    std::istringstream s(argv[2]);
    int subgridsPerDimension;
    s >> subgridsPerDimension;
    _coarsestSubgridTopology = tarch::la::Vector<DIMENSIONS,int>(subgridsPerDimension);
  }

  //Subdivision factor
  {
    std::istringstream s(argv[3]);
    int subdivisionPerDimension;
    s >> subdivisionPerDimension;
    _subdivisionFactor = tarch::la::Vector<DIMENSIONS,int>(subdivisionPerDimension);
  }

  if(!tarch::la::allGreaterEquals(_coarsestSubgridTopology, _finestSubgridTopology)) {
    logError("SWECommandLineParser", "Finest subgrid topology has to be finer than the coarsest one.");
  }

  //End time
  {
    std::istringstream s(argv[4]);
    s >> _endTime;
  }

  //Global timestep size
  {
    std::istringstream s(argv[5]);
    s >> _globalTimestepSize;
  }

  if(argc > requiredNumberOfArguments) {
    assertionEquals(std::string(argv[6]), "--usePeano");
    _usePeanoClaw = true;
  }

  logInfo("SWECommandLineParser", "Parameter: finest subgrid topology=" << _finestSubgridTopology
          << ", coarsest subgrid topology=" << _coarsestSubgridTopology
          << ", subdivision factor=" << _subdivisionFactor
          << ", end time=" << _endTime
          << ", global timestep size=" << _globalTimestepSize);
}
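//Illustrative usage sketch (not part of the original sources): the header
//path and the argument values below are assumptions, chosen to satisfy the
//parser's checks (five positional arguments plus the optional --usePeano flag).
#include <iostream>
#include <stdexcept>
#include "peanoclaw/native/SWECommandLineParser.h"  //assumed header path

int main() {
  //finest topology, coarsest topology, subdivision factor, end time,
  //global timestep size, optional flag
  const char* args[] = { "swe", "9", "27", "16", "1.0", "0.1", "--usePeano" };
  try {
    peanoclaw::native::SWECommandLineParser parser(7, const_cast<char**>(args));
  } catch (const std::invalid_argument& e) {
    std::cerr << e.what() << std::endl;
    return 1;
  }
  return 0;
}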