void peanoclaw::parallel::MasterWorkerAndForkJoinCommunicator::receivePatch(int localCellDescriptionIndex) {
  logTraceInWith3Arguments("receivePatch", localCellDescriptionIndex, _position, _level);
  #ifdef Parallel

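  //Receive the cell description of the remote subgrid; exactly one cell description is expected per message.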
  std::vector<CellDescription> remoteCellDescriptionVector = CellDescriptionHeap::getInstance().receiveData(_remoteRank, _position, _level, _messageType);
  assertionEquals2(remoteCellDescriptionVector.size(), 1, _position, _level);
  CellDescription remoteCellDescription = remoteCellDescriptionVector[0];

  assertion3(localCellDescriptionIndex >= 0, localCellDescriptionIndex, _position, _level);
  CellDescription localCellDescription = CellDescriptionHeap::getInstance().getData(localCellDescriptionIndex).at(0);
  #ifdef Asserts
  assertionNumericalEquals2(remoteCellDescription.getPosition(), localCellDescription.getPosition(), localCellDescription.toString(), remoteCellDescription.toString());
  assertionNumericalEquals2(remoteCellDescription.getSize(), localCellDescription.getSize(), localCellDescription.toString(), remoteCellDescription.toString());
  assertionEquals2(remoteCellDescription.getLevel(), localCellDescription.getLevel(), localCellDescription.toString(), remoteCellDescription.toString());
  #endif

  //Load the data arrays and store the corresponding indices in the cell description
  if(remoteCellDescription.getUIndex() != -1) {
    remoteCellDescription.setUIndex(_subgridCommunicator.receiveDataArray());
  }

  //Reset values that must not be taken over from the remote rank
  remoteCellDescription.setNumberOfTransfersToBeSkipped(0);

  //Copy remote cell description to local cell description
  deleteArraysFromPatch(localCellDescriptionIndex);
  remoteCellDescription.setCellDescriptionIndex(localCellDescriptionIndex);
  CellDescriptionHeap::getInstance().getData(localCellDescriptionIndex).at(0) = remoteCellDescription;
  assertionEquals(CellDescriptionHeap::getInstance().getData(localCellDescriptionIndex).size(), 1);

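  //Wrap the merged cell description in a Patch and initialize the fields that are not exchanged in parallel.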
  Patch subgrid(localCellDescriptionIndex);
  subgrid.initializeNonParallelFields();

  //TODO unterweg debug
//  std::cout << "Received cell description on rank " << tarch::parallel::Node::getInstance().getRank()
//      << " from rank " << _remoteRank << ": " << remoteCellDescription.toString() << std::endl << subgrid.toStringUNew() << std::endl;

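  //If enabled, assert that unknown 0 of the received subgrid contains only positive values.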
  #if defined(AssertForPositiveValues) && defined(Asserts)
  if(subgrid.isLeaf() || subgrid.isVirtual()) {
    assertion4(!subgrid.containsNonPositiveNumberInUnknownInUNew(0),
                tarch::parallel::Node::getInstance().getRank(),
                _remoteRank,
                subgrid,
                subgrid.toStringUNew());
  }
  #endif

  assertionEquals(CellDescriptionHeap::getInstance().getData(localCellDescriptionIndex).at(0).getCellDescriptionIndex(), localCellDescriptionIndex);
  #endif
  logTraceOut("receivePatch");
}

void tarch::logging::CommandLineLogger::indent( bool indent, const std::string& trace, const std::string& message ) {
  #ifdef Debug

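  //Indentation is only tracked in Debug mode; the lock protects the indentation state against concurrent logging calls.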
  tarch::multicore::Lock lockCout( _semaphore );
  if (indent) {
    _indent+=NumberOfIndentSpaces;
    #if !defined(SharedMemoryParallelisation)
    _indentTraces.push(trace);
    #endif
  }
  else {
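    //Without shared memory parallelisation, check that the closed trace matches the most recently opened one.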
    #if !defined(SharedMemoryParallelisation)
    assertionEquals2(
      _indentTraces.top(),
      trace,
      message,
      indent
    );
    _indentTraces.pop();
    #endif
    assertion5(
      _indent >= NumberOfIndentSpaces,
      _indent, NumberOfIndentSpaces,
      "more logTraceOut calls than logTraceIn calls invoked before",
      trace, message
    );
    _indent-=NumberOfIndentSpaces;
  }
  #endif
}

void peanoclaw::parallel::MasterWorkerAndForkJoinCommunicator::mergeCellDuringForkOrJoin(
  peanoclaw::Cell&                      localCell,
  const peanoclaw::Cell&                remoteCell,
  tarch::la::Vector<DIMENSIONS, double> cellSize,
  const peanoclaw::State&               state
) {
  #ifdef Parallel
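  //A fork is assumed if this rank is not the global master and the remote rank is the node pool's master rank.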
  bool isForking = !tarch::parallel::Node::getInstance().isGlobalMaster()
                   && _remoteRank == tarch::parallel::NodePool::getInstance().getMasterRank();

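  //Merge only inner cells: during a fork those not assigned to another remote rank, during a join those owned by the joining remote rank.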
  if(localCell.isInside()
      && (
           ( isForking && !remoteCell.isAssignedToRemoteRank())
        || (!isForking && remoteCell.getRankOfRemoteNode() == _remoteRank)
      )
    ) {
    if(localCell.isRemote(state, false, false)) {
      if(tarch::parallel::NodePool::getInstance().getMasterRank() != 0) {
        assertionEquals2(localCell.getCellDescriptionIndex(), -2, _position, _level);
      }
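      //The subgrid is remote on this rank: receive its data into a temporary patch that is deleted again afterwards.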
      Patch temporaryPatch(
        _position - cellSize * 0.5,
        cellSize,
        0,
        0,
        0,
        1,
        1,
        0.0,
        _level
      );

      receivePatch(temporaryPatch.getCellDescriptionIndex());
      temporaryPatch.reloadCellDescription();

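      //Step the temporary patch down from leaf via virtual to non-virtual so that its data is released before the cell description is deleted.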
      if(temporaryPatch.isLeaf()) {
        temporaryPatch.switchToVirtual();
      }
      if(temporaryPatch.isVirtual()) {
        temporaryPatch.switchToNonVirtual();
      }
      CellDescriptionHeap::getInstance().deleteData(temporaryPatch.getCellDescriptionIndex());
    } else {
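      //The subgrid exists locally: receive the remote data directly into the local patch.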
      assertion2(localCell.getCellDescriptionIndex() != -1, _position, _level);

      Patch localPatch(localCell);

      assertion2(
        (!localPatch.isLeaf() && !localPatch.isVirtual())
        || localPatch.getUIndex() >= 0,
        _position,
        _level
      );

      receivePatch(localPatch.getCellDescriptionIndex());
      localPatch.loadCellDescription(localCell.getCellDescriptionIndex());

      assertion1(!localPatch.isRemote(), localPatch);

      //TODO unterweg dissertation: If the adjacency information on the vertices of the new node
      // is not yet set correctly, we cannot advance right away. We might even need two iterations
      // without any action (because of running back and forth).
      localPatch.setSkipNextGridIteration(2);

      assertionEquals1(localPatch.getLevel(), _level, localPatch);
    }
  }
  #endif
}