void peano::applications::navierstokes::prototype2::repositories::PrototypeRepositoryForSpacetreeGridArrayStackImplementation::iterate() {
  tarch::utils::Watch watch( "peano::applications::navierstokes::prototype2::repositories::PrototypeRepositoryForSpacetreeGridArrayStackImplementation", "iterate()", false);

#ifdef Parallel
  if (tarch::parallel::Node::getInstance().isMasterProcess()) {
    tarch::parallel::NodePool::getInstance().broadcastToWorkingNodes(
      _repositoryState,
      peano::kernel::parallel::SendReceiveBufferPool::getInstance().getIterationManagementTag()
    );
  }
#endif

  peano::kernel::datatraversal::autotuning::Oracle::getInstance().switchToOracle(_repositoryState.getAction());

  switch ( _repositoryState.getAction()) {
    case PrototypeRepositoryState::Terminate:
      assertionMsg( false, "this branch/state should never be reached" );
      break;
    case PrototypeRepositoryState::ReadCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
    case PrototypeRepositoryState::WriteCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
  }
}
void peano::applications::poisson::multigrid::repositories::MultigridBatchJobRepositoryForSpacetreeGridFileStackImplementation::iterate() {
  tarch::utils::Watch watch( "peano::applications::poisson::multigrid::repositories::MultigridBatchJobRepositoryForSpacetreeGridFileStackImplementation", "iterate()", false);

#ifdef Parallel
  if (tarch::parallel::Node::getInstance().isMasterProcess()) {
    tarch::parallel::NodePool::getInstance().broadcastToWorkingNodes(
      _repositoryState,
      peano::kernel::parallel::SendReceiveBufferPool::getInstance().getIterationManagementTag()
    );
  }
#endif

  peano::kernel::datatraversal::autotuning::Oracle::getInstance().switchToOracle(_repositoryState.getAction());

  switch ( _repositoryState.getAction()) {
    case MultigridBatchJobRepositoryState::UseAdapterSetupExperiment:
      watch.startTimer();
      _gridWithSetupExperiment.iterate(_solverState);
      watch.stopTimer();
      _countSetupExperimentRuns++;
      _measureSetupExperimentCPUTime += watch.getCPUTime();
      _measureSetupExperimentCalendarTime += watch.getCalendarTime();
      break;
    case MultigridBatchJobRepositoryState::UseAdapterSetupExperimentAndPlotGrid:
      watch.startTimer();
      _gridWithSetupExperimentAndPlotGrid.iterate(_solverState);
      watch.stopTimer();
      _countSetupExperimentAndPlotGridRuns++;
      _measureSetupExperimentAndPlotGridCPUTime += watch.getCPUTime();
      _measureSetupExperimentAndPlotGridCalendarTime += watch.getCalendarTime();
      break;
    case MultigridBatchJobRepositoryState::UseAdapterSetupExperimentAndPlotStartSolution:
      watch.startTimer();
      _gridWithSetupExperimentAndPlotStartSolution.iterate(_solverState);
      watch.stopTimer();
      _countSetupExperimentAndPlotStartSolutionRuns++;
      _measureSetupExperimentAndPlotStartSolutionCPUTime += watch.getCPUTime();
      _measureSetupExperimentAndPlotStartSolutionCalendarTime += watch.getCalendarTime();
      break;
    case MultigridBatchJobRepositoryState::UseAdapterSmoothAndComputeGalerkinCoarseGridOperator:
      watch.startTimer();
      _gridWithSmoothAndComputeGalerkinCoarseGridOperator.iterate(_solverState);
      watch.stopTimer();
      _countSmoothAndComputeGalerkinCoarseGridOperatorRuns++;
      _measureSmoothAndComputeGalerkinCoarseGridOperatorCPUTime += watch.getCPUTime();
      _measureSmoothAndComputeGalerkinCoarseGridOperatorCalendarTime += watch.getCalendarTime();
      break;
    case MultigridBatchJobRepositoryState::UseAdapterPlotSolution:
      watch.startTimer();
      _gridWithPlotSolution.iterate(_solverState);
      watch.stopTimer();
      _countPlotSolutionRuns++;
      _measurePlotSolutionCPUTime += watch.getCPUTime();
      _measurePlotSolutionCalendarTime += watch.getCalendarTime();
      break;
    case MultigridBatchJobRepositoryState::Terminate:
      assertionMsg( false, "this branch/state should never be reached" );
      break;
    case MultigridBatchJobRepositoryState::ReadCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
    case MultigridBatchJobRepositoryState::WriteCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
  }
}
void peano::applications::latticeboltzmann::blocklatticeboltzmann::repositories::BlockLatticeBoltzmannBatchJobRepositoryForSpacetreeGridSTDStackImplementation::iterate(bool reduceState) {
  tarch::utils::Watch watch( "peano::applications::latticeboltzmann::blocklatticeboltzmann::repositories::BlockLatticeBoltzmannBatchJobRepositoryForSpacetreeGridSTDStackImplementation", "iterate(bool)", false);

  _repositoryState.setReduceState(reduceState);

#ifdef Parallel
  if (tarch::parallel::Node::getInstance().isMasterProcess()) {
    tarch::parallel::NodePool::getInstance().broadcastToWorkingNodes(
      _repositoryState,
      peano::kernel::parallel::SendReceiveBufferPool::getInstance().getIterationManagementTag()
    );
  }
#endif

  peano::kernel::datatraversal::autotuning::Oracle::getInstance().switchToOracle(_repositoryState.getAction());

  switch ( _repositoryState.getAction()) {
    case BlockLatticeBoltzmannBatchJobRepositoryState::UseAdapterRegularBlockSolverAdapter:
      watch.startTimer();
      _gridWithRegularBlockSolverAdapter.iterate(_solverState);
      watch.stopTimer();
      _countRegularBlockSolverAdapterRuns++;
      _measureRegularBlockSolverAdapterCPUTime += watch.getCPUTime();
      _measureRegularBlockSolverAdapterCalendarTime += watch.getCalendarTime();
      break;
    case BlockLatticeBoltzmannBatchJobRepositoryState::UseAdapterInitialiseSpacetreeGridAdapter:
      watch.startTimer();
      _gridWithInitialiseSpacetreeGridAdapter.iterate(_solverState);
      watch.stopTimer();
      _countInitialiseSpacetreeGridAdapterRuns++;
      _measureInitialiseSpacetreeGridAdapterCPUTime += watch.getCPUTime();
      _measureInitialiseSpacetreeGridAdapterCalendarTime += watch.getCalendarTime();
      break;
    case BlockLatticeBoltzmannBatchJobRepositoryState::UseAdapterBlockCCAOutputAdapter:
      watch.startTimer();
      _gridWithBlockCCAOutputAdapter.iterate(_solverState);
      watch.stopTimer();
      _countBlockCCAOutputAdapterRuns++;
      _measureBlockCCAOutputAdapterCPUTime += watch.getCPUTime();
      _measureBlockCCAOutputAdapterCalendarTime += watch.getCalendarTime();
      break;
    case BlockLatticeBoltzmannBatchJobRepositoryState::UseAdapterDynamicRefinementForSpacetreeGridAdapter:
      watch.startTimer();
      _gridWithDynamicRefinementForSpacetreeGridAdapter.iterate(_solverState);
      watch.stopTimer();
      _countDynamicRefinementForSpacetreeGridAdapterRuns++;
      _measureDynamicRefinementForSpacetreeGridAdapterCPUTime += watch.getCPUTime();
      _measureDynamicRefinementForSpacetreeGridAdapterCalendarTime += watch.getCalendarTime();
      break;
    case BlockLatticeBoltzmannBatchJobRepositoryState::UseAdapterBlockVTKOutputAdapter:
      watch.startTimer();
      _gridWithBlockVTKOutputAdapter.iterate(_solverState);
      watch.stopTimer();
      _countBlockVTKOutputAdapterRuns++;
      _measureBlockVTKOutputAdapterCPUTime += watch.getCPUTime();
      _measureBlockVTKOutputAdapterCalendarTime += watch.getCalendarTime();
      break;
    case BlockLatticeBoltzmannBatchJobRepositoryState::UseAdapterRegularBlockSolverAndVTKOutputAdapter:
      watch.startTimer();
      _gridWithRegularBlockSolverAndVTKOutputAdapter.iterate(_solverState);
      watch.stopTimer();
      _countRegularBlockSolverAndVTKOutputAdapterRuns++;
      _measureRegularBlockSolverAndVTKOutputAdapterCPUTime += watch.getCPUTime();
      _measureRegularBlockSolverAndVTKOutputAdapterCalendarTime += watch.getCalendarTime();
      break;
    case BlockLatticeBoltzmannBatchJobRepositoryState::Terminate:
      assertionMsg( false, "this branch/state should never be reached" );
      break;
    case BlockLatticeBoltzmannBatchJobRepositoryState::ReadCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
    case BlockLatticeBoltzmannBatchJobRepositoryState::WriteCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
  }
}
void peano::applications::puregrid::repositories::GridConstructionMovieBatchJobRepositoryForSpacetreeGridFileStackImplementation::iterate() {
  tarch::utils::Watch watch( "peano::applications::puregrid::repositories::GridConstructionMovieBatchJobRepositoryForSpacetreeGridFileStackImplementation", "iterate()", false);

#ifdef Parallel
  if (tarch::parallel::Node::getInstance().isMasterProcess()) {
    tarch::parallel::NodePool::getInstance().broadcastToWorkingNodes(
      _repositoryState,
      peano::kernel::parallel::SendReceiveBufferPool::getInstance().getIterationManagementTag()
    );
  }
#endif

  peano::kernel::datatraversal::autotuning::Oracle::getInstance().switchToOracle(_repositoryState.getAction());

  switch ( _repositoryState.getAction()) {
    case GridConstructionMovieBatchJobRepositoryState::UseAdapterPerformOneRefinement:
      watch.startTimer();
      _gridWithPerformOneRefinement.iterate(_solverState);
      watch.stopTimer();
      _countPerformOneRefinementRuns++;
      _measurePerformOneRefinementCPUTime += watch.getCPUTime();
      _measurePerformOneRefinementCalendarTime += watch.getCalendarTime();
      break;
    case GridConstructionMovieBatchJobRepositoryState::UseAdapterPerformOneRefinementWithoutGridSnapshot:
      watch.startTimer();
      _gridWithPerformOneRefinementWithoutGridSnapshot.iterate(_solverState);
      watch.stopTimer();
      _countPerformOneRefinementWithoutGridSnapshotRuns++;
      _measurePerformOneRefinementWithoutGridSnapshotCPUTime += watch.getCPUTime();
      _measurePerformOneRefinementWithoutGridSnapshotCalendarTime += watch.getCalendarTime();
      break;
    case GridConstructionMovieBatchJobRepositoryState::Terminate:
      assertionMsg( false, "this branch/state should never be reached" );
      break;
    case GridConstructionMovieBatchJobRepositoryState::ReadCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
    case GridConstructionMovieBatchJobRepositoryState::WriteCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
  }
}
void peano::applications::poisson::vhhjacobi::repositories::JacobiBatchJobRepositoryForRegularGridStandardImplementation::iterate(bool reduceState) {
  // Watch label fixed to match the actual signature, iterate(bool).
  tarch::utils::Watch watch( "peano::applications::poisson::vhhjacobi::repositories::JacobiBatchJobRepositoryForRegularGridStandardImplementation", "iterate(bool)", false);

  _repositoryState.setReduceState(reduceState);

#ifdef Parallel
  if (tarch::parallel::Node::getInstance().isMasterProcess()) {
    tarch::parallel::NodePool::getInstance().broadcastToWorkingNodes(
      _repositoryState,
      peano::kernel::parallel::SendReceiveBufferPool::getInstance().getIterationManagementTag()
    );
  }
#endif

  peano::kernel::datatraversal::autotuning::Oracle::getInstance().switchToOracle(_repositoryState.getAction());

  switch ( _repositoryState.getAction()) {
    case JacobiBatchJobRepositoryState::UseAdapterSetupExperiment:
      watch.startTimer();
      _gridWithSetupExperiment.iterate(reduceState);
      watch.stopTimer();
      _countSetupExperimentRuns++;
      _measureSetupExperimentCPUTime += watch.getCPUTime();
      _measureSetupExperimentCalendarTime += watch.getCalendarTime();
      break;
    case JacobiBatchJobRepositoryState::UseAdapterJacobiStep:
      watch.startTimer();
      _gridWithJacobiStep.iterate(reduceState);
      watch.stopTimer();
      _countJacobiStepRuns++;
      _measureJacobiStepCPUTime += watch.getCPUTime();
      _measureJacobiStepCalendarTime += watch.getCalendarTime();
      break;
    case JacobiBatchJobRepositoryState::UseAdapterJacobiStepAndPlotSolution:
      watch.startTimer();
      _gridWithJacobiStepAndPlotSolution.iterate(reduceState);
      watch.stopTimer();
      _countJacobiStepAndPlotSolutionRuns++;
      _measureJacobiStepAndPlotSolutionCPUTime += watch.getCPUTime();
      _measureJacobiStepAndPlotSolutionCalendarTime += watch.getCalendarTime();
      break;
    case JacobiBatchJobRepositoryState::UseAdapterPlotSolution:
      watch.startTimer();
      _gridWithPlotSolution.iterate(reduceState);
      watch.stopTimer();
      _countPlotSolutionRuns++;
      _measurePlotSolutionCPUTime += watch.getCPUTime();
      _measurePlotSolutionCalendarTime += watch.getCalendarTime();
      break;
    case JacobiBatchJobRepositoryState::Terminate:
      assertionMsg( false, "this branch/state should never be reached" );
      break;
    case JacobiBatchJobRepositoryState::ReadCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
    case JacobiBatchJobRepositoryState::WriteCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
  }
}
void peano::applications::navierstokes::prototype2::repositories::PrototypeRepositoryForSpacetreeGridArrayStackImplementation::readCheckpoint(
  peano::kernel::gridinterface::Checkpoint<peano::applications::navierstokes::prototype2::SpacetreeGridFluidVertexEnhancedDivFreeEulerExplicit, peano::applications::navierstokes::prototype2::SpacetreeGridFluidCellEnhancedDivFreeEulerExplicit> const * const checkpoint
) {
  assertionMsg( checkpoint->isValid(), "checkpoint has to be valid if you call this operation" );

  _solverState.readFromCheckpoint( *checkpoint );
  _vertexStack.readFromCheckpoint( *checkpoint );
  _cellStack.readFromCheckpoint( *checkpoint );
}
void pyclaw_peano_evolveToTime(double time, peanoclaw::runners::PeanoClawLibraryRunner* runner) {
#ifdef USE_VALGRIND
  CALLGRIND_START_INSTRUMENTATION;
  CALLGRIND_ZERO_STATS;
#endif

  static tarch::logging::Log _log("::pyclawBindings");
  logTraceInWith1Argument("pyclaw_peano_evolveToTime", time);
  assertionMsg(runner!=0, "call pyclaw_peano_new before calling pyclaw_peano_evolveToTime.");

  // Re-acquire the Python GIL around the solve if we were entered from Python.
  if (_calledFromPython) {
    _pythonState = PyGILState_Ensure();
  }

  runner->evolveToTime(time);

  if (_calledFromPython) {
    PyGILState_Release(_pythonState);
  }

#ifdef USE_VALGRIND
  CALLGRIND_STOP_INSTRUMENTATION;
#endif
  logTraceOut("pyclaw_peano_evolveToTime");
}
void peano::applications::poisson::multigrid::repositories::MultigridBatchJobRepositoryForSpacetreeGridFileStackImplementation::readCheckpoint(
  peano::kernel::gridinterface::Checkpoint<peano::applications::poisson::multigrid::SpacetreeGridVertex, peano::applications::poisson::multigrid::SpacetreeGridCell> const * const checkpoint
) {
  assertionMsg( checkpoint->isValid(), "checkpoint has to be valid if you call this operation" );

  _solverState.readFromCheckpoint( *checkpoint );
  _vertexStack.readFromCheckpoint( *checkpoint );
  _cellStack.readFromCheckpoint( *checkpoint );
}
void peano::applications::faxen::adapters::SpacetreeGrid2PlotGrid::plotVertex(
  const peano::applications::faxen::SpacetreeGridVertex&  fineGridVertex,
  const tarch::la::Vector<DIMENSIONS,double>&             fineGridX
) {
#ifdef SharedTBB
  Vertex2IndexMapSemaphore::scoped_lock localLock(_vertex2IndexMapSemaphore);
#elif SharedOMP
  assertionMsg( false, "here should be a critical section, but I don't know how to implement this. If you implement it, please add it to the templates, too." );
#endif

  if (
    fineGridVertex.getRefinementControl() != peano::applications::faxen::SpacetreeGridVertex::Records::Refined &&
    fineGridVertex.getRefinementControl() != peano::applications::faxen::SpacetreeGridVertex::Records::Refining &&
    _vertex2IndexMap.find(fineGridX) == _vertex2IndexMap.end()
  ) {
#if defined(Dim2) || defined(Dim3)
    _vertex2IndexMap[fineGridX] = _vertexWriter->plotVertex(fineGridX);
#else
    _vertex2IndexMap[fineGridX] = _vertexWriter->plotVertex(tarch::la::Vector<3,double>(fineGridX.data()));
#endif
    _vertexTypeWriter->plotVertex             (_vertex2IndexMap[fineGridX], fineGridVertex.isBoundary() );
    _vertexRefinementControlWriter->plotVertex(_vertex2IndexMap[fineGridX], fineGridVertex.getRefinementControl() );
    _vertexMaximumSubtreeWriter->plotVertex   (_vertex2IndexMap[fineGridX], fineGridVertex.getMaximumSubtreeHeight() );
  }
}
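// Editorial sketch (hedged, not part of the original sources): the SharedOMP
// branch above aborts because no critical section was implemented there. The
// following self-contained snippet illustrates the analogous mutual exclusion
// with OpenMP; the demo map, key type, and function name are assumptions for
// illustration only, not Peano code.
#include <map>
#include <omp.h>

static std::map<int, int> demoVertex2IndexMap;  // stand-in for _vertex2IndexMap

void demoPlotVertexGuarded(int key, int index) {
  // A named critical section serialises only the threads entering this block,
  // which mirrors the scoped TBB lock used in the SharedTBB branch.
  #pragma omp critical(demoVertex2IndexMapSection)
  {
    if (demoVertex2IndexMap.find(key) == demoVertex2IndexMap.end()) {
      demoVertex2IndexMap[key] = index;
    }
  }
}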
void peano::applications::latticeboltzmann::blocklatticeboltzmann::repositories::BlockLatticeBoltzmannBatchJobRepositoryForSpacetreeGridSTDStackImplementation::readCheckpoint(
  peano::kernel::gridinterface::Checkpoint<peano::applications::latticeboltzmann::blocklatticeboltzmann::SpacetreeGridBlockVertex, peano::applications::latticeboltzmann::blocklatticeboltzmann::SpacetreeGridBlockCell> const * const checkpoint
) {
  assertionMsg( checkpoint->isValid(), "checkpoint has to be valid if you call this operation" );

  _solverState.readFromCheckpoint( *checkpoint );
  _vertexStack.readFromCheckpoint( *checkpoint );
  _cellStack.readFromCheckpoint( *checkpoint );
}
void peanoclaw::repositories::RepositoryArrayStack::readCheckpoint(
  peano::grid::Checkpoint<peanoclaw::Vertex, peanoclaw::Cell> const * const checkpoint
) {
  assertionMsg( checkpoint->isValid(), "checkpoint has to be valid if you call this operation" );

  _solverState.readFromCheckpoint( *checkpoint );
  _vertexStack.readFromCheckpoint( *checkpoint );
  _cellStack.readFromCheckpoint( *checkpoint );
}
void peano::applications::poisson::multigrid::mappings::SpacetreeGrid2PlotSolution::plotVertex(
  peano::applications::poisson::multigrid::SpacetreeGridVertex&  fineGridVertex,
  const tarch::la::Vector<DIMENSIONS,double>&                    fineGridX
) {
  if ( fineGridVertex.getRefinementControl() == SpacetreeGridVertex::Records::Unrefined ) {
#ifdef SharedTBB
    Vertex2IndexMapSemaphore::scoped_lock localLock(_vertex2IndexMapSemaphore);
#elif SharedOMP
    assertionMsg( false, "here should be a critical section, but I don't know how to implement this. If you implement it, please add it to the templates, too." );
#endif
    if ( _vertex2IndexMap.find(fineGridX) == _vertex2IndexMap.end() ) {
#if defined(Dim2) || defined(Dim3)
      _vertex2IndexMap[fineGridX] = _vertexWriter->plotVertex(fineGridX);
#else
      _vertex2IndexMap[fineGridX] = _vertexWriter->plotVertex(tarch::la::Vector<3,double>(fineGridX.data()));
#endif
      if (fineGridVertex.isHangingNode()) {
        _vertexResidualWriter->plotVertex(_vertex2IndexMap[fineGridX], 0.0);
        _vertexValueWriter->plotVertex   (_vertex2IndexMap[fineGridX], 0.0);
        // @todo For a smooth plot, it might make sense to set the 'right' rhs, i.e.
        //       the rhs belonging to a persistent vertex at this very position.
        _vertexRhsWriter->plotVertex     (_vertex2IndexMap[fineGridX], 0.0);
      }
      else {
        _vertexResidualWriter->plotVertex(_vertex2IndexMap[fineGridX], fineGridVertex.getResidual());
        _vertexValueWriter->plotVertex   (_vertex2IndexMap[fineGridX], fineGridVertex.getU());
        _vertexRhsWriter->plotVertex     (_vertex2IndexMap[fineGridX], fineGridVertex.getRhs());
      }
    }
  }
}
void peano::applications::poisson::multigrid::mappings::SpacetreeGrid2PlotSolution::enterCell(
  peano::applications::poisson::multigrid::SpacetreeGridCell&                  fineGridCell,
  peano::applications::poisson::multigrid::SpacetreeGridVertex * const         fineGridVertices,
  const peano::kernel::gridinterface::VertexEnumerator&                        fineGridVerticesEnumerator,
  peano::applications::poisson::multigrid::SpacetreeGridVertex const * const   coarseGridVertices,
  const peano::kernel::gridinterface::VertexEnumerator&                        coarseGridVerticesEnumerator,
  const peano::applications::poisson::multigrid::SpacetreeGridCell&            coarseGridCell,
  const tarch::la::Vector<DIMENSIONS,int>&                                     fineGridPositionOfCell
) {
  logTraceInWith4Arguments( "enterCell(...)", fineGridCell, fineGridVerticesEnumerator.toString(), coarseGridCell, fineGridPositionOfCell );

  if (!fineGridCell.isRefined()) {
#ifdef SharedTBB
    Vertex2IndexMapSemaphore::scoped_lock localLock(_vertex2IndexMapSemaphore);
#elif SharedOMP
    assertionMsg( false, "here should be a critical section, but I don't know how to implement this. If you implement it, please add it to the templates, too." );
#endif
    assertion( DIMENSIONS==2 || DIMENSIONS==3 );
    int vertexIndex[TWO_POWER_D];
    dfor2(i)
      tarch::la::Vector<DIMENSIONS,double> currentVertexPosition = fineGridVerticesEnumerator.getVertexPosition(i);
      assertion2 ( _vertex2IndexMap.find(currentVertexPosition) != _vertex2IndexMap.end(), currentVertexPosition, fineGridVertices[fineGridVerticesEnumerator(i)].toString() );
      vertexIndex[iScalar] = _vertex2IndexMap[currentVertexPosition];
    enddforx

    if (DIMENSIONS==2) {
      _cellWriter->plotQuadrangle(vertexIndex);
    }
    if (DIMENSIONS==3) {
      _cellWriter->plotHexahedron(vertexIndex);
    }
  }

  logTraceOut( "enterCell(...)" );
}
int peano::applications::puregrid::runners::GridConstructionMovieBatchJobRunnerForSpacetreeGrid::runAsClient(peano::applications::puregrid::repositories::GridConstructionMovieBatchJobRepositoryForSpacetreeGrid& repository) {
  while ( tarch::parallel::NodePool::getInstance().waitForJob() ) {
    assertionMsg( false, "not implemented yet" );

    // peano::kernel::spacetreegrid::parallel::messages::ForkMessage forkMessage;
    // forkMessage.receive(tarch::parallel::NodePool::getInstance().getMasterNodeNumber(),tarch::parallel::NodePool::getInstance().getTagForForkMessages());
    // repository.restart(
    //   forkMessage.getNumberOfGridPoints(),
    //   peano::kernel::spacetreegrid::parallel::SetupPartitioner::getDomainSizeOfForkMessage(forkMessage),
    //   forkMessage.getDomainOffset(),
    //   forkMessage.getNeighbourRanks(),
    //   forkMessage.getTraversalOrderOfNeighbours()
    // );
    //
    // while (repository.continueToIterate()) {
    //   repository.iterate();
    // }

    // insert your postprocessing here
    // -------------------------------

    // -------------------------------

    repository.terminate();
  }
  return 0;
}
void tarch::plotter::griddata::unstructured::binaryvtu::BinaryVTUTextFileWriter::VertexWriter::close() {
  assertion( _myWriter._numberOfVertices==0 );
  assertionMsg( _myWriter.isOpen(), "Maybe you forgot to call close() on a data writer before you destroy your writer?" );

  _myWriter._numberOfVertices = _currentVertexNumber;
  _currentVertexNumber        = -1;
}
void tarch::plotter::griddata::unstructured::vtk::VTKTextFileWriter::VertexDataWriter::close() {
  assertionEquals( _lastWriteCommandVertexNumber, _myWriter._numberOfVertices-1 );
  assertionMsg( _myWriter.isOpen(), "Maybe you forgot to call close() on a data writer before you destroy your writer?" );

  if (_lastWriteCommandVertexNumber>=-1) {
    _out << std::endl;
    _myWriter._vertexDataDescription += _out.str();
  }
  _lastWriteCommandVertexNumber = -2;
}
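// Editorial note (hedged): the assertions in the close() methods above encode a
// usage protocol rather than an error condition -- each sub-writer hands its
// buffered output to the parent writer exactly once, and must be closed before
// the parent writer is written out or destroyed. A typical call sequence,
// sketched with factory and output names along the lines of the tarch plotter
// interface (treat them as assumptions, not verified signatures):
//
//   tarch::plotter::griddata::unstructured::vtk::VTKTextFileWriter writer;
//   VTKTextFileWriter::VertexWriter* vertexWriter = writer.createVertexWriter();
//   // ... plotVertex(...) calls ...
//   vertexWriter->close();        // transfers the vertex count to the writer
//   delete vertexWriter;
//   writer.writeToFile("grid.vtk");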
void peano::applications::faxen::repositories::FaxenBatchJobRepositoryForSpacetreeGridArrayStackImplementation::iterate() {
  tarch::utils::Watch watch( "peano::applications::faxen::repositories::FaxenBatchJobRepositoryForSpacetreeGridArrayStackImplementation", "iterate()", false);

#ifdef Parallel
  if (tarch::parallel::Node::getInstance().isMasterProcess()) {
    tarch::parallel::NodePool::getInstance().broadcastToWorkingNodes(
      _repositoryState,
      peano::kernel::parallel::SendReceiveBufferPool::getInstance().getIterationManagementTag()
    );
  }
#endif

  peano::kernel::datatraversal::autotuning::Oracle::getInstance().switchToOracle(_repositoryState.getAction());

  switch ( _repositoryState.getAction()) {
    case FaxenBatchJobRepositoryState::UseAdapterInitialize:
      watch.startTimer();
      _gridWithInitialize.iterate(_solverState);
      watch.stopTimer();
      _countInitializeRuns++;
      _measureInitializeCPUTime += watch.getCPUTime();
      _measureInitializeCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::UseAdapterInitializeAndSetBoundary:
      watch.startTimer();
      _gridWithInitializeAndSetBoundary.iterate(_solverState);
      watch.stopTimer();
      _countInitializeAndSetBoundaryRuns++;
      _measureInitializeAndSetBoundaryCPUTime += watch.getCPUTime();
      _measureInitializeAndSetBoundaryCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::UseAdapterPlotGrid:
      watch.startTimer();
      _gridWithPlotGrid.iterate(_solverState);
      watch.stopTimer();
      _countPlotGridRuns++;
      _measurePlotGridCPUTime += watch.getCPUTime();
      _measurePlotGridCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::UseAdapterControlTimeStep:
      watch.startTimer();
      _gridWithControlTimeStep.iterate(_solverState);
      watch.stopTimer();
      _countControlTimeStepRuns++;
      _measureControlTimeStepCPUTime += watch.getCPUTime();
      _measureControlTimeStepCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::UseAdapterSetVelocitiesBoundary:
      watch.startTimer();
      _gridWithSetVelocitiesBoundary.iterate(_solverState);
      watch.stopTimer();
      _countSetVelocitiesBoundaryRuns++;
      _measureSetVelocitiesBoundaryCPUTime += watch.getCPUTime();
      _measureSetVelocitiesBoundaryCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::UseAdapterSetScenarioBoundary:
      watch.startTimer();
      _gridWithSetScenarioBoundary.iterate(_solverState);
      watch.stopTimer();
      _countSetScenarioBoundaryRuns++;
      _measureSetScenarioBoundaryCPUTime += watch.getCPUTime();
      _measureSetScenarioBoundaryCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::UseAdapterComputeVelocitiesDerivatives:
      watch.startTimer();
      _gridWithComputeVelocitiesDerivatives.iterate(_solverState);
      watch.stopTimer();
      _countComputeVelocitiesDerivativesRuns++;
      _measureComputeVelocitiesDerivativesCPUTime += watch.getCPUTime();
      _measureComputeVelocitiesDerivativesCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::UseAdapterComputeRightHandSide:
      watch.startTimer();
      _gridWithComputeRightHandSide.iterate(_solverState);
      watch.stopTimer();
      _countComputeRightHandSideRuns++;
      _measureComputeRightHandSideCPUTime += watch.getCPUTime();
      _measureComputeRightHandSideCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::UseAdapterSetZeroPressureBoundary:
      watch.startTimer();
      _gridWithSetZeroPressureBoundary.iterate(_solverState);
      watch.stopTimer();
      _countSetZeroPressureBoundaryRuns++;
      _measureSetZeroPressureBoundaryCPUTime += watch.getCPUTime();
      _measureSetZeroPressureBoundaryCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::UseAdapterSetPressureBoundary:
      watch.startTimer();
      _gridWithSetPressureBoundary.iterate(_solverState);
      watch.stopTimer();
      _countSetPressureBoundaryRuns++;
      _measureSetPressureBoundaryCPUTime += watch.getCPUTime();
      _measureSetPressureBoundaryCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::UseAdapterSORStep:
      watch.startTimer();
      _gridWithSORStep.iterate(_solverState);
      watch.stopTimer();
      _countSORStepRuns++;
      _measureSORStepCPUTime += watch.getCPUTime();
      _measureSORStepCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::UseAdapterComputeResidualNorm:
      watch.startTimer();
      _gridWithComputeResidualNorm.iterate(_solverState);
      watch.stopTimer();
      _countComputeResidualNormRuns++;
      _measureComputeResidualNormCPUTime += watch.getCPUTime();
      _measureComputeResidualNormCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::UseAdapterComputeVelocities:
      watch.startTimer();
      _gridWithComputeVelocities.iterate(_solverState);
      watch.stopTimer();
      _countComputeVelocitiesRuns++;
      _measureComputeVelocitiesCPUTime += watch.getCPUTime();
      _measureComputeVelocitiesCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::UseAdapterPlotSolution:
      watch.startTimer();
      _gridWithPlotSolution.iterate(_solverState);
      watch.stopTimer();
      _countPlotSolutionRuns++;
      _measurePlotSolutionCPUTime += watch.getCPUTime();
      _measurePlotSolutionCalendarTime += watch.getCalendarTime();
      break;
    case FaxenBatchJobRepositoryState::Terminate:
      assertionMsg( false, "this branch/state should never be reached" );
      break;
    case FaxenBatchJobRepositoryState::ReadCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
    case FaxenBatchJobRepositoryState::WriteCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
  }
}
void tarch::plotter::griddata::unstructured::vtk::VTKTextFileWriter::CellWriter::close() {
  assertion( _myWriter._numberOfCells==0 );
  assertion( _myWriter._numberOfCellEntries==0 );
  assertionMsg( _myWriter.isOpen(), "Maybe you forgot to call close() on a data writer before you destroy your writer?" );

  _myWriter._numberOfCells       = _currentCellNumber;
  _myWriter._numberOfCellEntries = _cellListEntries;
  _myWriter._cellDescription     = _cellOut.str();
  _myWriter._cellTypeDescription = _cellTypeOut.str();

  _currentCellNumber = -1;
  _cellListEntries   = -1;
}
void peano::applications::navierstokes::prototype1::repositories::PrototypeRepositoryForRegularGridStandardImplementation::iterate(bool reduceState) {
  // Watch label fixed to match the actual signature, iterate(bool).
  tarch::utils::Watch watch( "peano::applications::navierstokes::prototype1::repositories::PrototypeRepositoryForRegularGridStandardImplementation", "iterate(bool)", false);

  _repositoryState.setReduceState(reduceState);

#ifdef Parallel
  if (tarch::parallel::Node::getInstance().isMasterProcess()) {
    tarch::parallel::NodePool::getInstance().broadcastToWorkingNodes(
      _repositoryState,
      peano::kernel::parallel::SendReceiveBufferPool::getInstance().getIterationManagementTag()
    );
  }
#endif

  peano::kernel::datatraversal::autotuning::Oracle::getInstance().switchToOracle(_repositoryState.getAction());

  switch ( _repositoryState.getAction()) {
    case PrototypeRepositoryState::UseAdapterInitialiseScenario:
      watch.startTimer();
      _gridWithInitialiseScenario.iterate(reduceState);
      watch.stopTimer();
      _countInitialiseScenarioRuns++;
      _measureInitialiseScenarioCPUTime += watch.getCPUTime();
      _measureInitialiseScenarioCalendarTime += watch.getCalendarTime();
      break;
    case PrototypeRepositoryState::UseAdapterMergeA:
      watch.startTimer();
      _gridWithMergeA.iterate(reduceState);
      watch.stopTimer();
      _countMergeARuns++;
      _measureMergeACPUTime += watch.getCPUTime();
      _measureMergeACalendarTime += watch.getCalendarTime();
      break;
    case PrototypeRepositoryState::UseAdapterCalculateF:
      watch.startTimer();
      _gridWithCalculateF.iterate(reduceState);
      watch.stopTimer();
      _countCalculateFRuns++;
      _measureCalculateFCPUTime += watch.getCPUTime();
      _measureCalculateFCalendarTime += watch.getCalendarTime();
      break;
    case PrototypeRepositoryState::UseAdapterCalculatePPERHS:
      watch.startTimer();
      _gridWithCalculatePPERHS.iterate(reduceState);
      watch.stopTimer();
      _countCalculatePPERHSRuns++;
      _measureCalculatePPERHSCPUTime += watch.getCPUTime();
      _measureCalculatePPERHSCalendarTime += watch.getCalendarTime();
      break;
    case PrototypeRepositoryState::UseAdapterGaussSeidelForEnhancedDivFree:
      watch.startTimer();
      _gridWithGaussSeidelForEnhancedDivFree.iterate(reduceState);
      watch.stopTimer();
      _countGaussSeidelForEnhancedDivFreeRuns++;
      _measureGaussSeidelForEnhancedDivFreeCPUTime += watch.getCPUTime();
      _measureGaussSeidelForEnhancedDivFreeCalendarTime += watch.getCalendarTime();
      break;
    case PrototypeRepositoryState::UseAdapterUpdateVelocity:
      watch.startTimer();
      _gridWithUpdateVelocity.iterate(reduceState);
      watch.stopTimer();
      _countUpdateVelocityRuns++;
      _measureUpdateVelocityCPUTime += watch.getCPUTime();
      _measureUpdateVelocityCalendarTime += watch.getCalendarTime();
      break;
    case PrototypeRepositoryState::UseAdapterPlotSolutionVTK:
      watch.startTimer();
      _gridWithPlotSolutionVTK.iterate(reduceState);
      watch.stopTimer();
      _countPlotSolutionVTKRuns++;
      _measurePlotSolutionVTKCPUTime += watch.getCPUTime();
      _measurePlotSolutionVTKCalendarTime += watch.getCalendarTime();
      break;
    case PrototypeRepositoryState::UseAdapterPlotRegularSolutionVTK:
      watch.startTimer();
      _gridWithPlotRegularSolutionVTK.iterate(reduceState);
      watch.stopTimer();
      _countPlotRegularSolutionVTKRuns++;
      _measurePlotRegularSolutionVTKCPUTime += watch.getCPUTime();
      _measurePlotRegularSolutionVTKCalendarTime += watch.getCalendarTime();
      break;
    case PrototypeRepositoryState::UseAdapterUpdateScenario:
      watch.startTimer();
      _gridWithUpdateScenario.iterate(reduceState);
      watch.stopTimer();
      _countUpdateScenarioRuns++;
      _measureUpdateScenarioCPUTime += watch.getCPUTime();
      _measureUpdateScenarioCalendarTime += watch.getCalendarTime();
      break;
    case PrototypeRepositoryState::Terminate:
      assertionMsg( false, "this branch/state should never be reached" );
      break;
    case PrototypeRepositoryState::ReadCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
    case PrototypeRepositoryState::WriteCheckpoint:
      assertionMsg( false, "not implemented yet" );
      break;
  }
}
double scenario::diffusionequation::CornerPointField::getThermalDiffusivity(const tarch::la::Vector<3,double>& x) {
  logTraceInWith1Argument( "getThermalDiffusivity(x)", x );

  if (_hexahedron.isOutsideClosedDomain(x)) {
    logTraceOutWith1Argument( "getThermalDiffusivity(x)", 0.0 );
    return 0.0;
  }

#ifdef Dim3
  double result = getPorosityFromDataSet(x);
  logTraceOutWith1Argument( "getThermalDiffusivity(x)", result );
  return result;
#else
  assertionMsg( false, "not implemented yet");
  logTraceOutWith1Argument( "getThermalDiffusivity(x)", 0.0 );
  return 0.0;
#endif
}
void pyclaw_peano_destroy(peanoclaw::runners::PeanoClawLibraryRunner* runner) {
  static tarch::logging::Log _log("::pyclawBindings");
  logTraceIn("pyclaw_peano_destroy");
  assertionMsg(runner!=0, "call pyclaw_peano_new before calling pyclaw_peano_destroy.");

  delete runner;
  if (_configuration != 0) {
    delete _configuration;
  }

  // Only shut the interpreter down if the library started it itself.
  if (!_calledFromPython) {
    Py_Finalize();
  }
  logTraceOut("pyclaw_peano_destroy");
}
int peano::applications::navierstokes::prototype1::scenarios::FluidObstacleInChannel::getUniqueBoundaryNumber( int numberOfBoundaries, int boundaryNumber[], const Vector& x ) const {
  bool isWall     = false;
  bool isObstacle = false;
  bool isInflow   = false;
  bool isOutflow  = false;

  for (int i=0; i<numberOfBoundaries; i++) {
    isWall     |= boundaryNumber[i] > 0 && boundaryNumber[i] < 2*DIMENSIONS && boundaryNumber[i] != DIMENSIONS;
    isObstacle |= boundaryNumber[i] >= 2*DIMENSIONS && boundaryNumber[i] < 20;
    isInflow   |= boundaryNumber[i] == 0;
    isOutflow  |= boundaryNumber[i] == DIMENSIONS;
    assertionMsg( boundaryNumber[i] < 20, "must not appear (except PeGSI workaround channel)! check obstacle basenumber!");
  }

  // add inlet offset information: inlet smaller -> WALL
  if (isInflow && isInletSmallerResultingInWall(x) ) {
    return WALL;
  }

  if ( isInflow && isOutflow ) {
    // Assertion disabled for moving geometries
    // assertionMsg( false, "topology mismatch" );
  }
  if ( isObstacle && isInflow ) {
    // Assertion disabled for moving geometries
    // assertionMsg( false, "topology mismatch" );
  }
  if ( isObstacle && isWall ) {
    // Following assertion ignored for the Peano grid: root cell problem!
    // assertionMsg( false, "topology mismatch" );
  }

  if (isWall)     return WALL;
  if (isObstacle) return OBSTACLE;
  if (isOutflow)  return OUTFLOW;
  if (isInflow)   return INFLOW;

  assertion(false);
  return -1;
}
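// Editorial illustration (hedged, not from the sources): the predicates above
// rely on a face-numbering convention in which face 0 is the inflow, face
// DIMENSIONS the opposite outflow, the remaining faces below 2*DIMENSIONS are
// channel walls, and numbers from 2*DIMENSIONS up to the obstacle base number
// (20 here) denote obstacle faces. A self-contained restatement for a
// hypothetical two-dimensional setup; all names are illustrative:
#include <cassert>

namespace demo {
  constexpr int kDim = 2;  // stand-in for DIMENSIONS
  enum Boundary { INFLOW_FACE, OUTFLOW_FACE, WALL_FACE, OBSTACLE_FACE };

  inline Boundary classify(int boundaryNumber) {
    assert(boundaryNumber >= 0 && boundaryNumber < 20);
    if (boundaryNumber == 0)       return INFLOW_FACE;
    if (boundaryNumber == kDim)    return OUTFLOW_FACE;
    if (boundaryNumber < 2 * kDim) return WALL_FACE;
    return OBSTACLE_FACE;          // 2*kDim .. 19
  }
}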
void peano::applications::faxen::adapters::SpacetreeGrid2PlotGrid::enterCell(
  peano::applications::faxen::SpacetreeGridCell&                 fineGridCell,
  peano::applications::faxen::SpacetreeGridVertex * const        fineGridVertices,
  const peano::kernel::gridinterface::VertexEnumerator&          fineGridVerticesEnumerator,
  peano::applications::faxen::SpacetreeGridVertex const * const  coarseGridVertices,
  const peano::kernel::gridinterface::VertexEnumerator&          coarseGridVerticesEnumerator,
  const peano::applications::faxen::SpacetreeGridCell&           coarseGridCell,
  const tarch::la::Vector<DIMENSIONS,int>&                       fineGridPositionOfCell
) {
  logTraceInWith5Arguments( "enterCell(...)", fineGridCell, fineGridVerticesEnumerator.toString(), coarseGridVerticesEnumerator.toString(), coarseGridCell, fineGridPositionOfCell );

#ifdef SharedTBB
  Vertex2IndexMapSemaphore::scoped_lock localLock(_vertex2IndexMapSemaphore);
#elif SharedOMP
  assertionMsg( false, "here should be a critical section, but I don't know how to implement this. If you implement it, please add it to the templates, too." );
#endif

  if ( fineGridCell.isLeaf() ) {
    assertion( DIMENSIONS==2 || DIMENSIONS==3 );
    int vertexIndex[TWO_POWER_D];
    dfor2(i)
      tarch::la::Vector<DIMENSIONS,double> currentVertexPosition = fineGridVerticesEnumerator.getVertexPosition(i);
      assertion1 ( _vertex2IndexMap.find(currentVertexPosition) != _vertex2IndexMap.end(), currentVertexPosition );
      vertexIndex[iScalar] = _vertex2IndexMap[currentVertexPosition];
    enddforx

    int cellIndex;
    if (DIMENSIONS==2) {
      cellIndex = _cellWriter->plotQuadrangle(vertexIndex);
    }
    if (DIMENSIONS==3) {
      cellIndex = _cellWriter->plotHexahedron(vertexIndex);
    }

#ifdef Parallel
    _cellDeltaWriter->plotCell ( cellIndex, fineGridCell.getDelta() );
    _cellWeightWriter->plotCell( cellIndex, fineGridCell.getWeight() );
#endif
  }
  logTraceOut( "enterCell(...)" );
}
void peano::applications::poisson::multigrid::mappings::SpacetreeGrid2SetupExperiment::createInnerVertex(
  peano::applications::poisson::multigrid::SpacetreeGridVertex&                fineGridVertex,
  const tarch::la::Vector<DIMENSIONS,double>&                                  fineGridX,
  const tarch::la::Vector<DIMENSIONS,double>&                                  fineGridH,
  peano::applications::poisson::multigrid::SpacetreeGridVertex const * const   coarseGridVertices,
  const peano::kernel::gridinterface::VertexEnumerator&                        coarseGridVerticesEnumerator,
  const peano::applications::poisson::multigrid::SpacetreeGridCell&            coarseGridCell,
  const tarch::la::Vector<DIMENSIONS,int>&                                     fineGridPositionOfVertex
) {
  logTraceInWith6Arguments( "createInnerVertex(...)", fineGridVertex, fineGridX, fineGridH, coarseGridVerticesEnumerator.toString(), coarseGridCell, fineGridPositionOfVertex );

  // if (tarch::la::volume(fineGridH) > _refinementThreshold) {
  //   fineGridVertex.refine();
  // }
  if (coarseGridVerticesEnumerator.getLevel() < 3) {
    fineGridVertex.refine();
  }

  peano::toolbox::stencil::Stencil stencil;
#ifdef Dim2
  //if (fineGridVertex.getLevel() == 4) {
  stencil =
    // kappa_x *
    peano::toolbox::stencil::StencilFactory::stencilProduct(
      peano::toolbox::stencil::StencilFactory::get1DLaplaceStencil(fineGridH(0)),
      peano::toolbox::stencil::StencilFactory::get1DMassStencil(fineGridH(1))
    ) +
    // kappa_y *
    peano::toolbox::stencil::StencilFactory::stencilProduct(
      peano::toolbox::stencil::StencilFactory::get1DMassStencil(fineGridH(0)),
      peano::toolbox::stencil::StencilFactory::get1DLaplaceStencil(fineGridH(1))
    );

  assertionNumericalEquals(stencil(0), -1.0/3.0);
  assertionNumericalEquals(stencil(1), -1.0/3.0);
  assertionNumericalEquals(stencil(2), -1.0/3.0);
  assertionNumericalEquals(stencil(3), -1.0/3.0);
  assertionNumericalEquals(stencil(4),  8.0/3.0);
  assertionNumericalEquals(stencil(5), -1.0/3.0);
  assertionNumericalEquals(stencil(6), -1.0/3.0);
  assertionNumericalEquals(stencil(7), -1.0/3.0);
  assertionNumericalEquals(stencil(8), -1.0/3.0);

#if defined(Asserts)
  peano::toolbox::stencil::ElementMatrix elementMatrix;
  peano::toolbox::stencil::ElementWiseAssemblyMatrix testMatrix =
    elementMatrix.getElementWiseAssemblyMatrix( stencil );
  assertionNumericalEquals(testMatrix(0,0),  2.0/3.0);
  assertionNumericalEquals(testMatrix(0,1), -0.5/3.0);
  assertionNumericalEquals(testMatrix(0,2), -0.5/3.0);
  assertionNumericalEquals(testMatrix(0,3), -1.0/3.0);
  assertionNumericalEquals(testMatrix(1,0), -0.5/3.0);
  assertionNumericalEquals(testMatrix(1,1),  2.0/3.0);
  assertionNumericalEquals(testMatrix(1,2), -1.0/3.0);
  assertionNumericalEquals(testMatrix(1,3), -0.5/3.0);
  assertionNumericalEquals(testMatrix(2,0), -0.5/3.0);
  assertionNumericalEquals(testMatrix(2,1), -1.0/3.0);
  assertionNumericalEquals(testMatrix(2,2),  2.0/3.0);
  assertionNumericalEquals(testMatrix(2,3), -0.5/3.0);
  assertionNumericalEquals(testMatrix(3,0), -1.0/3.0);
  assertionNumericalEquals(testMatrix(3,1), -0.5/3.0);
  assertionNumericalEquals(testMatrix(3,2), -0.5/3.0);
  assertionNumericalEquals(testMatrix(3,3),  2.0/3.0);
  //logDebug( "createInnerVertex(...)", testMatrix );
#endif

  // tarch::la::assignList(stencil) = -1.0/3.0, -1.0/3.0, -1.0/3.0, -1.0/3.0, 8.0/3.0, -1.0/3.0, -1.0/3.0, -1.0/3.0, -1.0/3.0;
  //}
  //else {
  //  tarch::la::assignList(stencil) = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0;
  //}
  fineGridVertex.setStencil(stencil);

  // double squaredDistanceFromCenter = 0.0;
  // for (int d=0; d<DIMENSIONS; d++) {
  //   squaredDistanceFromCenter += (0.5 - fineGridX(d)) * (0.5 - fineGridX(d));
  // }
  // if (squaredDistanceFromCenter<0.24*0.24) {
  //   stencil *= 4.2;
  // }

  peano::toolbox::stencil::ProlongationMatrix prolongation;
  tarch::la::assignList(prolongation) =
    1.0/9.0, 2.0/9.0, 3.0/9.0, 2.0/9.0, 1.0/9.0,
    2.0/9.0, 4.0/9.0, 6.0/9.0, 4.0/9.0, 2.0/9.0,
    3.0/9.0, 6.0/9.0, 9.0/9.0, 6.0/9.0, 3.0/9.0,
    2.0/9.0, 4.0/9.0, 6.0/9.0, 4.0/9.0, 2.0/9.0,
    1.0/9.0, 2.0/9.0, 3.0/9.0, 2.0/9.0, 1.0/9.0;
  fineGridVertex.setP(prolongation);

  peano::toolbox::stencil::RestrictionMatrix restriction;
  tarch::la::assignList(restriction) =
    1.0/9.0, 2.0/9.0, 3.0/9.0, 2.0/9.0, 1.0/9.0,
    2.0/9.0, 4.0/9.0, 6.0/9.0, 4.0/9.0, 2.0/9.0,
    3.0/9.0, 6.0/9.0, 9.0/9.0, 6.0/9.0, 3.0/9.0,
    2.0/9.0, 4.0/9.0, 6.0/9.0, 4.0/9.0, 2.0/9.0,
    1.0/9.0, 2.0/9.0, 3.0/9.0, 2.0/9.0, 1.0/9.0;
  fineGridVertex.setR(restriction);

  fineGridVertex.setRhs(1.0, fineGridH);
#else
  assertionMsg( false, "not implemented yet" );
#endif

  logTraceOutWith1Argument( "createInnerVertex(...)", fineGridVertex );
}
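// Editorial illustration (hedged, not from the sources): the 25 prolongation
// and restriction weights assigned above are the tensor product of the 1D
// d-linear interpolation weights for 3-refinement, w = (1/3, 2/3, 1, 2/3, 1/3);
// every k/9 entry equals w_i * w_j. A self-contained check:
#include <array>
#include <cstdio>

int main() {
  const std::array<double, 5> w = {1.0/3.0, 2.0/3.0, 1.0, 2.0/3.0, 1.0/3.0};
  for (std::size_t j = 0; j < w.size(); j++) {
    for (std::size_t i = 0; i < w.size(); i++) {
      std::printf("%5.3f ", w[i] * w[j]);  // reproduces the matrix rows above
    }
    std::printf("\n");
  }
  return 0;
}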
dem::repositories::RepositorySTDStack::~RepositorySTDStack() {
  assertionMsg( _repositoryState.getAction() == dem::records::RepositoryState::Terminate, "terminate() must be called before destroying repository." );
}
peano::kernel::spacetreegrid::SingleLevelEnumerator::Vector peano::kernel::spacetreegrid::SingleLevelEnumerator::getVertexPosition(int localVertexNumber) const {
  peano::kernel::spacetreegrid::SingleLevelEnumerator::Vector result( _domainOffset );
  assertionMsg(false, "not implemented yet");
  return result;
}
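// Editorial sketch (hedged): getVertexPosition() above is a stub that only
// returns the domain offset. A hypothetical completion -- the _cellSize field
// is an assumption for illustration, not the framework's actual member --
// would shift the cell origin by the cell width along every axis on which the
// local vertex number has its bit set:
//
//   for (int d = 0; d < DIMENSIONS; d++) {
//     if ((localVertexNumber >> d) & 1) {
//       result(d) += _cellSize(d);  // _cellSize is hypothetical
//     }
//   }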
void peanoclaw::repositories::RepositoryArrayStack::iterate(int numberOfIterations) {
  // Watch label fixed to match the actual signature, iterate(int).
  tarch::timing::Watch watch( "peanoclaw::repositories::RepositoryArrayStack", "iterate(int)", false);

#ifdef Parallel
  if (tarch::parallel::Node::getInstance().isGlobalMaster()) {
    _repositoryState.setNumberOfIterations(numberOfIterations);
    tarch::parallel::NodePool::getInstance().broadcastToWorkingNodes(
      _repositoryState,
      peano::parallel::SendReceiveBufferPool::getInstance().getIterationManagementTag()
    );
  }
  else {
    assertionEquals( numberOfIterations, 1 );
    numberOfIterations = _repositoryState.getNumberOfIterations();
  }

  if ( numberOfIterations > 1 && ( peano::parallel::loadbalancing::Oracle::getInstance().isLoadBalancingActivated() || _solverState.isInvolvedInJoinOrFork() )) {
    logWarning( "iterate()", "iterate invoked for multiple traversals though load balancing is switched on or grid is not balanced globally. Use activateLoadBalancing(false) to deactivate the load balancing before" );
  }

  peano::datatraversal::autotuning::Oracle::getInstance().switchToOracle(_repositoryState.getAction());
  peano::parallel::loadbalancing::Oracle::getInstance().switchToOracle(_repositoryState.getAction());
  peano::parallel::loadbalancing::Oracle::getInstance().activateLoadBalancing(_repositoryState.getNumberOfIterations()==1);

  _solverState.currentlyRunsMultipleIterations(_repositoryState.getNumberOfIterations()>1);
#else
  peano::datatraversal::autotuning::Oracle::getInstance().switchToOracle(_repositoryState.getAction());
#endif

  for (int i=0; i<numberOfIterations; i++) {
    switch ( _repositoryState.getAction()) {
      case peanoclaw::records::RepositoryState::UseAdapterInitialiseGrid:
        watch.startTimer();
        _gridWithInitialiseGrid.iterate();
        watch.stopTimer();
        _measureInitialiseGridCPUTime.setValue( watch.getCPUTime() );
        _measureInitialiseGridCalendarTime.setValue( watch.getCalendarTime() );
        break;
      case peanoclaw::records::RepositoryState::UseAdapterInitialiseAndValidateGrid:
        watch.startTimer();
        _gridWithInitialiseAndValidateGrid.iterate();
        watch.stopTimer();
        _measureInitialiseAndValidateGridCPUTime.setValue( watch.getCPUTime() );
        _measureInitialiseAndValidateGridCalendarTime.setValue( watch.getCalendarTime() );
        break;
      case peanoclaw::records::RepositoryState::UseAdapterPlot:
        watch.startTimer();
        _gridWithPlot.iterate();
        watch.stopTimer();
        _measurePlotCPUTime.setValue( watch.getCPUTime() );
        _measurePlotCalendarTime.setValue( watch.getCalendarTime() );
        break;
      case peanoclaw::records::RepositoryState::UseAdapterPlotAndValidateGrid:
        watch.startTimer();
        _gridWithPlotAndValidateGrid.iterate();
        watch.stopTimer();
        _measurePlotAndValidateGridCPUTime.setValue( watch.getCPUTime() );
        _measurePlotAndValidateGridCalendarTime.setValue( watch.getCalendarTime() );
        break;
      case peanoclaw::records::RepositoryState::UseAdapterRemesh:
        watch.startTimer();
        _gridWithRemesh.iterate();
        watch.stopTimer();
        _measureRemeshCPUTime.setValue( watch.getCPUTime() );
        _measureRemeshCalendarTime.setValue( watch.getCalendarTime() );
        break;
      case peanoclaw::records::RepositoryState::UseAdapterSolveTimestep:
        watch.startTimer();
        _gridWithSolveTimestep.iterate();
        watch.stopTimer();
        _measureSolveTimestepCPUTime.setValue( watch.getCPUTime() );
        _measureSolveTimestepCalendarTime.setValue( watch.getCalendarTime() );
        break;
      case peanoclaw::records::RepositoryState::UseAdapterSolveTimestepAndValidateGrid:
        watch.startTimer();
        _gridWithSolveTimestepAndValidateGrid.iterate();
        watch.stopTimer();
        _measureSolveTimestepAndValidateGridCPUTime.setValue( watch.getCPUTime() );
        _measureSolveTimestepAndValidateGridCalendarTime.setValue( watch.getCalendarTime() );
        break;
      case peanoclaw::records::RepositoryState::UseAdapterSolveTimestepAndPlot:
        watch.startTimer();
        _gridWithSolveTimestepAndPlot.iterate();
        watch.stopTimer();
        _measureSolveTimestepAndPlotCPUTime.setValue( watch.getCPUTime() );
        _measureSolveTimestepAndPlotCalendarTime.setValue( watch.getCalendarTime() );
        break;
      case peanoclaw::records::RepositoryState::UseAdapterSolveTimestepAndPlotAndValidateGrid:
        watch.startTimer();
        _gridWithSolveTimestepAndPlotAndValidateGrid.iterate();
        watch.stopTimer();
        _measureSolveTimestepAndPlotAndValidateGridCPUTime.setValue( watch.getCPUTime() );
        _measureSolveTimestepAndPlotAndValidateGridCalendarTime.setValue( watch.getCalendarTime() );
        break;
      case peanoclaw::records::RepositoryState::UseAdapterGatherCurrentSolution:
        watch.startTimer();
        _gridWithGatherCurrentSolution.iterate();
        watch.stopTimer();
        _measureGatherCurrentSolutionCPUTime.setValue( watch.getCPUTime() );
        _measureGatherCurrentSolutionCalendarTime.setValue( watch.getCalendarTime() );
        break;
      case peanoclaw::records::RepositoryState::UseAdapterGatherCurrentSolutionAndValidateGrid:
        watch.startTimer();
        _gridWithGatherCurrentSolutionAndValidateGrid.iterate();
        watch.stopTimer();
        _measureGatherCurrentSolutionAndValidateGridCPUTime.setValue( watch.getCPUTime() );
        _measureGatherCurrentSolutionAndValidateGridCalendarTime.setValue( watch.getCalendarTime() );
        break;
      case peanoclaw::records::RepositoryState::UseAdapterCleanup:
        watch.startTimer();
        _gridWithCleanup.iterate();
        watch.stopTimer();
        _measureCleanupCPUTime.setValue( watch.getCPUTime() );
        _measureCleanupCalendarTime.setValue( watch.getCalendarTime() );
        break;
      case peanoclaw::records::RepositoryState::Terminate:
        assertionMsg( false, "this branch/state should never be reached" );
        break;
      case peanoclaw::records::RepositoryState::NumberOfAdapters:
        assertionMsg( false, "this branch/state should never be reached" );
        break;
      case peanoclaw::records::RepositoryState::RunOnAllNodes:
        assertionMsg( false, "this branch/state should never be reached" );
        break;
      case peanoclaw::records::RepositoryState::ReadCheckpoint:
        assertionMsg( false, "not implemented yet" );
        break;
      case peanoclaw::records::RepositoryState::WriteCheckpoint:
        assertionMsg( false, "not implemented yet" );
        break;
    }
  }

#ifdef Parallel
  if (_solverState.isJoiningWithMaster()) {
    _repositoryState.setAction( peanoclaw::records::RepositoryState::Terminate );
  }
#endif
}
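// Editorial note (hedged): iterate(int) above broadcasts the repository state,
// including the iteration count, once per call; workers then replay the
// requested number of traversals locally. Batching several traversals is
// therefore only safe once load balancing is off and the grid is globally
// balanced, as the logWarning above points out. A hypothetical driver sketch
// (the adapter-switch call is assumed, not verified against the generated API):
//
//   repository.switchToSolveTimestep();  // hypothetical generated setter
//   peano::parallel::loadbalancing::Oracle::getInstance().activateLoadBalancing(false);
//   repository.iterate(10);              // one broadcast, ten grid traversals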
double scenario::diffusionequation::CornerPointField::getThermalDiffusivity(const tarch::la::Vector<1,double>& x) {
  logTraceInWith1Argument( "getThermalDiffusivity(x)", x );
  assertionMsg( false, "not implemented yet");
  logTraceOutWith1Argument( "getThermalDiffusivity(x)", 0.0 );
  return 0.0;
}
void peano::applications::poisson::vhhjacobi::repositories::JacobiBatchJobRepositoryForRegularGridStandardImplementation::readCheckpoint(
  peano::kernel::gridinterface::Checkpoint<peano::applications::poisson::vhhjacobi::RegularGridVertex, peano::applications::poisson::vhhjacobi::RegularGridCell> const * const checkpoint
) {
  assertionMsg( checkpoint->isValid(), "checkpoint has to be valid if you call this operation" );

  _solverState.readFromCheckpoint( *checkpoint );
  _gridContainer.readFromCheckpoint( *checkpoint );
}
void peano::applications::navierstokes::prototype1::repositories::PrototypeRepositoryForRegularGridStandardImplementation::readCheckpoint(
  peano::kernel::gridinterface::Checkpoint<peano::applications::navierstokes::prototype1::RegularGridFluidVertexEnhancedDivFreeEulerExplicit, peano::applications::navierstokes::prototype1::RegularGridFluidCellEnhancedDivFreeEulerExplicit> const * const checkpoint
) {
  assertionMsg( checkpoint->isValid(), "checkpoint has to be valid if you call this operation" );

  _solverState.readFromCheckpoint( *checkpoint );
  _gridContainer.readFromCheckpoint( *checkpoint );
}