void LocationIndexRobinBcCoefs::setBcCoefs(
   const std::shared_ptr<pdat::ArrayData<double> >& acoef_data,
   const std::shared_ptr<pdat::ArrayData<double> >& bcoef_data,
   const std::shared_ptr<pdat::ArrayData<double> >& gcoef_data,
   const std::shared_ptr<hier::Variable>& variable,
   const hier::Patch& patch,
   const hier::BoundaryBox& bdry_box,
   double fill_time) const
{
   TBOX_ASSERT_DIM_OBJDIM_EQUALITY2(d_dim, patch, bdry_box);
   NULL_USE(variable);
   NULL_USE(patch);
   NULL_USE(fill_time);

   int location = bdry_box.getLocationIndex();
   TBOX_ASSERT(location >= 0 && location < 2 * d_dim.getValue());
   if (acoef_data) {
      TBOX_ASSERT_DIM_OBJDIM_EQUALITY1(d_dim, *acoef_data);
      acoef_data->fill(d_a_map[location]);
   }
   if (bcoef_data) {
      TBOX_ASSERT_DIM_OBJDIM_EQUALITY1(d_dim, *bcoef_data);
      bcoef_data->fill(d_b_map[location]);
   }
   if (gcoef_data) {
      TBOX_ASSERT_DIM_OBJDIM_EQUALITY1(d_dim, *gcoef_data);
      gcoef_data->fill(d_g_map[location]);
   }
}
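// Hedged usage sketch (not from the source): the maps filled above encode a
// Robin condition  a*u + b*(du/dn) = g  per boundary location index.
// Assuming the usual SAMRAI solv::LocationIndexRobinBcCoefs interface
// (setBoundaryValue()/setBoundarySlope()), a Dirichlet value u = 1 on
// location 0 and a homogeneous Neumann condition on location 1 could be
// configured as:
//
//    solv::LocationIndexRobinBcCoefs bc_coefs(dim, "bc_coefs", input_db);
//    bc_coefs.setBoundaryValue(0, 1.0);   // a = 1, b = 0, g = 1
//    bc_coefs.setBoundarySlope(1, 0.0);   // a = 0, b = 1, g = 0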
// Write the patch mesh to a VTK file.
void MeshOpt::writeToVTK(hier::Patch<NDIM>& patch,
                         const double time,
                         const double dt,
                         const bool initial_time)
{
    NULL_USE(dt);
    NULL_USE(time);
    NULL_USE(initial_time);

    const tbox::Pointer<hier::BlockPatchGeometry<NDIM> > pgeom =
        patch.getPatchGeometry();

    int block_index = pgeom->getBlockNumber();
    int patch_index = patch.getPatchNumber();

    std::stringstream bi, pi, df;
    bi << block_index;
    pi << patch_index;
    df << d_flag;

    std::string file_name = df.str() + "_block_" + bi.str()
        + "_patch_" + pi.str() + ".vtk";

    MsqError err;
    MeshImpl* mesh = createLocalMesh(patch);
    mesh->write_vtk(file_name.c_str(), err);
    return;
}
int SAMRAI_MPI::Waitsome(
   int incount,
   Request* array_of_requests,
   int* outcount,
   int* array_of_indices,
   Status* array_of_statuses)
{
#ifndef HAVE_MPI
   NULL_USE(incount);
   NULL_USE(array_of_requests);
   NULL_USE(outcount);
   NULL_USE(array_of_indices);
   NULL_USE(array_of_statuses);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Waitsome is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Waitsome(incount, array_of_requests, outcount,
            array_of_indices, array_of_statuses);
   }
#endif
   return rval;
}
void CVODEModel::applyGradientDetector(
   const std::shared_ptr<PatchHierarchy>& hierarchy,
   const int level_number,
   const double time,
   const int tag_index,
   const bool initial_time,
   const bool uses_richardson_extrapolation_too)
{
   NULL_USE(time);
   NULL_USE(initial_time);
   NULL_USE(uses_richardson_extrapolation_too);

   std::shared_ptr<PatchLevel> level(
      hierarchy->getPatchLevel(level_number));

   for (PatchLevel::iterator p(level->begin()); p != level->end(); ++p) {
      const std::shared_ptr<Patch>& patch = *p;

      std::shared_ptr<CellData<int> > tag_data(
         SAMRAI_SHARED_PTR_CAST<CellData<int>, PatchData>(
            patch->getPatchData(tag_index)));

      TBOX_ASSERT(tag_data);

      // Simple implementation: tag every cell on the level for refinement.
      tag_data->fillAll(TRUE);
   }
}
int SAMRAI_MPI::Allreduce(
   void* sendbuf,
   void* recvbuf,
   int count,
   Datatype datatype,
   Op op) const
{
#ifndef HAVE_MPI
   NULL_USE(sendbuf);
   NULL_USE(recvbuf);
   NULL_USE(count);
   NULL_USE(datatype);
   NULL_USE(op);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Allreduce is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, d_comm);
   }
#endif
   return rval;
}
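// Hedged usage sketch (not from the source): a global sum over all ranks of
// the SAMRAI world communicator.  With MPI disabled at run time this path is
// an error, per the guard above.
//
//    const tbox::SAMRAI_MPI& mpi = tbox::SAMRAI_MPI::getSAMRAIWorld();
//    int local_count = 1, global_count = 0;
//    mpi.Allreduce(&local_count, &global_count, 1, MPI_INT, MPI_SUM);
//    // global_count now equals the number of ranks.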
void HierarchyProjector::initializeLevelData(
    const Pointer<BasePatchHierarchy<NDIM> > hierarchy,
    const int level_number,
    const double /*init_data_time*/,
    const bool /*can_be_refined*/,
    const bool /*initial_time*/,
    const Pointer<BasePatchLevel<NDIM> > old_level,
    const bool /*allocate_data*/)
{
    IBAMR_TIMER_START(t_initialize_level_data);
#ifdef DEBUG_CHECK_ASSERTIONS
    TBOX_ASSERT(!hierarchy.isNull());
    TBOX_ASSERT((level_number >= 0) &&
                (level_number <= hierarchy->getFinestLevelNumber()));
    if (!old_level.isNull())
    {
        TBOX_ASSERT(level_number == old_level->getLevelNumber());
    }
    TBOX_ASSERT(!(hierarchy->getPatchLevel(level_number)).isNull());
#else
    NULL_USE(hierarchy);
    NULL_USE(level_number);
    NULL_USE(old_level);
#endif
    // intentionally blank
    IBAMR_TIMER_STOP(t_initialize_level_data);
    return;
} // initializeLevelData
int SAMRAI_MPI::Send(
   void* buf,
   int count,
   Datatype datatype,
   int dest,
   int tag) const
{
#ifndef HAVE_MPI
   NULL_USE(buf);
   NULL_USE(count);
   NULL_USE(datatype);
   NULL_USE(dest);
   NULL_USE(tag);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Send is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Send(buf, count, datatype, dest, tag, d_comm);
   }
#endif
   return rval;
}
/*
 **************************************************************************
 *
 * Wrapper for MPI_Init().
 *
 **************************************************************************
 */
void SAMRAI_MPI::init(
   int* argc,
   char** argv[])
{
#ifdef HAVE_MPI
   MPI_Init(argc, argv);
   s_mpi_is_initialized = true;
   s_we_started_mpi = true;

   Comm dup_comm;
   MPI_Comm_dup(MPI_COMM_WORLD, &dup_comm);
   s_samrai_world.setCommunicator(dup_comm);
#else
   NULL_USE(argc);
   NULL_USE(argv);
   s_samrai_world.d_comm = MPI_COMM_WORLD;
   s_samrai_world.d_size = 1;
   s_samrai_world.d_rank = 0;
#endif

   if (getenv("SAMRAI_ABORT_ON_ERROR")) {
      SAMRAI_MPI::setCallAbortInSerialInsteadOfExit(true);
      SAMRAI_MPI::setCallAbortInParallelInsteadOfMPIAbort(true);
   }
}
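// Hedged usage sketch (not from the source): init() is normally the first
// SAMRAI call in main(), paired with the shutdown sequence at exit; the
// names below follow the tbox API as commonly used.
//
//    int main(int argc, char* argv[]) {
//       tbox::SAMRAI_MPI::init(&argc, &argv);
//       tbox::SAMRAIManager::initialize();
//       tbox::SAMRAIManager::startup();
//       // ... build and run the application ...
//       tbox::SAMRAIManager::shutdown();
//       tbox::SAMRAIManager::finalize();
//       tbox::SAMRAI_MPI::finalize();
//       return 0;
//    }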
/*************************************************************************
 * Time step component: compute and return a stable time step size on a
 * patch.
 *************************************************************************/
double MeshOpt::getPatchDt(hier::Patch<NDIM>& patch,
                           const double time,
                           const bool initial_time,
                           const int flag_last_dt,
                           const double last_dt,
                           const string& intc_name)
{
#ifdef DEBUG_CHECK_ASSERTIONS
    assert(intc_name == "TIME_STEP_SIZE");
#endif
    NULL_USE(time);
    NULL_USE(initial_time);
    NULL_USE(flag_last_dt);
    NULL_USE(last_dt);

    tbox::Pointer<pdat::NodeData<NDIM, double> > coords_current =
        patch.getPatchData(d_coords_current_id);

#ifdef DEBUG_CHECK_ASSERTIONS
    assert(!coords_current.isNull());
    // The original asserted on an undefined ghost_cells; take the ghost
    // width from the node data itself.
    const hier::IntVector<NDIM>& ghost_cells =
        coords_current->getGhostCellWidth();
    assert(ghost_cells == d_zeroghosts);
#endif

    // A fixed, conservative step size: mesh optimization imposes no CFL
    // restriction here.
    double stabdt = 0.1;
    return stabdt;
}
// Perturb the mesh node coordinates.
void MeshOpt::disturbMesh(hier::Patch<NDIM>& patch,
                          const double time,
                          const double dt,
                          const bool initial_time)
{
    NULL_USE(dt);
    NULL_USE(time);
    NULL_USE(initial_time);

    tbox::Pointer<pdat::NodeData<NDIM, double> > coords_current =
        patch.getPatchData(d_coords_current_id);
    tbox::Pointer<pdat::NodeData<NDIM, bool> > fixed_info =
        patch.getPatchData(d_fixed_info_id);

    int count = -1;
    double dist = 0.01;

    // Shift every non-fixed node by +/-dist, alternating the sign from node
    // to node.
    for (pdat::NodeIterator<NDIM> ic((*coords_current).getBox()); ic; ic++)
    {
        dist *= -1;
        if ((*fixed_info)(ic(), 0) == false)
        {
            (*coords_current)(ic(), 0) += dist;
            (*coords_current)(ic(), 1) -= dist;
        }
        ++count;
    }

    // Mark the mesh as perturbed.
    d_flag = 1;
}
void setArrayDataToSinusoidalGradient(
   int dim,
   double** g_ptr,
   const int* lower,
   const int* upper,
   const double* xlo,
   const double* xhi,
   const double* h)
{
   NULL_USE(xhi);
   if (dim == 2) {
      double* gx_ptr = g_ptr[0];
      MDA_Access<double, 2, MDA_OrderColMajor<2> > gx(gx_ptr, lower, upper);
      double* gy_ptr = g_ptr[1];
      MDA_Access<double, 2, MDA_OrderColMajor<2> > gy(gy_ptr, lower, upper);
      for (int j = lower[1]; j <= upper[1]; ++j) {
         double y = xlo[1] + h[1] * (j - lower[1] + 0.5);
         double siny = sin(2 * M_PI * y);
         double cosy = cos(2 * M_PI * y);
         for (int i = lower[0]; i <= upper[0]; ++i) {
            double x = xlo[0] + h[0] * (i - lower[0] + 0.5);
            double sinx = sin(2 * M_PI * x);
            double cosx = cos(2 * M_PI * x);
            gx(i, j) = 2 * M_PI * cosx * siny;
            gy(i, j) = sinx * 2 * M_PI * cosy;
         }
      }
   } else if (dim == 3) {
      double* gx_ptr = g_ptr[0];
      MDA_Access<double, 3, MDA_OrderColMajor<3> > gx(gx_ptr, lower, upper);
      double* gy_ptr = g_ptr[1];
      MDA_Access<double, 3, MDA_OrderColMajor<3> > gy(gy_ptr, lower, upper);
      double* gz_ptr = g_ptr[2];
      MDA_Access<double, 3, MDA_OrderColMajor<3> > gz(gz_ptr, lower, upper);
      for (int k = lower[2]; k <= upper[2]; ++k) {
         double z = xlo[2] + h[2] * (k - lower[2] + 0.5);
         double sinz = sin(2 * M_PI * z);
         double cosz = cos(2 * M_PI * z);
         for (int j = lower[1]; j <= upper[1]; ++j) {
            double y = xlo[1] + h[1] * (j - lower[1] + 0.5);
            double siny = sin(2 * M_PI * y);
            double cosy = cos(2 * M_PI * y);
            for (int i = lower[0]; i <= upper[0]; ++i) {
               double x = xlo[0] + h[0] * (i - lower[0] + 0.5);
               double sinx = sin(2 * M_PI * x);
               double cosx = cos(2 * M_PI * x);
               gx(i, j, k) = 2 * M_PI * cosx * siny * sinz;
               gy(i, j, k) = sinx * 2 * M_PI * cosy * sinz;
               gz(i, j, k) = sinx * cosy * 2 * M_PI * cosz;
            }
         }
      }
   }
}
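// The loops above sample the analytic gradient of
//    u(x,y)   = sin(2*pi*x) * sin(2*pi*y)                 (dim == 2)
//    u(x,y,z) = sin(2*pi*x) * sin(2*pi*y) * sin(2*pi*z)   (dim == 3)
// at cell centers x_i = xlo[0] + h[0]*(i - lower[0] + 0.5), e.g.
//    du/dx = 2*pi * cos(2*pi*x) * sin(2*pi*y) * sin(2*pi*z),
// and cyclically for du/dy and du/dz.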
void CVODEModel::preprocessRefine(
   Patch& fine,
   const Patch& coarse,
   const Box& fine_box,
   const IntVector& ratio)
{
   NULL_USE(fine);
   NULL_USE(coarse);
   NULL_USE(fine_box);
   NULL_USE(ratio);
}
void CVODEModel::postprocessCoarsen(
   Patch& coarse,
   const Patch& fine,
   const Box& coarse_box,
   const IntVector& ratio)
{
   NULL_USE(coarse);
   NULL_USE(fine);
   NULL_USE(coarse_box);
   NULL_USE(ratio);
}
void CVODEModel::resetHierarchyConfiguration(
   const std::shared_ptr<PatchHierarchy>& hierarchy,
   const int coarsest_level,
   const int finest_level)
{
   NULL_USE(hierarchy);
   NULL_USE(coarsest_level);
   NULL_USE(finest_level);

   // This method is empty because this example does not exercise the
   // situation where the grid changes, so it is effectively never called.
   // This is a subject for future work...
}
void CVODEModel::initializeLevelData(
   const std::shared_ptr<PatchHierarchy>& hierarchy,
   const int level_number,
   const double time,
   const bool can_be_refined,
   const bool initial_time,
   const std::shared_ptr<PatchLevel>& old_level,
   const bool allocate_data)
{
   NULL_USE(hierarchy);
   NULL_USE(level_number);
   NULL_USE(time);
   NULL_USE(can_be_refined);
   NULL_USE(initial_time);
   NULL_USE(old_level);
   NULL_USE(allocate_data);

   // This method is empty because initialization is taken care of by the
   // setInitialConditions() method below.  If there is any data that is not
   // managed inside the SAMRAI CVODESolver class but that must be set on
   // the level, do it here.
}
void setArrayDataTo(
   MDA_Access<double, 3, MDA_OrderColMajor<3> >& s,
   const int* lower,
   const int* upper,
   const double* xlo,
   const double* xhi,
   const double* h,
   const double* coef)
{
   NULL_USE(xhi);
   // Fall back to unit coefficients when none are supplied.
   const double ucoef[3] = { 1., 1., 1. };
   if (coef == 0) coef = ucoef;

   for (int k = lower[2]; k <= upper[2]; ++k) {
      double z = xlo[2] + h[2] * (k - lower[2] + 0.5);
      for (int j = lower[1]; j <= upper[1]; ++j) {
         double y = xlo[1] + h[1] * (j - lower[1] + 0.5);
         for (int i = lower[0]; i <= upper[0]; ++i) {
            double x = xlo[0] + h[0] * (i - lower[0] + 0.5);
            s(i, j, k) = coef[0] * x + coef[1] * y + coef[2] * z;
         }
      }
   }
}
void setArrayDataToLinear(
   MDA_Access<double, 3, MDA_OrderColMajor<3> >& s,
   const int* lower,
   const int* upper,
   const double* xlo,
   const double* xhi,
   const double* h,
   double a0, double ax, double ay, double az,
   double axy, double axz, double ayz, double axyz)
{
   NULL_USE(xhi);
   for (int k = lower[2]; k <= upper[2]; ++k) {
      double z = xlo[2] + h[2] * (k - lower[2] + 0.5);
      for (int j = lower[1]; j <= upper[1]; ++j) {
         double y = xlo[1] + h[1] * (j - lower[1] + 0.5);
         for (int i = lower[0]; i <= upper[0]; ++i) {
            double x = xlo[0] + h[0] * (i - lower[0] + 0.5);
            s(i, j, k) = a0 + ax * x + ay * y + az * z
               + axy * x * y + axz * x * z + ayz * y * z
               + axyz * x * y * z;
         }
      }
   }
}
int CoarsenClasses::getEquivalenceClassIndex(
   const CoarsenClasses::Data& data,
   const std::shared_ptr<hier::PatchDescriptor>& descriptor) const
{
   NULL_USE(descriptor);

   int eq_index = -1;
   bool class_found = false;
   int check_index = 0;
   while (!class_found && check_index < getNumberOfEquivalenceClasses()) {
      const CoarsenClasses::Data& class_rep =
         getClassRepresentative(check_index);
      class_found = itemsAreEquivalent(data, class_rep);
      if (class_found) {
         eq_index = check_index;
      }
      ++check_index;
   }

   return eq_index;
}
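// Coarsen items that itemsAreEquivalent() judges equivalent can share a
// single communication schedule.  The linear search above returns the index
// of the first matching equivalence class, or -1 if the item matches none.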
void setArrayDataToSinusoidal(
   MDA_Access<double, 3, MDA_OrderColMajor<3> >& s,
   const int* lower,
   const int* upper,
   const double* xlo,
   const double* xhi,
   const double* h,
   const double* npi,
   const double* ppi)
{
   NULL_USE(xhi);
   double nx = npi[0], px = ppi[0];
   double ny = npi[1], py = ppi[1];
   double nz = npi[2], pz = ppi[2];
   for (int k = lower[2]; k <= upper[2]; ++k) {
      double z = xlo[2] + h[2] * (k - lower[2] + 0.5);
      double sinz = sin(M_PI * (nz * z + pz));
      for (int j = lower[1]; j <= upper[1]; ++j) {
         double y = xlo[1] + h[1] * (j - lower[1] + 0.5);
         double siny = sin(M_PI * (ny * y + py));
         for (int i = lower[0]; i <= upper[0]; ++i) {
            double x = xlo[0] + h[0] * (i - lower[0] + 0.5);
            double sinx = sin(M_PI * (nx * x + px));
            s(i, j, k) = sinx * siny * sinz;
         }
      }
   }
}
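// The npi/ppi arrays carry per-direction frequencies n and phases p in units
// of pi, so the field set above is
//    s(x,y,z) = sin(pi*(nx*x + px)) * sin(pi*(ny*y + py)) * sin(pi*(nz*z + pz)),
// sampled at cell centers.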
void PatchLevelEnhancedFillPattern::computeDestinationFillBoxesOnSourceProc(
   FillSet& dst_fill_boxes_on_src_proc,
   const hier::BoxLevel& dst_box_level,
   const hier::Connector& src_to_dst,
   const hier::IntVector& fill_ghost_width)
{
   NULL_USE(dst_box_level);
   NULL_USE(src_to_dst);
   NULL_USE(fill_ghost_width);
   NULL_USE(dst_fill_boxes_on_src_proc);

   if (!needsToCommunicateDestinationFillBoxes()) {
      TBOX_ERROR(
         "PatchLevelEnhancedFillPattern cannot compute destination\n"
         << "fill boxes on the source processor.\n");
   }
}
int SAMRAI_MPI::Gather(
   void* sendbuf,
   int sendcount,
   Datatype sendtype,
   void* recvbuf,
   int recvcount,
   Datatype recvtype,
   int root) const
{
#ifndef HAVE_MPI
   NULL_USE(sendbuf);
   NULL_USE(sendcount);
   NULL_USE(sendtype);
   NULL_USE(recvbuf);
   NULL_USE(recvcount);
   NULL_USE(recvtype);
   NULL_USE(root);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Gather is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
            recvtype, root, d_comm);
   }
#endif
   return rval;
}
void INSStaggeredCenteredConvectiveOperator::initializeOperatorState(
    const SAMRAIVectorReal<NDIM, double>& in,
    const SAMRAIVectorReal<NDIM, double>& out)
{
    IBAMR_TIMER_START(t_initialize_operator_state);

    if (d_is_initialized) deallocateOperatorState();

    // Get the hierarchy configuration.
    d_hierarchy = in.getPatchHierarchy();
    d_coarsest_ln = in.getCoarsestLevelNumber();
    d_finest_ln = in.getFinestLevelNumber();
#if !defined(NDEBUG)
    TBOX_ASSERT(d_hierarchy == out.getPatchHierarchy());
    TBOX_ASSERT(d_coarsest_ln == out.getCoarsestLevelNumber());
    TBOX_ASSERT(d_finest_ln == out.getFinestLevelNumber());
#else
    NULL_USE(out);
#endif

    // Setup the interpolation transaction information.
    typedef HierarchyGhostCellInterpolation::InterpolationTransactionComponent
        InterpolationTransactionComponent;
    d_transaction_comps.resize(1);
    d_transaction_comps[0] =
        InterpolationTransactionComponent(d_U_scratch_idx,
                                          in.getComponentDescriptorIndex(0),
                                          "CONSERVATIVE_LINEAR_REFINE",
                                          false,
                                          "CONSERVATIVE_COARSEN",
                                          d_bdry_extrap_type,
                                          false,
                                          d_bc_coefs);

    // Initialize the interpolation operators.
    d_hier_bdry_fill = new HierarchyGhostCellInterpolation();
    d_hier_bdry_fill->initializeOperatorState(d_transaction_comps, d_hierarchy);

    // Initialize the BC helper.
    d_bc_helper = new StaggeredStokesPhysicalBoundaryHelper();
    d_bc_helper->cacheBcCoefData(d_bc_coefs, d_solution_time, d_hierarchy);

    // Allocate scratch data.
    for (int ln = d_coarsest_ln; ln <= d_finest_ln; ++ln)
    {
        Pointer<PatchLevel<NDIM> > level = d_hierarchy->getPatchLevel(ln);
        if (!level->checkAllocated(d_U_scratch_idx))
        {
            level->allocatePatchData(d_U_scratch_idx);
        }
    }

    d_is_initialized = true;

    IBAMR_TIMER_STOP(t_initialize_operator_state);
    return;
} // initializeOperatorState
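// Hedged usage sketch (not from the source): one plausible lifecycle for
// this operator, assuming the standard IBAMR GeneralOperator interface
// (apply()/deallocateOperatorState()) that this class implements.
//
//    convective_op->initializeOperatorState(*U_vec, *N_vec);
//    convective_op->apply(*U_vec, *N_vec);   // evaluate the convective term
//    convective_op->deallocateOperatorState();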
int SAMRAI_MPI::Test_cancelled(
   Status* status,
   int* flag)
{
#ifndef HAVE_MPI
   NULL_USE(status);
   NULL_USE(flag);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Test_cancelled is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Test_cancelled(status, flag);
   }
#endif
   return rval;
}
int SAMRAI_MPI::Wait(
   Request* request,
   Status* status)
{
#ifndef HAVE_MPI
   NULL_USE(request);
   NULL_USE(status);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Wait is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Wait(request, status);
   }
#endif
   return rval;
}
void AdvDiffCenteredConvectiveOperator::initializeOperatorState(
    const SAMRAIVectorReal<NDIM, double>& in,
    const SAMRAIVectorReal<NDIM, double>& out)
{
    IBAMR_TIMER_START(t_initialize_operator_state);

    if (d_is_initialized) deallocateOperatorState();

    // Get the hierarchy configuration.
    d_hierarchy = in.getPatchHierarchy();
    d_coarsest_ln = in.getCoarsestLevelNumber();
    d_finest_ln = in.getFinestLevelNumber();
#if !defined(NDEBUG)
    TBOX_ASSERT(d_hierarchy == out.getPatchHierarchy());
    TBOX_ASSERT(d_coarsest_ln == out.getCoarsestLevelNumber());
    TBOX_ASSERT(d_finest_ln == out.getFinestLevelNumber());
#else
    NULL_USE(out);
#endif
    Pointer<CartesianGridGeometry<NDIM> > grid_geom = d_hierarchy->getGridGeometry();

    // Setup the coarsen algorithm, operator, and schedules.
    Pointer<CoarsenOperator<NDIM> > coarsen_op =
        grid_geom->lookupCoarsenOperator(d_q_flux_var, "CONSERVATIVE_COARSEN");
    d_coarsen_alg = new CoarsenAlgorithm<NDIM>();
    if (d_difference_form == ADVECTIVE || d_difference_form == SKEW_SYMMETRIC)
        d_coarsen_alg->registerCoarsen(d_q_extrap_idx, d_q_extrap_idx, coarsen_op);
    if (d_difference_form == CONSERVATIVE || d_difference_form == SKEW_SYMMETRIC)
        d_coarsen_alg->registerCoarsen(d_q_flux_idx, d_q_flux_idx, coarsen_op);
    d_coarsen_scheds.resize(d_finest_ln + 1);
    for (int ln = d_coarsest_ln + 1; ln <= d_finest_ln; ++ln)
    {
        Pointer<PatchLevel<NDIM> > level = d_hierarchy->getPatchLevel(ln);
        Pointer<PatchLevel<NDIM> > coarser_level = d_hierarchy->getPatchLevel(ln - 1);
        d_coarsen_scheds[ln] = d_coarsen_alg->createSchedule(coarser_level, level);
    }

    // Setup the refine algorithm, operator, patch strategy, and schedules.
    Pointer<RefineOperator<NDIM> > refine_op =
        grid_geom->lookupRefineOperator(d_Q_var, "CONSERVATIVE_LINEAR_REFINE");
    d_ghostfill_alg = new RefineAlgorithm<NDIM>();
    d_ghostfill_alg->registerRefine(d_Q_scratch_idx,
                                    in.getComponentDescriptorIndex(0),
                                    d_Q_scratch_idx,
                                    refine_op);
    if (d_outflow_bdry_extrap_type != "NONE")
        d_ghostfill_strategy =
            new CartExtrapPhysBdryOp(d_Q_scratch_idx, d_outflow_bdry_extrap_type);
    d_ghostfill_scheds.resize(d_finest_ln + 1);
    for (int ln = d_coarsest_ln; ln <= d_finest_ln; ++ln)
    {
        Pointer<PatchLevel<NDIM> > level = d_hierarchy->getPatchLevel(ln);
        d_ghostfill_scheds[ln] = d_ghostfill_alg->createSchedule(
            level, ln - 1, d_hierarchy, d_ghostfill_strategy);
    }

    d_is_initialized = true;

    IBAMR_TIMER_STOP(t_initialize_operator_state);
    return;
} // initializeOperatorState
void setArrayDataToConstant(
   MDA_Access<double, 2, MDA_OrderColMajor<2> >& s,
   const int* lower,
   const int* upper,
   const double* xlo,
   const double* xhi,
   const double* h,
   double value)
{
   NULL_USE(xlo);
   NULL_USE(xhi);
   NULL_USE(h);
   for (int j = lower[1]; j <= upper[1]; ++j) {
      for (int i = lower[0]; i <= upper[0]; ++i) {
         s(i, j) = value;
      }
   }
}
int SAMRAI_MPI::Recv(
   void* buf,
   int count,
   Datatype datatype,
   int source,
   int tag,
   Status* status) const
{
#ifndef HAVE_MPI
   NULL_USE(buf);
   NULL_USE(count);
   NULL_USE(datatype);
   NULL_USE(source);
   NULL_USE(tag);
   NULL_USE(status);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Recv is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Recv(buf, count, datatype, source, tag, d_comm, status);
   }
#endif
   return rval;
}
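// Hedged usage sketch (not from the source): a blocking point-to-point
// exchange between ranks 0 and 1 using the Send()/Recv() wrappers above.
//
//    const tbox::SAMRAI_MPI& mpi = tbox::SAMRAI_MPI::getSAMRAIWorld();
//    int payload = 42;
//    if (mpi.getRank() == 0) {
//       mpi.Send(&payload, 1, MPI_INT, /* dest */ 1, /* tag */ 0);
//    } else if (mpi.getRank() == 1) {
//       tbox::SAMRAI_MPI::Status status;
//       mpi.Recv(&payload, 1, MPI_INT, /* source */ 0, /* tag */ 0, &status);
//    }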
int SAMRAI_MPI::Attr_get(
   int keyval,
   void* attribute_val,
   int* flag) const
{
#ifndef HAVE_MPI
   NULL_USE(keyval);
   NULL_USE(attribute_val);
   NULL_USE(flag);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Attr_get is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Attr_get(d_comm, keyval, attribute_val, flag);
   }
#endif
   return rval;
}
int SAMRAI_MPI::Comm_compare(
   Comm comm1,
   Comm comm2,
   int* result)
{
#ifndef HAVE_MPI
   NULL_USE(comm1);
   NULL_USE(comm2);
   NULL_USE(result);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Comm_compare is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Comm_compare(comm1, comm2, result);
   }
#endif
   return rval;
}
int SAMRAI_MPI::Get_count(
   Status* status,
   Datatype datatype,
   int* count)
{
#ifndef HAVE_MPI
   NULL_USE(status);
   NULL_USE(datatype);
   NULL_USE(count);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Get_count is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Get_count(status, datatype, count);
   }
#endif
   return rval;
}