void
IBHierarchyIntegrator::getFromRestart()
{
    Pointer<Database> restart_db = RestartManager::getManager()->getRootDatabase();
    Pointer<Database> db;
    if (restart_db->isDatabase(d_object_name))
    {
        db = restart_db->getDatabase(d_object_name);
    }
    else
    {
        TBOX_ERROR(d_object_name << ": Restart database corresponding to " << d_object_name
                                 << " not found in restart file." << std::endl);
    }
    int ver = db->getInteger("IB_HIERARCHY_INTEGRATOR_VERSION");
    if (ver != IB_HIERARCHY_INTEGRATOR_VERSION)
    {
        TBOX_ERROR(d_object_name << ": Restart file version different than class version." << std::endl);
    }
    d_time_stepping_type = string_to_enum<TimeSteppingType>(db->getString("d_time_stepping_type"));
    d_regrid_cfl_interval = db->getDouble("d_regrid_cfl_interval");
    d_regrid_cfl_estimate = db->getDouble("d_regrid_cfl_estimate");
    return;
} // getFromRestart
void
HierarchyProjector::getFromRestart()
{
    Pointer<Database> restart_db = RestartManager::getManager()->getRootDatabase();
    Pointer<Database> db;
    if (restart_db->isDatabase(d_object_name))
    {
        db = restart_db->getDatabase(d_object_name);
    }
    else
    {
        TBOX_ERROR(d_object_name << ": \n"
                   << "Restart database corresponding to " << d_object_name
                   << " not found in restart file.");
    }
    int ver = db->getInteger("HIERARCHY_PROJECTOR_VERSION");
    if (ver != HIERARCHY_PROJECTOR_VERSION)
    {
        TBOX_ERROR(d_object_name << ": "
                   << "Restart file version different than class version.");
    }
    return;
} // getFromRestart
void
GeneralizedIBMethod::interpolateVelocity(const int u_data_idx,
                                         const std::vector<Pointer<CoarsenSchedule<NDIM> > >& u_synch_scheds,
                                         const std::vector<Pointer<RefineSchedule<NDIM> > >& u_ghost_fill_scheds,
                                         const double data_time)
{
    // Interpolate the linear velocities.
    IBMethod::interpolateVelocity(u_data_idx, u_synch_scheds, u_ghost_fill_scheds, data_time);

    // Interpolate the angular velocities.
    std::vector<Pointer<LData> >* W_data = NULL;
    if (MathUtilities<double>::equalEps(data_time, d_current_time))
    {
        W_data = &d_W_current_data;
    }
    else if (MathUtilities<double>::equalEps(data_time, d_half_time))
    {
        TBOX_ERROR(d_object_name << "::interpolateVelocity():\n"
                   << "  time-stepping type MIDPOINT_RULE not supported by class GeneralizedIBMethod;\n"
                   << "  use TRAPEZOIDAL_RULE instead.\n");
    }
    else if (MathUtilities<double>::equalEps(data_time, d_new_time))
    {
        W_data = &d_W_new_data;
    }
    Pointer<Variable<NDIM> > u_var = d_ib_solver->getVelocityVariable();
    Pointer<CellVariable<NDIM, double> > u_cc_var = u_var;
    Pointer<SideVariable<NDIM, double> > u_sc_var = u_var;
    if (u_cc_var)
    {
        Pointer<CellVariable<NDIM, double> > w_cc_var = d_w_var;
        getHierarchyMathOps()->curl(d_w_idx, w_cc_var, u_data_idx, u_cc_var, NULL, data_time);
    }
    else if (u_sc_var)
    {
        Pointer<SideVariable<NDIM, double> > w_sc_var = d_w_var;
        getHierarchyMathOps()->curl(d_w_idx, w_sc_var, u_data_idx, u_sc_var, NULL, data_time);
    }
    else
    {
        TBOX_ERROR(d_object_name << "::interpolateVelocity():\n"
                   << "  unsupported velocity data centering" << std::endl);
    }
    std::vector<Pointer<LData> >* X_LE_data;
    bool* X_LE_needs_ghost_fill;
    getLECouplingPositionData(&X_LE_data, &X_LE_needs_ghost_fill, data_time);
    getVelocityHierarchyDataOps()->scale(d_w_idx, 0.5, d_w_idx);
    d_l_data_manager->interp(d_w_idx,
                             *W_data,
                             *X_LE_data,
                             std::vector<Pointer<CoarsenSchedule<NDIM> > >(),
                             getGhostfillRefineSchedules(d_object_name + "::w"),
                             data_time);
    resetAnchorPointValues(*W_data, /*coarsest_ln*/ 0, /*finest_ln*/ d_hierarchy->getFinestLevelNumber());
    return;
} // interpolateVelocity
/*
 *************************************************************************
 *
 * Read data from restart database.
 *
 *************************************************************************
 */
void CVODEModel::getFromRestart()
{
    std::shared_ptr<Database> root_db(
        RestartManager::getManager()->getRootDatabase());

    if (!root_db->isDatabase(d_object_name))
    {
        TBOX_ERROR("Restart database corresponding to " << d_object_name
                                                        << " not found in the restart file.");
    }
    std::shared_ptr<Database> db(root_db->getDatabase(d_object_name));

    int ver = db->getInteger("CVODE_MODEL_VERSION");
    if (ver != CVODE_MODEL_VERSION)
    {
        TBOX_ERROR(d_object_name << ": "
                                 << "Restart file version different than class version.");
    }

    d_initial_value = db->getDouble("d_initial_value");

    d_scalar_bdry_edge_conds = db->getIntegerVector("d_scalar_bdry_edge_conds");
    d_scalar_bdry_node_conds = db->getIntegerVector("d_scalar_bdry_node_conds");

    if (d_dim == Dimension(2))
    {
        d_bdry_edge_val = db->getDoubleVector("d_bdry_edge_val");
    }
    else if (d_dim == Dimension(3))
    {
        d_scalar_bdry_face_conds = db->getIntegerVector("d_scalar_bdry_face_conds");
        d_bdry_face_val = db->getDoubleVector("d_bdry_face_val");
    }
}
void Stokes::FACOps::xeqScheduleRRestriction(int p_dst, int p_src,
                                             int v_dst, int v_src,
                                             int dest_ln)
{
    /* p */
    {
        if (!p_rrestriction_coarsen_schedules[dest_ln])
        {
            TBOX_ERROR("Expected schedule not found.");
        }
        SAMRAI::xfer::CoarsenAlgorithm coarsener(d_dim);
        coarsener.registerCoarsen(p_dst, p_src, p_rrestriction_coarsen_operator);
        coarsener.resetSchedule(p_rrestriction_coarsen_schedules[dest_ln]);
        p_rrestriction_coarsen_schedules[dest_ln]->coarsenData();
    }
    /* v */
    {
        if (!v_rrestriction_coarsen_schedules[dest_ln])
        {
            TBOX_ERROR("Expected schedule not found.");
        }
        SAMRAI::xfer::CoarsenAlgorithm coarsener(d_dim);
        coarsener.registerCoarsen(v_dst, v_src, v_rrestriction_coarsen_operator);
        coarsener.resetSchedule(v_rrestriction_coarsen_schedules[dest_ln]);
        v_rrestriction_coarsen_schedules[dest_ln]->coarsenData();
    }
}
bool CoarsenClasses::itemIsValid(
    const CoarsenClasses::Data& data_item,
    const std::shared_ptr<hier::PatchDescriptor>& descriptor) const
{
    bool item_good = true;

    std::shared_ptr<hier::PatchDescriptor> pd(descriptor);
    if (!pd)
    {
        pd = hier::VariableDatabase::getDatabase()->getPatchDescriptor();
    }

    const tbox::Dimension& dim = pd->getPatchDataFactory(data_item.d_dst)->getDim();

    const int dst_id = data_item.d_dst;
    const int src_id = data_item.d_src;

    if (dst_id < 0)
    {
        item_good = false;
        TBOX_ERROR("Bad data given to CoarsenClasses...\n"
                   << "`Destination' patch data id invalid (< 0!)" << std::endl);
    }
    if (item_good && (src_id < 0))
    {
        item_good = false;
        TBOX_ERROR("Bad data given to CoarsenClasses...\n"
                   << "`Source' patch data id invalid (< 0!)" << std::endl);
    }

    std::shared_ptr<hier::PatchDataFactory> dfact(pd->getPatchDataFactory(dst_id));
    std::shared_ptr<hier::PatchDataFactory> sfact(pd->getPatchDataFactory(src_id));

    if (item_good && !(sfact->validCopyTo(dfact)))
    {
        item_good = false;
        TBOX_ERROR("Bad data given to CoarsenClasses...\n"
                   << "It is not a valid operation to copy from `Source' patch data \n"
                   << pd->mapIndexToName(src_id) << " to `Destination' patch data "
                   << pd->mapIndexToName(dst_id) << std::endl);
    }

    std::shared_ptr<hier::CoarsenOperator> coarsop(data_item.d_opcoarsen);
    if (item_good && coarsop)
    {
        if (coarsop->getStencilWidth(dim) > sfact->getGhostCellWidth())
        {
            item_good = false;
            TBOX_ERROR("Bad data given to CoarsenClasses...\n"
                       << "Coarsen operator " << coarsop->getOperatorName()
                       << "\nhas larger stencil width than ghost cell width "
                       << "of `Source' patch data " << pd->mapIndexToName(src_id)
                       << "\noperator stencil width = " << coarsop->getStencilWidth(dim)
                       << "\n`Source' ghost width = " << sfact->getGhostCellWidth()
                       << std::endl);
        }
    }

    return item_good;
}
/*
 *******************************************************************
 * Remove mutual reference between Member and the stage.
 * 1. Confirm that the Member is currently on the stage
 *    (or else it is an illegal operation).
 * 2. Remove mutual references.
 * 3. Reduce the stage data size by skimming unused space
 *    off the ends of arrays, if possible.
 *******************************************************************
 */
void AsyncCommStage::privateDestageMember(Member* member)
{
    if (member->hasPendingRequests())
    {
        TBOX_ERROR("Cannot clear a Member with pending communications.\n"
                   << "It would corrupt message passing algorithms.\n");
    }
#ifdef DEBUG_CHECK_ASSERTIONS
    assertDataConsistency();
#endif

    if (getMember(member->d_index_on_stage) != member)
    {
        /*
         * Member was not staged with this AsyncCommStage.  Since staging
         * and destaging are private methods, there must be some logic
         * bug in the library.
         */
        TBOX_ERROR("Library error: An AsyncCommStage cannot destage a Member\n"
                   << "that was not staged with it." << std::endl);
    }

    d_members[member->d_index_on_stage] = 0;
    --d_member_count;

    /*
     * Remove the ends of the arrays as much as possible without
     * shifting arrays.  (This is only possible if member is at the
     * end.)
     */
    size_t min_required_len = d_members.size();
    while (min_required_len > 0 && d_members[min_required_len - 1] == 0)
    {
        --min_required_len;
    }
    if (min_required_len != d_members.size())
    {
        d_members.resize(min_required_len, 0);
        d_member_to_req.resize(d_members.size() + 1, size_t(MathUtilities<int>::getMax()));
        const size_t new_num_req = d_member_to_req[d_member_to_req.size() - 1];
        d_req_to_member.resize(new_num_req, size_t(MathUtilities<int>::getMax()));
        d_req.resize(new_num_req, MPI_REQUEST_NULL);
    }

    member->d_nreq = member->d_index_on_stage = size_t(MathUtilities<int>::getMax());
    member->d_stage = 0;
    member->d_handler = 0;

#ifdef DEBUG_CHECK_ASSERTIONS
    assertDataConsistency();
#endif
}
void LocationIndexRobinBcCoefs::getFromInput(const std::shared_ptr<tbox::Database>& input_db)
{
    if (!input_db)
    {
        return;
    }
    for (int i = 0; i < 2 * d_dim.getValue(); ++i)
    {
        std::string name = "boundary_" + tbox::Utilities::intToString(i);
        if (input_db->isString(name))
        {
            d_a_map[i] = 1.0;
            d_g_map[i] = 0.0;
            std::vector<std::string> specs = input_db->getStringVector(name);
            if (specs[0] == "value")
            {
                d_a_map[i] = 1.0;
                d_b_map[i] = 0.0;
                if (specs.size() != 2)
                {
                    TBOX_ERROR("LocationIndexRobinBcCoefs::getFromInput error...\n"
                               << "exactly 1 value needed with \"value\" boundary specifier"
                               << std::endl);
                }
                else
                {
                    d_g_map[i] = atof(specs[1].c_str());
                }
            }
            else if (specs[0] == "slope")
            {
                d_a_map[i] = 0.0;
                d_b_map[i] = 1.0;
                if (specs.size() != 2)
                {
                    TBOX_ERROR("LocationIndexRobinBcCoefs::getFromInput error...\n"
                               << "exactly 1 value needed with \"slope\" boundary specifier"
                               << std::endl);
                }
                else
                {
                    d_g_map[i] = atof(specs[1].c_str());
                }
            }
            else if (specs[0] == "coefficients")
            {
                if (specs.size() != 3)
                {
                    TBOX_ERROR("LocationIndexRobinBcCoefs::getFromInput error...\n"
                               << "exactly 2 values needed with \"coefficients\" boundary specifier"
                               << std::endl);
                }
                else
                {
                    d_a_map[i] = atof(specs[1].c_str());
                    d_b_map[i] = atof(specs[2].c_str());
                }
            }
            else
            {
                TBOX_ERROR(d_object_name << ": Bad boundary specifier\n"
                           << "'" << specs[0] << "'. Use either 'value'\n"
                           << "'slope' or 'coefficients'.\n");
            }
        }
        else
        {
            TBOX_ERROR(d_object_name << ": Missing boundary specifier.\n");
        }
    }
}
void
GeneralizedIBMethod::registerEulerianVariables()
{
    IBMethod::registerEulerianVariables();

    const IntVector<NDIM> ib_ghosts = getMinimumGhostCellWidth();
    const IntVector<NDIM> ghosts = 1;
    const IntVector<NDIM> no_ghosts = 0;

    Pointer<Variable<NDIM> > u_var = d_ib_solver->getVelocityVariable();
    Pointer<CellVariable<NDIM, double> > u_cc_var = u_var;
    Pointer<SideVariable<NDIM, double> > u_sc_var = u_var;
    if (u_cc_var)
    {
        d_f_var = new CellVariable<NDIM, double>(d_object_name + "::f", NDIM);
        d_w_var = new CellVariable<NDIM, double>(d_object_name + "::w", NDIM);
        d_n_var = new CellVariable<NDIM, double>(d_object_name + "::n", NDIM);
    }
    else if (u_sc_var)
    {
        d_f_var = new SideVariable<NDIM, double>(d_object_name + "::f");
        d_w_var = new SideVariable<NDIM, double>(d_object_name + "::w");
        d_n_var = new SideVariable<NDIM, double>(d_object_name + "::n");
    }
    else
    {
        TBOX_ERROR(d_object_name << "::registerEulerianVariables():\n"
                   << "  unsupported velocity data centering" << std::endl);
    }
    registerVariable(d_f_idx, d_f_var, no_ghosts, d_ib_solver->getScratchContext());
    registerVariable(d_w_idx, d_w_var, ib_ghosts, d_ib_solver->getScratchContext());
    registerVariable(d_n_idx, d_n_var, ghosts, d_ib_solver->getScratchContext());
    return;
} // registerEulerianVariables
int SAMRAI_MPI::Sendrecv(
    void* sendbuf, int sendcount, Datatype sendtype, int dest, int sendtag,
    void* recvbuf, int recvcount, Datatype recvtype, int source, int recvtag,
    Status* status) const
{
#ifndef HAVE_MPI
    NULL_USE(sendbuf);
    NULL_USE(sendcount);
    NULL_USE(sendtype);
    NULL_USE(dest);
    NULL_USE(sendtag);
    NULL_USE(recvbuf);
    NULL_USE(recvcount);
    NULL_USE(recvtype);
    NULL_USE(source);
    NULL_USE(recvtag);
    NULL_USE(status);
#endif
    int rval = MPI_SUCCESS;
    if (!s_mpi_is_initialized)
    {
        TBOX_ERROR("SAMRAI_MPI::Sendrecv is a no-op without run-time MPI!");
    }
#ifdef HAVE_MPI
    else
    {
        rval = MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, sendtag,
                            recvbuf, recvcount, recvtype, source, recvtag,
                            d_comm, status);
    }
#endif
    return rval;
}
void
QInit::getFromInput(Pointer<Database> db)
{
    if (db)
    {
        if (db->keyExists("X"))
        {
            db->getDoubleArray("X", d_X.data(), NDIM);
        }
        d_init_type = db->getStringWithDefault("init_type", d_init_type);
        if (d_init_type == "GAUSSIAN")
        {
            d_gaussian_kappa = db->getDoubleWithDefault("kappa", d_gaussian_kappa);
        }
        else if (d_init_type == "ZALESAK")
        {
            d_zalesak_r = db->getDoubleWithDefault("zalesak_r", d_zalesak_r);
            d_zalesak_slot_w = db->getDoubleWithDefault("zalesak_slot_w", d_zalesak_slot_w);
            d_zalesak_slot_l = db->getDoubleWithDefault("zalesak_slot_l", d_zalesak_slot_l);
        }
        else
        {
            TBOX_ERROR(d_object_name << "::getFromInput()\n"
                       << "  invalid initialization type " << d_init_type << "\n");
        }
    }
    return;
} // getFromInput
int SAMRAI_MPI::Gather(
    void* sendbuf, int sendcount, Datatype sendtype,
    void* recvbuf, int recvcount, Datatype recvtype,
    int root) const
{
#ifndef HAVE_MPI
    NULL_USE(sendbuf);
    NULL_USE(sendcount);
    NULL_USE(sendtype);
    NULL_USE(recvbuf);
    NULL_USE(recvcount);
    NULL_USE(recvtype);
    NULL_USE(root);
#endif
    int rval = MPI_SUCCESS;
    if (!s_mpi_is_initialized)
    {
        TBOX_ERROR("SAMRAI_MPI::Gather is a no-op without run-time MPI!");
    }
#ifdef HAVE_MPI
    else
    {
        rval = MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, d_comm);
    }
#endif
    return rval;
}
int SAMRAI_MPI::Waitsome(
    int incount,
    Request* array_of_requests,
    int* outcount,
    int* array_of_indices,
    Status* array_of_statuses)
{
#ifndef HAVE_MPI
    NULL_USE(incount);
    NULL_USE(array_of_requests);
    NULL_USE(outcount);
    NULL_USE(array_of_indices);
    NULL_USE(array_of_statuses);
#endif
    int rval = MPI_SUCCESS;
    if (!s_mpi_is_initialized)
    {
        TBOX_ERROR("SAMRAI_MPI::Waitsome is a no-op without run-time MPI!");
    }
#ifdef HAVE_MPI
    else
    {
        rval = MPI_Waitsome(incount, array_of_requests, outcount, array_of_indices, array_of_statuses);
    }
#endif
    return rval;
}
int SAMRAI_MPI::Allreduce(
    void* sendbuf,
    void* recvbuf,
    int count,
    Datatype datatype,
    Op op) const
{
#ifndef HAVE_MPI
    NULL_USE(sendbuf);
    NULL_USE(recvbuf);
    NULL_USE(count);
    NULL_USE(datatype);
    NULL_USE(op);
#endif
    int rval = MPI_SUCCESS;
    if (!s_mpi_is_initialized)
    {
        TBOX_ERROR("SAMRAI_MPI::Allreduce is a no-op without run-time MPI!");
    }
#ifdef HAVE_MPI
    else
    {
        rval = MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, d_comm);
    }
#endif
    return rval;
}
int SAMRAI_MPI::Recv(
    void* buf,
    int count,
    Datatype datatype,
    int source,
    int tag,
    Status* status) const
{
#ifndef HAVE_MPI
    NULL_USE(buf);
    NULL_USE(count);
    NULL_USE(datatype);
    NULL_USE(source);
    NULL_USE(tag);
    NULL_USE(status);
#endif
    int rval = MPI_SUCCESS;
    if (!s_mpi_is_initialized)
    {
        TBOX_ERROR("SAMRAI_MPI::Recv is a no-op without run-time MPI!");
    }
#ifdef HAVE_MPI
    else
    {
        rval = MPI_Recv(buf, count, datatype, source, tag, d_comm, status);
    }
#endif
    return rval;
}
double
Wall::applyForce(double wall_distance, double /*eval_time*/)
{
    // Apply forces from this wall.  For now, time dependence is not implemented.
    int sgn = (wall_distance > 0.0) ? 1 : ((wall_distance < 0.0) ? -1 : 0);

    if (sgn == 0)
    {
        TBOX_ERROR("Particle is on the wall, must be inside the domain");
        return 0.0;
    }
    else
    {
        // Make sure we don't apply forces to particles that moved out of the
        // wall area, but haven't been regridded yet.
        if (std::abs(wall_distance) < d_force_distance)
        {
            return sgn * (d_wall_force_fcn)(std::abs(wall_distance), d_parameters);
        }
        else
        {
            return 0.0;
        }
    }
} // applyForce
PetscErrorCode
VecNorm_local_SAMRAI(Vec x, NormType type, PetscScalar* val)
{
    IBTK_TIMER_START(t_vec_norm_local);
#if !defined(NDEBUG)
    TBOX_ASSERT(x);
#endif
    static const bool local_only = true;
    if (type == NORM_1)
    {
        *val = NormOps::L1Norm(PSVR_CAST2(x), local_only);
    }
    else if (type == NORM_2)
    {
        *val = NormOps::L2Norm(PSVR_CAST2(x), local_only);
    }
    else if (type == NORM_INFINITY)
    {
        *val = NormOps::maxNorm(PSVR_CAST2(x), local_only);
    }
    else if (type == NORM_1_AND_2)
    {
        val[0] = NormOps::L1Norm(PSVR_CAST2(x), local_only);
        val[1] = NormOps::L2Norm(PSVR_CAST2(x), local_only);
    }
    else
    {
        TBOX_ERROR("PETScSAMRAIVectorReal::norm()\n"
                   << "  vector norm type " << static_cast<int>(type) << " unsupported"
                   << std::endl);
    }
    IBTK_TIMER_STOP(t_vec_norm_local);
    PetscFunctionReturn(0);
} // VecNorm_local
int SAMRAI_MPI::Isend(
    void* buf,
    int count,
    Datatype datatype,
    int dest,
    int tag,
    Request* req) const
{
#ifndef HAVE_MPI
    NULL_USE(buf);
    NULL_USE(count);
    NULL_USE(datatype);
    NULL_USE(dest);
    NULL_USE(tag);
    NULL_USE(req);
#endif
    int rval = MPI_SUCCESS;
    if (!s_mpi_is_initialized)
    {
        TBOX_ERROR("SAMRAI_MPI::Isend is a no-op without run-time MPI!");
    }
#ifdef HAVE_MPI
    else
    {
        rval = MPI_Isend(buf, count, datatype, dest, tag, d_comm, req);
    }
#endif
    return rval;
}
void
CartCellRobinPhysBdryOp::fillGhostCellValuesCodim2(const int patch_data_idx,
                                                   const Array<BoundaryBox<NDIM> >& physical_codim2_boxes,
                                                   const IntVector<NDIM>& ghost_width_to_fill,
                                                   const Patch<NDIM>& patch,
                                                   const bool adjoint_op)
{
    const int n_physical_codim2_boxes = physical_codim2_boxes.size();
    if (n_physical_codim2_boxes == 0) return;

    const Box<NDIM>& patch_box = patch.getBox();
    Pointer<CartesianPatchGeometry<NDIM> > pgeom = patch.getPatchGeometry();
    Pointer<CellData<NDIM, double> > patch_data = patch.getPatchData(patch_data_idx);
    const int patch_data_depth = patch_data->getDepth();
    const int patch_data_gcw = (patch_data->getGhostCellWidth()).max();
#if !defined(NDEBUG)
    if (patch_data_gcw != (patch_data->getGhostCellWidth()).min())
    {
        TBOX_ERROR("CartCellRobinPhysBdryOp::fillGhostCellValuesCodim2():\n"
                   "  patch data for patch data index "
                   << patch_data_idx << " does not have uniform ghost cell widths." << std::endl);
    }
#endif
    const IntVector<NDIM> gcw_to_fill = IntVector<NDIM>::min(patch_data->getGhostCellWidth(), ghost_width_to_fill);

    for (int n = 0; n < n_physical_codim2_boxes; ++n)
    {
        const BoundaryBox<NDIM>& bdry_box = physical_codim2_boxes[n];
        const unsigned int location_index = bdry_box.getLocationIndex();
        const Box<NDIM> bc_fill_box = pgeom->getBoundaryFillBox(bdry_box, patch_box, gcw_to_fill);
        for (int d = 0; d < patch_data_depth; ++d)
        {
            CC_ROBIN_PHYS_BDRY_OP_2_FC(patch_data->getPointer(d),
                                       patch_data_gcw,
                                       location_index,
                                       patch_box.lower(0),
                                       patch_box.upper(0),
                                       patch_box.lower(1),
                                       patch_box.upper(1),
#if (NDIM == 3)
                                       patch_box.lower(2),
                                       patch_box.upper(2),
#endif
                                       bc_fill_box.lower(0),
                                       bc_fill_box.upper(0),
                                       bc_fill_box.lower(1),
                                       bc_fill_box.upper(1),
#if (NDIM == 3)
                                       bc_fill_box.lower(2),
                                       bc_fill_box.upper(2),
#endif
                                       adjoint_op ? 1 : 0);
        }
    }
    return;
} // fillGhostCellValuesCodim2
void BoundaryDataTester::readBoundaryDataStateEntry(
    std::shared_ptr<tbox::Database> db,
    string& db_name,
    int bdry_location_index)
{
    TBOX_ASSERT(db);
    TBOX_ASSERT(!db_name.empty());
    TBOX_ASSERT(d_variable_bc_values.size() == d_variable_name.size());

    for (int iv = 0; iv < static_cast<int>(d_variable_name.size()); ++iv)
    {
#ifdef DEBUG_CHECK_ASSERTIONS
        if (d_dim == tbox::Dimension(2))
        {
            TBOX_ASSERT(static_cast<int>(d_variable_bc_values[iv].size()) ==
                        NUM_2D_EDGES * d_variable_depth[iv]);
        }
        if (d_dim == tbox::Dimension(3))
        {
            TBOX_ASSERT(static_cast<int>(d_variable_bc_values[iv].size()) ==
                        NUM_3D_FACES * d_variable_depth[iv]);
        }
#endif
        if (db->keyExists(d_variable_name[iv]))
        {
            int depth = d_variable_depth[iv];
            std::vector<double> tmp_val = db->getDoubleVector(d_variable_name[iv]);
            if (static_cast<int>(tmp_val.size()) < depth)
            {
                TBOX_ERROR(d_object_name << ": "
                                         << "Insufficient number of " << d_variable_name[iv]
                                         << " values given in " << db_name
                                         << " input database." << endl);
            }
            for (int id = 0; id < depth; ++id)
            {
                d_variable_bc_values[iv][bdry_location_index * depth + id] = tmp_val[id];
            }
        }
        else
        {
            TBOX_ERROR(d_object_name << ": " << d_variable_name[iv]
                                     << " entry missing from " << db_name
                                     << " input database. " << endl);
        }
    }
}
StaggeredStokesOpenBoundaryStabilizer::StaggeredStokesOpenBoundaryStabilizer(
    const std::string& object_name,
    Pointer<Database> input_db,
    const INSHierarchyIntegrator* fluid_solver,
    Pointer<CartesianGridGeometry<NDIM> > grid_geometry)
    : CartGridFunction(object_name),
      d_open_bdry(array_constant<bool, 2 * NDIM>(false)),
      d_inflow_bdry(array_constant<bool, 2 * NDIM>(false)),
      d_outflow_bdry(array_constant<bool, 2 * NDIM>(false)),
      d_width(array_constant<double, 2 * NDIM>(0.0)),
      d_fluid_solver(fluid_solver),
      d_grid_geometry(grid_geometry)
{
    if (input_db)
    {
        for (unsigned int location_index = 0; location_index < 2 * NDIM; ++location_index)
        {
            std::ostringstream stabilization_type_stream;
            stabilization_type_stream << "stabilization_type_" << location_index;
            const std::string stabilization_type_key = stabilization_type_stream.str();
            if (input_db->keyExists(stabilization_type_key))
            {
                const std::string stabilization_type = input_db->getString(stabilization_type_key);
                if (stabilization_type == "INFLOW")
                {
                    d_open_bdry[location_index] = true;
                    d_inflow_bdry[location_index] = true;
                    d_outflow_bdry[location_index] = false;
                }
                else if (stabilization_type == "OUTFLOW")
                {
                    d_open_bdry[location_index] = true;
                    d_inflow_bdry[location_index] = false;
                    d_outflow_bdry[location_index] = true;
                }
                else if (stabilization_type != "NONE")
                {
                    TBOX_ERROR("StaggeredStokesOpenBoundaryStabilizer::StaggeredStokesOpenBoundaryStabilizer():\n"
                               << "  unsupported stabilization type: ``" << stabilization_type << "''\n"
                               << "  supported values are: ``INFLOW'', ``OUTFLOW'', or ``NONE''\n");
                }
            }
            std::ostringstream width_stream;
            width_stream << "width_" << location_index;
            const std::string width_key = width_stream.str();
            if (input_db->keyExists(width_key))
            {
                d_width[location_index] = input_db->getDouble(width_key);
            }
        }
    }
    return;
} // StaggeredStokesOpenBoundaryStabilizer
void
GeneralizedIBMethod::computeLagrangianForce(const double data_time)
{
    IBMethod::computeLagrangianForce(data_time);

    const int coarsest_ln = 0;
    const int finest_ln = d_hierarchy->getFinestLevelNumber();

    int ierr;
    std::vector<Pointer<LData> >* F_data = NULL;
    std::vector<Pointer<LData> >* N_data = NULL;
    std::vector<Pointer<LData> >* X_data = NULL;
    std::vector<Pointer<LData> >* D_data = NULL;
    if (MathUtilities<double>::equalEps(data_time, d_current_time))
    {
        d_F_current_needs_ghost_fill = true;
        d_N_current_needs_ghost_fill = true;
        F_data = &d_F_current_data;
        N_data = &d_N_current_data;
        X_data = &d_X_current_data;
        D_data = &d_D_current_data;
    }
    else if (MathUtilities<double>::equalEps(data_time, d_half_time))
    {
        TBOX_ERROR(d_object_name << "::computeLagrangianForce():\n"
                   << "  time-stepping type MIDPOINT_RULE not supported by class GeneralizedIBMethod;\n"
                   << "  use TRAPEZOIDAL_RULE instead.\n");
    }
    else if (MathUtilities<double>::equalEps(data_time, d_new_time))
    {
        d_F_new_needs_ghost_fill = true;
        d_N_new_needs_ghost_fill = true;
        F_data = &d_F_new_data;
        N_data = &d_N_new_data;
        X_data = &d_X_new_data;
        D_data = &d_D_new_data;
    }
    for (int ln = coarsest_ln; ln <= finest_ln; ++ln)
    {
        if (!d_l_data_manager->levelContainsLagrangianData(ln)) continue;
        ierr = VecSet((*N_data)[ln]->getVec(), 0.0);
        IBTK_CHKERRQ(ierr);
        if (d_ib_force_and_torque_fcn)
        {
            d_ib_force_and_torque_fcn->computeLagrangianForceAndTorque(
                (*F_data)[ln], (*N_data)[ln], (*X_data)[ln], (*D_data)[ln],
                d_hierarchy, ln, data_time, d_l_data_manager);
        }
    }
    resetAnchorPointValues(*F_data, coarsest_ln, finest_ln);
    resetAnchorPointValues(*N_data, coarsest_ln, finest_ln);
    return;
} // computeLagrangianForce
void
RobinPhysBdryPatchStrategy::accumulateFromPhysicalBoundaryData(Patch<NDIM>& /*patch*/,
                                                               double /*fill_time*/,
                                                               const IntVector<NDIM>& /*ghost_width_to_fill*/)
{
    TBOX_ERROR("RobinPhysBdryPatchStrategy::accumulateFromPhysicalBoundaryData(): unimplemented\n");
    return;
} // accumulateFromPhysicalBoundaryData
void
IBImplicitStaggeredHierarchyIntegrator::preprocessIntegrateHierarchy(const double current_time,
                                                                     const double new_time,
                                                                     const int num_cycles)
{
    IBHierarchyIntegrator::preprocessIntegrateHierarchy(current_time, new_time, num_cycles);

    const int coarsest_ln = 0;
    const int finest_ln = d_hierarchy->getFinestLevelNumber();
    TBOX_ASSERT(d_time_stepping_type == MIDPOINT_RULE);

    // Allocate Eulerian scratch and new data.
    for (int ln = coarsest_ln; ln <= finest_ln; ++ln)
    {
        Pointer<PatchLevel<NDIM> > level = d_hierarchy->getPatchLevel(ln);
        level->allocatePatchData(d_u_idx, current_time);
        level->allocatePatchData(d_f_idx, current_time);
        level->allocatePatchData(d_scratch_data, current_time);
        level->allocatePatchData(d_new_data, new_time);
    }

    // Initialize IB data.
    d_ib_implicit_ops->preprocessIntegrateData(current_time, new_time, num_cycles);

    // Initialize the fluid solver.
    const int ins_num_cycles = d_ins_hier_integrator->getNumberOfCycles();
    if (ins_num_cycles != d_current_num_cycles && d_current_num_cycles != 1)
    {
        TBOX_ERROR(d_object_name << "::preprocessIntegrateHierarchy():\n"
                   << "  attempting to perform " << d_current_num_cycles
                   << " cycles of fixed point iteration.\n"
                   << "  number of cycles required by Navier-Stokes solver = " << ins_num_cycles << ".\n"
                   << "  current implementation requires either that both solvers use the same number of cycles,\n"
                   << "  or that the IB solver use only a single cycle.\n");
    }
    d_ins_hier_integrator->preprocessIntegrateHierarchy(current_time, new_time, ins_num_cycles);

    // Compute an initial prediction of the updated positions of the Lagrangian
    // structure.
    //
    // NOTE: The velocity should already have been interpolated to the
    // curvilinear mesh and should not need to be re-interpolated.
    if (d_enable_logging)
        plog << d_object_name
             << "::preprocessIntegrateHierarchy(): performing Lagrangian forward Euler step\n";
    d_ib_implicit_ops->eulerStep(current_time, new_time);

    // Execute any registered callbacks.
    executePreprocessIntegrateHierarchyCallbackFcns(current_time, new_time, num_cycles);
    return;
} // preprocessIntegrateHierarchy
void
GeneralizedIBMethod::midpointStep(const double /*current_time*/, const double /*new_time*/)
{
    TBOX_ERROR(d_object_name << "::midpointStep():\n"
               << "  time-stepping type MIDPOINT_RULE not supported by class GeneralizedIBMethod;\n"
               << "  use TRAPEZOIDAL_RULE instead.\n");
    return;
} // midpointStep
AsyncCommStage::Member::~Member()
{
    if (hasPendingRequests())
    {
        TBOX_ERROR("Cannot deallocate a Member with pending communications.\n"
                   << "It would corrupt message passing algorithms.\n");
    }
    if (d_stage != 0)
    {
        d_stage->privateDestageMember(this);
    }
    d_handler = 0;
}
void
AsyncCommStage::Member::pushToCompletionQueue()
{
    if (!isDone())
    {
        TBOX_ERROR("AsyncCommStage::Member::pushToCompletionQueue error:\n"
                   << "This method may not be called by Members that have not\n"
                   << "completed their operation (i.e., that do not return true from isDone())."
                   << std::endl);
    }
    d_stage->privatePushToCompletionQueue(*this);
}
void
FACPreconditioner::setInitialGuessNonzero(bool initial_guess_nonzero)
{
    if (initial_guess_nonzero)
    {
        TBOX_ERROR(d_object_name << "::setInitialGuessNonzero()\n"
                   << "  class IBTK::FACPreconditioner requires a zero initial guess" << std::endl);
    }
    return;
} // setInitialGuessNonzero
void
BGaussSeidelPreconditioner::setMaxIterations(int max_iterations)
{
    if (max_iterations > 1)
    {
        TBOX_ERROR(d_object_name << "::setMaxIterations()\n"
                   << "  class IBTK::BGaussSeidelPreconditioner requires max_iterations == 1" << std::endl);
    }
    d_max_iterations = max_iterations;
    return;
} // setMaxIterations
void
FACPreconditioner::setMaxIterations(int max_iterations)
{
    if (max_iterations != 1)
    {
        TBOX_ERROR(d_object_name << "::setMaxIterations()\n"
                   << "  class IBTK::FACPreconditioner only performs a single iteration" << std::endl);
    }
    return;
} // setMaxIterations