/** Search tags by substring. Find all tags if substring is null. Returns a list sorted by hierarchy depth first, then alphabetically. */ QList<QString> tags_repository::search_substring(QString substring) { typedef QPair<int,QString> tag_t; // depth and full name QList<tag_t*> res; for (std::map<int,message_tag>::const_iterator it= m_tags_map.begin(); it != m_tags_map.end(); ++it) { QString fulln = hierarchy(it->second.id(), "->"); // DBG_PRINTF(3, "fulln=%s", fulln.toLocal8Bit().constData()); if (substring.isNull() || fulln.contains(substring, Qt::CaseInsensitive)) { // DBG_PRINTF(3, "tag found id=%d", it->second.id()); tag_t* t = new tag_t(depth(it->second.id()), fulln); res.append(t); } } /* sort by hierarchy (closer to top comes first), then alphabetically */ qSort(res.begin(), res.end(), [](const tag_t* a, const tag_t* b) -> bool { return (a->first == b->first) ? (a->second < b->second) : (a->first < b->first); }); /* extract names only */ QList<QString> res1; for (QList<tag_t*>::iterator it = res.begin(); it != res.end(); ++it) { res1.append((*it)->second); } qDeleteAll(res); return res1; }
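The comparator sorts shallower tags ahead of deeper ones and breaks ties alphabetically. A standalone sketch of the same ordering rule using std::pair and std::sort instead of the Qt types above (names and data are illustrative):

// Depth-then-alphabetical ordering, as used by search_substring().
#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
    // first = depth in the tag hierarchy, second = full name ("parent->child")
    std::vector<std::pair<int, std::string>> tags = {
        {2, "work->mail->urgent"}, {1, "work->mail"}, {1, "home->bills"}, {0, "work"}
    };
    std::sort(tags.begin(), tags.end(),
              [](const auto& a, const auto& b) {
                  // shallower tags first; ties broken alphabetically
                  return a.first == b.first ? a.second < b.second : a.first < b.first;
              });
    for (const auto& t : tags)
        std::cout << t.second << "\n";  // work, home->bills, work->mail, work->mail->urgent
    return 0;
}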
int main() { int retval; test_batch_runner *runner = test_batch_runner_new(); version(runner); constructor(runner); accessors(runner); node_check(runner); iterator(runner); iterator_delete(runner); create_tree(runner); hierarchy(runner); parser(runner); render_html(runner); utf8(runner); line_endings(runner); numeric_entities(runner); test_cplusplus(runner); test_print_summary(runner); retval = test_ok(runner) ? 0 : 1; free(runner); return retval; }
Try<Nothing> extendLifetime(pid_t child) { if (!systemd::exists()) { return Error("systemd does not exist on this system"); } if (!systemd::enabled()) { return Error("systemd is not enabled on this system"); } Try<Nothing> assign = cgroups::assign( hierarchy(), systemd::mesos::MESOS_EXECUTORS_SLICE, child); if (assign.isError()) { LOG(ERROR) << "Failed to assign process " << child << " to its systemd executor slice: " << assign.error(); ::kill(child, SIGKILL); return Error("Failed to contain process on systemd"); } LOG(INFO) << "Assigned child process '" << child << "' to '" << systemd::mesos::MESOS_EXECUTORS_SLICE << "'"; return Nothing(); }
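Under the hood, assigning a process to a cgroup amounts to writing the child's pid into the target cgroup's procs file; here is a rough, hedged sketch of that filesystem-level step, assuming a hypothetical cgroup path rather than the hierarchy and MESOS_EXECUTORS_SLICE that Mesos actually resolves via cgroups::assign():

// Illustrative only: append the child's pid to <cgroup>/cgroup.procs.
#include <fstream>
#include <string>
#include <sys/types.h>

bool assignToCgroup(const std::string& cgroupPath, pid_t pid) {
    std::ofstream procs(cgroupPath + "/cgroup.procs", std::ios::app);
    if (!procs.is_open()) {
        return false;  // caller decides how to handle failure (above: kill the child)
    }
    procs << pid << "\n";
    return static_cast<bool>(procs);
}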
void MyMoneyTemplate::hierarchy(QMap<QString, QTreeWidgetItem*>& list) { bool rc = !m_accounts.isNull(); QDomNode accounts = m_accounts; while (rc == true && !accounts.isNull() && accounts.isElement()) { QDomElement childElement = accounts.toElement(); if (childElement.tagName() == "account" && childElement.attribute("name").isEmpty()) { switch (childElement.attribute("type").toUInt()) { case MyMoneyAccount::Asset: list[i18n("Asset")] = 0; rc = hierarchy(list, i18n("Asset"), childElement.firstChild()); break; case MyMoneyAccount::Liability: list[i18n("Liability")] = 0; rc = hierarchy(list, i18n("Liability"), childElement.firstChild()); break; case MyMoneyAccount::Income: list[i18n("Income")] = 0; rc = hierarchy(list, i18n("Income"), childElement.firstChild()); break; case MyMoneyAccount::Expense: list[i18n("Expense")] = 0; rc = hierarchy(list, i18n("Expense"), childElement.firstChild()); break; case MyMoneyAccount::Equity: list[i18n("Equity")] = 0; rc = hierarchy(list, i18n("Equity"), childElement.firstChild()); break; default: rc = false; break; } } else { rc = false; } accounts = accounts.nextSibling(); } }
// FillHole - plug up a dynamically punched authorization hole // bool IpVerify::FillHole(DCpermission perm, MyString& id) { HolePunchTable_t* table = PunchedHoleArray[perm]; if (table == NULL) { return false; } int count; if (table->lookup(id, count) == -1) { return false; } if (table->remove(id) == -1) { EXCEPT("IpVerify::FillHole: table entry removal error"); } count--; if (count != 0) { if (table->insert(id, count) == -1) { EXCEPT("IpVerify::FillHole: " "table entry insertion error"); } } if (count == 0) { dprintf(D_SECURITY, "IpVerify::FillHole: " "removed %s-level opening for %s\n", PermString(perm), id.Value()); } else { dprintf(D_SECURITY, "IpVerify::FillHole: " "open count at level %s for %s now %d\n", PermString(perm), id.Value(), count); } DCpermissionHierarchy hierarchy( perm ); DCpermission const *implied_perms=hierarchy.getImpliedPerms(); for(; implied_perms[0] != LAST_PERM; implied_perms++ ) { if( perm != implied_perms[0] ) { FillHole(implied_perms[0],id); } } return true; }
// PunchHole - dynamically opens up a perm level to the // given user / IP. The hole can be removed with FillHole. // Additions persist across a reconfig. This is intended // for transient permissions (like the automatic permission // granted to a remote startd host when a shadow starts up). // bool IpVerify::PunchHole(DCpermission perm, MyString& id) { int count = 0; if (PunchedHoleArray[perm] == NULL) { PunchedHoleArray[perm] = new HolePunchTable_t(compute_host_hash); ASSERT(PunchedHoleArray[perm] != NULL); } else { int c; if (PunchedHoleArray[perm]->lookup(id, c) != -1) { count = c; if (PunchedHoleArray[perm]->remove(id) == -1) { EXCEPT("IpVerify::PunchHole: " "table entry removal error"); } } } count++; if (PunchedHoleArray[perm]->insert(id, count) == -1) { EXCEPT("IpVerify::PunchHole: table entry insertion error"); } if (count == 1) { dprintf(D_SECURITY, "IpVerify::PunchHole: opened %s level to %s\n", PermString(perm), id.Value()); } else { dprintf(D_SECURITY, "IpVerify::PunchHole: " "open count at level %s for %s now %d\n", PermString(perm), id.Value(), count); } DCpermissionHierarchy hierarchy( perm ); DCpermission const *implied_perms=hierarchy.getImpliedPerms(); for(; implied_perms[0] != LAST_PERM; implied_perms++ ) { if( perm != implied_perms[0] ) { PunchHole(implied_perms[0],id); } } return true; }
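PunchHole and FillHole together implement a per-identity reference count: punching the same hole twice requires filling it twice before access is revoked, and both recurse over the implied permission levels. A minimal sketch of just the reference-counted bookkeeping, with std::map standing in for HolePunchTable_t and the permission-hierarchy recursion left out (illustrative, not the HTCondor API):

#include <map>
#include <string>

class HoleTable {
public:
    void punch(const std::string& id) { ++holes_[id]; }   // grant: bump open count
    bool fill(const std::string& id) {                     // revoke: drop open count
        auto it = holes_.find(id);
        if (it == holes_.end()) return false;              // nothing to fill
        if (--it->second == 0) holes_.erase(it);           // last opening removed
        return true;
    }
    bool isOpen(const std::string& id) const { return holes_.count(id) != 0; }
private:
    std::map<std::string, int> holes_;                     // identity -> open count
};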
bool MyMoneyTemplate::hierarchy(QMap<QString, QTreeWidgetItem*>& list, const QString& parent, QDomNode account) { bool rc = true; while (rc == true && !account.isNull()) { if (account.isElement()) { QDomElement accountElement = account.toElement(); if (accountElement.tagName() == "account") { QString name = QString("%1:%2").arg(parent).arg(accountElement.attribute("name")); list[name] = 0; hierarchy(list, name, account.firstChild()); } } account = account.nextSibling(); } return rc; }
void init_variable_hierarchy(const std::string &var_name, Varset &variable_hierarchy) { // an empty class const char *python_class = "OBJ"; int last_pos = -1; std::string hierarchy; while (true) { size_t pos = var_name.find_first_of('.', last_pos + 1); int next_pos = (pos == std::string::npos ? var_name.length() : (int) pos); // path up to this point ("a", then "a.b", then "a.b.c", etc.) std::string hierarchy(var_name, 0, next_pos); size_t bracket_pos = hierarchy.find_first_of('[', last_pos + 1); bool is_array = (bracket_pos != std::string::npos); if (is_array) { std::string upto_bracket(var_name, 0, bracket_pos + 1); // include final "[" Varset::iterator i = variable_hierarchy.find(upto_bracket); if (i == variable_hierarchy.end()) { if (variable_hierarchy.size() == 0) { define_python_class(python_class); } variable_hierarchy.insert(upto_bracket); std::string before_bracket(var_name, 0, bracket_pos); // exclude final "[" std::cout << before_bracket << "={}\n"; } } if (next_pos == (int) var_name.length()) { break; } Varset::iterator i = variable_hierarchy.find(hierarchy); if (i == variable_hierarchy.end()) { if (variable_hierarchy.size() == 0) { define_python_class(python_class); } variable_hierarchy.insert(hierarchy); std::cout << hierarchy << "=" << python_class << "()\n"; } last_pos = next_pos; } }
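The function above walks the dotted prefixes of a variable name and emits a Python assignment for each prefix it has not seen yet. A stripped-down sketch of that prefix walk, with array handling and define_python_class omitted; emit_prefixes and the OBJ class name are illustrative, not part of the original:

// For "a.b.c" this emits: a=OBJ()  then  a.b=OBJ(), leaving the leaf to the caller.
#include <iostream>
#include <set>
#include <string>

void emit_prefixes(const std::string& name, std::set<std::string>& seen) {
    for (size_t pos = name.find('.'); pos != std::string::npos;
         pos = name.find('.', pos + 1)) {
        std::string prefix = name.substr(0, pos);
        if (seen.insert(prefix).second)      // emit each prefix only once
            std::cout << prefix << "=OBJ()\n";
    }
}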
void Ontology::normalize() { hierarchy.closure(); //reduce transitivity for universals set<const UniversalConcept *> s; FOREACH(u, positive_universals) FOREACH(i, transitive_roles) if (hierarchy(*i, (*u)->role()->ID())) { const Role *t = factory.role(*i); const UniversalConcept *c = factory.universal(t, factory.universal(t, (*u)->concept())); if (t != (*u)->role()) unary(Concept::concept_decompose(*u), Disjunction(Concept::concept_decompose(c))); s.insert(c); } FOREACH(u, s) { unary(Concept::concept_decompose((*u)->concept()), Disjunction(Concept::concept_decompose(*u))); positive_universals.insert(*u); positive_universals.insert((const UniversalConcept *) (*u)->concept()); }
/* ************************************************************************* * * Set initial conditions for CVODE solver * ************************************************************************* */ void CVODEModel::setInitialConditions( SundialsAbstractVector* soln_init) { std::shared_ptr<SAMRAIVectorReal<double> > soln_init_samvect( Sundials_SAMRAIVector::getSAMRAIVector(soln_init)); std::shared_ptr<PatchHierarchy> hierarchy( soln_init_samvect->getPatchHierarchy()); for (int ln = 0; ln < hierarchy->getNumberOfLevels(); ++ln) { std::shared_ptr<PatchLevel> level(hierarchy->getPatchLevel(ln)); for (int cn = 0; cn < soln_init_samvect->getNumberOfComponents(); ++cn) { for (PatchLevel::iterator p(level->begin()); p != level->end(); ++p) { const std::shared_ptr<Patch>& patch = *p; /* * Set initial conditions for y */ std::shared_ptr<CellData<double> > y_init( SAMRAI_SHARED_PTR_CAST<CellData<double>, PatchData>( soln_init_samvect->getComponentPatchData(cn, *patch))); TBOX_ASSERT(y_init); y_init->fillAll(d_initial_value); /* * Set initial diffusion coeff values. * NOTE: in a "real" application, the diffusion coefficient is * some function of y. Here, we just do a simple minded * approach and set it to 1. */ std::shared_ptr<SideData<double> > diffusion( SAMRAI_SHARED_PTR_CAST<SideData<double>, PatchData>( patch->getPatchData(d_diff_id))); TBOX_ASSERT(diffusion); diffusion->fillAll(1.0); } } } }
/* Return the name of the tag including its parent hierarchy */ QString tags_repository::hierarchy(int id, QString sep) { if (!m_tags_map_fetched) { fetch(); } std::map<int,message_tag>::const_iterator i; i = m_tags_map.find(id); if (i == m_tags_map.end()) return ""; QString s; int parent_id = i->second.parent_id(); if (parent_id) { s = hierarchy(parent_id, sep); s.append(sep); s.append(i->second.name()); } else s = i->second.name(); return s; }
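For a tag whose ancestor chain is work -> mail, hierarchy(id, "->") returns "work->mail->urgent". A standalone sketch of the same parent-chain recursion over a plain std::map, with made-up ids and names:

#include <iostream>
#include <map>
#include <string>

struct Tag { std::string name; int parent_id; };  // 0 = no parent

std::string fullName(const std::map<int, Tag>& tags, int id, const std::string& sep) {
    auto it = tags.find(id);
    if (it == tags.end()) return "";
    if (it->second.parent_id == 0) return it->second.name;
    // prepend the parent chain, then append this tag's own name
    return fullName(tags, it->second.parent_id, sep) + sep + it->second.name;
}

int main() {
    std::map<int, Tag> tags = {{1, {"work", 0}}, {2, {"mail", 1}}, {3, {"urgent", 2}}};
    std::cout << fullName(tags, 3, "->") << "\n";  // prints work->mail->urgent
    return 0;
}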
QString EvObjectInfo::hierarchicalName() const { return hierarchy().join('.'); }
void BJson::_Parse(BMessage& message, BString& JSON) { BMessageBuilder builder(message); int32 pos = 0; int32 length = JSON.Length(); /* Locals used by the parser. */ // Keeps track of the hierarchy (e.g. "{[{{") that has // been read in. Allows the parser to verify that openbraces // match up to closebraces and so on and so forth. BString hierarchy(""); // Stores the key that was just read by the string parser, // in the case that we are parsing a map. BString key(""); // TODO: Check builder return codes and throw exception, or // change builder implementation/interface to throw exceptions // instead of returning errors. // TODO: Elimitate more duplicated code, for example by moving // more code into _ParseConstant(). while (pos < length) { switch (JSON[pos]) { case '{': hierarchy += "{"; if (hierarchy != "{") { if (builder.What() == JSON_TYPE_ARRAY) builder.PushObject(builder.CountNames()); else { builder.PushObject(key.String()); key = ""; } } builder.SetWhat(JSON_TYPE_MAP); break; case '}': if (hierarchy.EndsWith("{") && hierarchy.Length() != 1) { hierarchy.Truncate(hierarchy.Length() - 1); builder.PopObject(); } else if (hierarchy.Length() == 1) return; // End of the JSON data else throw ParseException(pos, "Unmatched closebrace }"); break; case '[': hierarchy += "["; if (builder.What() == JSON_TYPE_ARRAY) builder.PushObject(builder.CountNames()); else { builder.PushObject(key.String()); key = ""; } builder.SetWhat(JSON_TYPE_ARRAY); break; case ']': if (hierarchy.EndsWith("[")) { hierarchy.Truncate(hierarchy.Length() - 1); builder.PopObject(); } else { BString error("Unmatched closebrace ] hierarchy: "); error << hierarchy; throw ParseException(pos, error); } break; case 't': { if (builder.What() != JSON_TYPE_ARRAY && key.Length() == 0) { throw ParseException(pos, "'true' cannot be a key, it can only be a value"); } if (_ParseConstant(JSON, pos, "true")) { if (builder.What() == JSON_TYPE_ARRAY) key.SetToFormat("%" B_PRIu32, builder.CountNames()); builder.AddBool(key.String(), true); key = ""; } else throw ParseException(pos, "Unexpected 't'"); break; } case 'f': { if (builder.What() != JSON_TYPE_ARRAY && key.Length() == 0) { throw ParseException(pos, "'false' cannot be a key, it can only be a value"); } if (_ParseConstant(JSON, pos, "false")) { if (builder.What() == JSON_TYPE_ARRAY) key.SetToFormat("%" B_PRIu32, builder.CountNames()); builder.AddBool(key.String(), false); key = ""; } else throw ParseException(pos, "Unexpected 'f'"); break; } case 'n': { if (builder.What() != JSON_TYPE_ARRAY && key.Length() == 0) { throw ParseException(pos, "'null' cannot be a key, it can only be a value"); } if (_ParseConstant(JSON, pos, "null")) { if (builder.What() == JSON_TYPE_ARRAY) key.SetToFormat("%" B_PRIu32, builder.CountNames()); builder.AddPointer(key.String(), (void*)NULL); key = ""; } else throw ParseException(pos, "Unexpected 'n'"); break; } case '"': if (builder.What() != JSON_TYPE_ARRAY && key.Length() == 0) key = _ParseString(JSON, pos); else if (builder.What() != JSON_TYPE_ARRAY && key.Length() > 0) { builder.AddString(key, _ParseString(JSON, pos)); key = ""; } else if (builder.What() == JSON_TYPE_ARRAY) { key << builder.CountNames(); builder.AddString(key, _ParseString(JSON, pos)); key = ""; } else throw ParseException(pos, "Internal error at encountering \""); break; case '+': case '-': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { if (builder.What() != JSON_TYPE_ARRAY && key.Length() == 0) { throw ParseException(pos, "Numbers 
cannot be keys, they can only be values"); } if (builder.What() == JSON_TYPE_ARRAY) key << builder.CountNames(); double number = _ParseNumber(JSON, pos); builder.AddDouble(key.String(), number); key = ""; break; } case ':': case ',': default: // No need to do anything here. break; } pos++; } throw ParseException(pos, "Unexpected end of document"); }
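The parser above tracks nesting in its hierarchy string, appending on '{' and '[' and truncating on the matching '}' or ']'. A minimal sketch of that matching idea in isolation, using std::string instead of BString and ignoring braces that appear inside JSON string literals:

#include <string>

bool isBalanced(const std::string& json) {
    std::string stack;                        // plays the role of "hierarchy"
    for (char c : json) {
        if (c == '{' || c == '[') {
            stack += c;                       // push opener
        } else if (c == '}' || c == ']') {
            char open = (c == '}') ? '{' : '[';
            if (stack.empty() || stack.back() != open)
                return false;                 // unmatched closer
            stack.pop_back();
        }
    }
    return stack.empty();                     // every opener was closed
}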
int CVODEModel::CVSpgmrPrecondSolve( double t, SundialsAbstractVector* y, SundialsAbstractVector* fy, SundialsAbstractVector* r, SundialsAbstractVector* z, double gamma, double delta, int lr, SundialsAbstractVector* vtemp) { NULL_USE(y); NULL_USE(fy); NULL_USE(vtemp); #ifndef USE_FAC_PRECONDITIONER NULL_USE(gamma); #endif NULL_USE(delta); NULL_USE(lr); #ifdef USE_FAC_PRECONDITIONER /* * Convert passed-in CVODE vectors into SAMRAI vectors */ std::shared_ptr<SAMRAIVectorReal<double> > r_samvect( Sundials_SAMRAIVector::getSAMRAIVector(r)); std::shared_ptr<SAMRAIVectorReal<double> > z_samvect( Sundials_SAMRAIVector::getSAMRAIVector(z)); int ret_val = 0; std::shared_ptr<PatchHierarchy> hierarchy( r_samvect->getPatchHierarchy()); int r_indx = r_samvect->getComponentDescriptorIndex(0); int z_indx = z_samvect->getComponentDescriptorIndex(0); /****************************************************************** * * We need to supply to the FAC solver a "version" of the z vector * that contains ghost cells. The operations below allocate * on the patches a scratch context of the solution vector z and * fill it with z vector data * *****************************************************************/ /* * Construct a communication schedule which will fill ghosts of * soln_scratch with z vector data (z -> soln_scratch). */ RefineAlgorithm fill_z_vector_bounds; std::shared_ptr<RefineOperator> refine_op(d_grid_geometry-> lookupRefineOperator(d_soln_var, "CONSERVATIVE_LINEAR_REFINE")); fill_z_vector_bounds.registerRefine(d_soln_scr_id, z_indx, d_soln_scr_id, refine_op); /* * Set initial guess for z (if applicable) and copy z data into the * solution scratch context. */ int ln; for (ln = hierarchy->getFinestLevelNumber(); ln >= 0; --ln) { std::shared_ptr<PatchLevel> level(hierarchy->getPatchLevel(ln)); if (!level->checkAllocated(d_soln_scr_id)) { level->allocatePatchData(d_soln_scr_id); } for (PatchLevel::iterator p(level->begin()); p != level->end(); ++p) { const std::shared_ptr<Patch>& patch = *p; std::shared_ptr<CellData<double> > z_data( SAMRAI_SHARED_PTR_CAST<CellData<double>, PatchData>( patch->getPatchData(z_indx))); TBOX_ASSERT(z_data); /* * Set initial guess for z here. */ z_data->fillAll(0.); /* * Scale RHS by 1/gamma */ PatchCellDataOpsReal<double> math_ops; std::shared_ptr<CellData<double> > r_data( SAMRAI_SHARED_PTR_CAST<CellData<double>, PatchData>( patch->getPatchData(r_indx))); TBOX_ASSERT(r_data); math_ops.scale(r_data, 1.0 / gamma, r_data, r_data->getBox()); /* * Copy interior data from z vector to soln_scratch */ std::shared_ptr<CellData<double> > z_scr_data( SAMRAI_SHARED_PTR_CAST<CellData<double>, PatchData>( patch->getPatchData(d_soln_scr_id))); TBOX_ASSERT(z_scr_data); z_scr_data->copy(*z_data); } /* * Fill ghost boundaries of soln_scratch. * Construct a schedule for each level, from the algorithm * constructed above. */ std::shared_ptr<RefineSchedule> fill_z_vector_bounds_sched( fill_z_vector_bounds.createSchedule(level, ln - 1, hierarchy, this)); fill_z_vector_bounds_sched->fillData(t); } /****************************************************************** * * Apply the FAC solver. It solves the system Az=r with the * format "solveSystem(z, r)". A was constructed in the precondSetup() * method. 
* ******************************************************************/ if (d_print_solver_info) { pout << "\t\tBefore FAC Solve (Az=r): " << "\n \t\t\tz_l2norm = " << z_samvect->L2Norm() << "\n \t\t\tz_maxnorm = " << z_samvect->maxNorm() << "\n \t\t\tr_l2norm = " << r_samvect->L2Norm() << "\n \t\t\tr_maxnorm = " << r_samvect->maxNorm() << endl; } /* * Set paramemters in the FAC solver. It solves the system Az=r. * Here we supply the max norm of r in order to scale the * residual (i.e. residual = Az - r) to properly scale the convergence * error. */ const int coarsest_solve_ln = 0; const int finest_solve_ln = 0; /* * Note: I don't know why we are only solving on level 0 here. * When upgrading to the new FAC solver from the old, I noticed * that the old solver only solved on level 0. BTNG. */ bool converge = d_FAC_solver->solveSystem(d_soln_scr_id, r_indx, hierarchy, coarsest_solve_ln, finest_solve_ln); if (d_print_solver_info) { double avg_convergence, final_convergence; d_FAC_solver->getConvergenceFactors(avg_convergence, final_convergence); pout << " \t\t\tFinal Residual Norm: " << d_FAC_solver->getResidualNorm() << endl; pout << " \t\t\tFinal Convergence Error: " << final_convergence << endl; pout << " \t\t\tFinal Convergence Rate: " << avg_convergence << endl; } /****************************************************************** * * The FAC solver has computed a solution to z but it is stored * in the soln_scratch data space. Copy it from soln_scratch back * into the z vector. * ******************************************************************/ for (ln = hierarchy->getFinestLevelNumber(); ln >= 0; --ln) { std::shared_ptr<PatchLevel> level(hierarchy->getPatchLevel(ln)); for (PatchLevel::iterator p(level->begin()); p != level->end(); ++p) { const std::shared_ptr<Patch>& patch = *p; std::shared_ptr<CellData<double> > soln_scratch( SAMRAI_SHARED_PTR_CAST<CellData<double>, PatchData>( patch->getPatchData(d_soln_scr_id))); std::shared_ptr<CellData<double> > z( SAMRAI_SHARED_PTR_CAST<CellData<double>, PatchData>( patch->getPatchData(z_indx))); TBOX_ASSERT(soln_scratch); TBOX_ASSERT(z); z->copy(*soln_scratch); } } if (d_print_solver_info) { double avg_convergence, final_convergence; d_FAC_solver->getConvergenceFactors(avg_convergence, final_convergence); pout << "\t\tAfter FAC Solve (Az=r): " << "\n \t\t\tz_l2norm = " << z_samvect->L2Norm() << "\n \t\t\tz_maxnorm = " << z_samvect->maxNorm() << "\n \t\t\tResidual Norm: " << d_FAC_solver->getResidualNorm() << "\n \t\t\tConvergence Error: " << final_convergence << endl; } if (converge != true) { ret_val = 1; } /* * Increment counter for number of precond solves */ ++d_number_precond_solve; return ret_val; #else return 0; #endif }
int CVODEModel::CVSpgmrPrecondSet( double t, SundialsAbstractVector* y, SundialsAbstractVector* fy, int jok, int* jcurPtr, double gamma, SundialsAbstractVector* vtemp1, SundialsAbstractVector* vtemp2, SundialsAbstractVector* vtemp3) { #ifndef USE_FAC_PRECONDITIONER NULL_USE(t); NULL_USE(y); NULL_USE(gamma); #endif NULL_USE(fy); NULL_USE(jok); NULL_USE(jcurPtr); NULL_USE(vtemp1); NULL_USE(vtemp2); NULL_USE(vtemp3); #ifdef USE_FAC_PRECONDITIONER /* * Convert passed-in CVODE vectors into SAMRAI vectors */ std::shared_ptr<SAMRAIVectorReal<double> > y_samvect( Sundials_SAMRAIVector::getSAMRAIVector(y)); std::shared_ptr<PatchHierarchy> hierarchy( y_samvect->getPatchHierarchy()); int y_indx = y_samvect->getComponentDescriptorIndex(0); /* * Construct refine algorithm to fill boundaries of solution vector */ RefineAlgorithm fill_soln_vector_bounds; std::shared_ptr<RefineOperator> refine_op(d_grid_geometry-> lookupRefineOperator(d_soln_var, "CONSERVATIVE_LINEAR_REFINE")); fill_soln_vector_bounds.registerRefine(d_soln_scr_id, y_samvect->getComponentDescriptorIndex(0), d_soln_scr_id, refine_op); /* * Construct coarsen algorithm to fill interiors on coarser levels * with solution on finer level. */ CoarsenAlgorithm fill_soln_interior_on_coarser(d_dim); std::shared_ptr<CoarsenOperator> coarsen_op(d_grid_geometry-> lookupCoarsenOperator(d_soln_var, "CONSERVATIVE_COARSEN")); fill_soln_interior_on_coarser.registerCoarsen(y_indx, y_indx, coarsen_op); /* * Step through levels - largest to smallest */ for (int amr_level = hierarchy->getFinestLevelNumber(); amr_level >= 0; --amr_level) { std::shared_ptr<PatchLevel> level( hierarchy->getPatchLevel(amr_level)); std::shared_ptr<RefineSchedule> fill_soln_vector_bounds_sched = fill_soln_vector_bounds.createSchedule(level, amr_level - 1, hierarchy, this); if (!level->checkAllocated(d_soln_scr_id)) { level->allocatePatchData(d_soln_scr_id); } fill_soln_vector_bounds_sched->fillData(t); /* * Construct a coarsen schedule for all levels larger than coarsest, * and fill interiors of solution vector on coarser levels using fine * data. 
*/ if (amr_level > 0) { std::shared_ptr<PatchLevel> coarser_level( hierarchy->getPatchLevel(amr_level - 1)); std::shared_ptr<CoarsenSchedule> fill_soln_interior_on_coarser_sched( fill_soln_interior_on_coarser.createSchedule(coarser_level, level)); fill_soln_interior_on_coarser_sched->coarsenData(); } for (PatchLevel::iterator p(level->begin()); p != level->end(); ++p) { const std::shared_ptr<Patch>& patch = *p; const Index ifirst(patch->getBox().lower()); const Index ilast(patch->getBox().upper()); std::shared_ptr<SideData<double> > diffusion( SAMRAI_SHARED_PTR_CAST<SideData<double>, PatchData>( patch->getPatchData(d_diff_id))); TBOX_ASSERT(diffusion); diffusion->fillAll(1.0); TBOX_ASSERT((t - d_current_soln_time) >= 0.); /* * Set Neumann fluxes and flag array (if desired) */ if (d_use_neumann_bcs) { std::shared_ptr<OuterfaceData<int> > flag_data( SAMRAI_SHARED_PTR_CAST<OuterfaceData<int>, PatchData>( patch->getPatchData(d_flag_id))); std::shared_ptr<OuterfaceData<double> > neuf_data( SAMRAI_SHARED_PTR_CAST<OuterfaceData<double>, PatchData>( patch->getPatchData(d_neuf_id))); TBOX_ASSERT(flag_data); TBOX_ASSERT(neuf_data); /* * Outerface data access: * neuf_data->getPointer(axis,face); * where axis specifies X, Y, or Z (0,1,2 respectively) * and face specifies lower or upper (0,1 respectively) */ if (d_dim == Dimension(2)) { SAMRAI_F77_FUNC(setneufluxvalues2d, SETNEUFLUXVALUES2D) ( ifirst(0), ilast(0), ifirst(1), ilast(1), d_bdry_types, &d_bdry_edge_val[0], flag_data->getPointer(0, 0), // x lower flag_data->getPointer(0, 1), // x upper flag_data->getPointer(1, 0), // y lower flag_data->getPointer(1, 1), // y upper neuf_data->getPointer(0, 0), // x lower neuf_data->getPointer(0, 1), // x upper neuf_data->getPointer(1, 0), // y lower neuf_data->getPointer(1, 1)); // y upper } else if (d_dim == Dimension(3)) { SAMRAI_F77_FUNC(setneufluxvalues3d, SETNEUFLUXVALUES3D) ( ifirst(0), ilast(0), ifirst(1), ilast(1), ifirst(2), ilast(2), d_bdry_types, &d_bdry_face_val[0], flag_data->getPointer(0, 0), // x lower flag_data->getPointer(0, 1), // x upper flag_data->getPointer(1, 0), // y lower flag_data->getPointer(1, 1), // y upper flag_data->getPointer(2, 0), // z lower flag_data->getPointer(2, 1), // z lower neuf_data->getPointer(0, 0), // x lower neuf_data->getPointer(0, 1), // x upper neuf_data->getPointer(1, 0), // y lower neuf_data->getPointer(1, 1), // y upper neuf_data->getPointer(2, 0), // z lower neuf_data->getPointer(2, 1)); // z upper } } } // patch loop level->deallocatePatchData(d_soln_scr_id); } // level loop /* * Set boundaries. The "bdry_types" array holds a set of integers * where 0 = dirichlet and 1 = neumann boundary conditions. */ if (d_use_neumann_bcs) { d_FAC_solver->setBoundaries("Mixed", d_neuf_id, d_flag_id, d_bdry_types); } else { d_FAC_solver->setBoundaries("Dirichlet"); } d_FAC_solver->setCConstant(1.0 / gamma); d_FAC_solver->setDPatchDataId(d_diff_id); /* * increment counter for number of precond setup calls */ ++d_number_precond_setup; #endif /* * We return 0 or 1 here - 0 if it passes, 1 if it fails. For now, * just be optimistic and return 0. Eventually we should add some * assertion handling above to set what this value should be. */ return 0; }
int CVODEModel::evaluateRHSFunction( double time, SundialsAbstractVector* y, SundialsAbstractVector* y_dot) { /* * Convert Sundials vectors to SAMRAI vectors */ std::shared_ptr<SAMRAIVectorReal<double> > y_samvect( Sundials_SAMRAIVector::getSAMRAIVector(y)); std::shared_ptr<SAMRAIVectorReal<double> > y_dot_samvect( Sundials_SAMRAIVector::getSAMRAIVector(y_dot)); std::shared_ptr<PatchHierarchy> hierarchy(y_samvect->getPatchHierarchy()); /* * Compute max norm of solution vector. */ //std::shared_ptr<HierarchyDataOpsReal<double> > hierops( // new HierarchyCellDataOpsReal<double>(hierarchy)); //double max_norm = hierops->maxNorm(y_samvect-> // getComponentDescriptorIndex(0)); if (d_print_solver_info) { pout << "\t\tEval RHS: " << "\n \t\t\ttime = " << time << "\n \t\t\ty_maxnorm = " << y_samvect->maxNorm() << endl; } /* * Allocate scratch space and fill ghost cells in the solution vector * 1) Create a refine algorithm * 2) Register with the algorithm the current & scratch space, along * with a refine operator. * 3) Use the refine algorithm to construct a refine schedule * 4) Use the refine schedule to fill data on fine level. */ std::shared_ptr<RefineAlgorithm> bdry_fill_alg( new RefineAlgorithm()); std::shared_ptr<RefineOperator> refine_op(d_grid_geometry-> lookupRefineOperator(d_soln_var, "CONSERVATIVE_LINEAR_REFINE")); bdry_fill_alg->registerRefine(d_soln_scr_id, // dest y_samvect-> getComponentDescriptorIndex(0), // src d_soln_scr_id, // scratch refine_op); for (int ln = hierarchy->getFinestLevelNumber(); ln >= 0; --ln) { std::shared_ptr<PatchLevel> level(hierarchy->getPatchLevel(ln)); if (!level->checkAllocated(d_soln_scr_id)) { level->allocatePatchData(d_soln_scr_id); } // Note: a pointer to "this" tells the refine schedule to invoke // the setPhysicalBCs defined in this class. 
std::shared_ptr<RefineSchedule> bdry_fill_alg_schedule( bdry_fill_alg->createSchedule(level, ln - 1, hierarchy, this)); bdry_fill_alg_schedule->fillData(time); } /* * Step through the levels and compute rhs */ for (int ln = hierarchy->getFinestLevelNumber(); ln >= 0; --ln) { std::shared_ptr<PatchLevel> level(hierarchy->getPatchLevel(ln)); for (PatchLevel::iterator ip(level->begin()); ip != level->end(); ++ip) { const std::shared_ptr<Patch>& patch = *ip; std::shared_ptr<CellData<double> > y( SAMRAI_SHARED_PTR_CAST<CellData<double>, PatchData>( patch->getPatchData(d_soln_scr_id))); std::shared_ptr<SideData<double> > diff( SAMRAI_SHARED_PTR_CAST<SideData<double>, PatchData>( patch->getPatchData(d_diff_id))); std::shared_ptr<CellData<double> > rhs( SAMRAI_SHARED_PTR_CAST<CellData<double>, PatchData>( patch->getPatchData(y_dot_samvect->getComponentDescriptorIndex(0)))); TBOX_ASSERT(y); TBOX_ASSERT(diff); TBOX_ASSERT(rhs); const Index ifirst(patch->getBox().lower()); const Index ilast(patch->getBox().upper()); const std::shared_ptr<CartesianPatchGeometry> patch_geom( SAMRAI_SHARED_PTR_CAST<CartesianPatchGeometry, PatchGeometry>( patch->getPatchGeometry())); TBOX_ASSERT(patch_geom); const double* dx = patch_geom->getDx(); IntVector ghost_cells(y->getGhostCellWidth()); /* * 1 eqn radiation diffusion */ if (d_dim == Dimension(2)) { SAMRAI_F77_FUNC(comprhs2d, COMPRHS2D) ( ifirst(0), ilast(0), ifirst(1), ilast(1), ghost_cells(0), ghost_cells(1), dx, y->getPointer(), diff->getPointer(0), diff->getPointer(1), rhs->getPointer()); } else if (d_dim == Dimension(3)) { SAMRAI_F77_FUNC(comprhs3d, COMPRHS3D) ( ifirst(0), ilast(0), ifirst(1), ilast(1), ifirst(2), ilast(2), ghost_cells(0), ghost_cells(1), ghost_cells(2), dx, y->getPointer(), diff->getPointer(0), diff->getPointer(1), diff->getPointer(2), rhs->getPointer()); } } // loop over patches } // loop over levels /* * Deallocate scratch space. */ for (int ln = hierarchy->getFinestLevelNumber(); ln >= 0; --ln) { hierarchy->getPatchLevel(ln)->deallocatePatchData(d_soln_scr_id); } /* * record current time and increment counter for number of RHS * evaluations. */ d_current_soln_time = time; ++d_number_rhs_eval; return 0; }
void DfsAcyclicSubgraph::callUML ( const GraphAttributes &AG, List<edge> &arcSet) { const Graph &G = AG.constGraph(); // identify hierarchies NodeArray<int> hierarchy(G,-1); int count = 0; int treeNum = -1; node v; forall_nodes(v,G) { if(hierarchy[v] == -1) { int n = dfsFindHierarchies(AG,hierarchy,count,v); if(n > 1) treeNum = count; ++count; } } arcSet.clear(); // perform DFS on the directed graph formed by generalizations NodeArray<int> number(G,0), completion(G); int nNumber = 0, nCompletion = 0; forall_nodes(v,G) { if(number[v] == 0) dfsBackedgesHierarchies(AG,v,number,completion,nNumber,nCompletion); } // collect all backedges within a hierarchy // and compute outdeg of each vertex within its hierarchy EdgeArray<bool> reversed(G,false); NodeArray<int> outdeg(G,0); edge e; forall_edges(e,G) { if(AG.type(e) != Graph::generalization || e->isSelfLoop()) continue; node src = e->source(), tgt = e->target(); outdeg[src]++; if (hierarchy[src] == hierarchy[tgt] && number[src] >= number[tgt] && completion[src] <= completion[tgt]) reversed[e] = true; } // topological numbering of nodes within a hierarchy (for each hierarchy) NodeArray<int> numV(G); Queue<node> Q; int countV = 0; forall_nodes(v,G) if(outdeg[v] == 0) Q.append(v); while(!Q.empty()) { v = Q.pop(); numV[v] = countV++; forall_adj_edges(e,v) { node w = e->source(); if(w != v) { if(--outdeg[w] == 0) Q.append(w); } } }
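The final loop above computes a topological number by repeatedly removing vertices whose remaining out-degree has dropped to zero, i.e. Kahn's algorithm run against the edge direction. A standalone sketch of that numbering on a plain adjacency list (representation and names are illustrative, not OGDF types):

#include <queue>
#include <vector>

// edges[u] lists u's outgoing targets; returns a number per node, -1 for nodes on a cycle.
std::vector<int> numberByOutdegree(const std::vector<std::vector<int>>& edges) {
    int n = static_cast<int>(edges.size());
    std::vector<int> outdeg(n, 0), num(n, -1);
    std::vector<std::vector<int>> preds(n);            // reverse adjacency
    for (int u = 0; u < n; ++u)
        for (int v : edges[u]) { ++outdeg[u]; preds[v].push_back(u); }
    std::queue<int> q;
    for (int v = 0; v < n; ++v)
        if (outdeg[v] == 0) q.push(v);                 // sinks are numbered first
    int count = 0;
    while (!q.empty()) {
        int v = q.front(); q.pop();
        num[v] = count++;
        for (int u : preds[v])
            if (--outdeg[u] == 0) q.push(u);           // u's last outgoing target is numbered
    }
    return num;
}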
int IpVerify::Verify( DCpermission perm, const condor_sockaddr& addr, const char * user, MyString *allow_reason, MyString *deny_reason ) { perm_mask_t mask; in6_addr sin6_addr; const char *thehost; const char * who = user; MyString peer_description; // we build this up as we go along (DNS etc.) if( !did_init ) { Init(); } /* * Be Warned: careful about parameter "sin" being NULL. It could be, in * which case we should return FALSE (unless perm is ALLOW) * */ switch ( perm ) { case ALLOW: return USER_AUTH_SUCCESS; break; default: break; } sin6_addr = addr.to_ipv6_address(); mask = 0; // must initialize to zero because we logical-or bits into this if (who == NULL || *who == '\0') { who = TotallyWild; } if ( perm >= LAST_PERM || !PermTypeArray[perm] ) { EXCEPT("IpVerify::Verify: called with unknown permission %d\n",perm); } // see if an authorization hole has been dynamically punched (via // PunchHole) for this perm / user / IP // Note that the permission hierarchy is dealt with in // PunchHole(), by punching a hole for all implied levels. // Therefore, if there is a hole or an implied hole, we will // always find it here before we get into the subsequent code // which recursively calls Verify() to traverse the hierarchy. // This is important, because we do not want holes to find // their way into the authorization cache. // if ( PunchedHoleArray[perm] != NULL ) { HolePunchTable_t* hpt = PunchedHoleArray[perm]; MyString ip_str_buf = addr.to_ip_string(); const char* ip_str = ip_str_buf.Value(); MyString id_with_ip; MyString id; int count; if ( who != TotallyWild ) { id_with_ip.sprintf("%s/%s", who, ip_str); id = who; if ( hpt->lookup(id, count) != -1 ) { if( allow_reason ) { allow_reason->sprintf( "%s authorization has been made automatic for %s", PermString(perm), id.Value() ); } return USER_AUTH_SUCCESS; } if ( hpt->lookup(id_with_ip, count) != -1 ) { if( allow_reason ) { allow_reason->sprintf( "%s authorization has been made automatic for %s", PermString(perm), id_with_ip.Value() ); } return USER_AUTH_SUCCESS; } } id = ip_str; if ( hpt->lookup(id, count) != -1 ) { if( allow_reason ) { allow_reason->sprintf( "%s authorization has been made automatic for %s", PermString(perm), id.Value() ); } return USER_AUTH_SUCCESS; } } if ( PermTypeArray[perm]->behavior == USERVERIFY_ALLOW ) { // allow if no HOSTALLOW_* or HOSTDENY_* restrictions // specified. 
if( allow_reason ) { allow_reason->sprintf( "%s authorization policy allows access by anyone", PermString(perm)); } return USER_AUTH_SUCCESS; } if ( PermTypeArray[perm]->behavior == USERVERIFY_DENY ) { // deny if( deny_reason ) { deny_reason->sprintf( "%s authorization policy denies all access", PermString(perm)); } return USER_AUTH_FAILURE; } if( LookupCachedVerifyResult(perm,sin6_addr,who,mask) ) { if( deny_reason && (mask&deny_mask(perm)) ) { deny_reason->sprintf( "cached result for %s; see first case for the full reason", PermString(perm)); } else if( allow_reason && (mask&allow_mask(perm)) ) { allow_reason->sprintf( "cached result for %s; see first case for the full reason", PermString(perm)); } } else { mask = 0; // if the deny bit is already set, skip further DENY analysis perm_mask_t const deny_resolved = deny_mask(perm); // if the allow or deny bit is already set, // skip further ALLOW analysis perm_mask_t const allow_resolved = allow_mask(perm)|deny_mask(perm); // check for matching subnets in ip/mask style char ipstr[INET6_ADDRSTRLEN] = { 0, }; addr.to_ip_string(ipstr, INET6_ADDRSTRLEN); peer_description = addr.to_ip_string(); if ( !(mask&deny_resolved) && lookup_user_ip_deny(perm,who,ipstr)) { mask |= deny_mask(perm); if( deny_reason ) { deny_reason->sprintf( "%s authorization policy denies IP address %s", PermString(perm), addr.to_ip_string().Value() ); } } if ( !(mask&allow_resolved) && lookup_user_ip_allow(perm,who,ipstr)) { mask |= allow_mask(perm); if( allow_reason ) { allow_reason->sprintf( "%s authorization policy allows IP address %s", PermString(perm), addr.to_ip_string().Value() ); } } std::vector<MyString> hostnames; // now scan through hostname strings if( !(mask&allow_resolved) || !(mask&deny_resolved) ) { hostnames = get_hostname_with_alias(addr); } for (unsigned int i = 0; i < hostnames.size(); ++i) { thehost = hostnames[i].Value(); peer_description.append_to_list(thehost); if ( !(mask&deny_resolved) && lookup_user_host_deny(perm,who,thehost) ) { mask |= deny_mask(perm); if( deny_reason ) { deny_reason->sprintf( "%s authorization policy denies hostname %s", PermString(perm), thehost ); } } if ( !(mask&allow_resolved) && lookup_user_host_allow(perm,who,thehost) ) { mask |= allow_mask(perm); if( allow_reason ) { allow_reason->sprintf( "%s authorization policy allows hostname %s", PermString(perm), thehost ); } } } // if we found something via our hostname or subnet matching, we now have // a mask, and we should add it into our table so we need not // do a gethostbyaddr() next time. If we still do not have a mask // (perhaps because this host doesn't appear in any list), create one // and then add to the table. // But first, check our parent permission levels in the // authorization hierarchy. // DAEMON and ADMINISTRATOR imply WRITE. // WRITE, NEGOTIATOR, and CONFIG_PERM imply READ. 
bool determined_by_parent = false; if ( mask == 0 ) { if ( PermTypeArray[perm]->behavior == USERVERIFY_ONLY_DENIES ) { dprintf(D_SECURITY,"IPVERIFY: %s at %s not matched to deny list, so allowing.\n",who, addr.to_sinful().Value()); if( allow_reason ) { allow_reason->sprintf( "%s authorization policy does not deny, so allowing", PermString(perm)); } mask |= allow_mask(perm); } else { DCpermissionHierarchy hierarchy( perm ); DCpermission const *parent_perms = hierarchy.getPermsIAmDirectlyImpliedBy(); bool parent_allowed = false; for( ; *parent_perms != LAST_PERM; parent_perms++ ) { if( Verify( *parent_perms, addr, user, allow_reason, NULL ) == USER_AUTH_SUCCESS ) { determined_by_parent = true; parent_allowed = true; dprintf(D_SECURITY,"IPVERIFY: allowing %s at %s for %s because %s is allowed\n",who, addr.to_sinful().Value(),PermString(perm),PermString(*parent_perms)); if( allow_reason ) { MyString tmp = *allow_reason; allow_reason->sprintf( "%s is implied by %s; %s", PermString(perm), PermString(*parent_perms), tmp.Value()); } break; } } if( parent_allowed ) { mask |= allow_mask(perm); } else { mask |= deny_mask(perm); if( !determined_by_parent && deny_reason ) { // We don't just allow anyone, and this request // did not match any of the entries we do allow. // In case the reason we didn't match is // because of a typo or a DNS problem, record // all the hostnames we searched for. deny_reason->sprintf( "%s authorization policy contains no matching " "ALLOW entry for this request" "; identifiers used for this host: %s, hostname size = %lu, " "original ip address = %s", PermString(perm), peer_description.Value(), (unsigned long)hostnames.size(), ipstr); } } } } if( !determined_by_parent && (mask&allow_mask(perm)) ) { // In case we are allowing because of not matching a DENY // entry that the user expected us to match (e.g. because // of typo or DNS problem), record all the hostnames we // searched for. if( allow_reason && !peer_description.IsEmpty() ) { allow_reason->sprintf_cat( "; identifiers used for this remote host: %s", peer_description.Value()); } } // finally, add the mask we computed into the table with this IP addr add_hash_entry(sin6_addr, who, mask); } // end of if find_match is FALSE // decode the mask and return True or False to the user. if ( mask & deny_mask(perm) ) { return USER_AUTH_FAILURE; } if ( mask & allow_mask(perm) ) { return USER_AUTH_SUCCESS; } return USER_AUTH_FAILURE; }
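Verify() folds its decision into a perm_mask_t with separate allow and deny bits per permission level and caches the result per address/user. A minimal sketch of that bit bookkeeping, with an illustrative bit layout that is not the actual HTCondor allow_mask/deny_mask encoding:

#include <cstdint>

using perm_mask_t = uint64_t;

// One allow bit and one deny bit per permission level (layout is illustrative).
inline perm_mask_t allow_bit(int perm) { return perm_mask_t(1) << (2 * perm); }
inline perm_mask_t deny_bit(int perm)  { return perm_mask_t(1) << (2 * perm + 1); }

inline bool allowed(perm_mask_t mask, int perm) {
    // deny wins over allow, mirroring the final checks in Verify()
    if (mask & deny_bit(perm)) return false;
    return (mask & allow_bit(perm)) != 0;
}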