// ImGui is ready to be used, just place your Gui elements. bool basicEffect::showGuiWindow( const shapesDB& _scene ) { if( !bShowGuiWindow ) return false; ImGui::SetNextWindowSize(ImVec2(400,ofGetHeight()*0.8), ImGuiSetCond_Once); ImGui::Begin( ((string)"Effect Settings: ").append(getName()).append("###effect-").append( ofToString(this) ).c_str() , &bShowGuiWindow ); ImGui::LabelText("Type", "%s", getType().c_str() ); static char nameBuffer[32] = ""; if( ImGui::InputText("Name", nameBuffer, 32) ){ effectName = nameBuffer; } ImGui::TextWrapped("Status: %s", getShortStatus().c_str() ); if( ImGui::Button("Reset effect") ){ reset(); } if (!ImGui::IsItemActive()){ memcpy(nameBuffer, effectName.c_str(), effectName.size() ); } if( ImGui::Checkbox("Enabled", &bEnabled) ){ bEnabled?enable():disable(); } ImGui::SameLine(-150); ImGui::LabelText((isLoading()?"(Loading...)":"(Loaded)" ), ""); ImGui::SameLine(-50); ImGui::LabelText((bHasError?"(Has Error)":"(No Errors)" ), ""); ImGui::Spacing(); ImGui::Spacing(); ImGui::ColorEdit4("Effect Color", &mainColor[0]); ImGui::Spacing(); ImGui::Spacing(); if (ImGui::CollapsingHeader( GUIBoundShapesTitle, "GUIBoundShapesTitle", true, true)){ ImGui::PushID("shapeBindings"); ImGui::TextWrapped("Bound to %i shapes", getNumShapes()); ImGui::Spacing(); ImGui::SameLine(); if( ImGui::Button("Bind to shape group...") ){ ImGui::OpenPopup("batchBindShapes"); } if( ImGui::BeginPopup("batchBindShapes") ){ ImGui::SameLine(); //ImGui::Text( "" ); //ImGui::Separator(); if( ImGui::Selectable( "All shapes", false) ){ int newBoundShapes = 0; for(auto s=_scene.getShapesItConstBegin(); s!=_scene.getShapesItConstEnd(); ++s){ if( !isBoundWithShape(*s) ){ if( bindWithShape(*s) ) newBoundShapes++; } } // toggle selection ? 
if( newBoundShapes==0 ){ detachFromAllShapes(); } } else { auto groups = _scene.getAllShapesByGroup(); for (auto g=groups.cbegin(); g!=groups.cend(); ++g){ if( ImGui::Selectable( ofToString(g->first).c_str(), false) ){ for( auto s=g->second.cbegin(); s!=g->second.cend(); ++s ){ bindWithShape(*s); } } } } ImGui::EndPopup(); } ImGui::SameLine(); if (ImGui::Button("Unbind All")){ detachFromAllShapes(); } // list shapes ImGui::ListBoxHeader("shapeBindings"); if(_scene.getNumShapes()<1) ImGui::Selectable("<None Available>", false); else for (auto s=_scene.getShapesItConstBegin(); s!=_scene.getShapesItConstEnd(); ++s){ bool tmpSelected = (bool)(std::find(shapes.cbegin(), shapes.cend(), *s) !=shapes.cend() ); if( ImGui::Selectable( (*s)->getName().c_str(), &tmpSelected ) ){ if (tmpSelected) bindWithShape( *s ); else detachFromShape( *s ); }; } ImGui::ListBoxFooter(); ImGui::PopID(); // pop boundShapes ImGui::Spacing(); } ImGui::Spacing(); ImGui::Spacing(); printCustomEffectGui(); // spacing ImGui::Text(" "); ImGui::End(); return true; }
// Computes the overall sum by mapping map_function over the index range
// [0, N) in parallel via QtConcurrent, then folding the per-item results
// together with sum_function.
result_type sum_approach3(int const N) {
    const auto partial = QtConcurrent::blockingMapped<QVector<result_type>>(
        num_b_iterator{0}, num_b_iterator{N}, map_function);
    return std::accumulate(partial.cbegin(), partial.cend(), result_type{}, sum_function);
}
// Reports whether this operation's id is registered as one of the model's
// entry points.
bool DiagramOperation::isEntryPoint() {
    const auto entryPoints = model->getEntryPoints();
    const auto first = entryPoints.cbegin();
    const auto last = entryPoints.cend();
    return std::find(first, last, operationPointer->id) != last;
}
// Kicks off the update process: either resumes a previously interrupted
// download (when the temporary manifest matches the remote version) or
// computes the local/remote diff and schedules downloads for the changed
// assets. No-op unless the manager is in NEED_UPDATE state.
void AssetsManager::startUpdate()
{
    if (_updateState != State::NEED_UPDATE)
        return;
    _updateState = State::UPDATING;
    // Clean up before update
    _failedUnits.clear();
    _downloadUnits.clear();
    _compressedFiles.clear();
    _totalWaitToDownload = _totalToDownload = 0;
    _percent = _percentByFile = _sizeCollected = _totalSize = 0;
    _downloadedSize.clear();
    _totalEnabled = false;
    // Temporary manifest exists, resuming previous download
    if (_tempManifest->isLoaded() && _tempManifest->versionEquals(_remoteManifest))
    {
        // Re-queue only the assets the temp manifest recorded as unfinished.
        _tempManifest->genResumeAssetsList(&_downloadUnits);
        _totalWaitToDownload = _totalToDownload = (int)_downloadUnits.size();
        _downloader->batchDownloadAsync(_downloadUnits, BATCH_UPDATE_ID);
        std::string msg = StringUtils::format("Resuming from previous unfinished update, %d files remains to be finished.", _totalToDownload);
        dispatchUpdateEvent(EventAssetsManager::EventCode::UPDATE_PROGRESSION, "", msg);
    }
    // Check difference
    else
    {
        // Temporary manifest not exists,
        // it will be used to register the download states of each asset,
        // in this case, it equals remote manifest.
        // NOTE(review): after release(), _tempManifest aliases _remoteManifest —
        // both pointers refer to the same object from here on; confirm the
        // ownership/refcount semantics are intended.
        if(!_tempManifest->isLoaded())
        {
            _tempManifest->release();
            _tempManifest = _remoteManifest;
        }
        std::unordered_map<std::string, Manifest::AssetDiff> diff_map = _localManifest->genDiff(_remoteManifest);
        if (diff_map.size() == 0)
        {
            // Nothing changed: promote the temp manifest and report up-to-date.
            _updateState = State::UP_TO_DATE;
            // Rename temporary manifest to valid manifest
            _fileUtils->renameFile(_storagePath, TEMP_MANIFEST_FILENAME, MANIFEST_FILENAME);
            dispatchUpdateEvent(EventAssetsManager::EventCode::ALREADY_UP_TO_DATE);
        }
        else
        {
            // Generate download units for all assets that need to be updated or added
            std::string packageUrl = _remoteManifest->getPackageUrl();
            for (auto it = diff_map.begin(); it != diff_map.end(); ++it)
            {
                Manifest::AssetDiff diff = it->second;
                if (diff.type == Manifest::DiffType::DELETED)
                {
                    // Asset removed remotely: delete the local copy instead of downloading.
                    _fileUtils->removeFile(_storagePath + diff.asset.path);
                }
                else
                {
                    std::string path = diff.asset.path;
                    // Create path
                    _fileUtils->createDirectories(basename(_storagePath + path));
                    Downloader::DownloadUnit unit;
                    unit.customId = it->first;
                    unit.srcUrl = packageUrl + path;
                    unit.storagePath = _storagePath + path;
                    unit.resumeDownload = false;
                    _downloadUnits.emplace(unit.customId, unit);
                }
            }
            // Set other assets' downloadState to SUCCESSED
            // (assets present remotely but absent from the diff are already current).
            auto assets = _remoteManifest->getAssets();
            for (auto it = assets.cbegin(); it != assets.cend(); ++it)
            {
                const std::string &key = it->first;
                auto diffIt = diff_map.find(key);
                if (diffIt == diff_map.end())
                {
                    _tempManifest->setAssetDownloadState(key, Manifest::DownloadState::SUCCESSED);
                }
            }
            _totalWaitToDownload = _totalToDownload = (int)_downloadUnits.size();
            _downloader->batchDownloadAsync(_downloadUnits, BATCH_UPDATE_ID);
            std::string msg = StringUtils::format("Start to update %d files from remote package.", _totalToDownload);
            dispatchUpdateEvent(EventAssetsManager::EventCode::UPDATE_PROGRESSION, "", msg);
        }
    }
    _waitToUpdate = false;
}
/// Returns a const reverse iterator to the reverse-end position, i.e. the
/// position just before the first element of the container.
const_reverse_iterator crend() const
{
    return std::reverse_iterator<const_iterator>{cend()};
}
// Combines an initializer list of flag enumerators into a single value by
// OR-ing them together, starting from the zero flag value.
EnumType ilist_to_flags(std::initializer_list<EnumType> ilist)
{
    auto combined = static_cast<EnumType>(0);
    const auto join = std::bit_or<>{};
    for (const auto& flag : ilist)
    {
        combined = join(combined, flag);
    }
    return combined;
}
/// Returns an iterator to the end. const_iterator end() const noexcept { return cend(); }
/// Returns the way's node list, found among this object's subitems.
/// Delegates the search over [cbegin(), cend()) to the osmium
/// subitem_of_type helper; behavior when no WayNodeList subitem exists is
/// defined by that helper.
const WayNodeList& nodes() const {
    return osmium::detail::subitem_of_type<const WayNodeList>(cbegin(), cend());
}
/*
*************************************************************************
* Set up the initial guess and problem parameters                       *
* and solve the Stokes problem.  We explicitly initialize and           *
* deallocate the solver state in this example.                          *
*************************************************************************
*/
// Fills the pressure initial guess (zero, or interpolated from the p_initial
// table), zeroes the velocity, sets boundaries on every level, then runs the
// FAC solver and logs convergence statistics. Returns the solver's
// success/failure flag.
bool Stokes::FAC::solve()
{
  if (!d_hierarchy) {
    TBOX_ERROR(d_object_name << "Cannot solve using an uninitialized object.\n");
  }
  int ln;
  /*
   * Fill in the initial guess.
   */
  for (ln = 0; ln <= d_hierarchy->getFinestLevelNumber(); ++ln) {
    boost::shared_ptr<SAMRAI::hier::PatchLevel> level = d_hierarchy->getPatchLevel(ln);
    SAMRAI::hier::PatchLevel::Iterator ip(level->begin());
    SAMRAI::hier::PatchLevel::Iterator iend(level->end());
    for ( ; ip!=iend; ++ip) {
      boost::shared_ptr<SAMRAI::hier::Patch> patch = *ip;
      boost::shared_ptr<SAMRAI::pdat::CellData<double> > p =
        boost::dynamic_pointer_cast<SAMRAI::pdat::CellData<double> >
        (patch->getPatchData(p_id));
      boost::shared_ptr<SAMRAI::geom::CartesianPatchGeometry> geom =
        boost::dynamic_pointer_cast<SAMRAI::geom::CartesianPatchGeometry>
        (patch->getPatchGeometry());
      if(p_initial.empty())
        {
          p->fill(0.0);
        }
      else
        {
          // Interpolate the tabulated initial pressure (p_initial, laid out on a
          // regular p_initial_ijk grid spanning [p_initial_xyz_min, p_initial_xyz_max])
          // onto every cell (including ghosts) of this patch.
          const int dim=d_dim.getValue();
          const double *dx=geom->getDx();
          // NOTE(review): this outer xyz is shadowed by the per-cell xyz below
          // and appears unused.
          std::vector<double> xyz(dim);
          // dx_p: grid spacing of the p_initial table in each direction.
          std::vector<double> dx_p(dim);
          for(int d=0;d<dim;++d)
            dx_p[d]=(p_initial_xyz_max[d] - p_initial_xyz_min[d])/(p_initial_ijk[d]-1);
          // di: linear-index stride of the table for a +1 step in each direction.
          std::vector<int> di(dim);
          di[0]=1;
          for(int d=1;d<dim;++d)
            di[d]=di[d-1]*p_initial_ijk[d-1];
          SAMRAI::hier::Box pbox = p->getBox();
          SAMRAI::pdat::CellIterator cend(SAMRAI::pdat::CellGeometry::end(p->getGhostBox()));
          for(SAMRAI::pdat::CellIterator ci(SAMRAI::pdat::CellGeometry::begin(p->getGhostBox())); ci!=cend; ++ci)
            {
              const SAMRAI::pdat::CellIndex &c(*ci);
              // Cell-center coordinates of c.
              std::vector<double> xyz(dim);
              /* VLA's not allowed by clang */
              // Linear interpolation weights per direction (supports dim<=3).
              double weight[3][2];
              for(int d=0;d<dim;++d)
                xyz[d]=geom->getXLower()[d] + dx[d]*(c[d]-pbox.lower()[d] + 0.5);
              // ijk: flat table index of the lower interpolation corner;
              // ddi[d]: stride to the upper neighbor (0 at the table's top edge).
              int ijk(0);
              std::vector<int> ddi(dim);
              for(int d=0;d<dim;++d)
                {
                  int i=static_cast<int>(xyz[d]*(p_initial_ijk[d]-1)/(p_initial_xyz_max[d] - p_initial_xyz_min[d]));
                  // Clamp to the valid table range.
                  i=std::max(0,std::min(p_initial_ijk[d]-1,i));
                  ijk+=i*di[d];
                  if(i==p_initial_ijk[d]-1)
                    {
                      // At the upper edge: no upper neighbor, full weight on i.
                      weight[d][0]=1;
                      weight[d][1]=0;
                      ddi[d]=0;
                    }
                  else
                    {
                      weight[d][1]=(xyz[d]-(i*dx_p[d] + p_initial_xyz_min[d]))/dx_p[d];
                      weight[d][0]=1-weight[d][1];
                      ddi[d]=di[d];
                    }
                }
              if(dim==2)
                {
                  // Bilinear interpolation over the 4 surrounding table points.
                  (*p)(c)=p_initial[ijk]*weight[0][0]*weight[1][0]
                    + p_initial[ijk+ddi[0]]*weight[0][1]*weight[1][0]
                    + p_initial[ijk+ddi[1]]*weight[0][0]*weight[1][1]
                    + p_initial[ijk+ddi[0]+ddi[1]]*weight[0][1]*weight[1][1];
                }
              else
                {
                  // Trilinear interpolation over the 8 surrounding table points.
                  (*p)(c)=p_initial[ijk]*weight[0][0]*weight[1][0]*weight[2][0]
                    + p_initial[ijk+ddi[0]]*weight[0][1]*weight[1][0]*weight[2][0]
                    + p_initial[ijk+ddi[1]]*weight[0][0]*weight[1][1]*weight[2][0]
                    + p_initial[ijk+ddi[0]+ddi[1]]*weight[0][1]*weight[1][1]*weight[2][0]
                    + p_initial[ijk+ddi[2]]*weight[0][0]*weight[1][0]*weight[2][1]
                    + p_initial[ijk+ddi[0]+ddi[2]]*weight[0][1]*weight[1][0]*weight[2][1]
                    + p_initial[ijk+ddi[1]+ddi[2]]*weight[0][0]*weight[1][1]*weight[2][1]
                    + p_initial[ijk+ddi[0]+ddi[1]+ddi[2]]*weight[0][1]*weight[1][1]*weight[2][1];
                }
            }
        }
      // Velocity initial guess is always zero.
      boost::shared_ptr<SAMRAI::pdat::SideData<double> > v =
        boost::dynamic_pointer_cast<SAMRAI::pdat::SideData<double> >
        (patch->getPatchData(v_id));
      v->fill(0.0);
    }
    d_stokes_fac_solver.set_boundaries(p_id,v_id,level,false);
  }
  fix_viscosity();
  d_stokes_fac_solver.initializeSolverState
    (p_id,cell_viscosity_id,edge_viscosity_id,dp_id,p_rhs_id,v_id,v_rhs_id,
     d_hierarchy,0,d_hierarchy->getFinestLevelNumber());
  SAMRAI::tbox::plog << "solving..." << std::endl;
  int solver_ret;
  solver_ret = d_stokes_fac_solver.solveSystem(p_id,p_rhs_id,v_id,v_rhs_id);
  // Log convergence statistics before tearing the solver state down.
  double avg_factor, final_factor;
  d_stokes_fac_solver.getConvergenceFactors(avg_factor, final_factor);
  SAMRAI::tbox::plog << "\t" << (solver_ret ? "" : "NOT ") << "converged " << "\n"
                     << " iterations: " << d_stokes_fac_solver.getNumberOfIterations() << "\n"
                     << " residual: "<< d_stokes_fac_solver.getResidualNorm() << "\n"
                     << " average convergence: "<< avg_factor << "\n"
                     << " final convergence: "<< final_factor << "\n"
                     << std::flush;
  d_stokes_fac_solver.deallocateSolverState();
  return solver_ret;
}
// Const end(): forwards to cend(), returning a const_iterator to the
// past-the-end position.
FilePath::const_iterator FilePath::end() const {
    return cend();
}
/// Get the list of tags. const TagList& tags() const { return osmium::detail::subitem_of_type<const TagList>(cbegin(), cend()); }
// Builds the command-line prompt as a list of colored segments. When a
// custom prompt format is configured, color markers are extracted first,
// then environment variables are expanded and the $X format codes below are
// substituted per segment. (Fragment: the function continues past this view.)
std::list<CommandLine::segment> CommandLine::GetPrompt()
{
	FN_RETURN_TYPE(CommandLine::GetPrompt) Result;
	int NewPromptSize = DEFAULT_CMDLINE_WIDTH;

	const auto& PrefixColor = colors::PaletteColorToFarColor(COL_COMMANDLINEPREFIX);

	if (Global->Opt->CmdLine.UsePromptFormat)
	{
		const string_view Format = Global->Opt->CmdLine.strPromptFormat.Get();
		auto Tail = Format.cbegin();
		auto Color = PrefixColor;
		// Split the format string into segments at embedded color markers.
		FOR_CONST_RANGE(Format, Iterator)
		{
			bool Stop;
			auto NewColor = PrefixColor;
			const auto NextIterator = colors::ExtractColorInNewFormat(Iterator, Format.cend(), NewColor, Stop);
			if (NextIterator == Iterator)
			{
				if (Stop)
					break;
				continue;
			}

			if (Iterator != Format.cbegin())
			{
				Result.emplace_back(segment{ string(Tail, Iterator), Color });
			}
			Iterator = NextIterator;
			Tail = Iterator;
			Color = NewColor;
		}
		Result.emplace_back(segment{ string(Tail, Format.cend()), Color });

		// Expand environment variables and $-codes inside each segment.
		for (auto Iterator = Result.begin(); Iterator != Result.end(); ++Iterator)
		{
			const auto strExpandedDestStr = os::env::expand(Iterator->Text);
			Iterator->Text.clear();
			// Simple one-character substitutions; the more complex codes are
			// handled in the switch below.
			static const std::pair<wchar_t, wchar_t> ChrFmt[] =
			{
				{L'A', L'&'},   // $A - & (Ampersand)
				{L'B', L'|'},   // $B - | (pipe)
				{L'C', L'('},   // $C - ( (Left parenthesis)
				{L'F', L')'},   // $F - ) (Right parenthesis)
				{L'G', L'>'},   // $G - > (greater-than sign)
				{L'L', L'<'},   // $L - < (less-than sign)
				{L'Q', L'='},   // $Q - = (equal sign)
				{L'S', L' '},   // $S - (space)
				{L'$', L'$'},   // $$ - $ (dollar sign)
			};

			FOR_CONST_RANGE(strExpandedDestStr, it)
			{
				auto& strDestStr = Iterator->Text;

				if (*it == L'$' && it + 1 != strExpandedDestStr.cend())
				{
					const auto Chr = upper(*++it);

					const auto ItemIterator = std::find_if(CONST_RANGE(ChrFmt, Item)
					{
						return Item.first == Chr;
					});

					if (ItemIterator != std::cend(ChrFmt))
					{
						strDestStr += ItemIterator->second;
					}
					else
					{
						// Appends Str either as this segment's text (marking it
						// collapsible) or as a new collapsible segment, then opens
						// a fresh non-collapsible segment for any following text.
						const auto& AddCollapsible = [&](string&& Str)
						{
							if (strDestStr.empty())
							{
								strDestStr = std::move(Str);
								Iterator->Collapsible = true;
							}
							else
							{
								Iterator = Result.insert(std::next(Iterator), segment{ std::move(Str), Iterator->Colour, true });
							}
							// No need to introduce a new segment if we're at the very end
							if (std::next(Iterator) != Result.end() && std::next(it) != strExpandedDestStr.cend())
							{
								Iterator = Result.insert(std::next(Iterator), segment{ {}, Iterator->Colour, false });
							}
						};

						switch (Chr)
						{
							/* these are not implemented:
							$E - Escape code (ASCII code 27)
							$V - Windows version number
							$_ - Carriage return and linefeed
							*/
							case L'M': // $M - Full name of the remote drive associated with the current drive letter, or an empty string if the current drive is not a network drive.
							{
								string strTemp;
								if (DriveLocalToRemoteName(DRIVE_UNKNOWN, m_CurDir[0], strTemp))
								{
									AddCollapsible(std::move(strTemp));
								}
								break;
							}
							case L'+': // $+ - As many plus signs (+) as the current depth of the PUSHD directory stack, one per saved path.
							{
								strDestStr.append(ppstack.size(), L'+');
								break;
							}
							case L'H': // $H - Backspace (erases previous character)
							{
								if (!strDestStr.empty())
								{
									strDestStr.pop_back();
								}
								else
								{
									// Current segment is empty: erase from the nearest
									// preceding non-empty segment instead.
									auto Prev = Iterator;
									while (Prev != Result.begin())
									{
										--Prev;
										if (!Prev->Text.empty())
										{
											Prev->Text.pop_back();
											break;
										}
									}
								}
								break;
							}
							case L'@': // $@xx - Admin
							{
								// The two characters after $@ are used as brackets around
								// the "admin" marker, shown only when running elevated.
								if (it + 1 != strExpandedDestStr.cend())
								{
									const auto lb = *++it;
									if (it + 1 != strExpandedDestStr.cend())
									{
										const auto rb = *++it;
										if (os::security::is_admin())
										{
											append(strDestStr, lb, msg(lng::MConfigCmdlinePromptFormatAdmin), rb);
										}
									}
								}
								break;
							}
							case L'D': // $D - Current date
							case L'T': // $T - Current time
							{
								strDestStr += MkStrFTime(Chr == L'D'? L"%D" : L"%T");
								break;
							}
							case L'N': // $N - Current drive
							{
								const auto Type = ParsePath(m_CurDir);
								if(Type == root_type::drive_letter)
									strDestStr += upper(m_CurDir[0]);
								else if(Type == root_type::unc_drive_letter)
									strDestStr += upper(m_CurDir[4]);
								else
									strDestStr += L'?';
								break;
							}
							case L'W': // $W - Current working directory (without the path)
							{
								const auto pos = FindLastSlash(m_CurDir);
								if (pos != string::npos)
								{
									AddCollapsible(m_CurDir.substr(pos + 1));
								}
								break;
							}
							case L'P': // $P - Current drive and path
							{
								AddCollapsible(string{ m_CurDir });
								break;
							}
							case L'#': //$#nn - max prompt width in %
							{
								if (it + 1 != strExpandedDestStr.end())
								{
									size_t pos;
									if (from_string(string(it + 1, strExpandedDestStr.cend()), NewPromptSize, &pos))
										it += pos;
									// else
									// bad format, NewPromptSize unchanged
									// TODO: diagnostics
								}
							}
						}

						if (it == strExpandedDestStr.cend())
						{
							break;
						}
					}
				}
				else
				{
// Builds a resolved material scaffold for each configuration of the given
// model: resolves settings from model:configuration, material:*, and
// material:configuration (in that order), collecting file dependencies along
// the way. (Fragment: the function continues past this view.)
static ::Assets::CompilerHelper::CompileResult CompileMaterialScaffold(
    const ::Assets::ResChar sourceMaterial[], const ::Assets::ResChar sourceModel[],
    const ::Assets::ResChar destination[])
{
    // Parameters must be stripped off the source model filename before we get here.
    // the parameters are irrelevant to the compiler -- so if they stay on the request
    // name, will we end up with multiple assets that are equivalent
    assert(MakeFileNameSplitter(sourceModel).ParametersWithDivider().Empty());

    // note -- we can throw pending & invalid from here...
    auto& modelMat = ::Assets::GetAssetComp<RawMatConfigurations>(sourceModel);

    std::vector<::Assets::DependentFileState> deps;

    // for each configuration, we want to build a resolved material
    // Note that this is a bit crazy, because we're going to be loading
    // and re-parsing the same files over and over again!
    SerializableVector<std::pair<MaterialGuid, ResolvedMaterial>> resolved;
    SerializableVector<std::pair<MaterialGuid, std::string>> resolvedNames;
    resolved.reserve(modelMat._configurations.size());

    auto searchRules = ::Assets::DefaultDirectorySearchRules(sourceModel);
    ::Assets::ResChar resolvedSourceMaterial[MaxPath];
    ResolveMaterialFilename(resolvedSourceMaterial, dimof(resolvedSourceMaterial), searchRules, sourceMaterial);
    searchRules.AddSearchDirectoryFromFilename(resolvedSourceMaterial);
    AddDep(deps, sourceModel);        // we need a dependency (even if it's a missing file)

    using Meld = StringMeld<MaxPath, ::Assets::ResChar>;
    for (auto i=modelMat._configurations.cbegin(); i!=modelMat._configurations.cend(); ++i) {

        ResolvedMaterial resMat;
        std::basic_stringstream<::Assets::ResChar> resName;
        auto guid = MakeMaterialGuid(AsPointer(i->cbegin()), AsPointer(i->cend()));

        // Our resolved material comes from 3 separate inputs:
        //  1) model:configuration
        //  2) material:*
        //  3) material:configuration
        //
        // Some material information is actually stored in the model
        // source data. This is just for art-pipeline convenience --
        // generally texture assignments (and other settings) are
        // set in the model authoring tool (eg, 3DS Max). The .material
        // files actually only provide overrides for settings that can't
        // be set within 3rd party tools.
        //
        // We don't combine the model and material information until
        // this step -- this gives us some flexibility to use the same
        // model with different material files. The material files can
        // also override settings from 3DS Max (eg, change texture assignments
        // etc). This provides a path for reusing the same model with
        // different material settings (eg, when we want one thing to have
        // a red version and a blue version)

        TRY {
            // resolve in model:configuration
            // InvalidAsset is deliberately swallowed: each source is optional.
            auto configName = Conversion::Convert<::Assets::rstring>(*i);
            Meld meld; meld << sourceModel << ":" << configName;
            resName << meld;
            auto& rawMat = RawMaterial::GetAsset(meld);
            rawMat._asset.Resolve(resMat, searchRules, &deps);
        } CATCH (const ::Assets::Exceptions::InvalidAsset&) {
        } CATCH_END

        if (resolvedSourceMaterial[0] != '\0') {
            AddDep(deps, resolvedSourceMaterial);        // we need a dependency (even if it's a missing file)

            TRY {
                // resolve in material:*
                Meld meld; meld << resolvedSourceMaterial << ":*";
                resName << ";" << meld;
                auto& rawMat = RawMaterial::GetAsset(meld);
                rawMat._asset.Resolve(resMat, searchRules, &deps);
            } CATCH (const ::Assets::Exceptions::InvalidAsset&) {
            } CATCH_END

            TRY {
                // resolve in material:configuration
                Meld meld; meld << resolvedSourceMaterial << ":" << Conversion::Convert<::Assets::rstring>(*i);
                resName << ";" << meld;
                auto& rawMat = RawMaterial::GetAsset(meld);
                rawMat._asset.Resolve(resMat, searchRules, &deps);
            } CATCH (const ::Assets::Exceptions::InvalidAsset&) {
            } CATCH_END
        }

        resolved.push_back(std::make_pair(guid, std::move(resMat)));
        resolvedNames.push_back(std::make_pair(guid, resName.str()));
    }
// Serializes this value to its binary wire representation, dispatched on the
// stored type tag. Numeric types are emitted as raw little-endian bytes of
// the in-memory representation (assumes a little-endian host — TODO confirm).
std::string BSON::Value::toBSON() const {
	std::string result;
	BSON::Value docSize;
	switch(_type){
		case UNDEFINED:
			// No payload.
			result = "";
			break;
		case INT32:
			result = std::string{(char*) &_int32Value,4};
			break;
		case INT64:
			result = std::string{(char*) &_int64Value,8};
			break;
		case DOUBLE:
			result = std::string{(char*) &_doubleValue,8};
			break;
		case BOOL:
			result.push_back(_boolValue ? '\x01' : '\x00');
			break;
		case STRING:
			// Length prefix counts the trailing NUL.
			docSize = (int32)_stringValue.size()+1;
			result = docSize.toBSON().append(_stringValue);
			result.push_back('\x00');
			break;
		case BINARY:
			// Length prefix, then a single subtype byte (0x00), then the data.
			docSize = (int32)_stringValue.size();
			result = docSize.toBSON();
			result.push_back('\x00');
			result.append(_stringValue);
			break;
		case DATETIME:
			result = std::string{(char*) &_datetimeValue,8};
			break;
		case ARRAY:
			// Elements are encoded as type-prefix + decimal index + NUL + payload.
			result = "";
			for(size_t i=0;i<size();i++){
				const BSON::Value & val = _arrayValue[i];
				result.append(val.getTypePrefix()) .append(std::to_string(i)) .push_back('\x00');
				result.append(val.toBSON());
			}
			// NOTE(review): ARRAY uses size() while OBJECT uses size()+1 for the
			// length prefix, although both append a trailing NUL — confirm this
			// asymmetry is intentional. (Standard BSON would also include the 4
			// length bytes themselves in the document length.)
			docSize = (int32)result.size();
			result = docSize.toBSON().append(result);
			result.push_back('\x00');
			break;
		case OBJECT:
			// Members are encoded as type-prefix + name + NUL + payload.
			result = "";
			for(auto it = cbegin(); it!=cend(); ++it){
				const std::string name = it->first;
				const BSON::Value & val = it->second;
				result.append(val.getTypePrefix());
				result.append(name) .push_back('\x00');
				result.append(val.toBSON());
			}
			// Length prefix counts the trailing NUL.
			docSize = (int32)result.size()+1;
			result = docSize.toBSON().append(result);
			result.push_back('\x00');
			break;
	}
	return result;
}
int main(int argc, char* argv[]) { std::string output_filename; bool verbose = false; static struct option long_options[] = { {"help", no_argument, nullptr, 'h'}, {"output", required_argument, nullptr, 'o'}, {"verbose", no_argument, nullptr, 'v'}, {"version", no_argument, nullptr, 'V'}, {nullptr, 0, nullptr, 0} }; while (true) { const int c = getopt_long(argc, argv, "ho:vV", long_options, nullptr); if (c == -1) { break; } switch (c) { case 'h': print_help(); std::exit(return_code_ok); case 'o': output_filename = optarg; break; case 'v': verbose = true; break; case 'V': std::cout << "osmcoastline_filter " << get_osmcoastline_long_version() << " / " << get_libosmium_version() << '\n' << "Copyright (C) 2012-2019 Jochen Topf <*****@*****.**>\n" << "License: GNU GENERAL PUBLIC LICENSE Version 3 <https://gnu.org/licenses/gpl.html>.\n" << "This is free software: you are free to change and redistribute it.\n" << "There is NO WARRANTY, to the extent permitted by law.\n"; std::exit(return_code_ok); default: std::exit(return_code_fatal); } } if (output_filename.empty()) { std::cerr << "Missing -o/--output=OSMFILE option\n"; std::exit(return_code_cmdline); } if (optind != argc - 1) { std::cerr << "Usage: osmcoastline_filter [OPTIONS] OSMFILE\n"; std::exit(return_code_cmdline); } try { // The vout object is an output stream we can write to instead of // std::cerr. Nothing is written if we are not in verbose mode. // The running time will be prepended to output lines. 
osmium::util::VerboseOutput vout{verbose}; osmium::io::Header header; header.set("generator", std::string{"osmcoastline_filter/"} + get_osmcoastline_version()); header.add_box(osmium::Box{-180.0, -90.0, 180.0, 90.0}); osmium::io::File infile{argv[optind]}; vout << "Started osmcoastline_filter " << get_osmcoastline_long_version() << " / " << get_libosmium_version() << '\n'; osmium::io::Writer writer{output_filename, header}; auto output_it = osmium::io::make_output_iterator(writer); osmium::index::IdSetSmall<osmium::object_id_type> ids; vout << "Reading ways (1st pass through input file)...\n"; { osmium::io::Reader reader{infile, osmium::osm_entity_bits::way}; const auto ways = osmium::io::make_input_iterator_range<const osmium::Way>(reader); for (const osmium::Way& way : ways) { if (way.tags().has_tag("natural", "coastline")) { *output_it++ = way; for (const auto& nr : way.nodes()) { ids.set(nr.ref()); } } } reader.close(); } vout << "Preparing node ID list...\n"; ids.sort_unique(); vout << "Reading nodes (2nd pass through input file)...\n"; { osmium::io::Reader reader{infile, osmium::osm_entity_bits::node}; const auto nodes = osmium::io::make_input_iterator_range<const osmium::Node>(reader); auto first = ids.cbegin(); const auto last = ids.cend(); std::copy_if(nodes.cbegin(), nodes.cend(), output_it, [&first, &last](const osmium::Node& node){ while (*first < node.id() && first != last) { ++first; } if (node.id() == *first) { if (first != last) { ++first; } return true; } return node.tags().has_tag("natural", "coastline"); }); reader.close(); } writer.close(); vout << "All done.\n"; osmium::MemoryUsage mem; if (mem.current() > 0) { vout << "Memory used: current: " << mem.current() << " MBytes\n" << " peak: " << mem.peak() << " MBytes\n"; } } catch (const std::exception& e) { std::cerr << e.what() << '\n'; std::exit(return_code_fatal); } }
// LZ4-encodes a string segment: concatenates all non-null values into one
// char buffer with per-row offsets, optionally trains a zstd dictionary when
// the data spans multiple blocks, then compresses block-wise.
std::shared_ptr<BaseEncodedSegment> _on_encode(const AnySegmentIterable<pmr_string> segment_iterable,
                                               const PolymorphicAllocator<pmr_string>& allocator) {
  /**
   * First iterate over the values for two reasons.
   * 1) If all the strings are empty LZ4 will try to compress an empty vector which will cause a segmentation fault.
   *    In this case we can and need to do an early exit.
   * 2) Sum the length of the strings to improve the performance when copying the data to the char vector.
   */
  auto num_chars = size_t{0u};
  segment_iterable.with_iterators([&](auto it, auto end) {
    for (; it != end; ++it) {
      if (!it->is_null()) {
        num_chars += it->value().size();
      }
    }
  });

  // copy values and null flags from value segment
  auto values = pmr_vector<char>{allocator};
  values.reserve(num_chars);
  auto null_values = pmr_vector<bool>{allocator};

  /**
   * If the null value vector only contains the value false, then the value segment does not have any row value that
   * is null. In that case, we don't store the null value vector to reduce the LZ4 segment's memory footprint.
   */
  auto segment_contains_null = false;

  /**
   * These offsets mark the beginning of strings (and therefore end of the previous string) in the data vector.
   * These offsets are character offsets. The string at position 0 starts at the offset stored at position 0, which
   * will always be 0.
   * Its exclusive end is the offset stored at position 1 (i.e., offsets[1] - 1 is the last character of the string
   * at position 0).
   * In case of the last string its end is determined by the end of the data vector.
   *
   * The offsets are stored as 32 bit unsigned integer as opposed to 64 bit (size_t) so that they can later be
   * compressed via vector compression.
   */
  auto offsets = pmr_vector<uint32_t>{allocator};

  /**
   * These are the lengths of each string. They are needed to train the zstd dictionary.
   */
  auto string_samples_lengths = pmr_vector<size_t>{allocator};

  // Second pass: fill values, null flags, offsets, and sample lengths.
  segment_iterable.with_iterators([&](auto it, auto end) {
    const auto segment_size = std::distance(it, end);
    null_values.resize(segment_size);
    offsets.resize(segment_size);
    string_samples_lengths.resize(segment_size);

    auto offset = uint32_t{0u};
    // iterate over the iterator to access the values and increment the row index to write to the values and null
    // values vectors
    auto row_index = size_t{0};
    for (; it != end; ++it) {
      const auto segment_element = *it;
      const auto contains_null = segment_element.is_null();
      null_values[row_index] = contains_null;
      segment_contains_null = segment_contains_null || contains_null;
      offsets[row_index] = offset;
      auto sample_size = size_t{0u};
      if (!contains_null) {
        const auto value = segment_element.value();
        const auto string_length = value.size();
        values.insert(values.cend(), value.begin(), value.end());
        Assert(string_length <= std::numeric_limits<uint32_t>::max(),
               "The size of string row value exceeds the maximum of uint32 in LZ4 encoding.");
        offset += static_cast<uint32_t>(string_length);
        sample_size = string_length;
      }
      string_samples_lengths[row_index] = sample_size;
      ++row_index;
    }
  });

  auto optional_null_values = segment_contains_null ? std::optional<pmr_vector<bool>>{null_values} : std::nullopt;

  /**
   * If the input only contained null values and/or empty strings we don't need to compress anything (and LZ4 will
   * cause an error). We can also throw away the offsets, since they won't be used for decompression.
   * We can do an early exit and return the (not encoded) segment.
   */
  if (num_chars == 0) {
    auto empty_blocks = pmr_vector<pmr_vector<char>>{allocator};
    auto empty_dictionary = pmr_vector<char>{};
    return std::allocate_shared<LZ4Segment<pmr_string>>(allocator, std::move(empty_blocks),
                                                        std::move(optional_null_values), std::move(empty_dictionary),
                                                        nullptr, _block_size, 0u, 0u, null_values.size());
  }

  // Compress the offsets with a vector compression method to reduce the memory footprint of the LZ4 segment.
  auto compressed_offsets = compress_vector(offsets, vector_compression_type(), allocator, {offsets.back()});

  /**
   * Pre-compute a zstd dictionary if the input data is split among multiple blocks. This dictionary allows
   * independent compression of the blocks, while maintaining a good compression ratio.
   * If the input data fits into a single block, training of a dictionary is skipped.
   */
  const auto input_size = values.size();
  auto dictionary = pmr_vector<char>{allocator};
  if (input_size > _block_size) {
    dictionary = _train_dictionary(values, string_samples_lengths);
  }

  /**
   * Compress the data and calculate the last block size (which may vary from the block size of the previous blocks)
   * and the total compressed size. The size of the last block is needed for decompression. The total compressed size
   * is pre-calculated instead of iterating over all blocks when the memory consumption of the LZ4 segment is
   * estimated.
   */
  auto lz4_blocks = pmr_vector<pmr_vector<char>>{allocator};
  _compress(values, lz4_blocks, dictionary);

  auto last_block_size = input_size % _block_size != 0 ? input_size % _block_size : _block_size;

  auto total_compressed_size = size_t{0u};
  for (const auto& compressed_block : lz4_blocks) {
    total_compressed_size += compressed_block.size();
  }

  return std::allocate_shared<LZ4Segment<pmr_string>>(
      allocator, std::move(lz4_blocks), std::move(optional_null_values), std::move(dictionary),
      std::move(compressed_offsets), _block_size, last_block_size, total_compressed_size, null_values.size());
}
// Loads multiple files (possibly several "rows" of files, where a row with
// more than one file is summed into a single workspace), registers the
// resulting workspaces in the AnalysisDataService, and sets the output
// property (a single workspace or a group of all loaded workspaces).
void Load::loadMultipleFiles() {
  // allFilenames contains "rows" of filenames. If the row has more than 1 file
  // in it
  // then that row is to be summed across each file in the row
  const std::vector<std::vector<std::string>> allFilenames = getProperty("Filename");
  std::string outputWsName = getProperty("OutputWorkspace");

  std::vector<std::string> wsNames(allFilenames.size());
  std::transform(allFilenames.begin(), allFilenames.end(), wsNames.begin(),
                 generateWsNameFromFileNames);

  auto wsName = wsNames.cbegin();
  assert(allFilenames.size() == wsNames.size());

  std::vector<API::Workspace_sptr> loadedWsList;
  loadedWsList.reserve(allFilenames.size());

  Workspace_sptr tempWs;

  // Cycle through the filenames and wsNames.
  for (auto filenames = allFilenames.cbegin(); filenames != allFilenames.cend();
       ++filenames, ++wsName) {
    auto filename = filenames->cbegin();
    Workspace_sptr sumWS = loadFileToWs(*filename, *wsName);

    // Sum any remaining files of the row into the first one.
    ++filename;
    for (; filename != filenames->cend(); ++filename) {
      tempWs = loadFileToWs(*filename, "__@loadsum_temp@");
      sumWS = plusWs(sumWS, tempWs);
    }

    API::WorkspaceGroup_sptr group = boost::dynamic_pointer_cast<WorkspaceGroup>(sumWS);
    if (group) {
      // Register each group member under "<groupName>_<index>".
      std::vector<std::string> childWsNames = group->getNames();
      auto childWsName = childWsNames.begin();
      size_t count = 1;
      for (; childWsName != childWsNames.end(); ++childWsName, ++count) {
        Workspace_sptr childWs = group->getItem(*childWsName);
        const std::string childName = group->getName() + "_" + std::to_string(count);
        API::AnalysisDataService::Instance().addOrReplace(childName, childWs);
        // childWs->setName(group->getName() + "_" +
        // boost::lexical_cast<std::string>(count));
      }
    }
    // Add the sum to the list of loaded workspace names.
    loadedWsList.push_back(sumWS);
  }

  // If we only have one loaded ws, set it as the output.
  if (loadedWsList.size() == 1) {
    setProperty("OutputWorkspace", loadedWsList[0]);
    AnalysisDataService::Instance().rename(loadedWsList[0]->getName(), outputWsName);
  }
  // Else we have multiple loaded workspaces - group them and set the group as
  // output.
  else {
    API::WorkspaceGroup_sptr group = groupWsList(loadedWsList);
    setProperty("OutputWorkspace", group);

    // Re-register any child that clashes with the output name under a
    // "_<count>" suffixed name.
    std::vector<std::string> childWsNames = group->getNames();
    size_t count = 1;
    for (auto &childWsName : childWsNames) {
      if (childWsName == outputWsName) {
        Mantid::API::Workspace_sptr child = group->getItem(childWsName);
        // child->setName(child->getName() + "_" +
        // boost::lexical_cast<std::string>(count));
        const std::string childName = child->getName() + "_" + std::to_string(count);
        API::AnalysisDataService::Instance().addOrReplace(childName, child);
        count++;
      }
    }

    // Declare one numbered output property per group member.
    childWsNames = group->getNames();
    count = 1;
    for (auto &childWsName : childWsNames) {
      Workspace_sptr childWs = group->getItem(childWsName);
      std::string outWsPropName = "OutputWorkspace_" + std::to_string(count);
      ++count;
      declareProperty(Kernel::make_unique<WorkspaceProperty<Workspace>>(
          outWsPropName, childWsName, Direction::Output));
      setProperty(outWsPropName, childWs);
    }
  }

  // Clean up.
  // Delete the temporary summation workspace if one was created.
  if (tempWs) {
    Algorithm_sptr alg = AlgorithmManager::Instance().createUnmanaged("DeleteWorkspace");
    alg->initialize();
    alg->setChild(true);
    alg->setProperty("Workspace", tempWs);
    alg->execute();
  }
}
/// Computes the git working-copy status of a path relative to this index list.
///
/// For a plain file the work is delegated to GetFileStatus(). For a directory
/// the index entries under the path are scanned and an aggregate status is
/// derived (normal / modified / unversioned). A path that no longer exists on
/// disk is reported as deleted. The optional callback is invoked with the
/// resolved status; assumeValid/skipWorktree are both read and written.
/// Always returns 0 (status is reported through the out-parameters).
int CGitIndexList::GetStatus(const CString &gitdir, const CString &pathParam, git_wc_status_kind *status,
                             BOOL IsFull, BOOL /*IsRecursive*/,
                             FILL_STATUS_CALLBACK callback, void *pData,
                             CGitHash *pHash, bool * assumeValid, bool * skipWorktree)
{
    __int64 time, filesize = 0;
    bool isDir = false;
    // Work on a local copy: a directory path may get a trailing backslash appended.
    CString path = pathParam;
    if (status)
    {
        git_wc_status_kind dirstatus = git_wc_status_none;
        int result;
        // An empty path means the repository root itself.
        if (path.IsEmpty())
            result = g_Git.GetFileModifyTime(gitdir, &time, &isDir);
        else
            result = g_Git.GetFileModifyTime(CombinePath(gitdir, path), &time, &isDir, &filesize);
        if (result)
        {
            // Could not stat the path: treat it as deleted and notify the caller.
            *status = git_wc_status_deleted;
            if (callback && assumeValid && skipWorktree)
                callback(CombinePath(gitdir, path), git_wc_status_deleted, false, pData, *assumeValid, *skipWorktree);
            return 0;
        }
        if (isDir)
        {
            // Normalize to "dir\" so prefix matching against index entries works.
            if (!path.IsEmpty())
            {
                if (path.Right(1) != _T("\\"))
                    path += _T("\\");
            }
            int len = path.GetLength();
            // Scan all index entries that live under this directory prefix.
            for (auto it = cbegin(), itend = cend(); it != itend; ++it)
            {
                if ((*it).m_FileName.GetLength() > len)
                {
                    if ((*it).m_FileName.Left(len) == path)
                    {
                        if (!IsFull)
                        {
                            // Fast mode: any tracked entry under the prefix
                            // makes the directory "normal"; stop at the first hit.
                            *status = git_wc_status_normal;
                            if (callback)
                                callback(CombinePath(gitdir, path), *status, false, pData,
                                         ((*it).m_Flags & GIT_IDXENTRY_VALID) && !((*it).m_Flags & GIT_IDXENTRY_SKIP_WORKTREE),
                                         ((*it).m_Flags & GIT_IDXENTRY_SKIP_WORKTREE) != 0);
                            return 0;
                        }
                        else
                        {
                            // Full mode: stat and classify each child entry.
                            result = g_Git.GetFileModifyTime(CombinePath(gitdir, (*it).m_FileName), &time, nullptr, &filesize);
                            if (result)
                                continue;
                            *status = git_wc_status_none;
                            if (assumeValid)
                                *assumeValid = false;
                            if (skipWorktree)
                                *skipWorktree = false;
                            GetFileStatus(gitdir, (*it).m_FileName, status, time, filesize, callback, pData, NULL, assumeValid, skipWorktree);
                            // if a file is assumed valid, we need to inform the caller, otherwise the assumevalid flag might not get to the explorer on first open of a repository
                            if (callback && assumeValid && skipWorktree && (*assumeValid || *skipWorktree))
                                callback(CombinePath(gitdir, path), *status, false, pData, *assumeValid, *skipWorktree);
                            // Fold the child's status into the directory aggregate:
                            // any versioned child => at least normal; any non-normal
                            // child => modified.
                            if (*status != git_wc_status_none)
                            {
                                if (dirstatus == git_wc_status_none)
                                {
                                    dirstatus = git_wc_status_normal;
                                }
                                if (*status != git_wc_status_normal)
                                {
                                    dirstatus = git_wc_status_modified;
                                }
                            }
                        }
                    }
                }
            } /* End For */
            // No tracked children found => the directory is unversioned.
            if (dirstatus != git_wc_status_none)
            {
                *status = dirstatus;
            }
            else
            {
                *status = git_wc_status_unversioned;
            }
            if (callback)
                callback(CombinePath(gitdir, path), *status, false, pData, false, false);
            return 0;
        }
        else
        {
            // Plain file: delegate the real status computation.
            GetFileStatus(gitdir, path, status, time, filesize, callback, pData, pHash, assumeValid, skipWorktree);
        }
    }
    return 0;
}
auto end() const { return cend(); }
const_iterator end() const { return cend(); }
/// Returns a reverse_iterator to the beginning. const_reverse_iterator crbegin() const noexcept { return const_reverse_iterator(cend()); }
/** * @brief Class::containsMethods * @param section * @return */ bool Class::containsMethods(Section section) const { return cend(m_Methods) != range::find_if(m_Methods, [&](auto &&method){ return method->section() == section; }); }
void Stokes::FACOps::smooth_Tackley_2D (SAMRAI::solv::SAMRAIVectorReal<double>& solution, const SAMRAI::solv::SAMRAIVectorReal<double>& residual, int ln, int num_sweeps, double residual_tolerance) { const int p_id(solution.getComponentDescriptorIndex(0)), p_rhs_id(residual.getComponentDescriptorIndex(0)), v_id(solution.getComponentDescriptorIndex(1)), v_rhs_id(residual.getComponentDescriptorIndex(1)); #ifdef DEBUG_CHECK_ASSERTIONS if (solution.getPatchHierarchy() != d_hierarchy || residual.getPatchHierarchy() != d_hierarchy) { TBOX_ERROR(d_object_name << ": Vector hierarchy does not match\n" "internal hierarchy."); } #endif boost::shared_ptr<SAMRAI::hier::PatchLevel> level = d_hierarchy->getPatchLevel(ln); /* Only need to sync the rhs once. This sync is needed because calculating a new pressure update requires computing in the ghost region so that the update for the velocity inside the box will be correct. */ p_refine_patch_strategy.setTargetDataId(p_id); v_refine_patch_strategy.setTargetDataId(v_id); set_boundaries(p_id,v_id,level,true); xeqScheduleGhostFillNoCoarse(p_rhs_id,v_rhs_id,ln); if (ln > d_ln_min) { /* * Perform a one-time transfer of data from coarser level, * to fill ghost boundaries that will not change through * the smoothing loop. */ xeqScheduleGhostFill(p_id, v_id, ln); } double theta_momentum=0.7; double theta_continuity=1.0; /* * Smooth the number of sweeps specified or until * the convergence is satisfactory. */ double maxres; /* * Instead of checking residual convergence globally, we check the * converged flag. This avoids possible round-off errors affecting * different processes differently, leading to disagreement on * whether to continue smoothing. 
*/ const SAMRAI::hier::Index ip(1,0), jp(0,1); bool converged = false; for (int sweep=0; sweep < num_sweeps*(1<<(d_ln_max-ln)) && !converged; ++sweep) { maxres=0; /* vx sweep */ xeqScheduleGhostFillNoCoarse(p_id,invalid_id,ln); for(int rb=0;rb<2;++rb) { xeqScheduleGhostFillNoCoarse(invalid_id,v_id,ln); for (SAMRAI::hier::PatchLevel::Iterator pi(level->begin()); pi!=level->end(); ++pi) { boost::shared_ptr<SAMRAI::hier::Patch> patch = *pi; boost::shared_ptr<SAMRAI::pdat::CellData<double> > p_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::CellData<double> > (patch->getPatchData(p_id)); SAMRAI::pdat::CellData<double> &p(*p_ptr); boost::shared_ptr<SAMRAI::pdat::SideData<double> > v_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::SideData<double> > (patch->getPatchData(v_id)); SAMRAI::pdat::SideData<double> &v(*v_ptr); boost::shared_ptr<SAMRAI::pdat::SideData<double> > v_rhs_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::SideData<double> > (patch->getPatchData(v_rhs_id)); SAMRAI::pdat::SideData<double> &v_rhs(*v_rhs_ptr); boost::shared_ptr<SAMRAI::pdat::CellData<double> > cell_visc_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::CellData<double> > (patch->getPatchData(cell_viscosity_id)); SAMRAI::pdat::CellData<double> &cell_viscosity(*cell_visc_ptr); boost::shared_ptr<SAMRAI::pdat::NodeData<double> > edge_visc_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::NodeData<double> > (patch->getPatchData(edge_viscosity_id)); SAMRAI::pdat::NodeData<double> &edge_viscosity(*edge_visc_ptr); SAMRAI::hier::Box pbox=patch->getBox(); boost::shared_ptr<SAMRAI::geom::CartesianPatchGeometry> geom = boost::dynamic_pointer_cast<SAMRAI::geom::CartesianPatchGeometry> (patch->getPatchGeometry()); double dx = geom->getDx()[0]; double dy = geom->getDx()[1]; for(int j=pbox.lower(1); j<=pbox.upper(1); ++j) { /* Do the red-black skip */ int i_min=pbox.lower(0) + (abs(pbox.lower(0) + j + rb))%2; for(int i=i_min; i<=pbox.upper(0)+1; i+=2) { SAMRAI::pdat::CellIndex 
center(SAMRAI::tbox::Dimension(2)); center[0]=i; center[1]=j; /* Update v */ smooth_V_2D(0,pbox,geom,center,ip,jp, p,v,v_rhs,maxres,dx,dy,cell_viscosity, edge_viscosity,theta_momentum); } } } set_boundaries(invalid_id,v_id,level,true); } /* vy sweep */ for(int rb=0;rb<2;++rb) { xeqScheduleGhostFillNoCoarse(invalid_id,v_id,ln); for (SAMRAI::hier::PatchLevel::Iterator pi(level->begin()); pi!=level->end(); ++pi) { boost::shared_ptr<SAMRAI::hier::Patch> patch = *pi; boost::shared_ptr<SAMRAI::pdat::CellData<double> > p_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::CellData<double> > (patch->getPatchData(p_id)); SAMRAI::pdat::CellData<double> &p(*p_ptr); boost::shared_ptr<SAMRAI::pdat::SideData<double> > v_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::SideData<double> > (patch->getPatchData(v_id)); SAMRAI::pdat::SideData<double> &v(*v_ptr); boost::shared_ptr<SAMRAI::pdat::SideData<double> > v_rhs_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::SideData<double> > (patch->getPatchData(v_rhs_id)); SAMRAI::pdat::SideData<double> &v_rhs(*v_rhs_ptr); boost::shared_ptr<SAMRAI::pdat::CellData<double> > cell_visc_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::CellData<double> > (patch->getPatchData(cell_viscosity_id)); SAMRAI::pdat::CellData<double> &cell_viscosity(*cell_visc_ptr); boost::shared_ptr<SAMRAI::pdat::NodeData<double> > edge_visc_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::NodeData<double> > (patch->getPatchData(edge_viscosity_id)); SAMRAI::pdat::NodeData<double> &edge_viscosity(*edge_visc_ptr); SAMRAI::hier::Box pbox=patch->getBox(); boost::shared_ptr<SAMRAI::geom::CartesianPatchGeometry> geom = boost::dynamic_pointer_cast<SAMRAI::geom::CartesianPatchGeometry> (patch->getPatchGeometry()); double dx = geom->getDx()[0]; double dy = geom->getDx()[1]; for(int j=pbox.lower(1); j<=pbox.upper(1)+1; ++j) { /* Do the red-black skip */ int i_min=pbox.lower(0) + (abs(pbox.lower(0) + j + rb))%2; for(int i=i_min; i<=pbox.upper(0); i+=2) { SAMRAI::pdat::CellIndex 
center(SAMRAI::tbox::Dimension(2)); center[0]=i; center[1]=j; /* Update v */ smooth_V_2D(1,pbox,geom,center,jp,ip, p,v,v_rhs,maxres,dy,dx,cell_viscosity, edge_viscosity,theta_momentum); } } } set_boundaries(invalid_id,v_id,level,true); } /* p sweep No need for red-black, because dp does not depend on the pressure. */ xeqScheduleGhostFillNoCoarse(invalid_id,v_id,ln); for (SAMRAI::hier::PatchLevel::Iterator pi(level->begin()); pi!=level->end(); ++pi) { boost::shared_ptr<SAMRAI::hier::Patch> patch = *pi; boost::shared_ptr<SAMRAI::pdat::CellData<double> > p_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::CellData<double> > (patch->getPatchData(p_id)); SAMRAI::pdat::CellData<double> &p(*p_ptr); boost::shared_ptr<SAMRAI::pdat::CellData<double> > dp_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::CellData<double> > (patch->getPatchData(dp_id)); SAMRAI::pdat::CellData<double> &dp(*dp_ptr); boost::shared_ptr<SAMRAI::pdat::CellData<double> > p_rhs_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::CellData<double> > (patch->getPatchData(p_rhs_id)); SAMRAI::pdat::CellData<double> &p_rhs(*p_rhs_ptr); boost::shared_ptr<SAMRAI::pdat::SideData<double> > v_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::SideData<double> > (patch->getPatchData(v_id)); SAMRAI::pdat::SideData<double> &v(*v_ptr); boost::shared_ptr<SAMRAI::pdat::CellData<double> > cell_visc_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::CellData<double> > (patch->getPatchData(cell_viscosity_id)); SAMRAI::pdat::CellData<double> &cell_viscosity(*cell_visc_ptr); boost::shared_ptr<SAMRAI::pdat::NodeData<double> > edge_visc_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::NodeData<double> > (patch->getPatchData(edge_viscosity_id)); SAMRAI::pdat::NodeData<double> &edge_viscosity(*edge_visc_ptr); SAMRAI::hier::Box pbox=patch->getBox(); boost::shared_ptr<SAMRAI::geom::CartesianPatchGeometry> geom = boost::dynamic_pointer_cast<SAMRAI::geom::CartesianPatchGeometry> (patch->getPatchGeometry()); double dx = geom->getDx()[0]; double 
dy = geom->getDx()[1]; SAMRAI::pdat::CellIterator cend(SAMRAI::pdat::CellGeometry::end(pbox)); for(SAMRAI::pdat::CellIterator ci(SAMRAI::pdat::CellGeometry::begin(pbox)); ci!=cend; ++ci) { const SAMRAI::pdat::CellIndex ¢er(*ci); const SAMRAI::pdat::SideIndex x(center,0,SAMRAI::pdat::SideIndex::Lower), y(center,1,SAMRAI::pdat::SideIndex::Lower); /* Update p */ double dvx_dx=(v(x+ip) - v(x))/dx; double dvy_dy=(v(y+jp) - v(y))/dy; double delta_R_continuity= p_rhs(center) - dvx_dx - dvy_dy; /* No scaling here, though there should be. */ maxres=std::max(maxres,std::fabs(delta_R_continuity)); dp(center)=delta_R_continuity*theta_continuity /Stokes_dRc_dp_2D(pbox,center,x,y,cell_viscosity,edge_viscosity,v,dx,dy); p(center)+=dp(center); } } set_boundaries(p_id,invalid_id,level,true); /* fix v sweep */ xeqScheduleGhostFillNoCoarse(dp_id,invalid_id,ln); for (SAMRAI::hier::PatchLevel::Iterator pi(level->begin()); pi!=level->end(); ++pi) { boost::shared_ptr<SAMRAI::hier::Patch> patch = *pi; boost::shared_ptr<SAMRAI::pdat::CellData<double> > dp_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::CellData<double> > (patch->getPatchData(dp_id)); SAMRAI::pdat::CellData<double> &dp(*dp_ptr); boost::shared_ptr<SAMRAI::pdat::SideData<double> > v_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::SideData<double> > (patch->getPatchData(v_id)); SAMRAI::pdat::SideData<double> &v(*v_ptr); boost::shared_ptr<SAMRAI::pdat::CellData<double> > cell_visc_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::CellData<double> > (patch->getPatchData(cell_viscosity_id)); SAMRAI::pdat::CellData<double> &cell_viscosity(*cell_visc_ptr); boost::shared_ptr<SAMRAI::pdat::NodeData<double> > edge_visc_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::NodeData<double> > (patch->getPatchData(edge_viscosity_id)); SAMRAI::pdat::NodeData<double> &edge_viscosity(*edge_visc_ptr); SAMRAI::hier::Box pbox=patch->getBox(); boost::shared_ptr<SAMRAI::geom::CartesianPatchGeometry> geom = 
boost::dynamic_pointer_cast<SAMRAI::geom::CartesianPatchGeometry> (patch->getPatchGeometry()); double dx = geom->getDx()[0]; double dy = geom->getDx()[1]; pbox.growUpper(SAMRAI::hier::IntVector::getOne(d_dim)); SAMRAI::pdat::CellIterator cend(SAMRAI::pdat::CellGeometry::end(pbox)); for(SAMRAI::pdat::CellIterator ci(SAMRAI::pdat::CellGeometry::begin(pbox)); ci!=cend; ++ci) { const SAMRAI::pdat::CellIndex ¢er(*ci); const SAMRAI::pdat::SideIndex x(center,0,SAMRAI::pdat::SideIndex::Lower), y(center,1,SAMRAI::pdat::SideIndex::Lower); const SAMRAI::pdat::NodeIndex edge(center,SAMRAI::pdat::NodeIndex::LowerLeft); /* Update v */ if(center[1]<pbox.upper(1)) { if(!((center[0]==pbox.lower(0) && v(x-ip)==boundary_value) || (center[0]==pbox.upper(0) && v(x+ip)==boundary_value))) v(x)+=(dp(center) - dp(center-ip)) /(dx*Stokes_dRm_dv_2D(cell_viscosity,edge_viscosity,center, center-ip,edge+jp,edge,dx,dy)); } if(center[0]<pbox.upper(0)) { if(!((center[1]==pbox.lower(1) && v(y-jp)==boundary_value) || (center[1]==pbox.upper(1) && v(y+jp)==boundary_value))) v(y)+=(dp(center) - dp(center-jp)) /(dy*Stokes_dRm_dv_2D(cell_viscosity,edge_viscosity,center, center-jp,edge+ip,edge,dy,dx)); } } } set_boundaries(invalid_id,v_id,level,true); // if (residual_tolerance >= 0.0) { /* * Check for early end of sweeps due to convergence * only if it is numerically possible (user gave a * non negative value for residual tolerance). */ converged = maxres < residual_tolerance; const SAMRAI::tbox::SAMRAI_MPI& mpi(d_hierarchy->getMPI()); int tmp= converged ? 1 : 0; if (mpi.getSize() > 1) { mpi.AllReduce(&tmp, 1, MPI_MIN); } converged=(tmp==1); // if (d_enable_logging) // SAMRAI::tbox::plog // // << d_object_name << "\n" // << "Tackley " << ln << " " << sweep << " : " << maxres << "\n"; // } } }
/** * @brief Class::containsFields * @param section * @return */ bool Class::containsFields(Section section) const { return range::find_if(m_Fields, [&](auto &&f){ return f->section() == section; }) != cend(m_Fields); }
/// Starts (or queues) an update against the remote manifest.
///
/// Resets all download bookkeeping, diffs the remote manifest against the
/// local state, and then either:
///  - reports ALREADY_UP_TO_DATE when the diff is empty,
///  - reports NEW_VERSION_FOUND and waits, when _waitToUpdate is false, or
///  - builds download units for changed/added assets, deletes removed ones,
///    and kicks off an asynchronous batch download.
void ModuleMgr::startUpdate()
{
    // Clean up before update
    _failedUnits.clear();
    _downloadUnits.clear();
    _compressedFiles.clear();
    _totalWaitToDownload = _totalToDownload = 0;
    _percent = _percentByFile = _sizeCollected = _totalSize = 0;
    _downloadedSize.clear();
    _totalEnabled = false;

    // Diff of remote vs. local assets, keyed by asset id.
    std::unordered_map<std::string, ModuleManifest::AssetDiff> diff_map =
        _remoteManifest->genDiff();
    if (diff_map.size() == 0)
    {
        _updateState = State::UP_TO_DATE;
        // Rename temporary manifest to valid manifest
        _fileUtils->removeFile(_remoteManifestPath);
        dispatchUpdateEvent(ModuleMgrEvent::EventCode::ALREADY_UP_TO_DATE);
    }
    else
    {
        // Caller has not confirmed the update yet: just announce it and stop.
        if (false == _waitToUpdate)
        {
            _updateState = State::NEED_UPDATE;
            dispatchUpdateEvent(ModuleMgrEvent::EventCode::NEW_VERSION_FOUND);
            return;
        }
        _updateState = State::UPDATING;
        // Generate download units for all assets that need to be updated or added
        std::string packageUrl = _remoteManifest->getPackageUrl();
        for (auto it = diff_map.begin(); it != diff_map.end(); ++it)
        {
            ModuleManifest::AssetDiff diff = it->second;
            if (diff.type == ModuleManifest::DiffType::DELETED)
            {
                // Asset removed remotely: delete the local copy immediately.
                _fileUtils->removeFile(_storagePath + diff.asset.path);
            }
            else
            {
                std::string path = diff.asset.path;
                // Create path
                _fileUtils->createDirectory(basename(_storagePath + path));

                ModuleDownloader::DownloadUnit unit;
                unit.customId = it->first;
                unit.srcUrl = packageUrl + path;
                unit.storagePath = _storagePath + path;
                unit.resumeDownload = false;
                _downloadUnits.emplace(unit.customId, unit);
            }
        }
        // Set other assets' downloadState to SUCCESSED
        // (assets absent from the diff are already up to date locally).
        auto assets = _remoteManifest->getAssets();
        for (auto it = assets.cbegin(); it != assets.cend(); ++it)
        {
            const std::string &key = it->first;
            auto diffIt = diff_map.find(key);
            if (diffIt == diff_map.end())
            {
                _remoteManifest->setAssetDownloadState(key, ModuleManifest::DownloadState::SUCCESSED);
            }
        }
        _totalWaitToDownload = _totalToDownload = (int)_downloadUnits.size();
        // Fire off the whole batch asynchronously; progress arrives via events.
        _downloader->batchDownloadAsync(_downloadUnits, BATCH_UPDATE_ID);
        std::string msg = StringUtils::format("Start to update %d files from remote package.", _totalToDownload);
        dispatchUpdateEvent(ModuleMgrEvent::EventCode::UPDATE_PROGRESSION, "", msg);
    }

    // Consume the confirmation flag either way.
    _waitToUpdate = false;
}
void ItemSerializer::toGeoJson(std::ostream& out, const sserialize::Static::spatial::GeoShape& gs) const { sserialize::spatial::GeoShapeType gst = gs.type(); out << "{\"type\":\""; switch(gst) { case sserialize::spatial::GS_POINT: out << "Point"; break; case sserialize::spatial::GS_WAY: out << "LineString"; break; case sserialize::spatial::GS_POLYGON: out << "Polygon"; break; case sserialize::spatial::GS_MULTI_POLYGON: out << "MultiPolygon"; break; default: break; } out << "\",\"coordinates\":"; switch(gst) { case sserialize::spatial::GS_POINT: { auto gp = gs.get<sserialize::spatial::GS_POINT>(); out << "[" << gp->lon() << "," << gp->lat() << "]"; break; } case sserialize::spatial::GS_WAY: { auto gw = gs.get<sserialize::spatial::GS_WAY>(); out << "["; toGeoJson(out, gw->cbegin(), gw->cend()); out << "]"; break; } case sserialize::spatial::GS_POLYGON: { auto gw = gs.get<sserialize::spatial::GS_POLYGON>(); out << "[["; toGeoJson(out, gw->cbegin(), gw->cend()); out << "]]"; break; } case sserialize::spatial::GS_MULTI_POLYGON: { auto gmw = gs.get<sserialize::spatial::GS_MULTI_POLYGON>(); out << "["; toGeoJson(out, gmw->outerPolygons().cbegin(), gmw->outerPolygons().cend()); if (gmw->innerPolygons().size()) { out << ","; toGeoJson(out, gmw->innerPolygons().cbegin(), gmw->innerPolygons().cend()); } out << "]"; break; } default: break; } out << '}'; }
/// Detects the code page of a file: first by BOM signature, then (optionally)
/// by content heuristics (IsTextUnicode, UTF-8 validation, universal detector).
///
/// On return the file pointer sits just past a detected BOM, or at the file
/// start otherwise. nCodePage receives the detected code page; pSignatureFound
/// (if non-null) reports whether a BOM was present. Returns true when a code
/// page was determined.
bool GetFileFormat(api::File& file, uintptr_t& nCodePage, bool* pSignatureFound, bool bUseHeuristics)
{
	DWORD dwTemp=0;
	bool bSignatureFound = false;
	bool bDetect=false;
	DWORD Readed = 0;

	// Read up to 4 bytes and test them against the known BOM signatures.
	if (file.Read(&dwTemp, sizeof(dwTemp), Readed) && Readed > 1 ) // minimum signature size is 2 bytes
	{
		if (LOWORD(dwTemp) == SIGN_UNICODE)
		{
			// UTF-16 LE BOM (2 bytes).
			nCodePage = CP_UNICODE;
			file.SetPointer(2, nullptr, FILE_BEGIN);
			bSignatureFound = true;
		}
		else if (LOWORD(dwTemp) == SIGN_REVERSEBOM)
		{
			// UTF-16 BE BOM (2 bytes).
			nCodePage = CP_REVERSEBOM;
			file.SetPointer(2, nullptr, FILE_BEGIN);
			bSignatureFound = true;
		}
		else if ((dwTemp & 0x00FFFFFF) == SIGN_UTF8)
		{
			// UTF-8 BOM (3 bytes).
			nCodePage = CP_UTF8;
			file.SetPointer(3, nullptr, FILE_BEGIN);
			bSignatureFound = true;
		}
		else
		{
			// No signature: rewind so heuristics (or the caller) see the whole file.
			file.SetPointer(0, nullptr, FILE_BEGIN);
		}
	}

	if (bSignatureFound)
	{
		bDetect = true;
	}
	else if (bUseHeuristics)
	{
		// Sample the beginning of the file for content-based detection.
		file.SetPointer(0, nullptr, FILE_BEGIN);
		DWORD Size=0x8000; // BUGBUG. TODO: configurable
		char_ptr Buffer(Size);
		DWORD ReadSize = 0;
		bool ReadResult = file.Read(Buffer.get(), Size, ReadSize);
		file.SetPointer(0, nullptr, FILE_BEGIN);

		if (ReadResult && ReadSize)
		{
			// Ask IsTextUnicode for exactly these statistics/flags.
			int test=
				IS_TEXT_UNICODE_STATISTICS|
				IS_TEXT_UNICODE_REVERSE_STATISTICS|
				IS_TEXT_UNICODE_CONTROLS|
				IS_TEXT_UNICODE_REVERSE_CONTROLS|
				IS_TEXT_UNICODE_ILLEGAL_CHARS|
				IS_TEXT_UNICODE_ODD_LENGTH|
				IS_TEXT_UNICODE_NULL_BYTES;

			if (IsTextUnicode(Buffer.get(), ReadSize, &test))
			{
				// Accept UTF-16 only when the negative indicators are absent
				// and at least one positive indicator fired.
				if (!(test&IS_TEXT_UNICODE_ODD_LENGTH) && !(test&IS_TEXT_UNICODE_ILLEGAL_CHARS))
				{
					if ((test&IS_TEXT_UNICODE_NULL_BYTES) ||
					        (test&IS_TEXT_UNICODE_CONTROLS) ||
					        (test&IS_TEXT_UNICODE_REVERSE_CONTROLS))
					{
						if ((test&IS_TEXT_UNICODE_CONTROLS) || (test&IS_TEXT_UNICODE_STATISTICS))
						{
							nCodePage=CP_UNICODE;
							bDetect=true;
						}
						else if ((test&IS_TEXT_UNICODE_REVERSE_CONTROLS) || (test&IS_TEXT_UNICODE_REVERSE_STATISTICS))
						{
							nCodePage=CP_REVERSEBOM;
							bDetect=true;
						}
					}
				}
			}
			else if (IsTextUTF8(Buffer.get(), ReadSize))
			{
				nCodePage=CP_UTF8;
				bDetect=true;
			}
			else
			{
				// Last resort: universal charset detector, filtered by user options.
				int cp = GetCpUsingUniversalDetector(Buffer.get(), ReadSize);
				if ( cp >= 0 )
				{
					// "-1" means: restrict auto-detection to favorites when the
					// code-page menu is in reduced mode.
					if (Global->Opt->strNoAutoDetectCP.Get() == L"-1")
					{
						if ( Global->Opt->CPMenuMode )
						{
							if ( static_cast<UINT>(cp) != GetACP() && static_cast<UINT>(cp) != GetOEMCP() )
							{
								long long selectType = Global->CodePages->GetFavorite(cp);
								if (0 == (selectType & CPST_FAVORITE)) cp = -1;
							}
						}
					}
					else
					{
						// Otherwise reject any code page explicitly banned by the user.
						const auto BannedCpList = StringToList(Global->Opt->strNoAutoDetectCP, STLF_UNIQUE);

						if (std::find(ALL_CONST_RANGE(BannedCpList), std::to_wstring(cp)) != BannedCpList.cend())
						{
							cp = -1;
						}
					}
				}

				if (cp != -1)
				{
					nCodePage = cp;
					bDetect = true;
				}
			}
		}
	}

	if (pSignatureFound)
	{
		*pSignatureFound = bSignatureFound;
	}

	return bDetect;
}
void CPathProductor::GetLaneData(void) { DBLane dbLane; auto vecLane = dbLane.GetLaneTable(1); set<int> setPoint; for (auto it = vecLane.cbegin(); it != vecLane.cend(); ++it) { //printf("Lane: %d, s: %d e: %d, p: %d n: %d, f: %d, t: %d, l: %d\r\n", // it->nID, it->nStart, it->nEnd, it->nPrevLane, it->nNextLane, it->nNextFork, it->nType, // it->nLength); setPoint.insert(it->nStart); setPoint.insert(it->nEnd); mapLane[it->nID] = *it; } m_arrayBarCode.clear(); m_arrayLocation.clear(); int nIndex = 0; for(auto it = setPoint.cbegin(); it != setPoint.cend(); ++it) { int nBarCode = *it; m_arrayBarCode.push_back(nBarCode); location loc; loc.x = nBarCode; loc.y = 10; m_arrayLocation.push_back(loc); mapPoint[nBarCode] = nIndex; nIndex++; } m_arrayEdge.clear(); m_arrayWeights.clear(); int nLoopNodeStart = -1; int nLoopNodeEnd = -1; for (auto it = vecLane.cbegin(); it != vecLane.cend(); ++it) { int nIndexStart = 0; int nIndexEnd = 0; auto ms = mapPoint.find(it->nStart); auto me = mapPoint.find(it->nEnd); if (ms != mapPoint.cend() && me != mapPoint.cend()) { nIndexStart = ms->second; nIndexEnd = me->second; m_arrayEdge.push_back(lane_edge(nIndexStart, nIndexEnd)); m_arrayWeights.push_back(it->nLength); // todo: find nLoopNodeStart, nLoopNodeEnd auto itNextLane = mapLane.find(it->nNextLane); if ( itNextLane != mapLane.cend()) { // the end point of the last lane don't equal the start point of the first lane. // but the two points will be the same point. if (it->nEnd != itNextLane->second.nStart) { nLoopNodeStart = nIndexEnd; } // the first lane have not prev lane, the prevlane id is -1 else if(it->nPrevLane == -1) { nLoopNodeEnd = nIndexStart; } } } } // add virtual lane to complete the rail to a loop if (nLoopNodeStart >= 0 && nLoopNodeEnd >= 0) { m_arrayEdge.push_back(lane_edge(nLoopNodeStart, nLoopNodeEnd)); m_arrayWeights.push_back(0); } }