/** \brief Return a vector of absolute paths to regular files in the given path
 *
 * @param[in] path relative or absolute directory (searched recursively)
 * @return Any regular files in
 * @return if absolute directory: path
 * @return if relative directory: GetResourceDir() / path
 */
std::vector<fs::path> ListDir(const fs::path& path) {
    std::vector<fs::path> retval;
    bool is_rel = path.is_relative();
    // Resolve relative paths against the resource dir *before* validating,
    // so both branches are checked (previously a missing relative directory
    // went unvalidated and the iterator below could throw).
    const fs::path& default_path = is_rel ? GetResourceDir() / path : path;

    // is_directory() is checked first: it is false (not throwing) for a
    // nonexistent path, whereas is_empty() throws if the path doesn't exist.
    if (!fs::is_directory(default_path) || fs::is_empty(default_path)) {
        DebugLogger() << "ListDir: File " << PathToString(default_path)
                      << " was not included as it is empty or not a directory";
    } else {
        for (fs::recursive_directory_iterator dir_it(default_path);
             dir_it != fs::recursive_directory_iterator(); ++dir_it)
        {
            if (fs::is_regular_file(dir_it->status())) {
                retval.push_back(dir_it->path());
            } else if (!fs::is_directory(dir_it->status())) {
                TraceLogger() << "Parse: Unknown file not included: " << PathToString(dir_it->path());
            }
        }
    }
    if (retval.empty()) {
        DebugLogger() << "ListDir: No files found for " << path.string();
    }
    return retval;
}
/** Recursively collect the paths of all regular files under dir_path,
 *  skipping ".gitignore" entries, and return them in random order.
 *
 * @param dir_path directory to scan (message is printed if it doesn't exist)
 * @return shuffled list of file paths; empty if the directory does not exist
 */
std::vector< std::string > getImagesList(const char *dir_path) {
    std::vector< std::string > ret;
    boost::filesystem::path dir(dir_path);
    if(!exists(dir)) {
        std::cout << "O diretório de entrada não existe." << std::endl;
        return ret;
    }
    boost::filesystem::recursive_directory_iterator dir_it(dir);
    boost::filesystem::recursive_directory_iterator end_it;
    for(; dir_it != end_it; ++dir_it) {
        if(is_regular_file((*dir_it).path())
           && (*dir_it).path().filename().string() != ".gitignore") {
            ret.push_back((*dir_it).path().string());
        }
    }
    // BUG FIX: std::random_shuffle was deprecated in C++14 and removed in
    // C++17. Use an explicit Fisher-Yates shuffle driven by std::rand (the
    // same source the old default random_shuffle typically used), keeping
    // the original "randomized order, seeded via srand" behavior.
    // NOTE(review): assumes <cstdlib> is reachable for std::rand — verify.
    for(std::vector< std::string >::size_type i = ret.size(); i > 1; --i) {
        std::swap(ret[i - 1], ret[std::rand() % i]);
    }
    return ret;
}
// Construct an image-sequence video asset from a directory of frame images.
// Scans `dir` (non-recursively) for regular files, collects their full paths
// into m_FileNames, and sorts them so frames play in lexicographic filename
// order. `load_to_memory` selects whether frames are cached (m_CacheFrames).
// NOTE(review): bfs::is_regular and path::file_string() are pre-v3
// Boost.Filesystem APIs — confirm the project pins an old Boost version.
ImageSequenceAsset::ImageSequenceAsset( const char* dir, bool load_to_memory )
    : VideoAsset( dir )
    , m_DirectoryName( dir )
    , m_MaxMemUsed( 0 )
    , m_CacheFrames(load_to_memory)
    , m_IsOpen(false)
{
    // Frame duration in milliseconds at the base frame rate.
    m_Format.m_FrameTime = 1000.0/FPS_BASE;
    // Normalize to an absolute path before scanning.
    bfs::path anim_path( bfs::system_complete( bfs::path( dir ) ) );
    if ( bfs::exists( anim_path ) && bfs::is_directory( anim_path ) )
    {
        // get the directory name and call the resource according to it
        m_ResourceName = anim_path.stem();
        bfs::directory_iterator dir_end;
        bfs::directory_iterator dir_it( anim_path );
        while ( dir_it != dir_end )
        {
            try
            {
                if ( bfs::is_regular( dir_it->status() ))
                {
                    std::string file_string = dir_it->path().file_string();
                    m_FileNames.push_back( file_string );
                }
            }
            catch ( const std::exception& ex )
            {
                // A single unreadable entry must not abort the whole scan.
                std::cerr << "Exception: " << ex.what() << std::endl;
            }
            dir_it++;
        }
        if ( m_FileNames.size() > 0 )
        {
            // Frames are ordered by filename.
            std::sort( m_FileNames.begin(), m_FileNames.end() );
        }
    }
}
/// Restore in-memory state from persisted "<number>.bin" dump files in `path`.
/// If the "tmp/" subdirectory does not exist yet this is a fresh table: just
/// create the directories and return. Otherwise every non-empty *.bin file is
/// replayed via restoreFromFile, and `increment` is raised to the largest file
/// number seen so that subsequently written dumps get larger numbers.
void StorageSetOrJoinBase::restore()
{
    Poco::File tmp_dir(path + "tmp/");
    if (!tmp_dir.exists())
    {
        tmp_dir.createDirectories();
        return;
    }

    static const auto file_suffix = ".bin";
    static const auto file_suffix_size = strlen(".bin");

    Poco::DirectoryIterator dir_end;
    for (Poco::DirectoryIterator dir_it(path); dir_end != dir_it; ++dir_it)
    {
        const auto & name = dir_it.name();

        // Only regular, non-empty *.bin files are dumps worth replaying.
        if (dir_it->isFile() && endsWith(name, file_suffix) && dir_it->getSize() > 0)
        {
            /// Calculate the maximum number of available files with a backup to add the following files with large numbers.
            UInt64 file_num = parse<UInt64>(name.substr(0, name.size() - file_suffix_size));
            if (file_num > increment)
                increment = file_num;

            restoreFromFile(dir_it->path());
        }
    }
}
// Load alarm definitions from every ".json" file directly inside `path`.
// Any previously loaded definitions are discarded first. Stops at the first
// file that fails to parse. Returns false if the directory cannot be opened
// or any definition file is rejected by populate_map.
bool AlarmTableDefs::initialize(std::string& path)
{
    std::map<unsigned int, unsigned int> dup_check;
    _key_to_def.clear();

    boost::filesystem::path defs_dir(path);
    if (!boost::filesystem::exists(defs_dir) || !boost::filesystem::is_directory(defs_dir))
    {
        TRC_ERROR("Unable to open directory at %s", path.c_str());
        return false;
    }

    bool ok = true;
    boost::filesystem::directory_iterator end;
    for (boost::filesystem::directory_iterator entry(defs_dir); ok && entry != end; ++entry)
    {
        if (boost::filesystem::is_regular_file(entry->status())
            && entry->path().extension() == ".json")
        {
            ok = populate_map(entry->path().c_str(), dup_check);
        }
    }
    return ok;
}
/// Restore in-memory state from persisted "<number>.bin" dump files in `path`.
/// A missing "tmp/" subdirectory means a fresh table: create the directories
/// and return. Otherwise replay every non-empty *.bin file via restoreFromFile
/// and raise `increment` to the largest file number seen, so new dumps are
/// written with larger numbers.
void StorageSetOrJoinBase::restore()
{
    Poco::File tmp_dir(path + "tmp/");
    if (!tmp_dir.exists())
    {
        tmp_dir.createDirectories();
        return;
    }

    static const auto file_suffix = ".bin";
    static const auto file_suffix_size = strlen(".bin");

    Poco::DirectoryIterator dir_end;
    for (Poco::DirectoryIterator dir_it(path); dir_end != dir_it; ++dir_it)
    {
        const auto & name = dir_it.name();

        if (dir_it->isFile() && endsWith(name, file_suffix) && dir_it->getSize() > 0)
        {
            /// Calculate the maximum number of the existing backup files, so that the following files are added with larger numbers.
            UInt64 file_num = parse<UInt64>(name.substr(0, name.size() - file_suffix_size));
            if (file_num > increment)
                increment = file_num;

            restoreFromFile(dir_it->path());
        }
    }
}
// Collect the names of all regular files directly inside `dir`
// (non-recursive); subdirectories and other entry types are skipped.
std::vector<std::string> FileUtility::getFilesFromDir(boost::filesystem::path dir)
{
    std::vector<std::string> names;
    const boost::filesystem::directory_iterator end;
    for (boost::filesystem::directory_iterator entry(dir); entry != end; ++entry)
    {
        if (boost::filesystem::is_regular_file(*entry))
            names.push_back(entry->filename());
    }
    return names;
}
// Count the regular files directly inside `dir` (non-recursive).
unsigned int violet::FileUtility::getFilesCountFromDir(boost::filesystem::path dir)
{
    unsigned int total = 0;
    const boost::filesystem::directory_iterator end;
    for (boost::filesystem::directory_iterator entry(dir); entry != end; ++entry)
    {
        if (boost::filesystem::is_regular_file(*entry))
            ++total;
    }
    return total;
}
// Count the subdirectories directly inside `dir`, ignoring hidden ones
// (names beginning with '.').
unsigned int violet::FileUtility::getSubDirsCountFromDir(boost::filesystem::path dir)
{
    unsigned int total = 0;
    const boost::filesystem::directory_iterator end;
    for (boost::filesystem::directory_iterator entry(dir); entry != end; ++entry)
    {
        if (boost::filesystem::is_directory(*entry)
            && entry->path().filename().string()[0] != '.')
        {
            ++total;
        }
    }
    return total;
}
// Collect the names of subdirectories directly inside `dir`, ignoring
// hidden ones (names beginning with '.').
std::vector<std::string> violet::FileUtility::getSubDirsFromDir(boost::filesystem::path dir)
{
    std::vector<std::string> names;
    const boost::filesystem::directory_iterator end;
    for (boost::filesystem::directory_iterator entry(dir); entry != end; ++entry)
    {
        if (boost::filesystem::is_directory(*entry)
            && entry->path().filename().string()[0] != '.')
        {
            names.push_back(entry->path().filename().string());
        }
    }
    return names;
}
// Rescale the "boundary" histograms stored in each subdirectory of the ROOT
// file `infile`. For every TDirectoryFile in the file, histograms whose names
// contain "_HI", "_LOW" or "h_n_mt2bins" are scaled by 1/GetEntries()
// (default, numSamples == -1) or by 1/numSamples, then written back in place.
// NOTE(review): a matching histogram with zero entries makes
// 1.0/h->GetEntries() divide by zero — confirm inputs always have entries.
void rescaleBoundaryHists(std::string infile, int numSamples=-1){
    // Open for update so rescaled histograms can be overwritten in place.
    TFile* f = new TFile(infile.c_str(), "UPDATE");
    TDirectory* dir = 0;
    TIter dir_it(f->GetListOfKeys());
    TKey* dir_k;
    while ((dir_k = (TKey *)dir_it())) {
        // Only descend into subdirectories of the file.
        if (TString(dir_k->GetClassName()) != "TDirectoryFile") continue;
        std::string dir_name = std::string(dir_k->GetTitle());
        if(dir_name == "") continue;
        dir = (TDirectory*)dir_k->ReadObj();
        if(dir == 0) continue;
        // Walk this directory's keys in reverse order.
        TIter hist_it(dir->GetListOfKeys(), kIterBackward);
        TKey* hist_k;
        while ((hist_k = (TKey *)hist_it())) {
            std::string hist_name = (hist_k->GetTitle());
            // Only the boundary/counting histograms are rescaled.
            if (hist_name.find("_HI") != std::string::npos
                || hist_name.find("_LOW") != std::string::npos
                || hist_name.find("h_n_mt2bins") != std::string::npos) {
                TH1* h = (TH1*)hist_k->ReadObj();
                if(numSamples==-1) h->Scale(1.0/h->GetEntries());
                else h->Scale(1.0/numSamples);
                // cd() so Write() targets this subdirectory, replacing the
                // existing cycle instead of appending a new one.
                dir->cd();
                h->Write("",TObject::kOverwrite);
            }
        }
    }
    // NOTE(review): only the last directory read is deleted here; earlier
    // ones are left to ROOT's ownership machinery — verify intended.
    delete dir;
    gDirectory->GetList()->Delete();
    f->Write("",TObject::kOverwrite);
    f->Close();
    delete f;
}
/// Scan the compile-cache directory (creating it if needed) and remember the
/// base names of all previously compiled .so libraries in `files`.
Compiler::Compiler(const std::string & path_, size_t threads)
    : path(path_), pool(threads)
{
    Poco::File(path).createDirectory();

    Poco::DirectoryIterator end;
    Poco::DirectoryIterator it(path);
    while (it != end)
    {
        const std::string file_name = it.name();
        if (endsWith(file_name, ".so"))
            files.insert(file_name.substr(0, file_name.size() - 3));
        ++it;
    }

    LOG_INFO(log, "Having " << files.size() << " compiled files from previous start.");
}
// Return the names of entries directly inside `dir` whose extension equals
// `extention`. Prints a message when the directory is missing; filesystem
// errors are reported to stdout and whatever was collected so far is
// returned.
std::vector<std::string>CListFiles::ListFiltredFiles(std::string dir, std::string extention)
{
    std::vector<std::string> matches;
    boost::filesystem::path search_path = dir;
    try
    {
        if(!boost::filesystem::exists(search_path) || !boost::filesystem::is_directory(search_path))
        {
            std::cout << "Does not exist..." << std::endl;
            return matches;
        }
        boost::filesystem::directory_iterator it(search_path);
        boost::filesystem::directory_iterator last;
        while(it != last)
        {
            if(it->path().extension() == extention)
                matches.push_back(it->path().filename().string());
            ++it;
        }
    }
    catch(const boost::filesystem::filesystem_error& ex)
    {
        std::cout << ex.what() << std::endl;
    }
    return matches;
}
void PluginManager::load_libraries() { fs::path path("/home/armin/src/shared_lib_demo/plugins/"); if (!fs::exists(path) || !fs::is_directory(path)) { std::cerr << path.leaf() << "does not exist or " << "is not a directory." << std::endl; return; } fs::directory_iterator dir_it(path), dir_it_end; for (dir_it; dir_it != dir_it_end; dir_it++) { if (fs::is_symlink(*dir_it)) { //std::cerr << (*dir_it).leaf() << " is a symlink\n"; std::string file_path = (*dir_it).leaf(); if (!load_plugin(file_path)) { return; } } } }
// Per-header syntax-check driver. For every public .hpp under include/
// (relative to the CWD), find the matching CMake flags.make in the build
// directory, generate a one-line source file that includes the header, and
// run `make check-syntax` on it — verifying that each public header compiles
// stand-alone with the project's own flags.
//
// Usage: <prog> <build_dir> [src_dir]   (src_dir defaults to "src")
int main( int argc, char **argv )
{
    // argc can be 0 on exotic exec()s; bail out rather than read argv[0].
    if( argc <= 0 )
        return EXIT_FAILURE;
    if( argc != 2 && argc != 3 )
    {
        fcppt::io::cerr() << FCPPT_TEXT("Usage: ") << fcppt::from_std_string( argv[0] ) << FCPPT_TEXT(" <build_dir> [src_dir]\n");
        return EXIT_FAILURE;
    }
    boost::filesystem::path const build_dir( fcppt::from_std_string( argv[1] ) );
    boost::filesystem::path const temp_dir( build_dir / FCPPT_TEXT("temp_make_headers") );
    boost::filesystem::path const make_file( temp_dir / FCPPT_TEXT("Makefile") );
    boost::filesystem::path const log_file( temp_dir / FCPPT_TEXT("log.txt") );
    // Start each run with a fresh log.
    boost::filesystem::remove( log_file );
    fcppt::string const source_subdir( argc == 3 ? fcppt::from_std_string( argv[2] ) : FCPPT_TEXT("src") );
    if( !boost::filesystem::exists( temp_dir ) && !boost::filesystem::create_directory( temp_dir ) )
    {
        fcppt::io::cerr() << FCPPT_TEXT("Cannot create ") << temp_dir << FCPPT_TEXT('\n');
        return EXIT_FAILURE;
    }
    // Walk every file below include/.
    for( boost::filesystem::recursive_directory_iterator dir_it( FCPPT_TEXT("include") ), end_it; dir_it != end_it; ++dir_it )
    {
        boost::filesystem::path const &path( dir_it->path() );
        // Only public .hpp headers; anything under an impl/ component is private.
        if( fcppt::filesystem::extension( path ) != FCPPT_TEXT(".hpp") )
            continue;
        if( std::find( path.begin(), path.end(), fcppt::string( FCPPT_TEXT("impl") ) ) != path.end() )
            continue;
        boost::filesystem::path::iterator path_it( path.begin() );
        if( path_it == path.end() )
            continue;
        // skip include/
        ++path_it;
        if( path_it == path.end() )
            continue;
        // Header path as it would be written in an #include directive.
        boost::filesystem::path const include_file( ::make_path_from_iter( path_it, path.end() ) );
        // skip project name
        ++path_it;
        if( path_it == path.end() )
            continue;
        // descend into the build dir as far as possible
        boost::filesystem::path make_path( build_dir / source_subdir );
        while( boost::filesystem::exists( make_path ) )
            make_path /= *path_it++;
        make_path = make_path.parent_path() / FCPPT_TEXT("CMakeFiles");
        if( !boost::filesystem::exists( make_path ) )
        {
            fcppt::io::cerr() << make_path << FCPPT_TEXT(" does not exist.\n");
            continue;
        }
        optional_path const cmake_dir( ::find_cmake_dir( make_path ) );
        if( !cmake_dir )
        {
            fcppt::io::cerr() << FCPPT_TEXT("No *.dir found in ") << make_path << FCPPT_TEXT('\n');
            continue;
        }
        boost::filesystem::path const flags_file( *cmake_dir / FCPPT_TEXT("flags.make") );
        if( !boost::filesystem::exists( flags_file ) )
        {
            fcppt::io::cerr() << flags_file << FCPPT_TEXT(" does not exist!\n");
            continue;
        }
        // Name the generated .cpp after the header path, '/' replaced by '_'.
        boost::filesystem::path const source_file( temp_dir / boost::algorithm::replace_all_copy( fcppt::filesystem::path_to_string( fcppt::filesystem::replace_extension( include_file, FCPPT_TEXT("cpp") ) ), FCPPT_TEXT("/"), FCPPT_TEXT("_") ) );
        // Write a Makefile that reuses the target's CXX_FLAGS/CXX_DEFINES and
        // compiles the generated source with -fsyntax-only.
        if( !::write_file( make_file, FCPPT_TEXT("include ") + fcppt::filesystem::path_to_string( flags_file ) + FCPPT_TEXT("\n\n") + FCPPT_TEXT("check-syntax:\n") + FCPPT_TEXT("\tg++ -o /dev/null ${CXX_FLAGS} ${CXX_DEFINES} -S ") + fcppt::filesystem::path_to_string( source_file ) + FCPPT_TEXT(" -fsyntax-only\n\n") + FCPPT_TEXT(".PHONY: check-syntax*/\n") ) )
            return EXIT_FAILURE;
        // The generated translation unit is just "#include <header>".
        if( !::write_file( source_file, FCPPT_TEXT("#include <") + fcppt::filesystem::path_to_string( include_file ) + FCPPT_TEXT(">\n") ) )
            return EXIT_FAILURE;
        // Run make, appending stderr to the log and discarding stdout.
        int const system_ret( std::system( ( std::string( "make -f " ) + fcppt::to_std_string( fcppt::filesystem::path_to_string( make_file ) ) + " 2>>" + fcppt::to_std_string( fcppt::filesystem::path_to_string( log_file ) ) + " 1>/dev/null" ).c_str() ) );
        FCPPT_PP_PUSH_WARNING
        FCPPT_PP_DISABLE_GCC_WARNING(-Wcast-qual)
        FCPPT_PP_DISABLE_GCC_WARNING(-Wold-style-cast)
        // Abort on exec failure, or when make was interrupted by the user.
        if( system_ret == -1 || ( WIFSIGNALED( system_ret ) && ( WTERMSIG( system_ret ) == SIGINT || WTERMSIG( system_ret ) == SIGQUIT ) ) )
            return EXIT_FAILURE;
        FCPPT_PP_POP_WARNING
    }
}
// Generate "combined" convenience headers: for each directory under <dir1>
// that needs one, (re)write a header that #includes every file (or generated
// sub-header) in that directory, wrapped in an include guard, with the
// include list sorted for deterministic output.
int main( int argc, char **argv )
try
{
    if( argc != 2 )
    {
        fcppt::io::cerr() << FCPPT_TEXT("Usage: ") << argv[0] << FCPPT_TEXT(" <dir1>\n");
        return EXIT_FAILURE;
    }
    boost::filesystem::path const dir( fcppt::from_std_string( argv[1] ) );
    for( boost::filesystem::recursive_directory_iterator dir_it( dir ), dir_end; dir_it != dir_end; ++dir_it )
    {
        // Only entries for which a combined header should exist.
        if( !needs_header( dir_it->path() ) )
            continue;
        boost::filesystem::path const header( make_header( dir_it->path() ) );
        // Truncate: the header is regenerated from scratch each run.
        fcppt::filesystem::ofstream file( header, std::ios_base::trunc );
        string_vector filenames;
        if( !file.is_open() )
        {
            fcppt::io::cerr() << FCPPT_TEXT("Failed to open ") << header << FCPPT_TEXT('\n');
            return EXIT_FAILURE;
        }
        // Collect what to include: generated sub-headers where applicable,
        // plain (non-directory) files otherwise — skipping the header itself.
        for( boost::filesystem::directory_iterator file_it( dir_it->path() ), file_end; file_it != file_end; ++file_it )
        {
            if( file_it->path() == header )
                continue;
            if( needs_header( file_it->path() ) )
                filenames.push_back( fcppt::filesystem::path_to_string( make_header( file_it->path() ) ) );
            else if( !boost::filesystem::is_directory( file_it->path() ) )
                filenames.push_back( fcppt::filesystem::path_to_string( file_it->path() ) );
        }
        // Sort for stable, diff-friendly output.
        std::sort( filenames.begin(), filenames.end() );
        fcppt::string const include_guard_name( make_include_guard( header ) );
        file << FCPPT_TEXT("#ifndef ") << include_guard_name << FCPPT_TEXT("\n#define ") << include_guard_name << FCPPT_TEXT("\n\n");
        for( string_vector::const_iterator cur_name( filenames.begin() ); cur_name != filenames.end(); ++cur_name )
            file << FCPPT_TEXT("#include <") << *cur_name << FCPPT_TEXT(">\n");
        file << FCPPT_TEXT("\n#endif\n");
    }
}
catch( fcppt::exception const &_error )
{
    fcppt::io::cerr() << _error.string() << FCPPT_TEXT('\n');
    return EXIT_FAILURE;
}
catch( std::exception const &_error )
{
    std::cerr << _error.what() << '\n';
    return EXIT_FAILURE;
}
/// Load the metadata of all tables of this database: scan the metadata
/// directory for table_name.sql files (cleaning up leftover .sql.tmp files),
/// then load them sorted by name, in bunches, optionally on `thread_pool`.
/// (Russian comments from the original translated to English.)
void DatabaseOrdinary::loadTables(Context & context, boost::threadpool::pool * thread_pool)
{
    log = &Logger::get("DatabaseOrdinary (" + name + ")");

    using FileNames = std::vector<std::string>;
    FileNames file_names;

    Poco::DirectoryIterator dir_end;
    for (Poco::DirectoryIterator dir_it(path); dir_it != dir_end; ++dir_it)
    {
        /// For the .svn directory and the .gitignore file.
        if (dir_it.name().at(0) == '.')
            continue;

        /// There are .sql.bak files - skip them.
        if (endsWith(dir_it.name(), ".sql.bak"))
            continue;

        /// There are .sql.tmp files - delete them.
        if (endsWith(dir_it.name(), ".sql.tmp"))
        {
            LOG_INFO(log, "Removing file " << dir_it->path());
            Poco::File(dir_it->path()).remove();
            continue;
        }

        /// The required files have names of the form table_name.sql
        if (endsWith(dir_it.name(), ".sql"))
            file_names.push_back(dir_it.name());
        else
            throw Exception("Incorrect file extension: " + dir_it.name() + " in metadata directory " + path, ErrorCodes::INCORRECT_FILE_NAME);
    }

    /** Tables load faster when loaded in sorted (by name) order.
      * Otherwise (for the ext4 filesystem) DirectoryIterator enumerates them in some order
      * that matches neither the table creation order nor their layout on disk.
      */
    std::sort(file_names.begin(), file_names.end());

    size_t total_tables = file_names.size();
    LOG_INFO(log, "Total " << total_tables << " tables.");

    String data_path = context.getPath() + "/data/" + escapeForFileName(name) + "/";

    StopwatchWithLock watch;
    size_t tables_processed = 0;

    auto task_function = [&](FileNames::const_iterator begin, FileNames::const_iterator end)
    {
        for (FileNames::const_iterator it = begin; it != end; ++it)
        {
            const String & table = *it;

            /// Progress messages, so the long server startup is not boring to wait for.
            if (__sync_add_and_fetch(&tables_processed, 1) % PRINT_MESSAGE_EACH_N_TABLES == 0
                || watch.lockTestAndRestart(PRINT_MESSAGE_EACH_N_SECONDS))
            {
                LOG_INFO(log, std::fixed << std::setprecision(2) << tables_processed * 100.0 / total_tables << "%");
                watch.restart();
            }

            loadTable(context, path, *this, name, data_path, table);
        }
    };

    /** packaged_task is used so that exceptions are automatically propagated to the main thread.
      * Drawback - exceptions reach the main thread only after all tasks have finished.
      */

    const size_t bunch_size = TABLES_PARALLEL_LOAD_BUNCH_SIZE;
    size_t num_bunches = (total_tables + bunch_size - 1) / bunch_size;
    std::vector<std::packaged_task<void()>> tasks(num_bunches);
    for (size_t i = 0; i < num_bunches; ++i)
    {
        auto begin = file_names.begin() + i * bunch_size;
        auto end = (i + 1 == num_bunches) ? file_names.end() : (file_names.begin() + (i + 1) * bunch_size);

        tasks[i] = std::packaged_task<void()>(std::bind(task_function, begin, end));

        if (thread_pool)
            thread_pool->schedule([i, &tasks]{ tasks[i](); });
        else
            tasks[i]();
    }

    if (thread_pool)
        thread_pool->wait();

    /// Rethrow any exception captured by the tasks.
    for (auto & task : tasks)
        task.get_future().get();
}
/// Load the metadata of all tables of this database: scan `metadata_path`
/// for table_name.sql files (cleaning up leftover .sql.tmp files), load them
/// sorted by name — in bunches, optionally on `thread_pool` — then start the
/// loaded tables up.
void DatabaseOrdinary::loadTables( Context & context, ThreadPool * thread_pool, bool has_force_restore_data_flag)
{
    using FileNames = std::vector<std::string>;
    FileNames file_names;

    Poco::DirectoryIterator dir_end;
    for (Poco::DirectoryIterator dir_it(metadata_path); dir_it != dir_end; ++dir_it)
    {
        /// For '.svn', '.gitignore' directory and similar.
        if (dir_it.name().at(0) == '.')
            continue;

        /// There are .sql.bak files - skip them.
        if (endsWith(dir_it.name(), ".sql.bak"))
            continue;

        /// There are files .sql.tmp - delete.
        if (endsWith(dir_it.name(), ".sql.tmp"))
        {
            LOG_INFO(log, "Removing file " << dir_it->path());
            Poco::File(dir_it->path()).remove();
            continue;
        }

        /// The required files have names like `table_name.sql`
        if (endsWith(dir_it.name(), ".sql"))
            file_names.push_back(dir_it.name());
        else
            throw Exception("Incorrect file extension: " + dir_it.name() + " in metadata directory " + metadata_path, ErrorCodes::INCORRECT_FILE_NAME);
    }

    /** Tables load faster if they are loaded in sorted (by name) order.
      * Otherwise (for the ext4 filesystem), `DirectoryIterator` iterates through them in some order,
      * which does not correspond to order tables creation and does not correspond to order of their location on disk.
      */
    std::sort(file_names.begin(), file_names.end());

    size_t total_tables = file_names.size();
    LOG_INFO(log, "Total " << total_tables << " tables.");

    AtomicStopwatch watch;
    std::atomic<size_t> tables_processed {0};

    auto task_function = [&](FileNames::const_iterator begin, FileNames::const_iterator end)
    {
        for (auto it = begin; it != end; ++it)
        {
            const String & table = *it;

            /// Messages, so that it's not boring to wait for the server to load for a long time.
            if ((++tables_processed) % PRINT_MESSAGE_EACH_N_TABLES == 0
                || watch.compareAndRestart(PRINT_MESSAGE_EACH_N_SECONDS))
            {
                LOG_INFO(log, std::fixed << std::setprecision(2) << tables_processed * 100.0 / total_tables << "%");
                watch.restart();
            }

            loadTable(context, metadata_path, *this, name, data_path, table, has_force_restore_data_flag);
        }
    };

    /// Load the tables in bunches so the per-task overhead is amortized.
    const size_t bunch_size = TABLES_PARALLEL_LOAD_BUNCH_SIZE;
    size_t num_bunches = (total_tables + bunch_size - 1) / bunch_size;
    for (size_t i = 0; i < num_bunches; ++i)
    {
        auto begin = file_names.begin() + i * bunch_size;
        auto end = (i + 1 == num_bunches) ? file_names.end() : (file_names.begin() + (i + 1) * bunch_size);

        auto task = std::bind(task_function, begin, end);

        if (thread_pool)
            thread_pool->schedule(task);
        else
            task();
    }

    if (thread_pool)
        thread_pool->wait();

    /// After all tables was basically initialized, startup them.
    startupTables(thread_pool);
}