// Stands up a minimal HHVM runtime around an existing bytecode repo so units
// can be loaded/inspected outside a full server process.
//
// @param repoSchema  Repo schema id; exported via HHVM_RUNTIME_REPO_SCHEMA so
//                    the runtime opens a repo with a matching schema.
// @param configFile  Optional .hdf config path; ignored when empty.
//
// NOTE(review): the statement order below is load-bearing — process init must
// precede thread init, RuntimeOption::Load must precede the option overrides,
// and the LitstrTable must be in "writing" mode while systemlib is compiled.
RepoWrapper::RepoWrapper(const char* repoSchema, const std::string& configFile) {
  // The schema must be set in the environment before any repo is opened.
  if (setenv("HHVM_RUNTIME_REPO_SCHEMA", repoSchema, 1 /* overwrite */)) {
    fprintf(stderr, "Could not set repo schema");
    exit(EXIT_FAILURE);
  }
  printf("# Config file: %s\n", configFile.c_str());
  printf("# Repo schema: %s\n", repoSchemaId().begin());

  // Process-wide, then per-thread runtime initialization.
  register_process_init();
  initialize_repo();
  hphp_thread_init();
  g_context.getCheck();

  IniSetting::Map ini = IniSetting::Map::object;
  Hdf config;
  if (!configFile.empty()) {
    Config::ParseConfigFile(configFile, ini, config);
    // Disable logging to suppress harmless errors about setrlimit.
    config["Log"]["Level"] = "None";
  }
  RuntimeOption::Load(ini, config);

  // Open the repo read-only: never commit new units from this wrapper.
  RuntimeOption::RepoCommit = false;
  // NOTE(review): presumably forces compiler/repo initialization before
  // grabbing the Repo singleton — confirm against compile_file's contract.
  compile_file(nullptr, 0, MD5(), nullptr);
  repo = &Repo::get();

  // Override options that would interfere with loading units by path.
  RuntimeOption::AlwaysUseRelativePath = false;
  RuntimeOption::SafeFileAccess = false;
  RuntimeOption::EvalAllowHhas = true;
  Option::WholeProgram = false;

  // Litstr table must accept writes while systemlib units are compiled below.
  LitstrTable::init();
  LitstrTable::get().setWriting();
  RuntimeOption::RepoAuthoritative = true;
  repo->loadGlobalData(true /* allowFailure */);

  // Compile and register both halves of systemlib (PHP source + HHAS).
  std::string hhasLib;
  auto const phpLib = get_systemlib(&hhasLib);
  always_assert(!hhasLib.empty() && !phpLib.empty());
  auto phpUnit = compile_string(phpLib.c_str(), phpLib.size(),
                                "systemlib.php");
  addUnit(phpUnit);
  auto hhasUnit = compile_string(hhasLib.c_str(), hhasLib.size(),
                                 "systemlib.hhas");
  addUnit(hhasUnit);

  SystemLib::s_inited = true;
  // Freeze the litstr table: no further literal strings may be interned.
  LitstrTable::get().setReading();
}
void preloadRepo() { auto& repo = Repo::get(); auto units = repo.enumerateUnits(RepoIdLocal, true, false); if (units.size() == 0) { units = repo.enumerateUnits(RepoIdCentral, true, false); } if (!units.size()) return; std::vector<std::thread> workers; auto numWorkers = Process::GetCPUCount(); // Compute a batch size that causes each thread to process approximately 16 // batches. Even if the batches are somewhat imbalanced in what they contain, // the straggler workers are very unlikey to take more than 10% longer than // the first worker to finish. size_t batchSize{std::max(units.size() / numWorkers / 16, size_t(1))}; std::atomic<size_t> index{0}; for (auto worker = 0; worker < numWorkers; ++worker) { workers.push_back(std::thread([&] { hphp_thread_init(); hphp_session_init(); hphp_context_init(); while (true) { auto begin = index.fetch_add(batchSize); auto end = std::min(begin + batchSize, units.size()); if (begin >= end) break; auto unitCount = end - begin; for (auto i = size_t{0}; i < unitCount; ++i) { auto& kv = units[begin + i]; try { lookupUnit(String(RuntimeOption::SourceRoot + kv.first).get(), "", nullptr); } catch (...) { // swallow errors silently } } } hphp_context_exit(); hphp_session_exit(); hphp_thread_exit(); })); } for (auto& worker : workers) { worker.join(); } }
// Per-thread setup hook: run the base WorkerThread setup first, then
// initialize HPHP's thread-local runtime state for this worker.
void HPHPWorkerThread::setup() {
  WorkerThread::setup();
  hphp_thread_init();
}
/* * This is the entry point for offline bytecode generation. */ void emitAllHHBC(AnalysisResultPtr&& ar) { auto ues = ar->getHhasFiles(); decltype(ues) ues_to_print; auto const outputPath = ar->getOutputPath(); std::thread wp_thread, dispatcherThread; auto unexpectedException = [&] (const char* what) { if (dispatcherThread.joinable()) { Logger::Error("emitAllHHBC exited via an exception " "before dispatcherThread was joined: %s", what); } if (wp_thread.joinable()) { Logger::Error("emitAllHHBC exited via an exception " "before wp_thread was joined: %s", what); } throw; }; try { { SCOPE_EXIT { genText(ues_to_print, outputPath); }; auto commitSome = [&] (decltype(ues)& emitters) { batchCommit(emitters); if (Option::GenerateTextHHBC || Option::GenerateHhasHHBC) { std::move(emitters.begin(), emitters.end(), std::back_inserter(ues_to_print)); } emitters.clear(); }; if (!RuntimeOption::EvalUseHHBBC && ues.size()) { commitSome(ues); } auto commitLoop = [&] { folly::Optional<Timer> commitTime; // kBatchSize needs to strike a balance between reducing // transaction commit overhead (bigger batches are better), and // limiting the cost incurred by failed commits due to identical // units that require rollback and retry (smaller batches have // less to lose). Empirical results indicate that a value in // the 2-10 range is reasonable. static const unsigned kBatchSize = 8; while (auto ue = s_ueq.pop()) { if (!commitTime) { commitTime.emplace(Timer::WallTime, "committing units to repo"); } ues.push_back(std::move(ue)); if (ues.size() == kBatchSize) { commitSome(ues); } } if (ues.size()) commitSome(ues); }; LitstrTable::get().setReading(); ar->finish(); ar.reset(); if (!RuntimeOption::EvalUseHHBBC) { if (Option::GenerateBinaryHHBC) { commitGlobalData(std::unique_ptr<ArrayTypeTable::Builder>{}); } return; } RuntimeOption::EvalJit = false; // For HHBBC to invoke builtins. 
std::unique_ptr<ArrayTypeTable::Builder> arrTable; wp_thread = std::thread([&] { Timer timer(Timer::WallTime, "running HHBBC"); hphp_thread_init(); hphp_session_init(Treadmill::SessionKind::CompilerEmit); SCOPE_EXIT { hphp_context_exit(); hphp_session_exit(); hphp_thread_exit(); }; HHBBC::whole_program( std::move(ues), s_ueq, arrTable, Option::ParserThreadCount > 0 ? Option::ParserThreadCount : 0); }); commitLoop(); commitGlobalData(std::move(arrTable)); } wp_thread.join(); } catch (std::exception& ex) {