static String HHVM_FUNCTION(server_warmup_status) {
  // Fail if we jitted more than 25kb of code.
  size_t begin, end;
  jit::mcg->codeEmittedThisRequest(begin, end);
  auto const diff = end - begin;
  auto constexpr kMaxTCBytes = 25 << 10;
  if (diff > kMaxTCBytes) {
    return folly::format("Translation cache grew by {} bytes to {} bytes.",
                         diff, begin).str();
  }

  // Fail if we spent more than 0.5ms in the JIT.
  auto const jittime = jit::Timer::CounterValue(jit::Timer::translate);
  auto constexpr kMaxJitTimeNS = 500000;
  if (jittime.total > kMaxJitTimeNS) {
    return folly::format("Spent {}us in the JIT.", jittime.total / 1000).str();
  }

  if (!isStandardRequest()) {
    return "Warmup is still in progress.";
  }

  if (requestCount() <= RuntimeOption::EvalJitProfileRequests) {
    return "PGO profiling translations are still enabled.";
  }

  auto tpc_diff = jit::s_perfCounters[jit::tpc_interp_bb] -
                  jit::s_perfCounters[jit::tpc_interp_bb_force];
  if (tpc_diff) {
    return folly::sformat("Interpreted {} non-forced basic blocks.", tpc_diff);
  }

  return empty_string();
}
// Record details of a unit load (paths, compile/parse result, unit stats,
// and the kind of request) in the "hhvm_unit_cache" structured log.
void logLoad(
  StructuredLogEntry& ent,
  StringData* path,
  const char* cwd,
  String rpath,
  const CachedUnit& cu
) {
  ent.setStr("include_path", path->data());
  ent.setStr("current_dir", cwd);
  ent.setStr("resolved_path", rpath.data());

  if (auto const u = cu.unit) {
    const StringData* err;
    int line;
    if (u->compileTimeFatal(err, line)) {
      ent.setStr("result", "compile_fatal");
      ent.setStr("error", err->data());
    } else if (u->parseFatal(err, line)) {
      ent.setStr("result", "parse_fatal");
      ent.setStr("error", err->data());
    } else {
      ent.setStr("result", "success");
    }

    ent.setStr("sha1", u->sha1().toString());
    ent.setStr("repo_sn", folly::to<std::string>(u->sn()));
    ent.setStr("repo_id", folly::to<std::string>(u->repoID()));

    ent.setInt("bc_len", u->bclen());
    ent.setInt("num_litstrs", u->numLitstrs());
    ent.setInt("num_funcs", u->funcs().size());
    ent.setInt("num_classes", u->preclasses().size());
    ent.setInt("num_type_aliases", u->typeAliases().size());
  } else {
    ent.setStr("result", "file_not_found");
  }

  switch (rl_typeProfileLocals->requestKind) {
    case RequestKind::Warmup:   ent.setStr("request_kind", "warmup");   break;
    case RequestKind::Standard: ent.setStr("request_kind", "standard"); break;
    case RequestKind::NonVM:    ent.setStr("request_kind", "nonVM");    break;
  }
  ent.setInt("request_count", requestCount());

  StructuredLog::log("hhvm_unit_cache", ent);
}
bool profileFunc(const Func* func) {
  if (!shouldPGOFunc(func)) return false;

  // If retranslateAll is enabled and we already passed the point at which it
  // should be scheduled to execute (via the treadmill), then we can't emit
  // more Profile translations. This is to ensure that, when retranslateAll()
  // runs, no more Profile translations are being added to ProfData.
  if (RuntimeOption::EvalJitRetranslateAllRequest != 0 &&
      hasEnoughProfDataToRetranslateAll()) {
    return false;
  }

  if (profData()->optimized(func->getFuncId())) return false;

  // If we already started profiling `func', then we return true and skip the
  // other checks below.
  if (profData()->profiling(func->getFuncId())) return true;

  // Don't start profiling new functions if the size of either main or
  // prof is already above Eval.JitAMaxUsage and we already filled hot.
  auto tcUsage = std::max(code().main().used(), code().prof().used());
  if (tcUsage >= CodeCache::AMaxUsage && !code().hotEnabled()) {
    return false;
  }

  // We have two knobs to control the number of functions we're allowed to
  // profile: Eval.JitProfileRequests and Eval.JitProfileBCSize. We profile new
  // functions until either of these limits is exceeded. In practice, we expect
  // to hit the bytecode size limit first, but we keep the request limit around
  // as a safety net.
  if (RuntimeOption::EvalJitProfileBCSize > 0 &&
      profData()->profilingBCSize() >= RuntimeOption::EvalJitProfileBCSize) {
    return false;
  }

  return requestCount() <= RuntimeOption::EvalJitProfileRequests;
}
bool profileSrcKey(SrcKey sk) {
  if (!shouldPGOFunc(*sk.func())) return false;
  if (profData()->optimized(sk.funcID())) return false;
  if (profData()->profiling(sk.funcID())) return true;

  // Don't start profiling new functions if the size of either main or
  // prof is already above Eval.JitAMaxUsage and we already filled hot.
  auto tcUsage = std::max(code().main().used(), code().prof().used());
  if (tcUsage >= CodeCache::AMaxUsage && !code().hotEnabled()) {
    return false;
  }

  // We have two knobs to control the number of functions we're allowed to
  // profile: Eval.JitProfileRequests and Eval.JitProfileBCSize. We profile new
  // functions until either of these limits is exceeded. In practice, we expect
  // to hit the bytecode size limit first, but we keep the request limit around
  // as a safety net.
  if (RuntimeOption::EvalJitProfileBCSize > 0 &&
      profData()->profilingBCSize() >= RuntimeOption::EvalJitProfileBCSize) {
    return false;
  }

  return requestCount() <= RuntimeOption::EvalJitProfileRequests;
}
// Profiling is done once we've seen enough requests, or when running in
// client execution mode without profile recording enabled.
static inline bool doneProfiling() {
  return requestCount() >= RuntimeOption::EvalJitProfileInterpRequests ||
         (RuntimeOption::ClientExecutionMode() &&
          !RuntimeOption::EvalJitProfileRecord);
}