// Report a structured "hhvm_rfh" sample each time server uptime crosses one
// of the thresholds in rfhBuckets. `finished` is the number of requests the
// server has retired so far. Safe to call concurrently: nextRFH is advanced
// with a CAS so each bucket is logged exactly once, by exactly one caller.
static void checkRFH(int64_t finished) {
  auto i = nextRFH.load(std::memory_order_relaxed);
  // Fast path: all buckets already reported, or structured logging disabled.
  if (i == rfhBuckets.size() || !StructuredLog::enabled()) {
    return;
  }
  auto const uptime = f_server_uptime();
  if (uptime == -1) return;  // uptime not available yet
  assertx(uptime >= 0);
  // Catch up on every bucket whose threshold the current uptime has passed.
  while (i < rfhBuckets.size() && uptime >= rfhBuckets[i]) {
    // rfhBuckets must be strictly increasing for this walk to terminate
    // correctly.
    assertx(i == 0 || rfhBuckets[i - 1] < rfhBuckets[i]);
    if (!nextRFH.compare_exchange_strong(i, i + 1, std::memory_order_relaxed)) {
      // Someone else reported the sample at i. Try again with the current
      // value of nextRFH.
      continue;
    }
    // "bucket" and "uptime" will always be the same as long as the server
    // retires at least one request in each second of wall time.
    StructuredLogEntry cols;
    cols.setInt("requests", finished);
    cols.setInt("bucket", rfhBuckets[i]);
    cols.setInt("uptime", uptime);
    StructuredLog::log("hhvm_rfh", cols);
    ++i;
  }
}
/*
 * If the jit maturity counter is enabled, update it with the current amount of
 * emitted code.
 */
void reportJitMaturity(const CodeCache& code) {
  // Lazily created on first call; exported through ServiceData.
  auto static jitMaturityCounter = ServiceData::createCounter("jit.maturity");
  // Optimized translations are faster than profiling translations, which are
  // faster than the interpreter. But when optimized translations are
  // generated, some profiling translations will become dead. We assume the
  // incremental value of an optimized translation over the corresponding
  // profiling translations is comparable to the incremental value of a
  // profiling translation of similar size; thus we don't have to apply
  // different weights to code in different regions.
  auto const codeSize =
    code.hot().used() + code.main().used() + code.prof().used();
  if (jitMaturityCounter) {
    // EvalJitMatureSize is supposed to be set to approximately 20% of the
    // code that will give us full performance, so recover the "fully mature"
    // size with some math.
    auto const fullSize = RuntimeOption::EvalJitMatureSize * 5;
    // Maturity is a percentage, clamped at 100.
    auto const after = codeSize >= fullSize ? 100
                                            : (codeSize * 100 / fullSize);
    auto const before = jitMaturityCounter->getValue();
    // The counter only ever moves forward.
    if (after > before) jitMaturityCounter->setValue(after);
  }
  // Log the time-to-maturity exactly once per process. The relaxed load is a
  // cheap pre-check; the exchange ensures only one thread wins the race.
  if (!s_loggedJitMature.load(std::memory_order_relaxed) &&
      StructuredLog::enabled() &&
      codeSize >= RuntimeOption::EvalJitMatureSize &&
      !s_loggedJitMature.exchange(true, std::memory_order_relaxed)) {
    StructuredLogEntry cols;
    cols.setInt("jit_mature_sec", time(nullptr) - HttpServer::StartTime);
    StructuredLog::log("hhvm_warmup", cols);
  }
}
// Record one Xenon sample: capture a backtrace (optionally from a wait
// handle) and either ship it to the configured structured-log destination or
// buffer it in m_stackSnapshots for later retrieval.
void XenonRequestLocalData::log(Xenon::SampleType t,
                                const char* info,
                                c_WaitableWaitHandle* wh) {
  // Samples arriving outside of an active request are dropped.
  if (!m_inRequest) return;

  TRACE(1, "XenonRequestLocalData::log\n");
  auto const sampledAt = time(nullptr);
  // For enter samples, skip the top (and inlined) frames of the capture.
  auto const skipEnterFrames = (t == Xenon::EnterSample);
  auto bt = createBacktrace(BacktraceArgs()
                              .skipTop(skipEnterFrames)
                              .skipInlined(skipEnterFrames)
                              .fromWaitHandle(wh)
                              .withMetadata()
                              .ignoreArgs());

  auto const& logDest = RuntimeOption::XenonStructLogDest;
  if (logDest.empty()) {
    // No structured-log destination configured: keep the sample in memory.
    m_stackSnapshots.append(make_darray(
      s_time, sampledAt,
      s_stack, bt,
      s_isWait, !Xenon::isCPUTime(t),
      s_type, Xenon::show(t),
      s_info, info ? info : ""
    ));
    return;
  }

  StructuredLogEntry cols;
  cols.setStr("type", Xenon::show(t));
  if (info) cols.setStr("info", info);
  addBacktraceToStructLog(bt, cols);
  StructuredLog::log(logDest, cols);
}
void BootStats::done() { if (!s_started) return; s_started = false; auto total = ResourceUsage::sinceEpoch() - s_start; Logger::Info( folly::sformat("BootStats: all done, took {} total", total.toString())); BootStats::s_instance->add("TOTAL", total); BootStats::s_instance->dumpMarks(); if (StructuredLog::enabled()) { std::lock_guard<std::mutex> lock(s_instance->m_marks_guard_); StructuredLogEntry cols; for (auto sample : s_instance->m_marks) { cols.setInt(sample.first, sample.second.wall().count()); cols.setInt(sample.first + " CPU", sample.second.cpu().count()); } StructuredLog::log("hhvm_boot_timer", cols); cols.clear(); for (auto sample : s_instance->m_marks) { cols.setInt(sample.first, sample.second.rssMb() * (1 << 20)); // To bytes. } StructuredLog::log("hhvm_boot_memory", cols); } }
// Record, under `name`, the time elapsed since timestamp `t` (microseconds)
// into `log`, and return the current timestamp so the caller can chain
// measurements. With first=true only the starting timestamp is returned and
// nothing is recorded. Returns 0 when extern-compiler perf logging is off.
int64_t logTime(
  StructuredLogEntry& log,
  int64_t t,
  const char* name,
  bool first = false
) {
  if (!RuntimeOption::EvalLogExternCompilerPerf) return 0;
  auto const now = Timer::GetCurrentTimeMicros();
  if (!first) {
    auto const elapsed = now - t;
    log.setInt(name, elapsed);
    FTRACE(2, "{} took {} us\n", name, elapsed);
  }
  return now;
}
void logSerDes(const char* format, const char* op, const String& serialized, const Variant& value) { StructuredLogEntry sample; sample.setStr("format", format); sample.setStr("operation", op); DataType t = value.getType(); sample.setStr("type", tname(t)); if (isArrayType(t)) { sample.setInt("array_len", value.toCArrRef().length()); } if (auto sd = serialized.get()) { sample.setInt("length_ser", sd->size()); sample.setInt("hash_ser", sd->hash()); } StackTrace st; sample.setStackTrace("stack", st); StructuredLog::log("hhvm_serdes", sample); }
void logLoad( StructuredLogEntry& ent, StringData* path, const char* cwd, String rpath, const CachedUnit& cu ) { ent.setStr("include_path", path->data()); ent.setStr("current_dir", cwd); ent.setStr("resolved_path", rpath.data()); if (auto const u = cu.unit) { const StringData* err; int line; if (u->compileTimeFatal(err, line)) { ent.setStr("result", "compile_fatal"); ent.setStr("error", err->data()); } else if (u->parseFatal(err, line)) { ent.setStr("result", "parse_fatal"); ent.setStr("error", err->data()); } else { ent.setStr("result", "success"); } ent.setStr("sha1", u->sha1().toString()); ent.setStr("repo_sn", folly::to<std::string>(u->sn())); ent.setStr("repo_id", folly::to<std::string>(u->repoID())); ent.setInt("bc_len", u->bclen()); ent.setInt("num_litstrs", u->numLitstrs()); ent.setInt("num_funcs", u->funcs().size()); ent.setInt("num_classes", u->preclasses().size()); ent.setInt("num_type_aliases", u->typeAliases().size()); } else { ent.setStr("result", "file_not_found"); } switch (rl_typeProfileLocals->requestKind) { case RequestKind::Warmup: ent.setStr("request_kind", "warmup"); break; case RequestKind::Standard: ent.setStr("request_kind", "standard"); break; case RequestKind::NonVM: ent.setStr("request_kind", "nonVM"); break; } ent.setInt("request_count", requestCount()); StructuredLog::log("hhvm_unit_cache", ent); }
void logTranslation(const TransEnv& env) { auto nanos = HPHP::Timer::GetThreadCPUTimeNanos() - env.unit->startNanos(); StructuredLogEntry cols; auto& context = env.unit->context(); auto kind = show(context.kind); cols.setStr("trans_kind", !debug ? kind : kind + "_debug"); if (context.func) { cols.setStr("func", context.func->fullName()->data()); } cols.setInt("jit_sample_rate", RuntimeOption::EvalJitSampleRate); // timing info cols.setInt("jit_micros", nanos / 1000); // hhir stats cols.setInt("max_tmps", env.unit->numTmps()); cols.setInt("max_blocks", env.unit->numBlocks()); cols.setInt("max_insts", env.unit->numInsts()); auto hhir_blocks = rpoSortCfg(*env.unit); cols.setInt("num_blocks", hhir_blocks.size()); size_t num_insts = 0; for (auto b : hhir_blocks) num_insts += b->instrs().size(); cols.setInt("num_insts", num_insts); // vasm stats cols.setInt("max_vreg", env.vunit->next_vr); cols.setInt("max_vblocks", env.vunit->blocks.size()); cols.setInt("max_vcalls", env.vunit->vcallArgs.size()); size_t max_vinstr = 0; for (auto& blk : env.vunit->blocks) max_vinstr += blk.code.size(); cols.setInt("max_vinstr", max_vinstr); cols.setInt("num_vconst", env.vunit->constToReg.size()); auto vblocks = sortBlocks(*env.vunit); size_t num_vinstr[kNumAreas] = {0, 0, 0}; size_t num_vblocks[kNumAreas] = {0, 0, 0}; for (auto b : vblocks) { const auto& block = env.vunit->blocks[b]; num_vinstr[(int)block.area_idx] += block.code.size(); num_vblocks[(int)block.area_idx]++; } cols.setInt("num_vinstr_main", num_vinstr[(int)AreaIndex::Main]); cols.setInt("num_vinstr_cold", num_vinstr[(int)AreaIndex::Cold]); cols.setInt("num_vinstr_frozen", num_vinstr[(int)AreaIndex::Frozen]); cols.setInt("num_vblocks_main", num_vblocks[(int)AreaIndex::Main]); cols.setInt("num_vblocks_cold", num_vblocks[(int)AreaIndex::Cold]); cols.setInt("num_vblocks_frozen", num_vblocks[(int)AreaIndex::Frozen]); // finish & log StructuredLog::log("hhvm_jit", cols); }
std::unique_ptr<UnitEmitter> compile( const char* filename, const MD5& md5, folly::StringPiece code, const Native::FuncTable& nativeFuncs, bool forDebuggerEval, AsmCallbacks* callbacks ) { if (!isRunning()) { start(); } std::string prog; std::unique_ptr<Unit> u; try { m_compilations++; StructuredLogEntry log; log.setStr("filename", filename); int64_t t = logTime(log, 0, nullptr, true); writeProgram(filename, md5, code, forDebuggerEval); t = logTime(log, t, "send_source"); prog = readResult(&log); t = logTime(log, t, "receive_hhas"); auto ue = assemble_string(prog.data(), prog.length(), filename, md5, nativeFuncs, false /* swallow errors */, callbacks ); logTime(log, t, "assemble_hhas"); if (RuntimeOption::EvalLogExternCompilerPerf) { StructuredLog::log("hhvm_detailed_frontend_performance", log); } return ue; } catch (CompileException& ex) { stop(); if (m_options.verboseErrors) { Logger::FError("ExternCompiler Error: {}", ex.what()); } throw; } catch (CompilerFatal& ex) { // this catch is here so we don't fall into the std::runtime_error one throw; } catch (FatalErrorException&) { // we want these to propagate out of the compiler throw; } catch (AssemblerUnserializationError& ex) { // This (probably) has nothing to do with the php/hhas, so don't do the // verbose error handling we have in the AssemblerError case. throw; } catch (AssemblerError& ex) { if (m_options.verboseErrors) { auto const msg = folly::sformat( "{}\n" "========== PHP Source ==========\n" "{}\n" "========== ExternCompiler Result ==========\n" "{}\n", ex.what(), code, prog); Logger::FError("ExternCompiler Generated a bad unit: {}", msg); // Throw the extended message to ensure the fataling unit contains the // additional context throw AssemblerError(msg); } throw; } catch (std::runtime_error& ex) { if (m_options.verboseErrors) { Logger::FError("ExternCompiler Runtime Error: {}", ex.what()); } throw; } }