Exemplo n.º 1
0
/*
 * Report request-throughput progress via StructuredLog: emit one "hhvm_rfh"
 * sample for each entry of rfhBuckets whose uptime threshold the server has
 * now reached.  `finished` is the count of requests retired so far.
 *
 * nextRFH holds the index of the first bucket not yet reported; each bucket
 * is claimed with a CAS so concurrent callers log it at most once.
 */
static void checkRFH(int64_t finished) {
  // Snapshot the next unreported bucket index; bail out early if every
  // bucket has already been reported or structured logging is off.
  auto i = nextRFH.load(std::memory_order_relaxed);
  if (i == rfhBuckets.size() || !StructuredLog::enabled()) {
    return;
  }

  // -1 signals the uptime is unavailable (presumably the server hasn't
  // fully started yet — TODO confirm against f_server_uptime); skip.
  auto const uptime = f_server_uptime();
  if (uptime == -1) return;
  assertx(uptime >= 0);

  while (i < rfhBuckets.size() && uptime >= rfhBuckets[i]) {
    // Buckets must be strictly increasing for this walk to be meaningful.
    assertx(i == 0 || rfhBuckets[i - 1] < rfhBuckets[i]);
    // Try to claim bucket i.  On failure, compare_exchange_strong updates
    // `i` to the current value of nextRFH, so we simply retry from there.
    if (!nextRFH.compare_exchange_strong(i, i + 1, std::memory_order_relaxed)) {
      // Someone else reported the sample at i. Try again with the current
      // value of nextRFH.
      continue;
    }

    // "bucket" and "uptime" will always be the same as long as the server
    // retires at least one request in each second of wall time.
    StructuredLogEntry cols;
    cols.setInt("requests", finished);
    cols.setInt("bucket", rfhBuckets[i]);
    cols.setInt("uptime", uptime);
    StructuredLog::log("hhvm_rfh", cols);

    ++i;
  }
}
Exemplo n.º 2
0
/*
 * Finish boot-time accounting: record the total boot duration, dump all
 * collected marks, and (when structured logging is enabled) emit one
 * "hhvm_boot_timer" entry (wall/CPU times per mark) and one
 * "hhvm_boot_memory" entry (RSS per mark, in bytes).
 *
 * Idempotent: only the first call after start() does anything.
 */
void BootStats::done() {
  if (!s_started) return;
  s_started = false;

  auto total = ResourceUsage::sinceEpoch() - s_start;
  Logger::Info(
    folly::sformat("BootStats: all done, took {} total", total.toString()));

  BootStats::s_instance->add("TOTAL", total);
  BootStats::s_instance->dumpMarks();

  if (StructuredLog::enabled()) {
    std::lock_guard<std::mutex> lock(s_instance->m_marks_guard_);
    StructuredLogEntry cols;
    // Iterate by const reference: iterating by value copied each
    // (name, usage) pair — including the std::string key — per iteration.
    for (const auto& sample : s_instance->m_marks) {
      cols.setInt(sample.first, sample.second.wall().count());
      cols.setInt(sample.first + " CPU", sample.second.cpu().count());
    }
    StructuredLog::log("hhvm_boot_timer", cols);
    cols.clear();
    for (const auto& sample : s_instance->m_marks) {
      cols.setInt(sample.first, sample.second.rssMb() * (1 << 20)); // To bytes.
    }
    StructuredLog::log("hhvm_boot_memory", cols);
  }
}
Exemplo n.º 3
0
/*
 * Publish the "jit.maturity" counter (0-100) derived from how much machine
 * code has been emitted so far, and emit a one-time "hhvm_warmup" log
 * sample the first time the JIT crosses the mature-size threshold.
 */
void reportJitMaturity(const CodeCache& code) {
  static auto counter = ServiceData::createCounter("jit.maturity");

  // Weigh hot, main, and prof code equally.  Optimized translations beat
  // profiling ones, which beat the interpreter — but optimizing also kills
  // some profiling code, and we take the incremental value of an optimized
  // translation over its profiling counterparts to be comparable to that of
  // a similarly-sized profiling translation, so no per-region weighting.
  auto const emitted =
    code.hot().used() + code.main().used() + code.prof().used();

  if (counter) {
    // EvalJitMatureSize is tuned to roughly 20% of the code size that gives
    // full performance; multiply back up to recover the "fully mature" size.
    auto const matureSize = RuntimeOption::EvalJitMatureSize * 5;
    auto const maturity =
      emitted >= matureSize ? 100 : (emitted * 100 / matureSize);
    // The counter only ever ratchets upward.
    if (maturity > counter->getValue()) counter->setValue(maturity);
  }

  // Log the warmup sample exactly once: the relaxed load is a cheap
  // fast-path check, and the exchange arbitrates between racing threads.
  if (!s_loggedJitMature.load(std::memory_order_relaxed) &&
      StructuredLog::enabled() &&
      emitted >= RuntimeOption::EvalJitMatureSize &&
      !s_loggedJitMature.exchange(true, std::memory_order_relaxed)) {
    StructuredLogEntry cols;
    cols.setInt("jit_mature_sec", time(nullptr) - HttpServer::StartTime);
    StructuredLog::log("hhvm_warmup", cols);
  }
}
Exemplo n.º 4
0
/*
 * Populate `ent` with the details of a unit-cache lookup and emit it as an
 * "hhvm_unit_cache" structured-log entry.
 *
 * `path` is the include path as requested, `cwd` the directory it was
 * resolved against, `rpath` the resolved filesystem path, and `cu` the
 * cache result; cu.unit is null when the file was not found.
 */
void logLoad(
  StructuredLogEntry& ent,
  StringData* path,
  const char* cwd,
  String rpath,
  const CachedUnit& cu
) {
  ent.setStr("include_path", path->data());
  ent.setStr("current_dir", cwd);
  ent.setStr("resolved_path", rpath.data());
  if (auto const u = cu.unit) {
    // Classify the outcome: compile-time fatal beats parse fatal beats
    // success; on either fatal, record the error text too.
    const StringData* err;
    int line;
    if (u->compileTimeFatal(err, line)) {
      ent.setStr("result", "compile_fatal");
      ent.setStr("error", err->data());
    } else if (u->parseFatal(err, line)) {
      ent.setStr("result", "parse_fatal");
      ent.setStr("error", err->data());
    } else {
      ent.setStr("result", "success");
    }

    // Identity of the loaded unit within the repo.
    ent.setStr("sha1", u->sha1().toString());
    ent.setStr("repo_sn", folly::to<std::string>(u->sn()));
    ent.setStr("repo_id", folly::to<std::string>(u->repoID()));

    // Size statistics of the unit's contents.
    ent.setInt("bc_len", u->bclen());
    ent.setInt("num_litstrs", u->numLitstrs());
    ent.setInt("num_funcs", u->funcs().size());
    ent.setInt("num_classes", u->preclasses().size());
    ent.setInt("num_type_aliases", u->typeAliases().size());
  } else {
    ent.setStr("result", "file_not_found");
  }

  // Tag the sample with what kind of request triggered the load and how
  // many requests this process has handled so far.
  switch (rl_typeProfileLocals->requestKind) {
  case RequestKind::Warmup: ent.setStr("request_kind", "warmup"); break;
  case RequestKind::Standard: ent.setStr("request_kind", "standard"); break;
  case RequestKind::NonVM: ent.setStr("request_kind", "nonVM"); break;
  }
  ent.setInt("request_count", requestCount());

  StructuredLog::log("hhvm_unit_cache", ent);
}
Exemplo n.º 5
0
void logSerDes(const char* format, const char* op,
               const String& serialized, const Variant& value) {
  StructuredLogEntry sample;
  sample.setStr("format", format);
  sample.setStr("operation", op);
  DataType t = value.getType();
  sample.setStr("type", tname(t));
  if (isArrayType(t)) {
    sample.setInt("array_len", value.toCArrRef().length());
  }
  if (auto sd = serialized.get()) {
    sample.setInt("length_ser", sd->size());
    sample.setInt("hash_ser", sd->hash());
  }
  StackTrace st;
  sample.setStackTrace("stack", st);
  StructuredLog::log("hhvm_serdes", sample);
}
Exemplo n.º 6
0
 /*
  * Record the microseconds elapsed since `t` under `name` on `log` and
  * trace it, returning the current timestamp so calls can be chained.
  *
  * With first=true nothing is recorded; the call just seeds the clock.
  * Returns 0 without logging when extern-compiler perf logging is off.
  */
 int64_t logTime(
   StructuredLogEntry& log,
   int64_t t,
   const char* name,
   bool first = false
 ) {
   if (!RuntimeOption::EvalLogExternCompilerPerf) return 0;
   auto const now = Timer::GetCurrentTimeMicros();
   if (!first) {
     auto const elapsed = now - t;
     log.setInt(name, elapsed);
     FTRACE(2, "{} took {} us\n", name, elapsed);
   }
   return now;
 }
Exemplo n.º 7
0
/*
 * Emit an "hhvm_jit" structured-log sample summarizing one finished
 * translation: its kind, the translated function, CPU time spent, and size
 * statistics for both the HHIR unit and the vasm unit.
 *
 * NOTE(review): the max_* columns come straight from unit accessors, while
 * the num_* columns are recounted here over the sorted (reachable) blocks
 * — so num_* can be smaller than max_* — confirm against the unit APIs.
 */
void logTranslation(const TransEnv& env) {
  auto nanos = HPHP::Timer::GetThreadCPUTimeNanos() - env.unit->startNanos();
  StructuredLogEntry cols;
  auto& context = env.unit->context();
  auto kind = show(context.kind);
  cols.setStr("trans_kind", !debug ? kind : kind + "_debug");
  if (context.func) {
    cols.setStr("func", context.func->fullName()->data());
  }
  cols.setInt("jit_sample_rate", RuntimeOption::EvalJitSampleRate);
  // timing info
  cols.setInt("jit_micros", nanos / 1000);
  // hhir stats
  cols.setInt("max_tmps", env.unit->numTmps());
  cols.setInt("max_blocks", env.unit->numBlocks());
  cols.setInt("max_insts", env.unit->numInsts());
  auto hhir_blocks = rpoSortCfg(*env.unit);
  cols.setInt("num_blocks", hhir_blocks.size());
  size_t num_insts = 0;
  for (auto b : hhir_blocks) num_insts += b->instrs().size();
  cols.setInt("num_insts", num_insts);
  // vasm stats
  cols.setInt("max_vreg", env.vunit->next_vr);
  cols.setInt("max_vblocks", env.vunit->blocks.size());
  cols.setInt("max_vcalls", env.vunit->vcallArgs.size());
  size_t max_vinstr = 0;
  // Iterate by const reference; blocks hold instruction vectors.
  for (auto const& blk : env.vunit->blocks) max_vinstr += blk.code.size();
  cols.setInt("max_vinstr", max_vinstr);
  cols.setInt("num_vconst", env.vunit->constToReg.size());
  auto vblocks = sortBlocks(*env.vunit);
  // Value-initialize so the tallies are zeroed for any value of kNumAreas,
  // instead of hard-coding exactly three initializers.
  size_t num_vinstr[kNumAreas] = {};
  size_t num_vblocks[kNumAreas] = {};
  for (auto b : vblocks) {
    auto const& block = env.vunit->blocks[b];
    num_vinstr[(int)block.area_idx] += block.code.size();
    num_vblocks[(int)block.area_idx]++;
  }
  cols.setInt("num_vinstr_main", num_vinstr[(int)AreaIndex::Main]);
  cols.setInt("num_vinstr_cold", num_vinstr[(int)AreaIndex::Cold]);
  cols.setInt("num_vinstr_frozen", num_vinstr[(int)AreaIndex::Frozen]);
  cols.setInt("num_vblocks_main", num_vblocks[(int)AreaIndex::Main]);
  cols.setInt("num_vblocks_cold", num_vblocks[(int)AreaIndex::Cold]);
  cols.setInt("num_vblocks_frozen", num_vblocks[(int)AreaIndex::Frozen]);
  // finish & log
  StructuredLog::log("hhvm_jit", cols);
}