Example #1
FuncAnalysis do_analyze_collect(const Index& index,
                                Context const inputCtx,
                                CollectedInfo& collect,
                                ClassAnalysis* clsAnalysis,
                                const std::vector<Type>* knownArgs) {
  auto const ctx = adjust_closure_context(inputCtx);
  FuncAnalysis ai(ctx);

  Trace::Bump bumper{Trace::hhbbc, kTraceFuncBump,
    is_trace_function(ctx.cls, ctx.func)};
  FTRACE(2, "{:-^70}\n-- {}\n", "Analyze", show(ctx));

  /*
   * Set of RPO ids that still need to be visited.
   *
   * Initially, this contains each entry block.  As we visit blocks,
   * we propagate states to their successors and across their back
   * edges---when a state merge changes a block's stateIn, we add that
   * block back to this queue so it gets visited again.
   */
  auto incompleteQ = prepare_incompleteQ(index, ai, clsAnalysis, knownArgs);

  /*
   * Types can grow without bound when we're using union_of to merge
   * states, so occasionally we need to apply a widening operator.
   *
   * Currently this is done with a straightforward heuristic: if you
   * visit a block too many times, we'll start doing all the merges
   * with the widening operator until we've had a chance to visit the
   * block again.  We must then continue iterating in case the actual
   * fixed point is higher than the result of widening.
   *
   * Termination is guaranteed because the widening operator has only
   * finite chains in the type lattice.  (A minimal standalone sketch
   * of this worklist-plus-widening pattern follows the function.)
   */
  auto nonWideVisits = std::vector<uint32_t>(ctx.func->nextBlockId);

  // For debugging, count how many times basic blocks get interpreted.
  auto interp_counter = uint32_t{0};

  /*
   * Iterate until a fixed point.
   *
   * Each time the stateIn for a block changes, we re-insert the
   * block's RPO id in incompleteQ.  Since incompleteQ is ordered,
   * we'll always visit blocks with earlier RPO ids first, which
   * hopefully means fewer iterations.
   */
  while (!incompleteQ.empty()) {
    auto const blk = ai.rpoBlocks[incompleteQ.pop()];

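    // Reset the visit counter once it exceeds the widening limit, so
    // that after a round of widened merges this block's state can be
    // merged precisely again (see the widening comment above).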
    if (nonWideVisits[blk->id]++ > options.analyzeFuncWideningLimit) {
      nonWideVisits[blk->id] = 0;
    }

    FTRACE(2, "block #{}\nin {}{}", blk->id,
      state_string(*ctx.func, ai.bdata[blk->id].stateIn),
      property_state_string(collect.props));
    ++interp_counter;

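    // propagate merges an out-state into a successor block's stateIn
    // (widening once the target has been visited too many times) and
    // re-queues the target whenever its stateIn changes.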
    auto propagate = [&] (php::Block& target, const State& st) {
      auto const needsWiden =
        nonWideVisits[target.id] >= options.analyzeFuncWideningLimit;

      // We haven't optimized the widening operator much, because it
      // doesn't happen in practice right now.  We want to know when
      // it starts happening:
      if (needsWiden) {
        std::fprintf(stderr, "widening in %s on %s\n",
          ctx.unit->filename->data(),
          ctx.func->name->data());
      }

      FTRACE(2, "     {}-> {}\n", needsWiden ? "widening " : "", target.id);
      FTRACE(4, "target old {}",
        state_string(*ctx.func, ai.bdata[target.id].stateIn));

      auto const changed =
        needsWiden ? widen_into(ai.bdata[target.id].stateIn, st)
                   : merge_into(ai.bdata[target.id].stateIn, st);
      if (changed) {
        incompleteQ.push(rpoId(ai, &target));
      }
      FTRACE(4, "target new {}",
        state_string(*ctx.func, ai.bdata[target.id].stateIn));
    };

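    // Interpret the block starting from its current input state.  The
    // interpreter invokes propagate for each successor it reaches, and
    // flags.returned carries the type of any value the block returns.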
    auto stateOut = ai.bdata[blk->id].stateIn;
    auto interp   = Interp { index, ctx, collect, blk, stateOut };
    auto flags    = run(interp, propagate);
    if (flags.returned) {
      ai.inferredReturn = union_of(std::move(ai.inferredReturn),
                                   std::move(*flags.returned));
    }
  }

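  // Transfer the closure use-variable types gathered during
  // interpretation into the analysis result.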
  ai.closureUseTypes = std::move(collect.closureUseTypes);

  if (ctx.func->isGenerator) {
    if (ctx.func->isAsync) {
      // Async generators always return an AsyncGenerator object.
      ai.inferredReturn = objExact(index.builtin_class(s_AsyncGenerator.get()));
    } else {
      // Non-async generators always return a Generator object.
      ai.inferredReturn = objExact(index.builtin_class(s_Generator.get()));
    }
  } else if (ctx.func->isAsync) {
    // Async functions always return WaitH<T>, where T is the type returned
    // internally.
    ai.inferredReturn = wait_handle(index, ai.inferredReturn);
  }

  /*
   * If inferredReturn is TBottom, the callee didn't execute a return
   * at all.  (E.g. it unconditionally throws, or is an abstract
   * function body.)
   *
   * In this case, we leave the return type as TBottom, to indicate
   * the same to callers.
   */
  assert(ai.inferredReturn.subtypeOf(TGen));

  // For debugging, print the final input states for each block.
  FTRACE(2, "{}", [&] {
    auto const bsep = std::string(60, '=') + "\n";
    auto const sep = std::string(60, '-') + "\n";
    auto ret = folly::format(
      "{}function {} ({} block interps):\n{}",
      bsep,
      show(ctx),
      interp_counter,
      bsep
    ).str();
    for (auto& bd : ai.bdata) {
      ret += folly::format(
        "{}block {}:\nin {}",
        sep,
        ai.rpoBlocks[bd.rpoId]->id,
        state_string(*ctx.func, bd.stateIn)
      ).str();
    }
    ret += sep + bsep;
    folly::format(&ret,
      "Inferred return type: {}\n", show(ai.inferredReturn));
    ret += bsep;
    return ret;
  }());

  return ai;
}
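
Below is a minimal, self-contained sketch (not HHBBC code) of the
worklist-plus-widening pattern used above.  The "type lattice" is just
integers with a single Top element, and every name here (Block,
kWideningLimit, merge_into, widen_into) is a hypothetical stand-in for
the real index/interpreter machinery:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <limits>
#include <queue>
#include <vector>

namespace {

// The "state" is a single int; kTop plays the role of a widened,
// maximally imprecise state with nothing above it.
constexpr int kTop = std::numeric_limits<int>::max();
constexpr uint32_t kWideningLimit = 5;  // cf. options.analyzeFuncWideningLimit

struct Block {
  std::vector<int> succs;  // successor block ids
  int delta;               // this block "adds" delta to the incoming state
};

// Ordinary merge: least upper bound (here just max).  Returns true if
// the destination state changed, like merge_into above.
bool merge_into(int& dst, int src) {
  if (src <= dst) return false;
  dst = src;
  return true;
}

// Widening merge: if the state is still growing, jump straight to Top
// so every chain is finite, like widen_into above.
bool widen_into(int& dst, int src) {
  if (src <= dst) return false;
  dst = kTop;
  return true;
}

} // namespace

int main() {
  // Two blocks forming a loop (0 -> 1 -> 0); without widening the
  // states would keep growing and the loop below would never finish.
  std::vector<Block> blocks = { { {1}, 1 }, { {0}, 1 } };
  std::vector<int> stateIn(blocks.size(), 0);
  std::vector<uint32_t> nonWideVisits(blocks.size(), 0);

  // Ordered worklist of block ids, smallest id first (cf. incompleteQ).
  std::priority_queue<int, std::vector<int>, std::greater<int>> incompleteQ;
  incompleteQ.push(0);

  while (!incompleteQ.empty()) {
    auto const id = incompleteQ.top();
    incompleteQ.pop();

    if (nonWideVisits[id]++ > kWideningLimit) nonWideVisits[id] = 0;

    // "Interpret" the block: compute its output state from its input.
    auto const out =
      stateIn[id] == kTop ? kTop : stateIn[id] + blocks[id].delta;

    // Propagate to successors, widening once a target has been seen
    // too many times, and re-queue any target whose input changed.
    for (auto const succ : blocks[id].succs) {
      auto const needsWiden = nonWideVisits[succ] >= kWideningLimit;
      auto const changed = needsWiden ? widen_into(stateIn[succ], out)
                                      : merge_into(stateIn[succ], out);
      if (changed) incompleteQ.push(succ);
    }
  }

  // Both states settle at Top after a handful of visits; with real
  // types, widening likewise guarantees the analysis loop terminates.
  for (std::size_t i = 0; i < stateIn.size(); ++i) {
    if (stateIn[i] == kTop) {
      std::printf("block %zu: Top\n", i);
    } else {
      std::printf("block %zu: %d\n", i, stateIn[i]);
    }
  }
  return 0;
}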