Example #1
bool merge_impl(State& dst, const State& src, JoinOp join) {
  if (!dst.initialized) {
    dst = src;
    return true;
  }

  assert(src.initialized);
  assert(dst.locals.size() == src.locals.size());
  assert(dst.iters.size() == src.iters.size());
  assert(dst.stack.size() == src.stack.size());
  assert(dst.fpiStack.size() == src.fpiStack.size());

  auto changed = false;

  auto const available = dst.thisAvailable && src.thisAvailable;
  if (available != dst.thisAvailable) {
    changed = true;
    dst.thisAvailable = available;
  }

  for (auto i = size_t{0}; i < dst.stack.size(); ++i) {
    auto newT = join(dst.stack[i], src.stack[i]);
    if (dst.stack[i] != newT) {
      changed = true;
      dst.stack[i] = std::move(newT);
    }
  }

  for (auto i = size_t{0}; i < dst.locals.size(); ++i) {
    auto newT = join(dst.locals[i], src.locals[i]);
    if (dst.locals[i] != newT) {
      changed = true;
      dst.locals[i] = std::move(newT);
    }
  }

  for (auto i = size_t{0}; i < dst.iters.size(); ++i) {
    if (merge_into(dst.iters[i], src.iters[i], join)) {
      changed = true;
    }
  }

  for (auto i = size_t{0}; i < dst.fpiStack.size(); ++i) {
    if (merge_into(dst.fpiStack[i], src.fpiStack[i])) {
      changed = true;
    }
  }

  return changed;
}
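
The analysis loops in Examples #10 and #15 call merge_into and widen_into on whole States; those wrappers are not shown in these examples, but given merge_impl above they are presumably thin adapters over two join operators. A minimal sketch, assuming the joins are named union_of (which Example #10's comments mention) and widening_union (an assumed name):

bool merge_into(State& dst, const State& src) {
  // Ordinary least-upper-bound merge at a control-flow join point.
  return merge_impl(dst, src, union_of);
}

bool widen_into(State& dst, const State& src) {
  // Widening merge, used once a block has been visited too many times,
  // so that infinitely growing type chains still reach a fixed point.
  // widening_union is an assumed name; it is not defined in these examples.
  return merge_impl(dst, src, widening_union);
}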
Example #2
bool merge_into(LocationState<tag>& dst, const LocationState<tag>& src) {
  auto changed = false;

  changed |= merge_util(dst.type, dst.type | src.type);

  // Get the least common ancestor across both states.
  changed |= merge_util(dst.value, least_common_ancestor(dst.value, src.value));

  // We may have changed dst.value or dst.type in a way that breaks the
  // LocationState invariants, so check whether we can still keep the value.
  if (dst.value != nullptr && dst.value->type() != dst.type) {
    dst.value = nullptr;
    changed = true;
  }

  changed |= merge_into(dst.typeSrcs, src.typeSrcs);

  if (!dst.maybeChanged && src.maybeChanged) {
    dst.maybeChanged = true;
    changed = true;
  }

  changed |= merge_util(dst.predictedType,
                        dst.predictedType | src.predictedType);
  return changed;
}
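
Two helpers used above (and again in Example #13) are not defined in these examples: merge_util and least_common_ancestor. The sketches below are assumptions consistent with the call sites, not the real implementations:

// Callers pass the already-joined value (e.g. merge_util(dst.type,
// dst.type | src.type)); store it and report whether dst actually changed.
template<class T>
bool merge_util(T& dst, const T& merged) {
  if (dst == merged) return false;
  dst = merged;
  return true;
}

// Deliberately conservative sketch: keep a tracked SSATmp only when both
// sides agree, otherwise drop it.  The real helper may be smarter, e.g.
// walking pass-through definitions to find a common source value.
SSATmp* least_common_ancestor(SSATmp* s1, SSATmp* s2) {
  return s1 == s2 ? s1 : nullptr;
}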
Example #3
static pos_set *merge(pos_set *left, pos_set *right)
{
	pos_set *merged = new pos_set(*left);
	merge_into(merged, right);

	return merged;
}
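
The regex examples (#3, #5, #7, and #8) rely on a pos_set type, a Leaf node, and a pos_set overload of merge_into, none of which are shown. One plausible minimal sketch, assuming a pos_set is simply a set of leaf positions:

#include <set>

struct Leaf;
using pos_set = std::set<Leaf*>;  // set of leaf positions (assumption)

// Leaf presumably derives from RegexNode (not shown here).
struct Leaf {
	int value;        // input symbol; for end positions apparently the
	                  // accept token id (see Example #8)
	bool end;         // true for an end-of-pattern marker position
	pos_set *follow;  // followpos set of this position
};

// Union src into *dst; a plausible reading of the pos_set merge_into
// used by the node constructors and by RegexParser::parse.
static void merge_into(pos_set *dst, const pos_set *src)
{
	dst->insert(src->begin(), src->end());
}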
Example #4
/*
 * Merge two state-stacks.  The stacks must have the same depth.  Returns
 * whether any states changed.
 */
bool merge_into(jit::vector<FrameState>& dst, const jit::vector<FrameState>& src) {
  always_assert(src.size() == dst.size());
  auto changed = false;
  for (auto idx = uint32_t{0}; idx < dst.size(); ++idx) {
    changed |= merge_into(dst[idx], src[idx]);
  }
  return changed;
}
Example #5
StarNode::StarNode(RegexNode *node) {
	this->node = node;

	this->nullable = true;

	this->first = new pos_set(*node->first);
	this->last = new pos_set(*node->last);

	for (Leaf *l : *this->last) {
		merge_into(l->follow, this->first);
	}
}
Example #6
bool merge_memory_stack_into(jit::vector<StackState>& dst,
                             const jit::vector<StackState>& src) {
  auto changed = false;
  // We may need to merge different-sized memory stacks, because a predecessor
  // may not touch some stack memory that another pred did.  We just need to
  // conservatively throw away slots that aren't tracked on all preds.
  auto const result_size = std::min(dst.size(), src.size());
  dst.resize(result_size);
  for (auto i = uint32_t{0}; i < result_size; ++i) {
    changed |= merge_into(dst[i], src[i]);
  }
  return changed;
}
Example #7
ConcatNode::ConcatNode(RegexNode *left, RegexNode *right) {
	this->left = left;
	this->right = right;

	this->nullable = left->nullable && right->nullable;

	if (left->nullable) this->first = merge(left->first, right->first);
	else this->first = new pos_set(*left->first);

	if (right->nullable) this->last = merge(left->last, right->last);
	else this->last = new pos_set(*right->last);

	for (Leaf* l : *left->last) merge_into(l->follow, right->first);
}
Example #8
DFA RegexParser::parse(RegexNode *root)
{
	std::vector<pos_set> states;
	states.push_back(*root->first);

	int first_unmarked = 0;

	DFA dfa;
	dfa.add_state();

	/* TODO: destroy the tree data structure */

	while (first_unmarked < states.size())
	{
		pos_set t = states[first_unmarked];

		/* TODO: adapt this to work with Unicode */
		for (int c = 0; c < 256; c++)
		{
			pos_set u;

			for (Leaf *l : t)
				if (l->value == c) merge_into(&u, l->follow);

			if (u.size() > 0)
			{
				int pos = std::find(states.begin(), states.end(), u) - states.begin();

				if (pos == states.size())
				{
					states.push_back(u);
					int state = dfa.add_state();

					int accept = DFA_OK;
					for (Leaf* l : u)
						if (l->end) accept = MAX(accept, l->value);

					dfa.set_accept(state, accept);
				}

				dfa.set_trans(first_unmarked, c, pos);
			}
		}

		first_unmarked++;
	}

	return dfa;
}
Example #9
void galois_hopper::advance_galois()
{
    assert(level_ != 0);
    ++level_pos_;
    if (level_pos_ == size_/2) {
        ++level_;
        level_pos_ = 0;
        factor_ *= 2;
        skip_ *= 2;
    }
    current_ = (current_ + 2 * skip_) % (size_ + 1);
    assert(current_ != size_);

    // We have completed the cycle. Make sure skip does not overflow
    if (current_ == 0 && merge_into() == 1) {
        assert(level_pos_ == 0);
        ++cycle_;
    }
}
Example #10
FuncAnalysis do_analyze_collect(const Index& index,
                                Context const inputCtx,
                                CollectedInfo& collect,
                                ClassAnalysis* clsAnalysis,
                                const std::vector<Type>* knownArgs) {
  auto const ctx = adjust_closure_context(inputCtx);
  FuncAnalysis ai(ctx);

  Trace::Bump bumper{Trace::hhbbc, kTraceFuncBump,
    is_trace_function(ctx.cls, ctx.func)};
  FTRACE(2, "{:-^70}\n-- {}\n", "Analyze", show(ctx));

  /*
   * Set of RPO ids that still need to be visited.
   *
   * Initially, we need each entry block in this list.  As we visit
   * blocks, we propagate states to their successors and across their
   * back edges---when state merges cause a change to the block
   * stateIn, we will add it to this queue so it gets visited again.
   */
  auto incompleteQ = prepare_incompleteQ(index, ai, clsAnalysis, knownArgs);

  /*
   * There are potentially infinitely growing types when we're using
   * union_of to merge states, so occasionally we need to apply a
   * widening operator.
   *
   * Currently this is done with a straightforward heuristic: if
   * you visit a block too many times, we'll start doing all the
   * merges with the widening operator until we've had a chance to
   * visit the block again.  We must then continue iterating in case
   * the actual fixed point is higher than the result of widening.
   *
   * Termination is guaranteed because the widening operator has only
   * finite chains in the type lattice.
   */
  auto nonWideVisits = std::vector<uint32_t>(ctx.func->nextBlockId);

  // For debugging, count how many times basic blocks get interpreted.
  auto interp_counter = uint32_t{0};

  /*
   * Iterate until a fixed point.
   *
   * Each time a stateIn for a block changes, we re-insert the block's
   * rpo ID in incompleteQ.  Since incompleteQ is ordered, we'll
   * always visit blocks with earlier RPO ids first, which hopefully
   * means fewer iterations.
   */
  while (!incompleteQ.empty()) {
    auto const blk = ai.rpoBlocks[incompleteQ.pop()];

    if (nonWideVisits[blk->id]++ > options.analyzeFuncWideningLimit) {
      nonWideVisits[blk->id] = 0;
    }

    FTRACE(2, "block #{}\nin {}{}", blk->id,
      state_string(*ctx.func, ai.bdata[blk->id].stateIn),
      property_state_string(collect.props));
    ++interp_counter;

    auto propagate = [&] (php::Block& target, const State& st) {
      auto const needsWiden =
        nonWideVisits[target.id] >= options.analyzeFuncWideningLimit;

      // We haven't optimized the widening operator much, because it
      // doesn't happen in practice right now.  We want to know when
      // it starts happening:
      if (needsWiden) {
        std::fprintf(stderr, "widening in %s on %s\n",
          ctx.unit->filename->data(),
          ctx.func->name->data());
      }

      FTRACE(2, "     {}-> {}\n", needsWiden ? "widening " : "", target.id);
      FTRACE(4, "target old {}",
        state_string(*ctx.func, ai.bdata[target.id].stateIn));

      auto const changed =
        needsWiden ? widen_into(ai.bdata[target.id].stateIn, st)
                   : merge_into(ai.bdata[target.id].stateIn, st);
      if (changed) {
        incompleteQ.push(rpoId(ai, &target));
      }
      FTRACE(4, "target new {}",
        state_string(*ctx.func, ai.bdata[target.id].stateIn));
    };

    auto stateOut = ai.bdata[blk->id].stateIn;
    auto interp   = Interp { index, ctx, collect, blk, stateOut };
    auto flags    = run(interp, propagate);
    if (flags.returned) {
      ai.inferredReturn = union_of(std::move(ai.inferredReturn),
                                   std::move(*flags.returned));
    }
  }

  ai.closureUseTypes = std::move(collect.closureUseTypes);

  if (ctx.func->isGenerator) {
    if (ctx.func->isAsync) {
      // Async generators always return an AsyncGenerator object.
      ai.inferredReturn = objExact(index.builtin_class(s_AsyncGenerator.get()));
    } else {
      // Non-async generators always return a Generator object.
      ai.inferredReturn = objExact(index.builtin_class(s_Generator.get()));
    }
  } else if (ctx.func->isAsync) {
    // Async functions always return WaitH<T>, where T is the type returned
    // internally.
    ai.inferredReturn = wait_handle(index, ai.inferredReturn);
  }

  /*
   * If inferredReturn is TBottom, the callee didn't execute a return
   * at all.  (E.g. it unconditionally throws, or is an abstract
   * function body.)
   *
   * In this case, we leave the return type as TBottom, to indicate
   * the same to callers.
   */
  assert(ai.inferredReturn.subtypeOf(TGen));

  // For debugging, print the final input states for each block.
  FTRACE(2, "{}", [&] {
    auto const bsep = std::string(60, '=') + "\n";
    auto const sep = std::string(60, '-') + "\n";
    auto ret = folly::format(
      "{}function {} ({} block interps):\n{}",
      bsep,
      show(ctx),
      interp_counter,
      bsep
    ).str();
    for (auto& bd : ai.bdata) {
      ret += folly::format(
        "{}block {}:\nin {}",
        sep,
        ai.rpoBlocks[bd.rpoId]->id,
        state_string(*ctx.func, bd.stateIn)
      ).str();
    }
    ret += sep + bsep;
    folly::format(&ret,
      "Inferred return type: {}\n", show(ai.inferredReturn));
    ret += bsep;
    return ret;
  }());

  return ai;
}
Example #11
void region_prune_arcs(RegionDesc& region) {
  FTRACE(4, "region_prune_arcs\n");

  region.sortBlocks();
  auto const sortedBlocks = region.blocks();

  // Maps region block ids to their RPO ids.
  auto blockToRPO = std::unordered_map<RegionDesc::BlockId,uint32_t>{};

  auto blockInfos = std::vector<BlockInfo>(sortedBlocks.size());
  auto workQ = dataflow_worklist<uint32_t>(sortedBlocks.size());
  for (auto rpoID = uint32_t{0}; rpoID < sortedBlocks.size(); ++rpoID) {
    auto const& b = sortedBlocks[rpoID];
    auto& binfo = blockInfos[rpoID];
    binfo.blockID = b->id();
    blockToRPO[binfo.blockID] = rpoID;
  }
  workQ.push(0);
  blockInfos[0].in = entry_state(region);

  FTRACE(4, "Iterating:\n");
  do {
    auto const rpoID = workQ.pop();
    auto& binfo = blockInfos[rpoID];
    FTRACE(4, "B{}\n", binfo.blockID);

    binfo.out = binfo.in;
    apply_transfer_function(
      binfo.out,
      region.block(binfo.blockID)->postConds()
    );

    for (auto& succ : region.succs(binfo.blockID)) {
      auto const succRPO = blockToRPO.find(succ);
      assertx(succRPO != end(blockToRPO));
      auto& succInfo = blockInfos[succRPO->second];
      if (preconds_may_pass(*region.block(succInfo.blockID), binfo.out)) {
        if (merge_into(succInfo.in, binfo.out)) {
          FTRACE(5, "  -> {}\n", succInfo.blockID);
          workQ.push(succRPO->second);
        }
      }
    }
  } while (!workQ.empty());

  FTRACE(2, "\nPostConds fixed point:\n{}\n",
    [&] () -> std::string {
      auto ret = std::string{};
      for (auto& s : blockInfos) {
        folly::format(&ret, "B{}:\n{}", s.blockID, show(s.in));
      }
      return ret;
    }()
  );

  // Now remove any edge that looks like it will unconditionally fail type
  // predictions, and completely remove any block that can't be reached.
  using ArcIDs = std::pair<RegionDesc::BlockId,RegionDesc::BlockId>;
  auto toRemove = std::vector<ArcIDs>{};
  for (auto rpoID = uint32_t{0}; rpoID < sortedBlocks.size(); ++rpoID) {
    auto const& binfo = blockInfos[rpoID];

    for (auto& succ : region.succs(binfo.blockID)) {
      auto const succRPO = blockToRPO.find(succ);
      assertx(succRPO != end(blockToRPO));
      auto const& succInfo = blockInfos[succRPO->second];
      if (!binfo.in.initialized ||
          !succInfo.in.initialized ||
          !preconds_may_pass(*region.block(succInfo.blockID), binfo.out)) {
        FTRACE(2, "Pruning arc: B{} -> B{}\n",
               binfo.blockID,
               succInfo.blockID);
        toRemove.emplace_back(binfo.blockID, succInfo.blockID);
      }
    }

    for (auto& r : toRemove) region.removeArc(r.first, r.second);
    toRemove.clear();
  }

  // Get rid of the completely unreachable blocks, now that any arcs to/from
  // them are gone.
  for (auto rpoID = uint32_t{0}; rpoID < sortedBlocks.size(); ++rpoID) {
    auto const& binfo = blockInfos[rpoID];
    if (!binfo.in.initialized) {
      FTRACE(2, "Pruning block: B{}\n", binfo.blockID);
      region.deleteBlock(binfo.blockID);
    }
  }
  FTRACE(2, "\n");
}
Example #12
void region_prune_arcs(RegionDesc& region) {
  FTRACE(4, "region_prune_arcs\n");

  region.sortBlocks();
  auto const sortedBlocks = region.blocks();

  // Maps region block ids to their RPO ids.
  auto blockToRPO = std::unordered_map<RegionDesc::BlockId,uint32_t>{};

  auto blockInfos = std::vector<BlockInfo>(sortedBlocks.size());
  auto workQ = dataflow_worklist<uint32_t>(sortedBlocks.size());
  for (auto rpoID = uint32_t{0}; rpoID < sortedBlocks.size(); ++rpoID) {
    auto const& b = sortedBlocks[rpoID];
    auto& binfo = blockInfos[rpoID];
    binfo.blockID = b->id();
    blockToRPO[binfo.blockID] = rpoID;
  }
  workQ.push(0);
  blockInfos[0].in = entry_state(region);

  FTRACE(4, "Iterating:\n");
  do {
    auto const rpoID = workQ.pop();
    auto& binfo = blockInfos[rpoID];
    FTRACE(4, "B{}\n", binfo.blockID);

    /*
     * This code currently assumes inlined functions were entirely contained
     * within a single profiling translation, and will need updates if we
     * inline bigger things in a way visible to region selection.
     *
     * Note: inlined blocks /may/ have postConditions, if they are the last
     * blocks from profiling translations.  Currently any locations referred to
     * in postconditions for these blocks are for the outermost caller, so this
     * code handles that correctly.
     */
    if (region.block(binfo.blockID)->inlineLevel() != 0) {
      assertx(region.block(binfo.blockID)->typePreConditions().empty());
    }

    binfo.out = binfo.in;
    apply_transfer_function(
      binfo.out,
      region.block(binfo.blockID)->postConds()
    );

    for (auto& succ : region.succs(binfo.blockID)) {
      auto const succRPO = blockToRPO.find(succ);
      assertx(succRPO != end(blockToRPO));
      auto& succInfo = blockInfos[succRPO->second];
      if (preconds_may_pass(*region.block(succInfo.blockID), binfo.out)) {
        if (merge_into(succInfo.in, binfo.out)) {
          FTRACE(5, "  -> {}\n", succInfo.blockID);
          workQ.push(succRPO->second);
        }
      }
    }
  } while (!workQ.empty());

  FTRACE(2, "\nPostConds fixed point:\n{}\n",
    [&] () -> std::string {
      auto ret = std::string{};
      for (auto& s : blockInfos) {
        folly::format(&ret, "B{}:\n{}", s.blockID, show(s.in));
      }
      return ret;
    }()
  );

  // Now remove any edge that looks like it will unconditionally fail type
  // predictions, and completely remove any block that can't be reached.
  using ArcIDs = std::pair<RegionDesc::BlockId,RegionDesc::BlockId>;
  auto toRemove = std::vector<ArcIDs>{};
  for (auto rpoID = uint32_t{0}; rpoID < sortedBlocks.size(); ++rpoID) {
    auto const& binfo = blockInfos[rpoID];

    for (auto& succ : region.succs(binfo.blockID)) {
      auto const succRPO = blockToRPO.find(succ);
      assertx(succRPO != end(blockToRPO));
      auto const& succInfo = blockInfos[succRPO->second];
      if (!binfo.in.initialized ||
          !succInfo.in.initialized ||
          !preconds_may_pass(*region.block(succInfo.blockID), binfo.out)) {
        FTRACE(2, "Pruning arc: B{} -> B{}\n",
               binfo.blockID,
               succInfo.blockID);
        toRemove.emplace_back(binfo.blockID, succInfo.blockID);
      }
    }

    for (auto& r : toRemove) region.removeArc(r.first, r.second);
    toRemove.clear();
  }

  // Get rid of the completely unreachable blocks, now that any arcs to/from
  // them are gone.
  for (auto rpoID = uint32_t{0}; rpoID < sortedBlocks.size(); ++rpoID) {
    auto const& binfo = blockInfos[rpoID];
    if (!binfo.in.initialized) {
      FTRACE(2, "Pruning block: B{}\n", binfo.blockID);
      region.deleteBlock(binfo.blockID);
    }
  }
  FTRACE(2, "\n");
}
Example #13
/*
 * Merge one FrameState into another, returning whether it changed.  Frame
 * pointers and stack depth must match.  If the stack pointer tmps are
 * different, clear the tracked value (we can make a new one, given fp and
 * irSPOff).
 */
bool merge_into(FrameState& dst, const FrameState& src) {
  auto changed = false;

  // Cannot merge irSPOff state, so assert they match.
  always_assert(dst.irSPOff == src.irSPOff);
  always_assert(dst.curFunc == src.curFunc);

  // The only thing that can change the FP is inlining, but we can't have one
  // of the predecessors in an inlined callee while the other isn't.
  always_assert(dst.fpValue == src.fpValue);

  // FrameState for the same function must always have the same number of
  // locals.
  always_assert(src.locals.size() == dst.locals.size());

  // We must always have the same spValue.
  always_assert(dst.spValue == src.spValue);

  if (dst.needRatchet != src.needRatchet) {
    dst.needRatchet = true;
    changed = true;
  }

  if (dst.mbase.value != src.mbase.value) {
    dst.mbase.value = nullptr;
    changed = true;
  }

  if (dst.mbr.ptr != src.mbr.ptr) {
    dst.mbr.ptr = nullptr;
    changed = true;
  }
  changed |= merge_util(dst.mbr.pointee, dst.mbr.pointee | src.mbr.pointee);
  changed |= merge_util(dst.mbr.ptrType, dst.mbr.ptrType | src.mbr.ptrType);

  // The tracked FPI state must always be the same.  Note that the sizes of
  // the FPI stacks may differ, as the FPush associated with one of the merged
  // blocks may be outside the region; in that case we must drop the unknown
  // state.
  dst.fpiStack.resize(std::min(dst.fpiStack.size(), src.fpiStack.size()));
  for (int i = 0; i < dst.fpiStack.size(); ++i) {
    auto& dstInfo = dst.fpiStack[i];
    auto const& srcInfo = src.fpiStack[i];

    always_assert(dstInfo.returnSP == srcInfo.returnSP);
    always_assert(dstInfo.returnSPOff == srcInfo.returnSPOff);
    always_assert(isFPush(dstInfo.fpushOpc) &&
                  dstInfo.fpushOpc == srcInfo.fpushOpc);

    // If one of the merged edges was interp'ed mark the result as interp'ed
    if (!dstInfo.interp && srcInfo.interp) {
      dstInfo.interp = true;
      changed = true;
    }

    // If one of the merged edges spans a call then mark them both as spanning
    if (!dstInfo.spansCall && srcInfo.spansCall) {
      dstInfo.spansCall = true;
      changed = true;
    }

    // Merge the contexts from the respective spills
    if (dstInfo.ctx != srcInfo.ctx) {
      dstInfo.ctx = least_common_ancestor(dstInfo.ctx, srcInfo.ctx);
      changed = true;
    }

    if (dstInfo.ctxType != srcInfo.ctxType) {
      dstInfo.ctxType |= srcInfo.ctxType;
      changed = true;
    }

    // Merge the Funcs
    if (dstInfo.func != nullptr && dstInfo.func != srcInfo.func) {
      dstInfo.func = nullptr;
      changed = true;
    }
  }

  // This is available iff it's available in both states
  changed |= merge_util(dst.thisAvailable,
                        dst.thisAvailable && src.thisAvailable);

  // The frame may span a call if it could have done so in either state.
  changed |= merge_util(dst.frameMaySpanCall,
                        dst.frameMaySpanCall || src.frameMaySpanCall);

  for (auto i = uint32_t{0}; i < src.locals.size(); ++i) {
    changed |= merge_into(dst.locals[i], src.locals[i]);
  }

  changed |= merge_memory_stack_into(dst.stack, src.stack);

  changed |= merge_util(dst.stackModified,
                        dst.stackModified || src.stackModified);

  // Eval stack depth should be the same at merge points.
  always_assert(dst.bcSPOff == src.bcSPOff);

  for (auto const& srcPair : src.predictedTypes) {
    auto dstIt = dst.predictedTypes.find(srcPair.first);
    if (dstIt == dst.predictedTypes.end()) {
      dst.predictedTypes.emplace(srcPair);
      changed = true;
      continue;
    }

    auto const newType = dstIt->second | srcPair.second;
    if (newType != dstIt->second) {
      dstIt->second = newType;
      changed = true;
    }
  }

  return changed;
}
Example #14
bool merge_impl(State& dst, const State& src, JoinOp join) {
  if (!dst.initialized) {
    dst = src;
    return true;
  }

  assert(src.initialized);
  assert(dst.locals.size() == src.locals.size());
  assert(dst.iters.size() == src.iters.size());
  assert(dst.stack.size() == src.stack.size());
  assert(dst.fpiStack.size() == src.fpiStack.size());

  if (src.unreachable) {
    // If we're coming from unreachable code and the dst is already
    // initialized, it doesn't change the dst (whether it is reachable or not).
    return false;
  }
  if (dst.unreachable) {
    // If we're going to code currently believed to be unreachable, take the
    // src state, and consider the dest state changed only if the source state
    // was reachable.
    dst = src;
    return !src.unreachable;
  }

  auto changed = false;

  auto const available = dst.thisAvailable && src.thisAvailable;
  if (available != dst.thisAvailable) {
    changed = true;
    dst.thisAvailable = available;
  }

  for (auto i = size_t{0}; i < dst.stack.size(); ++i) {
    auto newT = join(dst.stack[i].type, src.stack[i].type);
    if (dst.stack[i].type != newT) {
      changed = true;
      dst.stack[i].type = std::move(newT);
    }
    if (dst.stack[i].equivLocal != src.stack[i].equivLocal) {
      changed = true;
      dst.stack[i].equivLocal = NoLocalId;
    }
  }

  for (auto i = size_t{0}; i < dst.locals.size(); ++i) {
    auto newT = join(dst.locals[i], src.locals[i]);
    if (dst.locals[i] != newT) {
      changed = true;
      dst.locals[i] = std::move(newT);
    }
  }

  for (auto i = size_t{0}; i < dst.iters.size(); ++i) {
    if (merge_into(dst.iters[i], src.iters[i], join)) {
      changed = true;
    }
  }

  for (auto i = size_t{0}; i < dst.fpiStack.size(); ++i) {
    if (merge_into(dst.fpiStack[i], src.fpiStack[i])) {
      changed = true;
    }
  }

  dst.equivLocals.resize(
    std::max(dst.equivLocals.size(), src.equivLocals.size()), NoLocalId
  );
  for (auto i = size_t{0}; i < dst.equivLocals.size(); ++i) {
    auto const dstLoc = dst.equivLocals[i];
    auto const srcLoc =
      (i < src.equivLocals.size()) ? src.equivLocals[i] : NoLocalId;
    auto const newLoc = (dstLoc == srcLoc) ? dstLoc : NoLocalId;
    if (newLoc != dstLoc) {
      changed = true;
      dst.equivLocals[i] = newLoc;
    }
  }

  return changed;
}
Example #15
FuncAnalysis do_analyze_collect(const Index& index,
                                Context const ctx,
                                CollectedInfo& collect,
                                ClassAnalysis* clsAnalysis,
                                const std::vector<Type>* knownArgs) {
  assertx(ctx.cls == adjust_closure_context(ctx).cls);
  FuncAnalysis ai{ctx};

  auto const bump = trace_bump_for(ctx.cls, ctx.func);
  Trace::Bump bumper1{Trace::hhbbc, bump};
  Trace::Bump bumper2{Trace::hhbbc_cfg, bump};

  if (knownArgs) {
    FTRACE(2, "{:.^70}\n", "Inline Interp");
  }
  SCOPE_EXIT {
    if (knownArgs) {
      FTRACE(2, "{:.^70}\n", "End Inline Interp");
    }
  };

  FTRACE(2, "{:-^70}\n-- {}\n", "Analyze", show(ctx));

  /*
   * Set of RPO ids that still need to be visited.
   *
   * Initially, we need each entry block in this list.  As we visit
   * blocks, we propagate states to their successors and across their
   * back edges---when state merges cause a change to the block
   * stateIn, we will add it to this queue so it gets visited again.
   */
  auto incompleteQ = prepare_incompleteQ(index, ai, clsAnalysis, knownArgs);

  /*
   * There are potentially infinitely growing types when we're using union_of to
   * merge states, so occasionally we need to apply a widening operator.
   *
   * Currently this is done with a straightforward heuristic: if you visit
   * a block too many times, we'll start doing all the merges with the widening
   * operator. We must then continue iterating in case the actual fixed point is
   * higher than the result of widening. Likewise if we loop too much because of
   * local static types changing, we'll widen those.
   *
   * Termination is guaranteed because the widening operator has only finite
   * chains in the type lattice.
   */
  auto totalVisits = std::vector<uint32_t>(ctx.func->blocks.size());
  auto totalLoops = uint32_t{0};

  // For debugging, count how many times basic blocks get interpreted.
  auto interp_counter = uint32_t{0};

  // Used to force blocks that depended on the types of local statics
  // to be re-analyzed when the local statics change.
  std::unordered_map<borrowed_ptr<const php::Block>, std::map<LocalId, Type>>
    usedLocalStatics;

  /*
   * Iterate until a fixed point.
   *
   * Each time a stateIn for a block changes, we re-insert the block's
   * rpo ID in incompleteQ.  Since incompleteQ is ordered, we'll
   * always visit blocks with earlier RPO ids first, which hopefully
   * means fewer iterations.
   */
  do {
    while (!incompleteQ.empty()) {
      auto const blk = ai.rpoBlocks[incompleteQ.pop()];

      totalVisits[blk->id]++;

      FTRACE(2, "block #{}\nin {}{}", blk->id,
             state_string(*ctx.func, ai.bdata[blk->id].stateIn, collect),
             property_state_string(collect.props));
      ++interp_counter;

      auto propagate = [&] (BlockId target, const State* st) {
        if (!st) {
          FTRACE(2, "     Force reprocess: {}\n", target);
          incompleteQ.push(rpoId(ai, target));
          return;
        }

        auto const needsWiden =
          totalVisits[target] >= options.analyzeFuncWideningLimit;

        FTRACE(2, "     {}-> {}\n", needsWiden ? "widening " : "", target);
        FTRACE(4, "target old {}",
               state_string(*ctx.func, ai.bdata[target].stateIn, collect));

        auto const changed =
          needsWiden ? widen_into(ai.bdata[target].stateIn, *st)
                     : merge_into(ai.bdata[target].stateIn, *st);
        if (changed) {
          incompleteQ.push(rpoId(ai, target));
        }
        FTRACE(4, "target new {}",
               state_string(*ctx.func, ai.bdata[target].stateIn, collect));
      };

      auto stateOut = ai.bdata[blk->id].stateIn;
      auto interp   = Interp { index, ctx, collect, blk, stateOut };
      auto flags    = run(interp, propagate);
      if (any(collect.opts & CollectionOpts::EffectFreeOnly) &&
          !collect.effectFree) {
        break;
      }
      // We only care about the usedLocalStatics from the last visit
      if (flags.usedLocalStatics) {
        usedLocalStatics[blk] = std::move(*flags.usedLocalStatics);
      } else {
        usedLocalStatics.erase(blk);
      }

      if (flags.returned) {
        ai.inferredReturn |= std::move(*flags.returned);
      }
    }

    if (any(collect.opts & CollectionOpts::EffectFreeOnly) &&
        !collect.effectFree) {
      break;
    }

    // maybe some local statics changed type since the last time their
    // blocks were visited.

    if (totalLoops++ >= options.analyzeFuncWideningLimit) {
      // If we loop too many times because of static locals, widen them to
      // ensure termination.
      for (auto& t : collect.localStaticTypes) {
        t = widen_type(std::move(t));
      }
    }

    for (auto const& elm : usedLocalStatics) {
      for (auto const& ls : elm.second) {
        if (collect.localStaticTypes[ls.first] != ls.second) {
          incompleteQ.push(rpoId(ai, elm.first->id));
          break;
        }
      }
    }
  } while (!incompleteQ.empty());

  ai.closureUseTypes = std::move(collect.closureUseTypes);
  ai.cnsMap = std::move(collect.cnsMap);
  ai.readsUntrackedConstants = collect.readsUntrackedConstants;
  ai.mayUseVV = collect.mayUseVV;
  ai.effectFree = collect.effectFree;
  ai.unfoldableFuncs = collect.unfoldableFuncs;

  index.fixup_return_type(ctx.func, ai.inferredReturn);

  /*
   * If inferredReturn is TBottom, the callee didn't execute a return
   * at all.  (E.g. it unconditionally throws, or is an abstract
   * function body.)
   *
   * In this case, we leave the return type as TBottom, to indicate
   * the same to callers.
   */
  assert(ai.inferredReturn.subtypeOf(TGen));

  // For debugging, print the final input states for each block.
  FTRACE(2, "{}", [&] {
    auto const bsep = std::string(60, '=') + "\n";
    auto const sep = std::string(60, '-') + "\n";
    auto ret = folly::format(
      "{}function {} ({} block interps):\n{}",
      bsep,
      show(ctx),
      interp_counter,
      bsep
    ).str();
    for (auto& bd : ai.bdata) {
      folly::format(
        &ret,
        "{}block {}:\nin {}",
        sep,
        ai.rpoBlocks[bd.rpoId]->id,
        state_string(*ctx.func, bd.stateIn, collect)
      );
    }
    ret += sep + bsep;
    folly::format(&ret, "Inferred return type: {}\n", show(ai.inferredReturn));
    ret += bsep;
    return ret;
  }());

  // Do this after the tracing above
  ai.localStaticTypes = std::move(collect.localStaticTypes);
  return ai;
}