Example #1
File: cfg.cpp Project: Yermo/hhvm
bool splitCriticalEdges(IRUnit& unit) {
  FTRACE(2, "splitting critical edges\n");
  auto modified = removeUnreachable(unit);
  auto const startBlocks = unit.numBlocks();

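  // Try to split both outgoing edges (taken and next) of each block.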
  for (auto* block : unit.main()->blocks()) {
    splitCriticalEdge(unit, block->takenEdge());
    splitCriticalEdge(unit, block->nextEdge());
  }

  return modified || unit.numBlocks() != startBlocks;
}
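
An edge of the CFG is "critical" when its source block has more than one
successor and its destination has more than one predecessor; splitting it
inserts an empty block on that edge so later passes can place code on that one
path without affecting the others. Below is a minimal, self-contained sketch of
the idea over a toy CFG; Block, isCriticalEdge, and splitEdge are hypothetical
stand-ins for illustration only, not the HHVM IRUnit / splitCriticalEdge API
used in the examples.

#include <memory>
#include <vector>

struct Block {
  std::vector<Block*> preds;
  std::vector<Block*> succs;
};

// An edge src -> dst is critical when src branches to several places and dst
// can be reached from several places.
bool isCriticalEdge(const Block* src, const Block* dst) {
  return src->succs.size() > 1 && dst->preds.size() > 1;
}

// Insert an empty block in the middle of a critical edge and return it;
// return nullptr when the edge does not need splitting.
Block* splitEdge(std::vector<std::unique_ptr<Block>>& cfg,
                 Block* src, Block* dst) {
  if (!isCriticalEdge(src, dst)) return nullptr;
  cfg.push_back(std::make_unique<Block>());
  Block* mid = cfg.back().get();
  for (auto& s : src->succs) if (s == dst) s = mid;  // src -> mid
  for (auto& p : dst->preds) if (p == src) p = mid;  // mid -> dst
  mid->preds.push_back(src);
  mid->succs.push_back(dst);
  return mid;
}
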
Example #2
bool splitCriticalEdges(IRUnit& unit) {
  FTRACE(2, "splitting critical edges\n");
  auto modified = removeUnreachable(unit);
  if (modified) reflowTypes(unit);
  auto const startBlocks = unit.numBlocks();

  // Try to split outgoing edges of each reachable block.  This is safe in
  // a postorder walk since we visit blocks after visiting successors.
  postorderWalk(unit, [&](Block* b) {
    splitCriticalEdge(unit, b->takenEdge());
    splitCriticalEdge(unit, b->nextEdge());
  });

  return modified || unit.numBlocks() != startBlocks;
}
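
The postorder walk above visits a block only after its successors have been
explored, so splitting a block's outgoing edges never disturbs a block that is
still waiting to be processed. A hypothetical sketch of such a walk, reusing
the toy Block type from the previous sketch (this is not HHVM's postorderWalk):

#include <functional>
#include <unordered_set>

void postorderWalkSketch(Block* entry,
                         const std::function<void(Block*)>& visit) {
  std::unordered_set<Block*> seen;
  std::function<void(Block*)> dfs = [&](Block* b) {
    if (!seen.insert(b).second) return;  // already visited
    for (Block* s : b->succs) dfs(s);    // finish all successors first
    visit(b);                            // then visit the block itself
  };
  dfs(entry);
}

// Example use: try to split every edge leaving each block.
//   postorderWalkSketch(entry, [&](Block* b) {
//     for (Block* s : b->succs) splitEdge(cfg, b, s);
//   });
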
Example #3
/*
 * This pass tries to merge blocks and clean up the CFG.
 *
 * In each pass, it visits blocks in reverse post order and tries to
 * (1) convert the conditional branch at the end of the block into a Jmp;
 * (2) merge the block with its unique successor block, if the block is the
 * unique predecessor of that successor;
 * (3) fold Jmp, if it fits the Jmp-to-Jmp pattern.
 *
 * The reverse post order is not essential to the transformation; in the
 * current implementation it helps skip some blocks after a change happens.
 */
void cleanCfg(IRUnit& unit) {
  PassTracer tracer { &unit, Trace::hhir_cfg, "cleanCfg" };
  Timer timer(Timer::optimize_cleancfg);
  do {
    auto const blocks = rpoSortCfg(unit);
    for (auto block : blocks) {
      // Skip malformed unreachable blocks that can appear transiently.
      if (block->empty()) continue;

      // Keep working on the current block until no further changes are made.
      // Since we are visiting in reverse post order, we are sure that after a
      // block is changed here, no new opportunities are exposed in its
      // upstream blocks.
      while (true) {
        simplify(unit, &(block->back()));
        if (absorbDstBlock(unit, block)) continue;
        if (foldJmp(unit, block)) continue;
        break;
      }
    }
  } while (removeUnreachable(unit));
}
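
Step (3) in the header comment, the Jmp-to-Jmp fold, can be pictured on the
same toy CFG used in the earlier sketches: a block whose only effect is an
unconditional jump can be bypassed by retargeting its predecessors straight at
its successor. This is only an illustrative sketch of the pattern, not HHVM's
foldJmp, which works on real HHIR instructions.

// Bypass a block that merely jumps to a single successor; returns true when
// any predecessor was retargeted.
bool foldTrivialJmp(Block* b) {
  // In the toy CFG "only a jump" is approximated by "exactly one successor";
  // the real pass also checks that the block carries no other instructions.
  if (b->succs.size() != 1) return false;
  Block* dst = b->succs.front();
  if (dst == b) return false;             // ignore self-loops
  bool changed = false;
  for (Block* p : b->preds) {
    for (auto& s : p->succs) {
      if (s == b) {
        s = dst;                          // p now jumps straight to dst
        dst->preds.push_back(p);
        changed = true;
      }
    }
  }
  b->preds.clear();                       // b is now unreachable
  return changed;
}
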
Example #4
bool splitCriticalEdges(IRUnit& unit) {
  FTRACE(2, "splitting critical edges\n");
  auto modified = removeUnreachable(unit);
  if (modified) reflowTypes(unit);
  auto const startBlocks = unit.numBlocks();

  std::unordered_set<Block*> newCatches;
  std::unordered_set<Block*> oldCatches;

  // Try to split outgoing edges of each reachable block.  This is safe in
  // a postorder walk since we visit blocks after visiting successors.
  postorderWalk(unit, [&](Block* b) {
    auto bnew = splitCriticalEdge(unit, b->takenEdge());
    splitCriticalEdge(unit, b->nextEdge());

    assertx(!b->next() || !b->next()->isCatch());
    if (bnew && b->taken()->isCatch()) {
      newCatches.emplace(bnew);
      oldCatches.emplace(b->taken());
    }
  });

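  // A block entered along a catch edge begins with BeginCatch (the asserts
  // below check this).  Give each newly inserted split block its own
  // BeginCatch, generated from the old catch block's marker, then drop the
  // now-redundant BeginCatch from the old catch blocks.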
  for (auto b : newCatches) {
    auto bc = b->next()->begin();
    assertx(bc->is(BeginCatch));
    b->prepend(unit.gen(BeginCatch, bc->bcctx()));
  }

  for (auto b : oldCatches) {
    auto bc = b->begin();
    assertx(bc->is(BeginCatch));
    b->erase(bc);
  }

  return modified || unit.numBlocks() != startBlocks;
}
Example #5
void eliminateDeadCode(Trace* trace, IRFactory* irFactory) {
  auto removeEmptyExitTraces = [&] {
    trace->getExitTraces().remove_if([](Trace* exit) {
      return exit->getBlocks().empty();
    });
  };

  // kill unreachable code and remove any traces that are now empty
  BlockList blocks = removeUnreachable(trace, irFactory);
  removeEmptyExitTraces();

  // mark the essential instructions and add them to the initial
  // work list; this will also mark reachable exit traces. All
  // other instructions are marked dead.
  DceState state(irFactory, DceFlags());
  WorkList wl = initInstructions(trace, blocks, state, irFactory);

  // process the worklist
  while (!wl.empty()) {
    auto* inst = wl.front();
    wl.pop_front();
    for (uint32_t i = 0; i < inst->getNumSrcs(); i++) {
      SSATmp* src = inst->getSrc(i);
      if (src->getInstruction()->getOpcode() == DefConst) {
        continue;
      }
      IRInstruction* srcInst = src->getInstruction();
      if (state[srcInst].isDead()) {
        state[srcInst].setLive();
        wl.push_back(srcInst);
      }
      // If <inst> consumes <srcInst> and <srcInst> is an IncRef, we mark
      // <srcInst> as REFCOUNT_CONSUMED. If the source instruction is a
      // GuardType that guards to a maybeCounted type, we need to trace
      // through to the source for refcounting purposes.
      while (srcInst->getOpcode() == GuardType &&
             srcInst->getTypeParam().maybeCounted()) {
        srcInst = srcInst->getSrc(0)->getInstruction();
      }
      if (inst->consumesReference(i) && srcInst->getOpcode() == IncRef) {
        if (inst->getTrace()->isMain() || !srcInst->getTrace()->isMain()) {
          // <srcInst> is consumed from its own trace.
          state[srcInst].setCountConsumed();
        } else {
          // <srcInst> is consumed off trace.
          if (!state[srcInst].countConsumed()) {
            // mark <srcInst> as REFCOUNT_CONSUMED_OFF_TRACE unless it is
            // also consumed from its own trace.
            state[srcInst].setCountConsumedOffTrace();
          }
        }
      }
    }
  }

  // Optimize IncRefs and DecRefs.
  forEachTrace(trace, [&](Trace* t) { optimizeRefCount(t, state); });

  if (RuntimeOption::EvalHHIREnableSinking) {
    // Sink IncRefs consumed off trace.
    sinkIncRefs(trace, irFactory, state);
  }

  // now remove instructions whose id == DEAD
  removeDeadInstructions(trace, state);
  for (Trace* exit : trace->getExitTraces()) {
    removeDeadInstructions(exit, state);
  }

  // and remove empty exit traces
  removeEmptyExitTraces();
}
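
The worklist loop above is a classic mark phase over use-def edges: essential
instructions seed the worklist, and everything they read is marked live
transitively; whatever stays unmarked is dead and removed afterwards. A
simplified, hypothetical sketch of that phase (the Instr type and markLive are
stand-ins, not the DceState/WorkList machinery used above):

#include <deque>
#include <unordered_set>
#include <vector>

struct Instr {
  std::vector<Instr*> srcs;  // definitions this instruction reads
  bool essential = false;    // has side effects, transfers control, etc.
};

// Returns the set of live instructions; everything else can be deleted.
std::unordered_set<Instr*> markLive(const std::vector<Instr*>& all) {
  std::unordered_set<Instr*> live;
  std::deque<Instr*> work;
  for (Instr* i : all) {
    if (i->essential) { live.insert(i); work.push_back(i); }
  }
  while (!work.empty()) {
    Instr* i = work.front();
    work.pop_front();
    for (Instr* def : i->srcs) {
      if (live.insert(def).second) work.push_back(def);  // newly marked
    }
  }
  return live;
}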