Example #1
void LinearScan::removeUnusedSpillsAux(Trace* trace) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end(); ) {
    IRInstruction::Iterator next = it; ++next;
    IRInstruction* inst = *it;
    if (inst->getOpcode() == Spill && inst->getDst()->getUseCount() == 0) {
      instList.erase(it);
      SSATmp* src = inst->getSrc(0);
      if (src->decUseCount() == 0) {
        Opcode srcOpc = src->getInstruction()->getOpcode();
        // Not all instructions are able to take noreg as their dest
        // reg.  We pick LdLoc and IncRef because they occur often.
        if (srcOpc == IncRef || srcOpc == LdLoc) {
          for (int locIndex = 0;
               locIndex < src->numNeededRegs();
               ++locIndex) {
            src->setReg(InvalidReg, locIndex);
          }
        }
      }
    }
    it = next;
  }
}
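
This and several of the traversals below lean on the same idiom: capture the successor iterator before erasing, because std::list::erase invalidates only iterators to the erased node. A minimal, self-contained sketch of that pattern in plain standard C++ (illustrative only, not HHVM code):

#include <cstdio>
#include <list>

int main() {
  std::list<int> xs = {1, 2, 3, 4, 5};
  for (auto it = xs.begin(); it != xs.end(); ) {
    auto next = it;     // remember the successor first...
    ++next;
    if (*it % 2 == 0) {
      xs.erase(it);     // ...because erase() invalidates `it` itself
    }
    it = next;          // the successor iterator remains valid
  }
  for (int x : xs) std::printf("%d ", x);  // prints: 1 3 5
  return 0;
}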
Example #2
void MemMap::sinkStores(StoreList& stores) {
  // Sink dead stores into the exit edges that occur between the dead store
  // and the next store.
  StoreList::reverse_iterator it, end;
  for (it = stores.rbegin(), end = stores.rend(); it != end; ++it) {
    IRInstruction* store = it->first;

    if (store->getId() != DEAD) {
      continue;
    }

    std::vector<IRInstruction*>::iterator i, e;
    for (i = it->second.begin(), e = it->second.end(); i != e; ++i) {
      IRInstruction* guard = *i;

      IRInstruction* clone = store->clone(factory);
      if (store->getDst() != NULL) {
        factory->getSSATmp(clone);
      }

      guard->getLabel()->getParent()->prependInstruction(clone);
    }

    // StRefs cannot just be removed; they have to be converted into Movs,
    // as the destination of the StRef still has the DecRef attached to it.
    if (store->getOpcode() == StRef || store->getOpcode() == StRefNT) {
      store->setOpcode(Mov);
      store->setSrc(1, NULL);
      store->setNumSrcs(1);
      store->setId(LIVE);
    }
  }
}
Example #3
// If main trace ends with a conditional jump with no side-effects on exit,
// hook it to the exitTrace and make it a TraceExitType::NormalCc
static void hoistConditionalJumps(Trace* trace, IRFactory* irFactory) {
  IRInstruction::List& instList = trace->getInstructionList();
  IRInstruction::Iterator tail  = instList.end();
  IRInstruction* jccInst        = nullptr;
  IRInstruction* exitInst       = nullptr;
  IRInstruction* exitCcInst     = nullptr;
  Opcode opc = OpAdd;
  // Normally Jcc comes before a Marker
  for (int idx = 3; idx >= 0; idx--) {
    tail--; // go back to the previous instruction
    IRInstruction* inst = *tail;
    opc = inst->getOpcode();
    if (opc == ExitTrace) {
      exitInst = inst;
      continue;
    }
    if (opc == Marker) {
      continue;
    }
    if (jccCanBeDirectExit(opc)) {
      jccInst = inst;
      break;
    }
    break;
  }
  if (jccCanBeDirectExit(opc)) {
    SSATmp* dst = jccInst->getDst();
    Trace* targetTrace = jccInst->getLabel()->getParent();
    IRInstruction::List& targetInstList = targetTrace->getInstructionList();
    IRInstruction::Iterator targetInstIter = targetInstList.begin();
    targetInstIter++; // skip over label

    // Check for a NormalCc exit with no side effects
    for (IRInstruction::Iterator it = targetInstIter;
         it != targetInstList.end();
         ++it) {
      IRInstruction* instr = (*it);
      // Extend to support ExitSlow, ExitSlowNoProgress, ...
      Opcode opc = instr->getOpcode();
      if (opc == ExitTraceCc) {
        exitCcInst = instr;
        break;
      } else if (opc == Marker) {
        continue;
      } else {
        // Do not optimize if there are other instructions
        break;
      }
    }

    if (exitInst && exitCcInst) {
      // Found both exits, link them to Jcc for codegen
      assert(dst);
      exitCcInst->appendSrc(irFactory->arena(), dst);
      exitInst->appendSrc(irFactory->arena(), dst);
      // Set flag so Jcc and exits know this is active
      dst->setTCA(kIRDirectJccJmpActive);
    }
  }
}
// Create a spill slot for <tmp>.
uint32_t LinearScan::createSpillSlot(SSATmp* tmp) {
  uint32_t slotId = m_slots.size();
  tmp->setSpillSlot(slotId);
  IRInstruction* spillInst = m_irFactory->gen(Spill, tmp);
  SSATmp* spillTmp = spillInst->getDst();
  SlotInfo si;
  si.m_spillTmp = spillTmp;
  si.m_latestReload = tmp;
  m_slots.push_back(si);
  // The spill slot inherits the last use ID of the spilled tmp.
  si.m_spillTmp->setLastUseId(tmp->getLastUseId());
  return slotId;
}
Example #5
/*
 * Insert a DbgAssertTv instruction for each stack location stored to by
 * a SpillStack instruction
 */
static void insertSpillStackAsserts(IRInstruction& inst, IRFactory* factory) {
  SSATmp* sp = inst.getDst();
  auto const vals = inst.getSrcs().subpiece(2);
  auto* block = inst.getBlock();
  auto pos = block->iteratorTo(&inst); ++pos;
  for (unsigned i = 0, offset = 0, n = vals.size(); i < n; ++i) {
    Type t = vals[i]->getType();
    if (t == Type::ActRec) {
      offset += kNumActRecCells;
      i += kSpillStackActRecExtraArgs;
    } else {
      if (t.subtypeOf(Type::Gen)) {
        IRInstruction* addr = factory->gen(LdStackAddr, sp,
                                           factory->defConst(offset));
        block->insert(pos, addr);
        IRInstruction* check = factory->gen(DbgAssertPtr, addr->getDst());
        block->insert(pos, check);
      }
      ++offset;
    }
  }
}
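
The offset bookkeeping above can be shown in isolation: an ActRec value covers kNumActRecCells stack cells and its extra sources carry no cell of their own, while every other value covers exactly one cell. A simplified, hypothetical sketch (the two constants are placeholders, not the real HHVM values):

#include <cstdio>
#include <vector>

enum class Kind { Cell, ActRec };

int main() {
  // Placeholder constants; the real ones live in the HHVM runtime headers.
  const unsigned kNumActRecCells            = 3;
  const unsigned kSpillStackActRecExtraArgs = 2;

  std::vector<Kind> vals = {Kind::Cell, Kind::ActRec, Kind::Cell,
                            Kind::Cell, Kind::Cell, Kind::Cell};
  for (unsigned i = 0, offset = 0, n = vals.size(); i < n; ++i) {
    if (vals[i] == Kind::ActRec) {
      std::printf("val %u: ActRec at offset %u\n", i, offset);
      offset += kNumActRecCells;         // an ActRec spans several cells
      i += kSpillStackActRecExtraArgs;   // skip its extra sources
    } else {
      std::printf("val %u: cell at offset %u\n", i, offset);
      ++offset;
    }
  }
  return 0;
}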
Example #6
static void insertRefCountAssertsAux(Trace* trace, IRFactory* factory) {
  IRInstruction::List& instructions = trace->getInstructionList();
  IRInstruction::Iterator it;
  for (it = instructions.begin(); it != instructions.end(); ) {
    IRInstruction* inst = *it;
    it++;
    SSATmp* dst = inst->getDst();
    if (dst &&
        Type::isStaticallyKnown(dst->getType()) &&
        Type::isRefCounted(dst->getType())) {
      auto* assertInst = factory->gen(DbgAssertRefCount, dst);
      assertInst->setParent(trace);
      instructions.insert(it, assertInst);
    }
  }
}
Example #7
void LinearScan::allocRegsToTraceAux(Trace* trace) {
  IRInstruction::List& instructionList = trace->getInstructionList();
  IRInstruction::Iterator it;
  for (it = instructionList.begin();
       it != instructionList.end();
       it++) {
    IRInstruction* inst = *it;
    allocRegToInstruction(trace, it);
    if (RuntimeOption::EvalDumpIR > 3) {
      std::cout << "--- allocated to instruction: ";
      inst->print(std::cout);
      std::cout << "\n";
    }
    if (inst->isControlFlowInstruction()) {
      // This instruction may transfer control to another trace.
      // If this is the last instruction in the trace that can branch
      // to this target trace, then allocate registers to the target
      // trace, effectively linearizing the target trace after inst.
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        allocRegsToTraceAux(label->getTrace());
      }
    }
  }

  // Insert spill instructions.
  // Reload instructions are already added in <allocRegsToTrace>.
  for (it = instructionList.begin(); it != instructionList.end(); ) {
    IRInstruction::Iterator next = it; ++next;
    IRInstruction* inst = *it;
    if (inst->getOpcode() != Reload) {
      // Reloaded SSATmps needn't be spilled again.
      if (SSATmp* dst = inst->getDst()) {
        int32 slotId = dst->getSpillSlot();
        if (slotId != -1) {
          // If this instruction is marked to be spilled,
          // add a spill right afterwards.
          IRInstruction* spillInst =
            m_slots[slotId].m_slotTmp->getInstruction();
          instructionList.insert(next, spillInst);
          spillInst->setParent(trace);
        }
      }
    }
    it = next;
  }
}
Example #8
/**
 * Called to clear out the tracked local values at a call site.
 * Calls kill all registers, so we don't want to keep locals in
 * registers across calls. We do continue tracking the types in
 * locals, however.
 */
void TraceBuilder::killLocals() {
  for (uint32_t i = 0; i < m_localValues.size(); i++) {
    SSATmp* t = m_localValues[i];
    // should not kill DefConst, and LdConst should be replaced by DefConst
    if (!t || t->inst()->op() == DefConst) {
      continue;
    }
    if (t->inst()->op() == LdConst) {
      // make the new DefConst instruction
      IRInstruction* clone = t->inst()->clone(&m_irFactory);
      clone->setOpcode(DefConst);
      m_localValues[i] = clone->getDst();
      continue;
    }
    assert(!t->isConst());
    m_localValues[i] = nullptr;
  }
}
void LinearScan::removeUnusedSpills() {
  for (SlotInfo& slot : m_slots) {
    IRInstruction* spill = slot.m_spillTmp->getInstruction();
    if (spill->getDst()->getUseCount() == 0) {
      Block* block = spill->getBlock();
      block->erase(block->iteratorTo(spill));
      SSATmp* src = spill->getSrc(0);
      if (src->decUseCount() == 0) {
        Opcode srcOpc = src->getInstruction()->getOpcode();
        // Not all instructions are able to take noreg as their dest
        // reg.  We pick LdLoc and IncRef because they occur often.
        if (srcOpc == IncRef || srcOpc == LdLoc) {
          for (int i = 0, n = src->numNeededRegs(); i < n; ++i) {
            src->setReg(InvalidReg, i);
          }
        }
      }
    }
  }
}
Example #10
/*
 * Insert asserts at various points in the IR.
 * TODO: t2137231 Insert DbgAssertPtr at points that use or produce a GenPtr
 */
static void insertAsserts(Trace* trace, IRFactory* factory) {
  forEachTraceBlock(trace, [=](Block* block) {
    for (auto it = block->begin(), end = block->end(); it != end; ) {
      IRInstruction& inst = *it;
      ++it;
      if (inst.getOpcode() == SpillStack) {
        insertSpillStackAsserts(inst, factory);
        continue;
      }
      if (inst.getOpcode() == Call) {
        SSATmp* sp = inst.getDst();
        IRInstruction* addr = factory->gen(LdStackAddr, sp, factory->defConst(0));
        insertAfter(&inst, addr);
        insertAfter(addr, factory->gen(DbgAssertPtr, addr->getDst()));
        continue;
      }
      if (!inst.isBlockEnd()) insertRefCountAsserts(inst, factory);
    }
  });
}
Example #11
void LinearScan::preAllocSpillLocAux(Trace* trace, uint32 numSpillLocs) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       ++it) {
    IRInstruction* inst = *it;
    if (inst->getOpcode() == Spill) {
      SSATmp* dst = inst->getDst();
      for (int index = 0; index < dst->numNeededRegs(); ++index) {
        ASSERT(!dst->hasReg(index));
        if (dst->getSpillInfo(index).type() == SpillInfo::Memory) {
          uint32 spillLoc = dst->getSpillInfo(index).mem();
          // Native stack layout:
          // |               |
          // +---------------+
          // |               |  <-- spill[5..]
          // | pre allocated |  <-- spill[4]
          // |  (16 slots)   |  <-- spill[3]
          // +---------------+
          // |  return addr  |
          // +---------------+
          // |    extra      |  <-- spill[2]
          // |    spill      |  <-- spill[1]
          // |  locations    |  <-- spill[0]
          // +---------------+  <-- %rsp
          // If a spill location falls into the pre-allocated region, we
          // need to increase its index by 1 to avoid overwriting the
          // return address.
          if (spillLoc + NumPreAllocatedSpillLocs >= numSpillLocs) {
            dst->setSpillInfo(index, SpillInfo(spillLoc + 1));
          }
        }
      }
    }
  }
}
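
The adjustment described by the stack-layout comment can be written out on its own. A hypothetical, simplified sketch (adjustSpillIndex is an illustrative name, not an HHVM function): slots whose index falls into the pre-allocated region are shifted up by one so they do not land on the return address.

#include <cstdio>

// Simplified remapping: the top numPreAllocated spill indices live above the
// return address, so their final slot index is bumped by one.
unsigned adjustSpillIndex(unsigned spillLoc, unsigned numSpillLocs,
                          unsigned numPreAllocated) {
  return spillLoc + numPreAllocated >= numSpillLocs ? spillLoc + 1 : spillLoc;
}

int main() {
  const unsigned numSpillLocs = 6, numPreAllocated = 3;
  for (unsigned i = 0; i < numSpillLocs; ++i) {
    std::printf("spill[%u] -> slot %u\n", i,
                adjustSpillIndex(i, numSpillLocs, numPreAllocated));
  }
  // spill[0..2] keep their index; spill[3..5] map to slots 4..6, leaving
  // slot 3 free for the return address.
  return 0;
}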
Example #12
/*
 * Sink IncRefs consumed off trace.
 * Assumptions: Flow graph must not have critical edges, and the instructions
 * have been annotated already by the DCE algorithm.  This pass uses
 * the REFCOUNT_CONSUMED* flags to copy IncRefs from the main trace to each
 * exit trace that consumes the incremented pointer.
 * 1. toSink = {}
 * 2. iterate forwards over the main trace:
 *    * when a movable IncRef is found, insert it into the toSink list and
 *      mark it as DEAD.
 *    * If a decref of a dead incref is found, remove the corresponding
 *      incref from toSink and mark the decref DEAD too.
 *    * the first time we see a branch to an exit trace, process the
 *      exit trace.
 * 3. to process an exit trace:
 *    * clone each IncRef found in toSink then prepend to the exit trace.
 *    * replace each use of the original incref's result with the new
 *      incref's result.
 */
void sinkIncRefs(Trace* trace, IRFactory* irFactory, DceState& state) {
  assert(trace->isMain());

  auto copyPropTrace = [] (Trace* trace) {
    forEachInst(trace, copyProp);
  };

  WorkList toSink;

  auto processExit = [&] (Trace* exit) {
    // Sink REFCOUNT_CONSUMED_OFF_TRACE IncRefs before the first non-label
    // instruction, and create a mapping between the original tmps to the sunk
    // tmps so that we can later replace the original ones with the sunk ones.
    std::vector<SSATmp*> sunkTmps(irFactory->numTmps(), nullptr);
    for (auto* inst : boost::adaptors::reverse(toSink)) {
      // prepend inserts an instruction to the beginning of a block, after
      // the label. Therefore, we iterate through toSink in reverse order.
      IRInstruction* sunkInst = irFactory->gen(IncRef, inst->getSrc(0));
      state[sunkInst].setLive();
      exit->front()->prepend(sunkInst);

      auto dstId = inst->getDst()->getId();
      assert(!sunkTmps[dstId]);
      sunkTmps[dstId] = sunkInst->getDst();
    }
    forEachInst(exit, [&](IRInstruction* inst) {
      // Replace the original tmps with the sunk tmps.
      for (uint32_t i = 0; i < inst->getNumSrcs(); ++i) {
        SSATmp* src = inst->getSrc(i);
        if (SSATmp* sunkTmp = sunkTmps[src->getId()]) {
          inst->setSrc(i, sunkTmp);
        }
      }
    });
    // Do copyProp last, because we need to keep REFCOUNT_CONSUMED_OFF_TRACE
    // Movs as the prototypes for sunk instructions.
    copyPropTrace(exit);
  };

  // An exit trace may be entered from multiple exit points. We keep track of
  // which exit traces we already pushed sunk IncRefs to, so that we won't push
  // them multiple times.
  boost::dynamic_bitset<> pushedTo(irFactory->numBlocks());
  forEachInst(trace, [&](IRInstruction* inst) {
    if (inst->getOpcode() == IncRef) {
      // Must be REFCOUNT_CONSUMED or REFCOUNT_CONSUMED_OFF_TRACE;
      // otherwise, it should already have been removed in optimizeRefCount.
      if (state[inst].countConsumedOffTrace()) {
        inst->setOpcode(Mov);
        // Mark them as dead so that they'll be removed later.
        state[inst].setDead();
        // Add all REFCOUNT_CONSUMED_OFF_TRACE IncRefs to the sinking list.
        toSink.push_back(inst);
      } else if (!state[inst].isDead()) {
        assert(state[inst].countConsumed());
      }
    }
    if (inst->getOpcode() == DecRefNZ) {
      IRInstruction* srcInst = inst->getSrc(0)->getInstruction();
      if (state[srcInst].isDead()) {
        state[inst].setDead();
        // This may take O(I) time where I is the number of IncRefs
        // in the main trace.
        toSink.remove(srcInst);
      }
    }
    if (Block* target = inst->getTaken()) {
      if (!pushedTo[target->getId()]) {
        pushedTo[target->getId()] = 1;
        Trace* exit = target->getTrace();
        if (exit != trace) processExit(exit);
      }
    }
  });

  // Do copyProp last, because we need to keep REFCOUNT_CONSUMED_OFF_TRACE
  // Movs as the prototypes for sunk instructions.
  copyPropTrace(trace);
}
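
processExit's comment relies on a small ordering fact: prepending each clone immediately after the label while walking toSink in reverse reproduces the original program order. A minimal standard-library demonstration (plain ints standing in for instructions, not HHVM code):

#include <cstdio>
#include <iterator>
#include <list>
#include <vector>

int main() {
  std::vector<int> toSink    = {1, 2, 3};  // IncRefs in original program order
  std::list<int>   exitBlock = {42};       // 42 stands in for the exit's label

  // "prepend" = insert immediately after the label; iterating toSink in
  // reverse makes the final order match the original: label, 1, 2, 3.
  for (auto it = toSink.rbegin(); it != toSink.rend(); ++it) {
    exitBlock.insert(std::next(exitBlock.begin()), *it);
  }
  for (int x : exitBlock) std::printf("%d ", x);  // prints: 42 1 2 3
  return 0;
}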
Example #13
void LinearScan::rematerializeAux(Trace* trace,
                                  SSATmp* curSp,
                                  SSATmp* curFp,
                                  std::vector<SSATmp*> localValues) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       ++it) {
    IRInstruction* inst = *it;
    Opcode opc = inst->getOpcode();
    SSATmp* dst = inst->getDst();
    if (opc == DefFP || opc == FreeActRec) {
      curFp = dst;
      ASSERT(dst && dst->getReg() == rVmFp);
    }
    if (opc == Reload) {
      // s = Spill t0
      // t = Reload s
      SSATmp* spilledTmp = getSpilledTmp(dst);
      IRInstruction* spilledInst = spilledTmp->getInstruction();
      IRInstruction* newInst = NULL;
      if (spilledInst->isRematerializable() ||
          (spilledInst->getOpcode() == LdStack &&
           spilledInst->getSrc(0) == curSp)) {
        // XXX: could change <newInst> to the non-check version.
        // Rematerialize those rematerializable instructions (i.e.,
        // isRematerializable returns true) and LdStack.
        newInst = spilledInst->clone(m_irFactory);
        // The new instruction needn't have an exit label, because it is always
        // dominated by the original instruction.
        newInst->setLabel(NULL);
      } else {
        // Rematerialize LdLoc.
        std::vector<SSATmp*>::iterator pos =
          std::find(localValues.begin(),
                    localValues.end(),
                    canonicalize(spilledTmp));
        // Search for a local that stores the value of <spilledTmp>.
        if (pos != localValues.end()) {
          size_t locId = pos - localValues.begin();
          ASSERT(curFp != NULL);
          ConstInstruction constInst(curFp, Local(locId));
          IRInstruction* ldHomeInst =
            m_irFactory->cloneInstruction(&constInst);
          newInst = m_irFactory->ldLoc(m_irFactory->getSSATmp(ldHomeInst),
                                       dst->getType(),
                                       NULL);
        }
      }
      if (newInst) {
        newInst->setDst(dst);
        newInst->getDst()->setInstruction(newInst);
        *it = newInst;
        newInst->setParent(trace);
      }
    }

    // Updating <curSp> and <localValues>.
    if (dst && dst->getReg() == rVmSp) {
      // <inst> modifies the stack pointer.
      curSp = dst;
    }
    if (opc == LdLoc || opc == StLoc || opc == StLocNT) {
      // dst = LdLoc home
      // StLoc/StLocNT home, src
      int locId = getLocalIdFromHomeOpnd(inst->getSrc(0));
      SSATmp* localValue = (opc == LdLoc ? dst : inst->getSrc(1));
      if (int(localValues.size()) < locId + 1) {
        localValues.resize(locId + 1);
      }
      localValues[locId] = canonicalize(localValue);
    }

    if (inst->isControlFlowInstruction()) {
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        rematerializeAux(label->getTrace(), curSp, curFp, localValues);
      }
    }
  }
}
Example #14
uint32 LinearScan::assignSpillLocAux(Trace* trace,
                                     uint32 nextSpillLoc,
                                     uint32 nextMmxReg) {
  IRInstruction::List& instructionList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instructionList.begin();
       it != instructionList.end();
       ++it) {
    IRInstruction* inst = *it;
    if (getNextNative() == inst) {
      ASSERT(!m_natives.empty());
      m_natives.pop_front();
    }
    if (inst->getOpcode() == Spill) {
      SSATmp* dst = inst->getDst();
      SSATmp* src = inst->getSrc(0);
      for (int locIndex = 0;
           locIndex < src->numNeededRegs();
           ++locIndex) {
        if (dst->getLastUseId() <= getNextNativeId()) {
          TRACE(3, "[counter] 1 spill a tmp that does not span native\n");
        } else {
          TRACE(3, "[counter] 1 spill a tmp that spans native\n");
        }

        const bool allowMmxSpill = RuntimeOption::EvalHHIREnableMmx &&
          // The live range of the spill slot doesn't span native calls,
          // and we still have free MMX registers.
          dst->getLastUseId() <= getNextNativeId() &&
          nextMmxReg < (uint32)NumMmxRegs;

        dst->setSpillInfo(locIndex,
          allowMmxSpill
            ? SpillInfo(RegNumber(nextMmxReg++))
            : SpillInfo(nextSpillLoc++)
        );
        if (allowMmxSpill) {
          TRACE(3, "[counter] 1 spill to mmx\n");
        } else {
          TRACE(3, "[counter] 1 spill to memory\n");
        }
      }
    }
    if (inst->getOpcode() == Reload) {
      SSATmp* src = inst->getSrc(0);
      for (int locIndex = 0;
           locIndex < src->numNeededRegs();
           ++locIndex) {
        if (src->getSpillInfo(locIndex).type() == SpillInfo::MMX) {
          TRACE(3, "[counter] reload from mmx\n");
        } else {
          TRACE(3, "[counter] reload from memory\n");
        }
      }
    }
    if (inst->isControlFlowInstruction()) {
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        nextSpillLoc = assignSpillLocAux(label->getTrace(),
                                         nextSpillLoc,
                                         nextMmxReg);
      }
    }
  }
  return nextSpillLoc;
}
Example #15
void eliminateDeadCode(Trace* trace, IRFactory* irFactory) {
  IRInstruction::List wl; // worklist of live instructions
  Trace::List& exitTraces = trace->getExitTraces();
  // first mark all exit traces as unreachable by setting the id on
  // their labels to 0
  for (Trace::Iterator it = exitTraces.begin();
       it != exitTraces.end();
       it++) {
    Trace* trace = *it;
    trace->getLabel()->setId(DEAD);
  }

  // mark the essential instructions and add them to the initial
  // work list; also mark the exit traces that are reachable by
  // any control flow instruction in the main trace.
  initInstructions(trace, wl);
  for (Trace::Iterator it = exitTraces.begin();
       it != exitTraces.end();
       it++) {
    // only process those exit traces that are reachable from
    // the main trace
    Trace* trace = *it;
    if (trace->getLabel()->getId() != DEAD) {
      initInstructions(trace, wl);
    }
  }

  // process the worklist
  while (!wl.empty()) {
    IRInstruction* inst = wl.front();
    wl.pop_front();
    for (uint32 i = 0; i < inst->getNumSrcs(); i++) {
      SSATmp* src = inst->getSrc(i);
      if (src->getInstruction()->isDefConst()) {
        continue;
      }
      IRInstruction* srcInst = src->getInstruction();
      if (srcInst->getId() == DEAD) {
        srcInst->setId(LIVE);
        wl.push_back(srcInst);
      }
      // <inst> consumes <srcInst> which is an IncRef,
      // so we mark <srcInst> as REFCOUNT_CONSUMED.
      if (inst->consumesReference(i) && srcInst->getOpcode() == IncRef) {
        if (inst->getParent()->isMain() || !srcInst->getParent()->isMain()) {
          // <srcInst> is consumed from its own trace.
          srcInst->setId(REFCOUNT_CONSUMED);
        } else {
          // <srcInst> is consumed off trace.
          if (srcInst->getId() != REFCOUNT_CONSUMED) {
            // mark <srcInst> as REFCOUNT_CONSUMED_OFF_TRACE unless it is
            // also consumed from its own trace.
            srcInst->setId(REFCOUNT_CONSUMED_OFF_TRACE);
          }
        }
      }
    }
  }

  // Optimize IncRefs and DecRefs.
  optimizeRefCount(trace);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); ++it) {
    optimizeRefCount(*it);
  }

  if (RuntimeOption::EvalHHIREnableSinking) {
    // Sink IncRefs consumed off trace.
    IRInstruction::List toSink;
    sinkIncRefs(trace, irFactory, toSink);
  }

  // now remove instructions whose id == DEAD
  removeDeadInstructions(trace);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); it++) {
    removeDeadInstructions(*it);
  }

  // If main trace ends with an unconditional jump, copy the target of
  // the jump to the end of the trace
  IRInstruction::List& instList = trace->getInstructionList();
  IRInstruction::Iterator lastInst = instList.end();
  lastInst--; // go back to the last instruction
  IRInstruction* jmpInst = *lastInst;
  if (jmpInst->getOpcode() == Jmp_) {
    Trace* targetTrace = jmpInst->getLabel()->getTrace();
    IRInstruction::List& targetInstList = targetTrace->getInstructionList();
    IRInstruction::Iterator instIter = targetInstList.begin();
    instIter++; // skip over label
    // update the parent trace of the moved instructions
    for (IRInstruction::Iterator it = instIter;
         it != targetInstList.end();
         ++it) {
      (*it)->setParent(trace);
    }
    instList.splice(lastInst, targetInstList, instIter, targetInstList.end());
    // delete the jump instruction
    instList.erase(lastInst);
  }

  // If main trace ends with a conditional jump with no side-effects on exit,
  // hook it to the exitTrace and make it a TraceExitType::NormalCc
  if (RuntimeOption::EvalHHIRDirectExit) {
    IRInstruction::List& instList = trace->getInstructionList();
    IRInstruction::Iterator tail  = instList.end();
    IRInstruction* jccInst        = NULL;
    IRInstruction* exitInst       = NULL;
    IRInstruction* exitCcInst     = NULL;
    Opcode opc = OpAdd;
    // Normally Jcc comes before a Marker
    for (int idx = 3; idx >= 0; idx--) {
      tail--; // go back to the previous instruction
      IRInstruction* inst = *tail;
      opc = inst->getOpcode();
      if (opc == ExitTrace) {
        exitInst = *tail;
        continue;
      }
      if (opc == Marker) {
        continue;
      }
      if (jccCanBeDirectExit(opc)) {
        jccInst = inst;
        break;
      }
      break;
    }
    if (jccCanBeDirectExit(opc)) {
      SSATmp* dst = jccInst->getDst();
      Trace* targetTrace = jccInst->getLabel()->getTrace();
      IRInstruction::List& targetInstList = targetTrace->getInstructionList();
      IRInstruction::Iterator targetInstIter = targetInstList.begin();
      targetInstIter++; // skip over label

      // Check for a NormalCc exit with no side effects
      for (IRInstruction::Iterator it = targetInstIter;
           it != targetInstList.end();
           ++it) {
        IRInstruction* instr = (*it);
        // Extend to support ExitSlow, ExitSlowNoProgress, ...
        Opcode opc = instr->getOpcode();
        if (opc == ExitTraceCc) {
          exitCcInst = instr;
          break;
        } else if (opc == Marker) {
          continue;
        } else {
          // Do not optimize if there are other instructions
          break;
        }
      }

      if (exitInst && exitCcInst &&
          exitCcInst->getNumSrcs() > NUM_FIXED_SRCS &&
          exitInst->getNumSrcs() > NUM_FIXED_SRCS) {
        // Found both exits, link them to Jcc for codegen
        ASSERT(dst);
        ExtendedInstruction* exCcInst = (ExtendedInstruction*)exitCcInst;
        exCcInst->appendExtendedSrc(*irFactory, dst);
        ExtendedInstruction* exInst = (ExtendedInstruction*)exitInst;
        exInst->appendExtendedSrc(*irFactory, dst);
        // Set flag so Jcc and exits know this is active
        dst->setTCA(kIRDirectJccJmpActive);
      }
    }
  }

  // If main trace starts with guards, have them generate a patchable jump
  // to the anchor trace
  if (RuntimeOption::EvalHHIRDirectExit) {
    LabelInstruction* guardLabel = NULL;
    IRInstruction::List& instList = trace->getInstructionList();
    // Check the beginning of the trace for guards
    for (IRInstruction::Iterator it = instList.begin(); it != instList.end();
         ++it) {
      IRInstruction* inst = *it;
      Opcode opc = inst->getOpcode();
      if (inst->getLabel() &&
          (opc == LdLoc    || opc == LdStack ||
           opc == GuardLoc || opc == GuardStk)) {
        LabelInstruction* exitLabel = inst->getLabel();
        // Find the GuardFailure's label and confirm this branches there
        if (guardLabel == NULL) {
          Trace* exitTrace = exitLabel->getTrace();
          IRInstruction::List& xList = exitTrace->getInstructionList();
          IRInstruction::Iterator instIter = xList.begin();
          instIter++; // skip over label
          // Confirm this is a GuardExit
          for (IRInstruction::Iterator it = instIter; it != xList.end(); ++it) {
            IRInstruction* i = *it;
            Opcode op = i->getOpcode();
            if (op == Marker) {
              continue;
            }
            if (op == ExitGuardFailure) {
              guardLabel = exitLabel;
            }
            // Do not optimize if other instructions are on exit trace
            break;
          }
        }
        if (exitLabel == guardLabel) {
          inst->setTCA(kIRDirectGuardActive);
          continue;
        }
        break;
      }
      if (opc == Marker || opc == DefLabel || opc == DefSP || opc == DefFP ||
          opc == LdStack) {
        continue;
      }
      break;
    }
  }
}
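
The unconditional-jump case near the end of this pass moves the target trace's instructions (minus its label) in front of the jump with std::list::splice. A small self-contained illustration of that splice-and-erase step (ints standing in for instructions, not HHVM code):

#include <cstdio>
#include <iterator>
#include <list>

int main() {
  std::list<int> mainTrace   = {10, 20, 99};  // 99 stands in for the trailing Jmp_
  std::list<int> targetTrace = {0, 30, 40};   // 0 stands in for the target's label

  auto jmp  = std::prev(mainTrace.end());     // iterator to the jump
  auto body = std::next(targetTrace.begin()); // skip over the label

  // splice relinks the nodes in [body, end) in front of the jump without
  // copying them; iterators into the moved range remain valid.
  mainTrace.splice(jmp, targetTrace, body, targetTrace.end());
  mainTrace.erase(jmp);                       // drop the now-redundant jump

  for (int x : mainTrace) std::printf("%d ", x);  // prints: 10 20 30 40
  return 0;
}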
Example #16
void LinearScan::rematerializeAux(Trace* trace,
                                  SSATmp* curSp,
                                  SSATmp* curFp,
                                  std::vector<SSATmp*> localValues) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       ++it) {
    IRInstruction* inst = *it;
    Opcode opc = inst->getOpcode();
    SSATmp* dst = inst->getDst();
    if (opc == DefFP || opc == FreeActRec) {
      curFp = dst;
      assert(dst && dst->getReg() == rVmFp);
    }
    if (opc == Reload) {
      // s = Spill t0
      // t = Reload s
      SSATmp* spilledTmp = getSpilledTmp(dst);
      IRInstruction* spilledInst = spilledTmp->getInstruction();
      IRInstruction* newInst = NULL;
      if (spilledInst->isRematerializable() ||
          (spilledInst->getOpcode() == LdStack &&
           spilledInst->getSrc(0) == curSp)) {
        // XXX: could change <newInst> to the non-check version.
        // Rematerialize those rematerializable instructions (i.e.,
        // isRematerializable returns true) and LdStack.
        newInst = spilledInst->clone(m_irFactory);
        // The new instruction needn't have an exit label, because it is always
        // dominated by the original instruction.
        newInst->setLabel(NULL);
      } else {
        // Rematerialize LdLoc.
        std::vector<SSATmp*>::iterator pos =
          std::find(localValues.begin(),
                    localValues.end(),
                    canonicalize(spilledTmp));
        // Search for a local that stores the value of <spilledTmp>.
        if (pos != localValues.end()) {
          size_t locId = pos - localValues.begin();
          assert(curFp != NULL);
          ConstInstruction constInst(curFp, Local(locId));
          IRInstruction* ldHomeInst =
            m_irFactory->cloneInstruction(&constInst);
          newInst = m_irFactory->gen(LdLoc,
                                     dst->getType(),
                                     m_irFactory->getSSATmp(ldHomeInst));
        }
      }
      if (newInst) {
        UNUSED Type::Tag oldType = dst->getType();
        newInst->setDst(dst);
        dst->setInstruction(newInst);
        assert(outputType(newInst) == oldType);
        *it = newInst;
        newInst->setParent(trace);
      }
    }

    // Updating <curSp> and <localValues>.
    if (dst && dst->getReg() == rVmSp) {
      // <inst> modifies the stack pointer.
      curSp = dst;
    }
    if (opc == LdLoc || opc == StLoc || opc == StLocNT) {
      // dst = LdLoc home
      // StLoc/StLocNT home, src
      int locId = getLocalIdFromHomeOpnd(inst->getSrc(0));
      // Note that when we implement inlining, we will need to deal
      // with the new local id space of the inlined function.
      SSATmp* localValue = (opc == LdLoc ? dst : inst->getSrc(1));
      if (int(localValues.size()) < locId + 1) {
        localValues.resize(locId + 1);
      }
      localValues[locId] = canonicalize(localValue);
    }
    // Other instructions that may have side effects on locals must
    // kill the local variable values.
    else if (opc == IterInit) {
      int valLocId = inst->getSrc(3)->getConstValAsInt();
      localValues[valLocId] = NULL;
      if (inst->getNumSrcs() == 5) {
        int keyLocId = inst->getSrc(4)->getConstValAsInt();
        localValues[keyLocId] = NULL;
      }
    } else if (opc == IterNext) {
      int valLocId = inst->getSrc(2)->getConstValAsInt();
      localValues[valLocId] = NULL;
      if (inst->getNumSrcs() == 4) {
        int keyLocId = inst->getSrc(3)->getConstValAsInt();
        localValues[keyLocId] = NULL;
      }
    }

    if (inst->isControlFlowInstruction()) {
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        rematerializeAux(label->getParent(), curSp, curFp, localValues);
      }
    }
  }
}