Example #1
void checkRefs(IRGS& env,
               int64_t entryArDelta,
               const std::vector<bool>& mask,
               const std::vector<bool>& vals,
               Offset dest) {
  auto const actRecOff = entryArDelta + offsetFromIRSP(env, BCSPOffset{0});
  auto const funcPtr = gen(env, LdARFuncPtr,
                           IRSPOffsetData { actRecOff }, sp(env));
  SSATmp* nParams = nullptr;

  for (unsigned i = 0; i < mask.size(); i += 64) {
    assertx(i < vals.size());

    uint64_t mask64 = packBitVec(mask, i);
    if (mask64 == 0) {
      continue;
    }

    if (i == 0) {
      nParams = cns(env, 64);
    } else if (!nParams || nParams->hasConstVal()) {
      nParams = gen(env, LdFuncNumParams, funcPtr);
    }

    auto const vals64 = packBitVec(vals, i);
    auto failBlock = env.irb->guardFailBlock();
    if (failBlock == nullptr) failBlock = makeExit(env, dest);
    gen(env, CheckRefs, failBlock, funcPtr, nParams,
        cns(env, i), cns(env, mask64), cns(env, vals64));
  }
}
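Example #1 depends on a helper, packBitVec, to fold a 64-entry window of the bool vector into one machine word. The helper is defined elsewhere in the tree; the following is a minimal, self-contained sketch of the assumed behavior (LSB-first packing starting at index i), not the verbatim HHVM implementation.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Packs up to 64 entries of `bits`, starting at index `i`, into one word,
// with bits[i] in the least-significant position; this is the layout
// checkRefs assumes when it walks the mask in 64-bit windows.
uint64_t packBitVec(const std::vector<bool>& bits, unsigned i) {
  assert(i % 64 == 0);
  assert(i < bits.size());
  uint64_t packed = 0;
  auto const end = std::min<size_t>(bits.size(), static_cast<size_t>(i) + 64);
  for (size_t j = i; j < end; ++j) {
    if (bits[j]) packed |= uint64_t{1} << (j - i);
  }
  return packed;
}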
Example #2
// If main trace ends with a conditional jump with no side-effects on exit,
// hook it to the exitTrace and make it a TraceExitType::NormalCc
static void hoistConditionalJumps(Trace* trace, IRFactory* irFactory) {
  IRInstruction::List& instList = trace->getInstructionList();
  IRInstruction::Iterator tail  = instList.end();
  IRInstruction* jccInst        = nullptr;
  IRInstruction* exitInst       = nullptr;
  IRInstruction* exitCcInst     = nullptr;
  Opcode opc = OpAdd;
  // Normally Jcc comes before a Marker
  for (int idx = 3; idx >= 0; idx--) {
    tail--; // go back to the previous instruction
    IRInstruction* inst = *tail;
    opc = inst->getOpcode();
    if (opc == ExitTrace) {
      exitInst = inst;
      continue;
    }
    if (opc == Marker) {
      continue;
    }
    if (jccCanBeDirectExit(opc)) {
      jccInst = inst;
      break;
    }
    break;
  }
  if (jccCanBeDirectExit(opc)) {
    SSATmp* dst = jccInst->getDst();
    Trace* targetTrace = jccInst->getLabel()->getParent();
    IRInstruction::List& targetInstList = targetTrace->getInstructionList();
    IRInstruction::Iterator targetInstIter = targetInstList.begin();
    targetInstIter++; // skip over label

    // Check for a NormalCc exit with no side effects
    for (IRInstruction::Iterator it = targetInstIter;
         it != targetInstList.end();
         ++it) {
      IRInstruction* instr = (*it);
      // Extend to support ExitSlow, ExitSlowNoProgress, ...
      Opcode opc = instr->getOpcode();
      if (opc == ExitTraceCc) {
        exitCcInst = instr;
        break;
      } else if (opc == Marker) {
        continue;
      } else {
        // Do not optimize if there are other instructions
        break;
      }
    }

    if (exitInst && exitCcInst) {
      // Found both exits, link them to Jcc for codegen
      assert(dst);
      exitCcInst->appendSrc(irFactory->arena(), dst);
      exitInst->appendSrc(irFactory->arena(), dst);
      // Set flag so Jcc and exits know this is active
      dst->setTCA(kIRDirectJccJmpActive);
    }
  }
}
Example #3
PhysReg forceAlloc(const SSATmp& tmp) {
  if (tmp.type() <= TBottom) return InvalidReg;

  auto inst = tmp.inst();
  auto opc = inst->op();

  auto const forceStkPtrs = [&] {
    switch (arch()) {
    case Arch::X64: return false;
    case Arch::ARM: return true;
    case Arch::PPC64: not_implemented(); break;
    }
    not_reached();
  }();

  if (forceStkPtrs && tmp.isA(TStkPtr)) {
    assert_flog(
      opc == DefSP ||
      opc == Mov,
      "unexpected StkPtr dest from {}",
      opcodeName(opc)
    );
    return rvmsp();
  }

  // LdContActRec and LdAFWHActRec, which load a generator's AR, are the only
  // cases where we have a pointer to an AR that is not in rvmfp().
  if (opc != LdContActRec && opc != LdAFWHActRec && tmp.isA(TFramePtr)) {
    return rvmfp();
  }

  return InvalidReg;
}
SSATmp* IRInstruction::modifiedStkPtr() const {
  assert(modifiesStack());
  assert(MInstrEffects::supported(this));
  SSATmp* sp = dst(hasMainDst() ? 1 : 0);
  assert(sp->isA(Type::StkPtr));
  return sp;
}
Example #5
/**
 * Called to clear out the tracked local values at a call site.
 * Calls kill all registers, so we don't want to keep locals in
 * registers across calls. We do continue tracking the types in
 * locals, however.
 */
void TraceBuilder::killLocalsForCall() {
  auto doKill = [&](smart::vector<LocalState>& locals) {
    for (auto& loc : locals) {
      SSATmp* t = loc.value;
      // should not kill DefConst, and LdConst should be replaced by DefConst
      if (!t || t->inst()->op() == DefConst) continue;

      if (t->inst()->op() == LdConst) {
        // make the new DefConst instruction
        IRInstruction* clone = t->inst()->clone(&m_irFactory);
        clone->setOpcode(DefConst);
        loc.value = clone->dst();
        continue;
      }
      assert(!t->isConst());
      loc.unsafe = true;
    }
  };

  doKill(m_locals);
  m_callerAvailableValues.clear();

  for (auto& state : m_inlineSavedStates) {
    doKill(state->locals);
    state->callerAvailableValues.clear();
  }
}
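killLocalsForCall above reads only two fields of each LocalState entry. The sketch below is a hypothetical reconstruction of that record, inferred from the usage in this example rather than taken from the actual HHVM headers.

struct SSATmp;  // an SSA value in the HHVM IR; declared elsewhere

// Assumed shape of the per-local tracking record walked by doKill().
struct LocalState {
  SSATmp* value = nullptr;  // SSA value currently known to hold the local
  bool    unsafe = false;   // set when that value must not be kept in a
                            // register across the call being processed
};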
Example #6
SSATmp* Simplifier::simplifyGuardType(IRInstruction* inst) {
  Type    type    = inst->getTypeParam();
  SSATmp* src     = inst->getSrc(0);
  Type    srcType = src->getType();
  if (srcType == type || srcType.strictSubtypeOf(type)) {
    /*
     * The type of the src is the same as, or more refined than, the guard
     * type, so the guard is unnecessary.
     */
    return src;
  }
  if (type.strictSubtypeOf(srcType)) {
    if (hoistGuardToLoad(src, type)) {
      return src;
    }
  } else {
    /*
     * Incompatible types!  We should just generate a jump here and
     * return null.
     *
     * This case is currently impossible, but it may come up later due
     * to other optimizations.  The assert is here so we'll remember
     * this spot ...
     */
    assert(0);
  }
  return nullptr;
}
/*
 * Stores a ref (boxed value) to a local. Also handles unsetting a local.
 */
void TraceBuilder::genBindLoc(uint32_t id,
                              SSATmp* newValue,
                              bool doRefCount /* = true */) {
  Type trackedType = getLocalType(id);
  SSATmp* prevValue = nullptr;
  if (trackedType == Type::None) {
    if (doRefCount) {
      prevValue = gen(LdLoc, Type::Gen, LocalId(id), m_fpValue);
    }
  } else {
    prevValue = getLocalValue(id);
    assert(prevValue == nullptr || prevValue->type() == trackedType);
    if (prevValue == newValue) {
      // Silent store: the local already contains the value being stored.
      // newValue still needs to be decref'd.
      if (!trackedType.notCounted() && doRefCount) {
        gen(DecRef, prevValue);
      }
      return;
    }
    if (trackedType.maybeCounted() && !prevValue && doRefCount) {
      prevValue = gen(LdLoc, trackedType, LocalId(id), m_fpValue);
    }
  }
  bool genStoreType = true;
  if ((trackedType.isBoxed() && newValue->type().isBoxed()) ||
      (trackedType == newValue->type() && !trackedType.isString())) {
    // no need to store type with local value
    genStoreType = false;
  }
  gen(genStoreType ? StLoc : StLocNT, LocalId(id), m_fpValue, newValue);
  if (trackedType.maybeCounted() && doRefCount) {
    gen(DecRef, prevValue);
  }
}
Example #8
SSATmp* IRBuilder::preOptimizeCheckType(IRInstruction* inst) {
  SSATmp* src  = inst->src(0);
  auto const oldType = src->type();
  auto const newType = inst->typeParam();

  if (oldType.isBoxed() && newType.isBoxed() &&
      (oldType.not(newType) || newType < oldType)) {
    /* This CheckType serves to update the inner type hint for a boxed value,
     * which requires no runtime work. This depends on the type being boxed,
     * and constraining it with DataTypeCountness will do it.  */
    constrainValue(src, DataTypeCountness);
    return gen(AssertType, newType, src);
  }

  if (oldType.not(newType)) {
    /* This check will always fail. It's probably due to an incorrect
     * prediction. Generate a Jmp, and return src because
     * following instructions may depend on the output of CheckType
     * (they'll be DCEd later). Note that we can't use convertToJmp
     * because the return value isn't nullptr, so the original
     * instruction won't be inserted into the stream. */
    gen(Jmp, inst->taken());
    return src;
  }

  if (newType >= oldType) {
    /* The type of the src is the same as, or more refined than, the guard
     * type, so the guard is unnecessary. */
    return src;
  }

  return nullptr;
}
Example #9
SSATmp* Simplifier::simplifyGetCtxFwdCall(IRInstruction* inst) {
  SSATmp*  srcCtx = inst->getSrc(0);
  if (srcCtx->isA(Type::Cctx)) {
    return srcCtx;
  }
  return nullptr;
}
Example #10
void LinearScan::removeUnusedSpillsAux(Trace* trace) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end(); ) {
    IRInstruction::Iterator next = it; ++next;
    IRInstruction* inst = *it;
    if (inst->getOpcode() == Spill && inst->getDst()->getUseCount() == 0) {
      instList.erase(it);
      SSATmp* src = inst->getSrc(0);
      if (src->decUseCount() == 0) {
        Opcode srcOpc = src->getInstruction()->getOpcode();
        // Not all instructions are able to take noreg as their dest
        // reg.  We pick LdLoc and IncRef because they occur often.
        if (srcOpc == IncRef || srcOpc == LdLoc) {
          for (int locIndex = 0;
               locIndex < src->numNeededRegs();
               ++locIndex) {
            src->setReg(InvalidReg, locIndex);
          }
        }
      }
    }
    it = next;
  }
}
SSATmp* TraceBuilder::optimizeWork(IRInstruction* inst,
                                   const folly::Optional<IdomVector>& idoms) {
    // Since some of these optimizations inspect tracked state, we don't
    // perform any of them on non-main traces.
    if (m_savedTraces.size() > 0) return nullptr;

    static DEBUG_ONLY __thread int instNest = 0;
    if (debug) ++instNest;
    SCOPE_EXIT { if (debug) --instNest; };
    DEBUG_ONLY auto indent = [&] { return std::string(instNest * 2, ' '); };

    FTRACE(1, "{}{}\n", indent(), inst->toString());

    // First pass of tracebuilder optimizations try to replace an
    // instruction based on tracked state before we do anything else.
    // May mutate the IRInstruction in place (and return nullptr) or
    // return an SSATmp*.
    if (SSATmp* preOpt = preOptimize(inst)) {
        FTRACE(1, "  {}preOptimize returned: {}\n",
               indent(), preOpt->inst()->toString());
        return preOpt;
    }
    if (inst->op() == Nop) return nullptr;

    // copy propagation on inst source operands
    copyProp(inst);

    SSATmp* result = nullptr;
    if (m_enableCse && inst->canCSE()) {
        result = cseLookup(inst, idoms);
        if (result) {
            // Found a dominating instruction that can be used instead of inst
            FTRACE(1, "  {}cse found: {}\n",
                   indent(), result->inst()->toString());
            assert(!inst->consumesReferences());
            if (inst->producesReference()) {
                // Replace with an IncRef
                FTRACE(1, "  {}cse of refcount-producing instruction\n", indent());
                return gen(IncRef, result);
            } else {
                return result;
            }
        }
    }

    if (m_enableSimplification) {
        result = m_simplifier.simplify(inst);
        if (result) {
            // Found a simpler instruction that can be used instead of inst
            FTRACE(1, "  {}simplification returned: {}\n",
                   indent(), result->inst()->toString());
            assert(inst->hasDst());
            return result;
        }
    }
    return nullptr;
}
Example #12
/**
 * If main trace ends with a conditional jump with no side-effects on exit,
 * hook it to the exitTrace and make it a TraceExitType::NormalCc.
 *
 * This function essentially looks for the following code pattern:
 *
 * Main Trace:
 * ----------
 * L1: // jccBlock
 *    ...
 *    Jcc ... -> L3
 * L2: // lastBlock
 *    DefLabel
 *    [Marker]
 *    ExitTrace
 *
 * Exit Trace:
 * ----------
 * L3: // targetBlock
 *   DefLabel
 *   [Marker]
 *   ExitTraceCc
 *
 * If the pattern is found, Jcc's dst operand is linked to the ExitTrace and
 * ExitTraceCc instructions and it's flagged with kIRDirectJccJmpActive.  This
 * then triggers CodeGenerator to emit a REQ_BIND_JMPCC_FIRST service request.
 *
 */
static void hoistConditionalJumps(Trace* trace, IRFactory* irFactory) {
  IRInstruction* exitInst       = nullptr;
  IRInstruction* exitCcInst     = nullptr;
  Opcode opc = OpAdd;
  // Normally Jcc comes before a Marker
  auto& blocks = trace->getBlocks();
  if (blocks.size() < 2) return;
  auto it = blocks.end();
  Block* lastBlock = *(--it);
  Block* jccBlock  = *(--it);

  IRInstruction& jccInst = *(jccBlock->back());
  if (!jccCanBeDirectExit(jccInst.getOpcode())) return;

  for (auto it = lastBlock->skipLabel(), end = lastBlock->end(); it != end;
       it++) {
    IRInstruction& inst = *it;
    opc = inst.getOpcode();
    if (opc == ExitTrace) {
      exitInst = &inst;
      break;
    }
    if (opc != Marker) {
      // Found real instruction on the last block
      return;
    }
  }
  if (exitInst) {
    SSATmp* dst = jccInst.getDst();
    Block* targetBlock = jccInst.getTaken();
    auto targetInstIter = targetBlock->skipLabel();

    // Check for a NormalCc exit with no side effects
    for (auto it = targetInstIter, end = targetBlock->end(); it != end; ++it) {
      IRInstruction* instr = &*it;
      // Extend to support ExitSlow, ExitSlowNoProgress, ...
      Opcode opc = instr->getOpcode();
      if (opc == ExitTraceCc) {
        exitCcInst = instr;
        break;
      } else if (opc != Marker) {
        // Do not optimize if there are other instructions
        break;
      }
    }

    if (exitCcInst) {
      // Found both exits, link them to Jcc for codegen
      assert(dst);
      exitCcInst->appendSrc(irFactory->arena(), dst);
      exitInst->appendSrc(irFactory->arena(), dst);
      // Set flag so Jcc and exits know this is active
      dst->setTCA(kIRDirectJccJmpActive);
    }
  }
}
Example #13
void LinearScan::freeReg(RegState* reg) {
  pushFreeReg(reg);
  // The <tmp> shouldn't be reused any more.
  SSATmp* tmp = reg->m_ssaTmp;
  int32 slotId = tmp->getSpillSlot();
  if (slotId != -1) {
    m_slots[slotId].m_latestTmp = NULL;
  }
  reg->m_ssaTmp = NULL;
}
SSATmp* TraceBuilder::genLdLocAsCell(uint32_t id, Trace* exitTrace) {
  SSATmp*    tmp = genLdLoc(id);
  Type type = tmp->type();
  assert(type.isBoxed() || type.notBoxed());
  if (!type.isBoxed()) {
    return tmp;
  }
  // Unbox tmp into a cell via a LdRef
  return gen(LdRef, type.innerType(), exitTrace, tmp);
}
Example #15
bool isUnguardedLoad(IRInstruction* inst) {
  Opcode opc = inst->getOpcode();
  SSATmp* dst = inst->getDst();
  if (!dst) return false;
  Type::Tag type = dst->getType();
  return (opc == LdStack && (type == Type::Gen || type == Type::Cell))
          || (opc == LdLoc && type == Type::Gen)
          || (opc == LdRefNR && type == Type::Cell)
          || (opc == LdMemNR && type == Type::Cell &&
              inst->getSrc(0)->getType() == Type::PtrToCell);
}
Example #16
SSATmp* Simplifier::simplifyLdClsCtx(IRInstruction* inst) {
  SSATmp*  ctx = inst->getSrc(0);
  Type ctxType = ctx->getType();
  if (ctxType.equals(Type::Obj)) {
    // this pointer... load its class ptr
    return m_tb->gen(LdObjClass, ctx);
  }
  if (ctxType.equals(Type::Cctx)) {
    return m_tb->gen(LdClsCctx, ctx);
  }
  return nullptr;
}
Example #17
PhysReg forceAlloc(const SSATmp& tmp) {
  auto inst = tmp.inst();
  auto opc = inst->op();

  // TODO(t5485866) Our manipulations to vmsp must be SSA to play nice with
  // LLVM. In the X64 backend, this causes enough extra reg-reg copies to
  // measurably impact performance, so keep forcing things into rVmSp for
  // now. We should be able to remove this completely once the necessary
  // improvements are made to vxls.
  auto const forceStkPtrs = arch() != Arch::X64 || !RuntimeOption::EvalJitLLVM;

  if (forceStkPtrs && tmp.isA(Type::StkPtr)) {
    assert_flog(
      opc == DefSP ||
      opc == ReDefSP ||
      opc == Call ||
      opc == CallArray ||
      opc == ContEnter ||
      opc == SpillStack ||
      opc == SpillFrame ||
      opc == CufIterSpillFrame ||
      opc == ExceptionBarrier ||
      opc == RetAdjustStack ||
      opc == InterpOne ||
      opc == InterpOneCF ||
      opc == Mov ||
      opc == CheckStk ||
      opc == GuardStk ||
      opc == AssertStk ||
      opc == CastStk ||
      opc == CastStkIntToDbl ||
      opc == CoerceStk ||
      opc == DefLabel ||
      opc == HintStkInner ||
      MInstrEffects::supported(opc),
      "unexpected StkPtr dest from {}",
      opcodeName(opc)
    );
    return mcg->backEnd().rVmSp();
  }

  // LdContActRec and LdAFWHActRec, which load a generator's AR, are the only
  // cases where we have a pointer to an AR that is not in rVmFp.
  if (opc != LdContActRec && opc != LdAFWHActRec && tmp.isA(Type::FramePtr)) {
    return mcg->backEnd().rVmFp();
  }

  if (opc == DefMIStateBase) {
    assert(tmp.isA(Type::PtrToGen));
    return mcg->backEnd().rVmTl();
  }
  return InvalidReg;
}
/*
 * Store a cell value to a local that might be boxed.
 */
SSATmp* TraceBuilder::genStLoc(uint32_t id,
                               SSATmp* newValue,
                               bool doRefCount,
                               bool genStoreType,
                               Trace* exit) {
  assert(!newValue->type().isBoxed());
  /*
   * If the prior value of the local is a cell, then re-use genBindLoc.
   * Otherwise, if the prior value of the local is a ref:
   *
   * prevLocValue = LdLoc<T>{id} fp
   *    prevValue = LdRef [prevLocValue]
   *       newRef = StRef [prevLocValue], newValue
   * DecRef prevValue
   * -- track local value in newRef
   */
  Type trackedType = getLocalType(id);
  assert(trackedType != Type::None);  // tracelet guards guarantee a type
  if (trackedType.notBoxed()) {
    SSATmp* retVal = doRefCount ? gen(IncRef, newValue) : newValue;
    genBindLoc(id, newValue, doRefCount);
    return retVal;
  }
  assert(trackedType.isBoxed());
  SSATmp* prevRef = getLocalValue(id);
  assert(prevRef == nullptr || prevRef->type() == trackedType);
  // prevRef is a ref
  if (prevRef == nullptr) {
    // prevRef = ldLoc
    prevRef = gen(LdLoc, trackedType, LocalId(id), m_fpValue);
  }
  SSATmp* prevValue = nullptr;
  if (doRefCount) {
    assert(exit);
    Type innerType = trackedType.innerType();
    prevValue = gen(LdRef, innerType, exit, prevRef);
  }
  // stref [prevRef] = t1
  Opcode opc = genStoreType ? StRef : StRefNT;
  gen(opc, prevRef, newValue);

  SSATmp* retVal = newValue;
  if (doRefCount) {
    retVal = gen(IncRef, newValue);
    gen(DecRef, prevValue);
  }
  return retVal;
}
Example #19
void printSrc(std::ostream& ostream, const IRInstruction* inst, uint32_t i,
              const RegAllocInfo* regs, const LifetimeInfo* lifetime) {
  SSATmp* src = inst->src(i);
  if (src != nullptr) {
    if (lifetime && lifetime->linear[inst] != 0 && !src->isConst() &&
        lifetime->uses[src].lastUse == lifetime->linear[inst]) {
      ostream << "~";
    }
    print(ostream, src, regs, lifetime);
  } else {
    ostream << color(ANSI_COLOR_RED)
            << "!!!NULL @ " << i
            << color(ANSI_COLOR_END)
            ;
  }
}
void TraceBuilder::genDecRefStack(Type type, uint32_t stackOff) {
  bool spansCall = false;
  Type knownType = Type::None;
  SSATmp* tmp = getStackValue(m_spValue, stackOff, spansCall, knownType);
  if (!tmp || (spansCall && tmp->inst()->op() != DefConst)) {
    // We don't want to extend live ranges of tmps across calls, so we
    // don't get the value if spansCall is true; however, we can use
    // any type information known.
    if (knownType != Type::None) {
      type = Type::mostRefined(type, knownType);
    }
    gen(DecRefStack, type, m_spValue, cns(int64_t(stackOff)));
  } else {
    gen(DecRef, tmp);
  }
}
Example #21
static void insertRefCountAssertsAux(Trace* trace, IRFactory* factory) {
    IRInstruction::List& instructions = trace->getInstructionList();
    IRInstruction::Iterator it;
    for (it = instructions.begin(); it != instructions.end(); ) {
        IRInstruction* inst = *it;
        it++;
        SSATmp* dst = inst->getDst();
        if (dst &&
                Type::isStaticallyKnown(dst->getType()) &&
                Type::isRefCounted(dst->getType())) {
            auto* assertInst = factory->gen(DbgAssertRefCount, dst);
            assertInst->setParent(trace);
            instructions.insert(it, assertInst);
        }
    }
}
Example #22
const StringData* findClassName(SSATmp* cls) {
  assert(cls->isA(Type::Cls));

  if (cls->isConst()) {
    return cls->getValClass()->preClass()->name();
  }
  // Try to get the class name from a LdCls
  IRInstruction* clsInst = cls->inst();
  if (clsInst->op() == LdCls || clsInst->op() == LdClsCached) {
    SSATmp* clsName = clsInst->src(0);
    assert(clsName->isA(Type::Str));
    if (clsName->isConst()) {
      return clsName->getValStr();
    }
  }
  return nullptr;
}
/**
 * Called to clear out the tracked local values at a call site.
 * Calls kill all registers, so we don't want to keep locals in
 * registers across calls. We do continue tracking the types in
 * locals, however.
 */
void TraceBuilder::killLocalsForCall() {
    for (auto& loc : m_locals) {
        SSATmp* t = loc.value;
        // should not kill DefConst, and LdConst should be replaced by DefConst
        if (!t || t->inst()->op() == DefConst) continue;

        if (t->inst()->op() == LdConst) {
            // make the new DefConst instruction
            IRInstruction* clone = t->inst()->clone(&m_irFactory);
            clone->setOpcode(DefConst);
            loc.value = clone->dst();
            continue;
        }
        assert(!t->isConst());
        loc.unsafe = true;
    }
}
Example #24
SSATmp* TraceBuilder::optimizeWork(IRInstruction* inst) {
  static DEBUG_ONLY __thread int instNest = 0;
  if (debug) ++instNest;
  SCOPE_EXIT { if (debug) --instNest; };
  DEBUG_ONLY auto indent = [&] { return std::string(instNest * 2, ' '); };

  FTRACE(1, "{}{}\n", indent(), inst->toString());

  // First pass of tracebuilder optimizations try to replace an
  // instruction based on tracked state before we do anything else.
  // May mutate the IRInstruction in place (and return nullptr) or
  // return an SSATmp*.
  if (SSATmp* preOpt = preOptimize(inst)) {
    FTRACE(1, "  {}preOptimize returned: {}\n",
           indent(), preOpt->inst()->toString());
    return preOpt;
  }
  if (inst->op() == Nop) return nullptr;

  // copy propagation on inst source operands
  copyProp(inst);

  SSATmp* result = nullptr;
  if (m_enableCse && inst->canCSE()) {
    result = cseLookup(inst);
    if (result) {
      // Found a dominating instruction that can be used instead of inst
      FTRACE(1, "  {}cse found: {}\n",
             indent(), result->inst()->toString());
      return result;
    }
  }

  if (m_enableSimplification) {
    result = m_simplifier.simplify(inst);
    if (result) {
      // Found a simpler instruction that can be used instead of inst
      FTRACE(1, "  {}simplification returned: {}\n",
             indent(), result->inst()->toString());
      assert(inst->hasDst());
      return result;
    }
  }
  return nullptr;
}
Example #25
/**
 * Called to clear out the tracked local values at a call site.
 * Calls kill all registers, so we don't want to keep locals in
 * registers across calls. We do continue tracking the types in
 * locals, however.
 */
void TraceBuilder::killLocals() {
  for (uint32_t i = 0; i < m_localValues.size(); i++) {
    SSATmp* t = m_localValues[i];
    // should not kill DefConst, and LdConst should be replaced by DefConst
    if (!t || t->inst()->op() == DefConst) {
      continue;
    }
    if (t->inst()->op() == LdConst) {
      // make the new DefConst instruction
      IRInstruction* clone = t->inst()->clone(&m_irFactory);
      clone->setOpcode(DefConst);
      m_localValues[i] = clone->getDst();
      continue;
    }
    assert(!t->isConst());
    m_localValues[i] = nullptr;
  }
}
SSATmp* TraceBuilder::genBoxLoc(uint32_t id) {
  SSATmp* prevValue  = genLdLoc(id);
  Type prevType = prevValue->type();
  // Don't box if local's value already boxed
  if (prevType.isBoxed()) {
    return prevValue;
  }
  assert(prevType.notBoxed());
  // The Box helper requires us to incref the value it's boxing, but in
  // this case we don't need to incref prevValue because we are simply
  // transferring its refcount from the local to the box.
  if (prevValue->isA(Type::Uninit)) {
    // No box can ever contain Uninit, so promote it to InitNull here.
    prevValue = genDefInitNull();
  }
  SSATmp* newValue = gen(Box, prevValue);
  gen(StLoc, LocalId(id), m_fpValue, newValue);
  return newValue;
}
void LinearScan::removeUnusedSpills() {
  for (SlotInfo& slot : m_slots) {
    IRInstruction* spill = slot.m_spillTmp->getInstruction();
    if (spill->getDst()->getUseCount() == 0) {
      Block* block = spill->getBlock();
      block->erase(block->iteratorTo(spill));
      SSATmp* src = spill->getSrc(0);
      if (src->decUseCount() == 0) {
        Opcode srcOpc = src->getInstruction()->getOpcode();
        // Not all instructions are able to take noreg as their dest
        // reg.  We pick LdLoc and IncRef because they occur often.
        if (srcOpc == IncRef || srcOpc == LdLoc) {
          for (int i = 0, n = src->numNeededRegs(); i < n; ++i) {
            src->setReg(InvalidReg, i);
          }
        }
      }
    }
  }
}
Example #28
PhysReg forceAlloc(const SSATmp& tmp) {
  auto inst = tmp.inst();
  auto opc = inst->op();

  if (tmp.isA(Type::StkPtr)) {
    assert(opc == DefSP ||
           opc == ReDefSP ||
           opc == Call ||
           opc == CallArray ||
           opc == ContEnter ||
           opc == SpillStack ||
           opc == SpillFrame ||
           opc == CufIterSpillFrame ||
           opc == ExceptionBarrier ||
           opc == RetAdjustStack ||
           opc == InterpOne ||
           opc == InterpOneCF ||
           opc == Mov ||
           opc == CheckStk ||
           opc == GuardStk ||
           opc == AssertStk ||
           opc == CastStk ||
           opc == CastStkIntToDbl ||
           opc == CoerceStk ||
           opc == SideExitGuardStk  ||
           MInstrEffects::supported(opc));
    return mcg->backEnd().rVmSp();
  }

  // LdContActRec and LdAFWHActRec, which load a generator's AR, are the only
  // cases where we have a pointer to an AR that is not in rVmFp.
  if (opc != LdContActRec && opc != LdAFWHActRec && tmp.isA(Type::FramePtr)) {
    return mcg->backEnd().rVmFp();
  }

  if (opc == DefMIStateBase) {
    assert(tmp.isA(Type::PtrToCell));
    return mcg->backEnd().rSp();
  }
  return InvalidReg;
}
Example #29
SSATmp* Simplifier::simplifyLdCls(IRInstruction* inst) {
  SSATmp* clsName = inst->getSrc(0);
  if (clsName->isConst()) {
    const Class* cls = Unit::lookupUniqueClass(clsName->getValStr());
    if (cls) {
      if (RuntimeOption::RepoAuthoritative && (cls->attrs() & AttrUnique)) {
        // the class is unique
        return m_tb->genDefConst(cls);
      }
      const Class* ctx = inst->getSrc(1)->getValClass();
      if (ctx && ctx->classof(cls)) {
        // the class of the current function being compiled is the
        // same as or derived from cls, so cls must be defined and
        // cannot change the next time we execute this same code
        return m_tb->genDefConst(cls);
      }
    }
    return m_tb->gen(LdClsCached, clsName);
  }
  return nullptr;
}
void LinearScan::preAllocSpillLoc(uint32_t numSpillLocs) {
  for (Block* block : m_blocks) {
    for (IRInstruction& inst : *block) {
      if (inst.getOpcode() == Spill) {
        SSATmp* dst = inst.getDst();
        for (int index = 0; index < dst->numNeededRegs(); ++index) {
          assert(!dst->hasReg(index));
          if (dst->getSpillInfo(index).type() == SpillInfo::Memory) {
            uint32_t spillLoc = dst->getSpillInfo(index).mem();
            // Native stack layout:
            // |               |
            // +---------------+
            // |               |  <-- spill[5..]
            // | pre allocated |  <-- spill[4]
            // |  (16 slots)   |  <-- spill[3]
            // +---------------+
            // |  return addr  |
            // +---------------+
            // |    extra      |  <-- spill[2]
            // |    spill      |  <-- spill[1]
            // |  locations    |  <-- spill[0]
            // +---------------+  <-- %rsp
            // If a spill location falls into the pre-allocated region, we
            // need to increase its index by 1 to avoid overwriting the
            // return address.
            if (spillLoc + NumPreAllocatedSpillLocs >= numSpillLocs) {
              dst->setSpillInfo(index, SpillInfo(spillLoc + 1));
            }
          }
        }
      }
    }
  }
}
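To make the slot-shifting rule in preAllocSpillLoc concrete, here is a small standalone illustration of the index adjustment. The value 16 for NumPreAllocatedSpillLocs comes from the "(16 slots)" annotation in the diagram above; the function and variable names here are illustrative only, not HHVM's.

#include <cstdio>

constexpr unsigned NumPreAllocatedSpillLocs = 16;  // assumed, per the diagram

// Mirrors the adjustment in preAllocSpillLoc: a slot whose index lands in the
// pre-allocated region sits above the return address, so it moves up by one.
unsigned adjustSpillLoc(unsigned spillLoc, unsigned numSpillLocs) {
  return spillLoc + NumPreAllocatedSpillLocs >= numSpillLocs
           ? spillLoc + 1
           : spillLoc;
}

int main() {
  // With 18 spill locations in total, spill[0] and spill[1] stay below the
  // return address (the "extra" region) and spill[2..17] each shift up by one.
  for (unsigned loc = 0; loc < 18; ++loc) {
    std::printf("spill[%u] -> native slot %u\n", loc, adjustSpillLoc(loc, 18));
  }
  return 0;
}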