Example #1
/**
 * Called to clear out the tracked local values at a call site.
 * Calls kill all registers, so we don't want to keep locals in
 * registers across calls. We do continue tracking the types in
 * locals, however.
 */
void TraceBuilder::killLocalsForCall() {
  auto doKill = [&](smart::vector<LocalState>& locals) {
    for (auto& loc : locals) {
      SSATmp* t = loc.value;
      // Never kill a DefConst; an LdConst is replaced by a DefConst below.
      if (!t || t->inst()->op() == DefConst) continue;

      if (t->inst()->op() == LdConst) {
        // Clone the LdConst as a DefConst: same value, no load at runtime.
        IRInstruction* clone = t->inst()->clone(&m_irFactory);
        clone->setOpcode(DefConst);
        loc.value = clone->dst();
        continue;
      }
      assert(!t->isConst());
      loc.unsafe = true;
    }
  };

  doKill(m_locals);
  m_callerAvailableValues.clear();

  for (auto& state : m_inlineSavedStates) {
    doKill(state->locals);
    state->callerAvailableValues.clear();
  }
}
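
To make the constant-preservation idea above concrete in isolation, here is a self-contained toy sketch (plain C++, not HHVM code; ToyLocal and its fields are invented for illustration): constants survive the call because they can be rematerialized without a register, while every other tracked value is flagged unsafe.

#include <cstdint>
#include <vector>

// Toy model of killLocalsForCall (illustrative only, not HHVM code).
struct ToyLocal {
  int64_t constVal = 0;     // meaningful only when isConst is true
  bool    isConst  = false; // rematerializable, analogous to DefConst
  bool    unsafe   = false; // register copy clobbered by the call
};

void toyKillLocalsForCall(std::vector<ToyLocal>& locals) {
  for (auto& loc : locals) {
    if (loc.isConst) continue; // constants need no register; keep them
    loc.unsafe = true;         // type stays tracked, value does not
  }
}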
Example #2
void printSrc(std::ostream& ostream, const IRInstruction* inst, uint32_t i,
              const RegAllocInfo* regs, const LifetimeInfo* lifetime) {
  SSATmp* src = inst->src(i);
  if (src != nullptr) {
    // A '~' prefix marks a source whose lifetime ends at this instruction
    // (its last use in the linearized order).
    if (lifetime && lifetime->linear[inst] != 0 && !src->isConst() &&
        lifetime->uses[src].lastUse == lifetime->linear[inst]) {
      ostream << "~";
    }
    print(ostream, src, regs, lifetime);
  } else {
    ostream << color(ANSI_COLOR_RED)
            << "!!!NULL @ " << i
            << color(ANSI_COLOR_END)
            ;
  }
}
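
A hypothetical sketch of the "~" last-use convention the printer implements (toy code, not HHVM; names invented): a source is prefixed with '~' exactly at the linear position where its lifetime ends.

#include <iostream>
#include <string>

// Toy version of the last-use marker (illustrative only, not HHVM code).
void toyPrintSrc(std::ostream& os, const std::string& name,
                 unsigned usePos, unsigned lastUsePos) {
  if (usePos == lastUsePos) os << '~'; // value dies at this instruction
  os << name;
}

int main() {
  toyPrintSrc(std::cout, "t3", 7, 7);  // prints "~t3": last use here
  std::cout << ' ';
  toyPrintSrc(std::cout, "t4", 7, 12); // prints "t4": still live after
  std::cout << '\n';
}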
Example #3
const StringData* findClassName(SSATmp* cls) {
  assert(cls->isA(Type::Cls));

  if (cls->isConst()) {
    return cls->getValClass()->preClass()->name();
  }
  // Otherwise, try to recover the name from the defining LdCls or LdClsCached.
  IRInstruction* clsInst = cls->inst();
  if (clsInst->op() == LdCls || clsInst->op() == LdClsCached) {
    SSATmp* clsName = clsInst->src(0);
    assert(clsName->isA(Type::Str));
    if (clsName->isConst()) {
      return clsName->getValStr();
    }
  }
  return nullptr;
}
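
The pattern here, peeking through a value's defining instruction to recover a constant operand, in a self-contained toy form (invented names, not HHVM code):

#include <string>

// Toy model of the findClassName pattern (illustrative only).
struct ToyInst;

struct ToyTmp {
  bool isConst = false;
  std::string constStr;          // valid only when isConst is true
  const ToyInst* def = nullptr;  // the instruction defining this value
};

struct ToyInst {
  std::string opcode;
  const ToyTmp* src0 = nullptr;
};

// Constant value first; otherwise peek through an "LdCls"-like definer
// whose first operand may be a constant name.
const std::string* toyFindName(const ToyTmp& v) {
  if (v.isConst) return &v.constStr;
  if (v.def && v.def->opcode == "LdCls" &&
      v.def->src0 && v.def->src0->isConst) {
    return &v.def->src0->constStr;
  }
  return nullptr;
}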
Example #4
/**
 * Called to clear out the tracked local values at a call site.
 * Calls kill all registers, so we don't want to keep locals in
 * registers across calls. We do continue tracking the types in
 * locals, however.
 */
void TraceBuilder::killLocalsForCall() {
  for (auto& loc : m_locals) {
    SSATmp* t = loc.value;
    // Never kill a DefConst; an LdConst is replaced by a DefConst below.
    if (!t || t->inst()->op() == DefConst) continue;

    if (t->inst()->op() == LdConst) {
      // Clone the LdConst as a DefConst: same value, no load at runtime.
      IRInstruction* clone = t->inst()->clone(&m_irFactory);
      clone->setOpcode(DefConst);
      loc.value = clone->dst();
      continue;
    }
    assert(!t->isConst());
    loc.unsafe = true;
  }
}
Example #5
/**
 * Called to clear out the tracked local values at a call site.
 * Calls kill all registers, so we don't want to keep locals in
 * registers across calls. We do continue tracking the types in
 * locals, however.
 */
void TraceBuilder::killLocals() {
  for (uint32_t i = 0; i < m_localValues.size(); i++) {
    SSATmp* t = m_localValues[i];
    // Never kill a DefConst; an LdConst is replaced by a DefConst below.
    if (!t || t->inst()->op() == DefConst) {
      continue;
    }
    if (t->inst()->op() == LdConst) {
      // Clone the LdConst as a DefConst: same value, no load at runtime.
      IRInstruction* clone = t->inst()->clone(&m_irFactory);
      clone->setOpcode(DefConst);
      m_localValues[i] = clone->getDst();
      continue;
    }
    assert(!t->isConst());
    m_localValues[i] = nullptr;
  }
}
Example #6
SSATmp* Simplifier::simplifyLdCls(IRInstruction* inst) {
  SSATmp* clsName = inst->getSrc(0);
  if (clsName->isConst()) {
    const Class* cls = Unit::lookupUniqueClass(clsName->getValStr());
    if (cls) {
      if (RuntimeOption::RepoAuthoritative && (cls->attrs() & AttrUnique)) {
        // The class is unique, so its pointer can be burned in as a constant.
        return m_tb->genDefConst(cls);
      }
      const Class* ctx = inst->getSrc(1)->getValClass();
      if (ctx && ctx->classof(cls)) {
        // The class of the function being compiled is the same as or
        // derived from cls, so cls must already be defined and cannot
        // change the next time this code executes.
        return m_tb->genDefConst(cls);
      }
    }
    return m_tb->gen(LdClsCached, clsName);
  }
  return nullptr;
}
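
The second branch relies on a subclass check: if the compilation context's class is cls or derives from it, cls must already be loaded whenever this code runs. A minimal sketch of such a classof walk (toy code; HHVM's Class::classof differs in detail):

// Toy ancestor-chain walk (illustrative only, not HHVM's Class::classof).
struct ToyClass {
  const ToyClass* parent = nullptr;
  bool classof(const ToyClass* other) const {
    for (auto c = this; c != nullptr; c = c->parent) {
      if (c == other) return true; // other is this class or an ancestor
    }
    return false;
  }
};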
Example #7
SSATmp* IRBuilder::preOptimizeCheckType(IRInstruction* inst) {
  SSATmp* src  = inst->src(0);
  auto const oldType = src->type();
  auto const newType = inst->typeParam();

  if (oldType.not(newType)) {
    if (oldType.isBoxed() && newType.isBoxed()) {
      /* This CheckType serves to update the inner type hint for a boxed
       * value, which requires no runtime work.  This depends on the type being
       * boxed, and constraining it with DataTypeCountness will do it.  */
      constrainValue(src, DataTypeCountness);
      return gen(AssertType, newType, src);
    }
    /* This check will always fail. It's probably due to an incorrect
     * prediction. Generate a Jmp, and return src because
     * following instructions may depend on the output of CheckType
     * (they'll be DCEd later). Note that we can't use convertToJmp
     * because the return value isn't nullptr, so the original
     * instruction won't be inserted into the stream. */
    gen(Jmp, inst->taken());
    return src;
  }

  if (newType >= oldType) {
    /*
     * The type of the src is the same or more refined than type, so the guard
     * is unnecessary.
     */
    return src;
  }
  if (newType < oldType) {
    assert(!src->isConst());
    return nullptr;
  }

  return nullptr;
}
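
The three branches above are lattice queries on Type. A toy bitset lattice (each bit a primitive type, unions as bit sets; not HHVM's Type) shows what `not`, `>=`, and `<` are testing:

#include <cstdint>

// Toy type lattice (illustrative only): subtype == subset of bits.
// ("not" is a C++ keyword, hence the trailing underscore.)
struct ToyType {
  uint64_t bits;
  // Disjoint: a CheckType to a disjoint type can never succeed.
  bool not_(ToyType o) const { return (bits & o.bits) == 0; }
  // Supertype-or-equal: checking against a wider type is a no-op.
  bool operator>=(ToyType o) const { return (bits | o.bits) == bits; }
  // Strictly more refined: the check does real work, so keep it.
  bool operator<(ToyType o) const {
    return (bits | o.bits) == o.bits && bits != o.bits;
  }
};
// E.g. Int = {bit 0}, Str = {bit 1}: Int.not_(Str) holds, so a
// CheckType<Str> applied to an Int always fails, as in the first branch.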
Example #8
void CodeGenerator::cgGuardRefs(IRInstruction* inst) {
  assert(inst->numSrcs() == 5);

  SSATmp* funcPtrTmp = inst->src(0);
  SSATmp* nParamsTmp = inst->src(1);
  SSATmp* firstBitNumTmp = inst->src(2);
  SSATmp* mask64Tmp  = inst->src(3);
  SSATmp* vals64Tmp  = inst->src(4);

  // Get values in place
  assert(funcPtrTmp->type() == Type::Func);
  auto funcPtrReg = x2a(curOpd(funcPtrTmp).reg());
  assert(funcPtrReg.IsValid());

  assert(nParamsTmp->type() == Type::Int);
  auto nParamsReg = x2a(curOpd(nParamsTmp).reg());
  assert(nParamsReg.IsValid() || nParamsTmp->isConst());

  assert(firstBitNumTmp->isConst() && firstBitNumTmp->type() == Type::Int);
  uint32_t firstBitNum = (uint32_t)(firstBitNumTmp->getValInt());

  assert(mask64Tmp->type() == Type::Int);
  assert(mask64Tmp->isConst());
  auto mask64Reg = x2a(curOpd(mask64Tmp).reg());
  assert(mask64Reg.IsValid() || mask64Tmp->inst()->op() != LdConst);
  uint64_t mask64 = mask64Tmp->getValInt();
  assert(mask64);

  assert(vals64Tmp->type() == Type::Int);
  assert(vals64Tmp->isConst());
  auto vals64Reg = x2a(curOpd(vals64Tmp).reg());
  assert(vals64Reg.IsValid() || vals64Tmp->inst()->op() != LdConst);
  uint64_t vals64 = vals64Tmp->getValInt();
  assert((vals64 & mask64) == vals64);

  auto const destSK = SrcKey(curFunc(), m_unit.bcOff());
  auto const destSR = m_tx64->getSrcRec(destSK);

  auto thenBody = [&] {
    auto bitsOff = sizeof(uint64_t) * (firstBitNum / 64);
    auto cond = CC_NE;
    auto bitsPtrReg = rAsm;

    if (firstBitNum == 0) {
      bitsOff = Func::refBitValOff();
      bitsPtrReg = funcPtrReg;
    } else {
      m_as.    Ldr  (bitsPtrReg, funcPtrReg[Func::sharedOff()]);
      bitsOff -= sizeof(uint64_t);
    }

    // Don't need the bits pointer after this point
    auto bitsReg = rAsm;
    // Load the bits
    m_as.    Ldr  (bitsReg, bitsPtrReg[bitsOff]);

    // Mask the bits. There are restrictions on what can be encoded as an
    // immediate in ARM's logical instructions, and if they're not met, we'll
    // have to use a register.
    if (vixl::Assembler::IsImmLogical(mask64, vixl::kXRegSize)) {
      m_as.  And  (bitsReg, bitsReg, mask64);
    } else {
      if (mask64Reg.IsValid()) {
        m_as.And  (bitsReg, bitsReg, mask64Reg);
      } else {
        m_as.Mov  (rAsm2, mask64);
        m_as.And  (bitsReg, bitsReg, rAsm2);
      }
    }

    // Now do the compare. There are also restrictions on immediates in
    // arithmetic instructions (of which Cmp is one; it's just a subtract that
    // sets flags), so same deal as with the mask immediate above.
    if (vixl::Assembler::IsImmArithmetic(vals64)) {
      m_as.  Cmp  (bitsReg, vals64);
    } else {
      if (vals64Reg.IsValid()) {
        m_as.Cmp  (bitsReg, vals64Reg);
      } else {
        m_as.Mov  (rAsm2, vals64);
        m_as.Cmp  (bitsReg, rAsm2);
      }
    }
    destSR->emitFallbackJump(m_mainCode, cond);
  };

  if (firstBitNum == 0) {
    assert(!nParamsReg.IsValid());
    // This is the first 64 bits. No need to check
    // nParams.
    thenBody();
  } else {
    assert(nParamsReg.IsValid());
    // Check number of args...
    m_as.    Cmp   (nParamsReg, firstBitNum);

    if (vals64 != 0 && vals64 != mask64) {
      // Beyond nParams, either all params are refs or all params are
      // non-refs, so if vals64 is neither 0 nor mask64, a match is
      // impossible.
      destSR->emitFallbackJump(m_mainCode, CC_LE);
      thenBody();
    } else {
      ifThenElse(m_as, vixl::gt, thenBody, /* else */ [&] {
          // If this is not a special builtin...
          m_as.  Ldr  (rAsm, funcPtrReg[Func::attrsOff()]);
          m_as.  Tst  (rAsm, AttrVariadicByRef);
          destSR->emitFallbackJump(m_mainCode, vals64 ? CC_Z : CC_NZ);
        });
    }
  }
}
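
What the masked-compare assembly computes, restated as plain C++ (a sketch; refBitsMatch is a hypothetical helper, not HHVM code): the function's by-ref parameter bits, restricted by mask64, must equal vals64 exactly, and the CC_NE fallback fires otherwise.

#include <cstdint>

// Plain-C++ statement of the guard (illustrative only).
bool refBitsMatch(uint64_t refBits, uint64_t mask64, uint64_t vals64) {
  return (refBits & mask64) == vals64;
}
// Example: mask64 = 0b0110 selects params 1 and 2, vals64 = 0b0010 means
// "param 1 by ref, param 2 by value"; refBits = 0b1010 passes the guard,
// refBits = 0b0100 does not.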
Example #9
void CodeGenerator::cgGuardRefs(IRInstruction* inst) {
  assert(inst->numSrcs() == 5);

  SSATmp* funcPtrTmp = inst->src(0);
  SSATmp* nParamsTmp = inst->src(1);
  SSATmp* firstBitNumTmp = inst->src(2);
  SSATmp* mask64Tmp  = inst->src(3);
  SSATmp* vals64Tmp  = inst->src(4);

  // Get values in place
  assert(funcPtrTmp->type() == Type::Func);
  auto funcPtrReg = x2a(m_regs[funcPtrTmp].reg());
  assert(funcPtrReg.IsValid());

  assert(nParamsTmp->type() == Type::Int);
  auto nParamsReg = x2a(m_regs[nParamsTmp].reg());
  assert(nParamsReg.IsValid() || nParamsTmp->isConst());

  assert(firstBitNumTmp->isConst() && firstBitNumTmp->type() == Type::Int);
  uint32_t firstBitNum = (uint32_t)(firstBitNumTmp->getValInt());

  assert(mask64Tmp->type() == Type::Int);
  assert(mask64Tmp->isConst());
  auto mask64Reg = x2a(m_regs[mask64Tmp].reg());
  assert(mask64Reg.IsValid() || mask64Tmp->inst()->op() != LdConst);
  uint64_t mask64 = mask64Tmp->getValInt();
  assert(mask64);

  assert(vals64Tmp->type() == Type::Int);
  assert(vals64Tmp->isConst());
  auto vals64Reg = x2a(m_regs[vals64Tmp].reg());
  assert(vals64Reg.IsValid() || vals64Tmp->inst()->op() != LdConst);
  uint64_t vals64 = vals64Tmp->getValInt();
  assert((vals64 & mask64) == vals64);

  auto const destSK = SrcKey(curFunc(), m_unit.bcOff());
  auto const destSR = m_tx64->getSrcRec(destSK);

  auto thenBody = [&] {
    auto bitsOff = sizeof(uint64_t) * (firstBitNum / 64);
    auto cond = CC_NE;
    auto bitsPtrReg = rAsm;

    if (firstBitNum == 0) {
      bitsOff = Func::refBitValOff();
      bitsPtrReg = funcPtrReg;
    } else {
      m_as.    Ldr  (bitsPtrReg, funcPtrReg[Func::sharedOff()]);
      bitsOff -= sizeof(uint64_t);
    }

    if (vals64 == 0 || (mask64 & (mask64 - 1)) == 0) {
      // If vals64 is zero, or we're testing a single
      // bit, we can get away with a single test,
      // rather than mask-and-compare
      m_as.    Ldr  (rAsm2, bitsPtrReg[bitsOff]);
      if (mask64Reg.IsValid()) {
        m_as.  Tst  (rAsm2, mask64Reg);
      } else {
        assert(vixl::Assembler::IsImmLogical(mask64, vixl::kXRegSize));
        m_as.  Tst  (rAsm2, mask64);
      }
      // With a single-bit mask, a non-zero vals64 means the guard passes
      // when the bit is set, so fall back on "equal to zero" instead.
      if (vals64) cond = CC_E;
    } else {
      auto bitsValReg = rAsm;
      m_as.    Ldr  (bitsValReg, bitsPtrReg[bitsOff]);
      // The bits pointer is dead after the load above (bitsValReg may
      // alias it); in debug builds, poison it to catch accidental reuse.
      if (debug) bitsPtrReg = Register();

      //     bitsValReg <- bitsValReg & mask64
      // NB: these 'And' ops don't set flags. They don't need to.
      if (mask64Reg.IsValid()) {
        m_as.  And    (bitsValReg, bitsValReg, mask64Reg);
      } else {
        // There are restrictions on the immediates that can be encoded into
        // logical ops. If the mask doesn't meet those restrictions, we have to
        // load it into a register first.
        if (vixl::Assembler::IsImmLogical(mask64, vixl::kXRegSize)) {
          m_as.And    (bitsValReg, bitsValReg, mask64);
        } else {
          m_as.Mov    (rAsm2, mask64);
          m_as.And    (bitsValReg, bitsValReg, rAsm2);
        }
      }

      //   If bitsValReg != vals64, then goto Exit
      if (vals64Reg.IsValid()) {
        m_as.  Cmp    (bitsValReg, vals64Reg);
      } else {
        m_as.  Cmp    (bitsValReg, vals64);
      }
    }
    destSR->emitFallbackJump(m_mainCode, cond);
  };

  if (firstBitNum == 0) {
    assert(!nParamsReg.IsValid());
    // This is the first 64 bits. No need to check
    // nParams.
    thenBody();
  } else {
    assert(nParamsReg.IsValid());
    // Check number of args...
    m_as.    Cmp   (nParamsReg, firstBitNum);

    if (vals64 != 0 && vals64 != mask64) {
      // Beyond nParams, either all params are refs or all params are
      // non-refs, so if vals64 is neither 0 nor mask64, a match is
      // impossible.
      destSR->emitFallbackJump(m_mainCode, CC_LE);
      thenBody();
    } else {
      ifThenElse(m_as, vixl::gt, thenBody, /* else */ [&] {
          // If this is not a special builtin...
          m_as.  Ldr  (rAsm, funcPtrReg[Func::attrsOff()]);
          m_as.  Tst  (rAsm, AttrVariadicByRef);
          destSR->emitFallbackJump(m_mainCode, vals64 ? CC_Z : CC_NZ);
        });
    }
  }
}
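
This version adds a fast path: when vals64 is zero or mask64 is a single bit, a Tst (a bitwise AND that only sets flags) decides the guard without a separate compare. A sketch of the condition and why it is sound (illustrative only, not HHVM code):

#include <cstdint>

// If vals64 == 0, "(bits & mask) == 0" is exactly what Tst checks.
// If mask64 has one bit set, (bits & mask) is either 0 or mask64, so a
// zero/non-zero test still decides equality with vals64; a non-zero
// vals64 just flips the fallback condition from CC_NE to CC_E.
bool singleTestSuffices(uint64_t mask64, uint64_t vals64) {
  return vals64 == 0 || (mask64 & (mask64 - 1)) == 0; // single-bit mask
}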