Example #1
void CodeGenerator::cgGuardRefs(IRInstruction* inst) {
  assert(inst->numSrcs() == 5);

  SSATmp* funcPtrTmp = inst->src(0);
  SSATmp* nParamsTmp = inst->src(1);
  SSATmp* firstBitNumTmp = inst->src(2);
  SSATmp* mask64Tmp  = inst->src(3);
  SSATmp* vals64Tmp  = inst->src(4);

  // Get values in place
  assert(funcPtrTmp->type() == Type::Func);
  auto funcPtrReg = x2a(curOpd(funcPtrTmp).reg());
  assert(funcPtrReg.IsValid());

  assert(nParamsTmp->type() == Type::Int);
  auto nParamsReg = x2a(curOpd(nParamsTmp).reg());
  assert(nParamsReg.IsValid() || nParamsTmp->isConst());

  assert(firstBitNumTmp->isConst() && firstBitNumTmp->type() == Type::Int);
  uint32_t firstBitNum = (uint32_t)(firstBitNumTmp->getValInt());

  assert(mask64Tmp->type() == Type::Int);
  assert(mask64Tmp->isConst());
  auto mask64Reg = x2a(curOpd(mask64Tmp).reg());
  assert(mask64Reg.IsValid() || mask64Tmp->inst()->op() != LdConst);
  uint64_t mask64 = mask64Tmp->getValInt();
  assert(mask64);

  assert(vals64Tmp->type() == Type::Int);
  assert(vals64Tmp->isConst());
  auto vals64Reg = x2a(curOpd(vals64Tmp).reg());
  assert(vals64Reg.IsValid() || vals64Tmp->inst()->op() != LdConst);
  uint64_t vals64 = vals64Tmp->getValInt();
  assert((vals64 & mask64) == vals64);

  auto const destSK = SrcKey(curFunc(), m_unit.bcOff());
  auto const destSR = m_tx64->getSrcRec(destSK);

  auto thenBody = [&] {
    auto bitsOff = sizeof(uint64_t) * (firstBitNum / 64);
    auto cond = CC_NE;
    auto bitsPtrReg = rAsm;

    if (firstBitNum == 0) {
      bitsOff = Func::refBitValOff();
      bitsPtrReg = funcPtrReg;
    } else {
      m_as.    Ldr  (bitsPtrReg, funcPtrReg[Func::sharedOff()]);
      bitsOff -= sizeof(uint64_t);
    }
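    // (Evidently the first 64 ref bits live inline in Func, while later words
    // live in the shared data, hence the one-word offset adjustment above.)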

    // Don't need the bits pointer after this point
    auto bitsReg = rAsm;
    // Load the bits
    m_as.    Ldr  (bitsReg, bitsPtrReg[bitsOff]);

    // Mask the bits. There are restrictions on what can be encoded as an
    // immediate in ARM's logical instructions, and if they're not met, we'll
    // have to use a register.
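    // For example (assumed values, not from the source): a repeating pattern
    // such as 0x00ff00ff00ff00ff encodes as an ARM64 logical immediate, while
    // an arbitrary value like 0x123456789abcdef1 does not and must be moved
    // into a scratch register first.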
    if (vixl::Assembler::IsImmLogical(mask64, vixl::kXRegSize)) {
      m_as.  And  (bitsReg, bitsReg, mask64);
    } else {
      if (mask64Reg.IsValid()) {
        m_as.And  (bitsReg, bitsReg, mask64Reg);
      } else {
        m_as.Mov  (rAsm2, mask64);
        m_as.And  (bitsReg, bitsReg, rAsm2);
      }
    }

    // Now do the compare. There are also restrictions on immediates in
    // arithmetic instructions (of which Cmp is one; it's just a subtract that
    // sets flags), so same deal as with the mask immediate above.
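    // Concretely (assumed values): arithmetic immediates are a 12-bit unsigned
    // value optionally shifted left by 12, so 0xfff and 0xfff000 encode
    // directly while something like 0x1001 does not.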
    if (vixl::Assembler::IsImmArithmetic(vals64)) {
      m_as.  Cmp  (bitsReg, vals64);
    } else {
      if (vals64Reg.IsValid()) {
        m_as.Cmp  (bitsReg, vals64Reg);
      } else {
        m_as.Mov  (rAsm2, vals64);
        m_as.Cmp  (bitsReg, rAsm2);
      }
    }
    destSR->emitFallbackJump(m_mainCode, cond);
  };

  if (firstBitNum == 0) {
    assert(!nParamsReg.IsValid());
    // This is the first 64 bits; no need to check nParams.
    thenBody();
  } else {
    assert(nParamsReg.IsValid());
    // Check number of args...
    m_as.    Cmp   (nParamsReg, firstBitNum);

    if (vals64 != 0 && vals64 != mask64) {
      // Beyond nParams, either all params are refs or all params are
      // non-refs, so if vals64 is neither 0 nor mask64 there is no
      // possibility of a match; bail out (CC_LE: nParams <= firstBitNum).
      destSR->emitFallbackJump(m_mainCode, CC_LE);
      thenBody();
    } else {
      ifThenElse(m_as, vixl::gt, thenBody, /* else */ [&] {
          // Beyond nParams, params are by-ref iff the function carries
          // AttrVariadicByRef (special builtins); fall back when that
          // disagrees with the expected vals64.
          m_as.  Ldr  (rAsm, funcPtrReg[Func::attrsOff()]);
          m_as.  Tst  (rAsm, AttrVariadicByRef);
          destSR->emitFallbackJump(m_mainCode, vals64 ? CC_Z : CC_NZ);
        });
    }
  }
}
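
For reference, a minimal C++ sketch of the check this codegen emits (the names and ref-bit layout here are assumptions for illustration, not HHVM's actual API): select the 64-bit ref-bit word covering the guarded parameters, mask it, and compare against the expected pattern; a mismatch fails the guard.

#include <cstdint>

// Hypothetical mirror of the guard's semantics; refBits, firstBitNum, mask64
// and vals64 correspond to the SSATmp inputs above.
bool guardRefsWouldPass(const uint64_t* refBits, uint32_t firstBitNum,
                        uint64_t mask64, uint64_t vals64) {
  // Load the 64-bit word holding the by-ref bits for this parameter range.
  uint64_t bits = refBits[firstBitNum / 64];
  // Keep only the guarded positions and require an exact match.
  return (bits & mask64) == vals64;
}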
Example #2
void CodeGenerator::cgGuardRefs(IRInstruction* inst) {
  assert(inst->numSrcs() == 5);

  SSATmp* funcPtrTmp = inst->src(0);
  SSATmp* nParamsTmp = inst->src(1);
  SSATmp* firstBitNumTmp = inst->src(2);
  SSATmp* mask64Tmp  = inst->src(3);
  SSATmp* vals64Tmp  = inst->src(4);

  // Get values in place
  assert(funcPtrTmp->type() == Type::Func);
  auto funcPtrReg = x2a(m_regs[funcPtrTmp].reg());
  assert(funcPtrReg.IsValid());

  assert(nParamsTmp->type() == Type::Int);
  auto nParamsReg = x2a(m_regs[nParamsTmp].reg());
  assert(nParamsReg.IsValid() || nParamsTmp->isConst());

  assert(firstBitNumTmp->isConst() && firstBitNumTmp->type() == Type::Int);
  uint32_t firstBitNum = (uint32_t)(firstBitNumTmp->getValInt());

  assert(mask64Tmp->type() == Type::Int);
  assert(mask64Tmp->isConst());
  auto mask64Reg = x2a(m_regs[mask64Tmp].reg());
  assert(mask64Reg.IsValid() || mask64Tmp->inst()->op() != LdConst);
  uint64_t mask64 = mask64Tmp->getValInt();
  assert(mask64);

  assert(vals64Tmp->type() == Type::Int);
  assert(vals64Tmp->isConst());
  auto vals64Reg = x2a(m_regs[vals64Tmp].reg());
  assert(vals64Reg.IsValid() || vals64Tmp->inst()->op() != LdConst);
  uint64_t vals64 = vals64Tmp->getValInt();
  assert((vals64 & mask64) == vals64);

  auto const destSK = SrcKey(curFunc(), m_unit.bcOff());
  auto const destSR = m_tx64->getSrcRec(destSK);

  auto thenBody = [&] {
    auto bitsOff = sizeof(uint64_t) * (firstBitNum / 64);
    auto cond = CC_NE;
    auto bitsPtrReg = rAsm;

    if (firstBitNum == 0) {
      bitsOff = Func::refBitValOff();
      bitsPtrReg = funcPtrReg;
    } else {
      m_as.    Ldr  (bitsPtrReg, funcPtrReg[Func::sharedOff()]);
      bitsOff -= sizeof(uint64_t);
    }

    if (vals64 == 0 || (mask64 & (mask64 - 1)) == 0) {
      // If vals64 is zero, or we're testing a single bit, a single test
      // suffices instead of a mask-and-compare.
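      // Tst sets flags from (bits & mask): with vals64 == 0 we fall back when
      // any guarded bit is set (CC_NE), and with a single expected bit we flip
      // the sense below to CC_E, falling back when that bit is clear.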
      m_as.    Ldr  (rAsm2, bitsPtrReg[bitsOff]);
      if (mask64Reg.IsValid()) {
        m_as.  Tst  (rAsm2, mask64Reg);
      } else {
        assert(vixl::Assembler::IsImmLogical(mask64, vixl::kXRegSize));
        m_as.  Tst  (rAsm2, mask64);
      }
      if (vals64) cond = CC_E;
    } else {
      auto bitsValReg = rAsm;
      m_as.    Ldr  (bitsValReg, bitsPtrReg[bitsOff]);
      if (debug) bitsPtrReg = Register();

      //     bitsValReg <- bitsValReg & mask64
      // NB: these 'And' ops don't set flags. They don't need to.
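      // (vixl does provide a flag-setting Ands; here the flags consumed by the
      // fallback jump come from the Cmp below instead.)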
      if (mask64Reg.IsValid()) {
        m_as.  And    (bitsValReg, bitsValReg, mask64Reg);
      } else {
        // There are restrictions on the immediates that can be encoded into
        // logical ops. If the mask doesn't meet those restrictions, we have to
        // load it into a register first.
        if (vixl::Assembler::IsImmLogical(mask64, vixl::kXRegSize)) {
          m_as.And    (bitsValReg, bitsValReg, mask64);
        } else {
          m_as.Mov    (rAsm2, mask64);
          m_as.And    (bitsValReg, bitsValReg, rAsm2);
        }
      }

      //   If bitsValReg != vals64, fall back (the jump is emitted below).
      if (vals64Reg.IsValid()) {
        m_as.  Cmp    (bitsValReg, vals64Reg);
      } else {
        m_as.  Cmp    (bitsValReg, vals64);
      }
    }
    destSR->emitFallbackJump(m_mainCode, cond);
  };

  if (firstBitNum == 0) {
    assert(!nParamsReg.IsValid());
    // This is the first 64 bits; no need to check nParams.
    thenBody();
  } else {
    assert(nParamsReg.IsValid());
    // Check number of args...
    m_as.    Cmp   (nParamsReg, firstBitNum);

    if (vals64 != 0 && vals64 != mask64) {
      // Beyond nParams, either all params are refs or all params are
      // non-refs, so if vals64 is neither 0 nor mask64 there is no
      // possibility of a match; bail out (CC_LE: nParams <= firstBitNum).
      destSR->emitFallbackJump(m_mainCode, CC_LE);
      thenBody();
    } else {
      ifThenElse(m_as, vixl::gt, thenBody, /* else */ [&] {
          // Beyond nParams, params are by-ref iff the function carries
          // AttrVariadicByRef (special builtins); fall back when that
          // disagrees with the expected vals64.
          m_as.  Ldr  (rAsm, funcPtrReg[Func::attrsOff()]);
          m_as.  Tst  (rAsm, AttrVariadicByRef);
          destSR->emitFallbackJump(m_mainCode, vals64 ? CC_Z : CC_NZ);
        });
    }
  }
}
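
The single-test fast path in Example #2 hinges on the classic power-of-two check, mask64 & (mask64 - 1) == 0; as a standalone sketch (helper name assumed):

#include <cstdint>

// True when exactly one bit of mask is set, in which case a lone Tst can
// decide the guard without a separate mask-and-compare.
bool isSingleBitMask(uint64_t mask) {
  return mask != 0 && (mask & (mask - 1)) == 0;
}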