Example #1
/// We do not support symbolic projections yet, only 32-bit unsigned integers.
bool swift::getIntegerIndex(SILValue IndexVal, unsigned &IndexConst) {
    if (auto *IndexLiteral = dyn_cast<IntegerLiteralInst>(IndexVal)) {
        APInt ConstInt = IndexLiteral->getValue();
        // IntegerLiterals are signed.
        if (ConstInt.isIntN(32) && ConstInt.isNonNegative()) {
            IndexConst = (unsigned)ConstInt.getSExtValue();
            return true;
        }
    }
    return false;
}
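A standalone sketch of the same guard using a plain int64_t in place of the IntegerLiteralInst payload (the helper name and the test values are illustrative, not part of the Swift sources): a non-negative value below 2^32 passes, anything else is rejected, mirroring ConstInt.isIntN(32) && ConstInt.isNonNegative().

#include <cassert>
#include <cstdint>

static bool getIndexIfU32(int64_t Literal, unsigned &IndexConst) {
  // Mirrors ConstInt.isIntN(32) && ConstInt.isNonNegative().
  if (Literal >= 0 && Literal < (INT64_C(1) << 32)) {
    IndexConst = (unsigned)Literal;
    return true;
  }
  return false;
}

int main() {
  unsigned Idx;
  assert(getIndexIfU32(42, Idx) && Idx == 42);   // small positive: accepted
  assert(!getIndexIfU32(-1, Idx));               // negative: rejected
  assert(!getIndexIfU32(INT64_C(1) << 32, Idx)); // too wide: rejected
}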
Example #2
/// Extract all of the characters from the number \p Num one by one and
/// insert them into the string builder \p SB.
static void DecodeFixedWidth(APInt &Num, std::string &SB) {
  uint64_t CL = Huffman::CharsetLength;

  // NumLetters is the number of characters that we can hold in a 64-bit
  // number. Each letter takes Log2(CL) bits. Doing this computation in
  // floating-point arithmetic could give a slightly better (more optimistic)
  // result, but the computation may not be constant at compile time.
  uint64_t NumLetters = 64 / Log2_64_Ceil(CL);

  assert(Num.getBitWidth() > 8 &&
         "Not enough bits for arithmetic on this alphabet");

  // Try to decode NumLetters characters at once. It is much faster to work
  // with local 64-bit numbers than with APInt. In this loop we extract
  // NumLetters characters at a time and process them using a local 64-bit
  // number.

  // Calculate CL**NumLetters (CharsetLength to the power of NumLetters).
  // NumLetters base-CL characters represent exactly the values below this
  // bound, which fits in a 64-bit number. Notice: this loop is optimized
  // away and CLX is computed to a constant integer at compile time.
  uint64_t CLX = 1;
  for (unsigned i = 0; i < NumLetters; i++) { CLX *= CL; }

  while (Num.ugt(CLX)) {
    unsigned BW = Num.getBitWidth();
    APInt C = APInt(BW, CLX);
    APInt Quotient(1, 0), Remainder(1, 0);
    APInt::udivrem(Num, C, Quotient, Remainder);

    // Try to reduce the bitwidth of the APInt after the division. This can
    // accelerate the division operation in future iterations because the
    // number becomes smaller (fewer bits) with each iteration. However,
    // we can't reduce the number to something too small because we still
    // need to be able to perform the "mod charset_length" operation.
    Num = Quotient.zextOrTrunc(std::max(Quotient.getActiveBits(), 64u));
    uint64_t Tail = Remainder.getZExtValue();
    for (unsigned i = 0; i < NumLetters; i++) {
      SB += Huffman::Charset[Tail % CL];
      Tail = Tail / CL;
    }
  }

  // Pop characters out of the APInt one by one.
  while (Num.getBoolValue()) {
    unsigned BW = Num.getBitWidth();

    APInt C = APInt(BW, CL);
    APInt Quotient(1, 0), Remainder(1, 0);
    APInt::udivrem(Num, C, Quotient, Remainder);
    Num = Quotient;
    SB += Huffman::Charset[Remainder.getZExtValue()];
  }
}
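A minimal standalone sketch of the batching trick with native integers, assuming a hypothetical 64-character alphabet in place of Huffman::Charset: one wide division peels off NumLetters characters at once, which are then extracted with cheap 64-bit arithmetic.

#include <cstdint>
#include <cstdio>
#include <string>

static const char Charset[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

int main() {
  const uint64_t CL = 64;          // stand-in for Huffman::CharsetLength
  const uint64_t NumLetters = 10;  // 64 / Log2_64_Ceil(64) == 64 / 6
  uint64_t CLX = 1;
  for (unsigned i = 0; i < NumLetters; i++) CLX *= CL; // CL**NumLetters

  uint64_t Num = 0x123456789ABCDEFULL;
  std::string SB;
  // One wide division, then NumLetters cheap native divisions on the
  // remainder -- the same batching DecodeFixedWidth performs on an APInt.
  uint64_t Tail = Num % CLX;
  Num /= CLX;
  for (unsigned i = 0; i < NumLetters; i++) {
    SB += Charset[Tail % CL];
    Tail /= CL;
  }
  printf("leftover=%llu chars=%s\n", (unsigned long long)Num, SB.c_str());
}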
Example #3
std::string get_string(const APInt &api)
{
	std::ostringstream str;
	str << "_b";
	for (unsigned count = api.countLeadingZeros(); count > 0; count--)
		str << "0";

	if (api != 0)
		str << api.toString(2, false /* treat as unsigned */);
	return str.str();
}
Example #4
/// truncate - Return a new range in the specified integer type, which must be
/// strictly smaller than the current type.  The returned range will
/// correspond to the possible range of values as if the source range had been
/// truncated to the specified type.
ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
  unsigned SrcTySize = getBitWidth();
  assert(SrcTySize > DstTySize && "Not a value truncation");
  APInt Size(APInt::getLowBitsSet(SrcTySize, DstTySize));
  if (isFullSet() || getSetSize().ugt(Size))
    return ConstantRange(DstTySize);

  APInt L = Lower; L.trunc(DstTySize);
  APInt U = Upper; U.trunc(DstTySize);
  return ConstantRange(L, U);
}
Example #5
unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                               Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default: return TCC_Free;
  case Instruction::GetElementPtr:
    if (Idx == 0)
      return 2 * TCC_Basic;
    return TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if ((Idx == ImmIdx) &&
      Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
    return TCC_Free;

  return X86TTI::getIntImmCost(Imm, Ty);
}
Example #6
/// zeroExtend - Return a new range in the specified integer type, which must
/// be strictly larger than the current type.  The returned range will
/// correspond to the possible range of values as if the source range had been
/// zero extended.
ConstantRange ConstantRange::zeroExtend(uint32_t DstTySize) const {
  unsigned SrcTySize = getBitWidth();
  assert(SrcTySize < DstTySize && "Not a value extension");
  if (isFullSet())
    // Change a source full set into [0, 1 << SrcTySize)
    return ConstantRange(APInt(DstTySize,0), APInt(DstTySize,1).shl(SrcTySize));

  APInt L = Lower; L.zext(DstTySize);
  APInt U = Upper; U.zext(DstTySize);
  return ConstantRange(L, U);
}
Example #7
static void EncodeFixedWidth(APInt &num, char ch) {
  APInt C = APInt(num.getBitWidth(), Huffman::CharsetLength);
  // TODO: autogenerate a table for the reverse lookup.
  for (unsigned i = 0; i < Huffman::CharsetLength; i++) {
    if (Huffman::Charset[i] == ch) {
      num *= C;
      num += APInt(num.getBitWidth(), i);
      return;
    }
  }
  assert(false && "Character not found in Huffman::Charset");
}
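EncodeFixedWidth is the inverse of the pop loop at the end of DecodeFixedWidth: encoding appends a base-CL digit at the low end, so decoding emits characters in reverse. A small sketch with a hypothetical ten-character alphabet and a plain uint64_t accumulator:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>

static const char Charset[] = "abcdefghij"; // hypothetical 10-char alphabet
static const uint64_t CL = 10;

static void encodeChar(uint64_t &Num, char Ch) {
  const char *P = strchr(Charset, Ch);
  assert(P && "character not in charset");
  Num = Num * CL + (uint64_t)(P - Charset); // Num = Num * CL + index
}

int main() {
  uint64_t Num = 0;
  for (char Ch : std::string("cab"))
    encodeChar(Num, Ch);
  std::string Out;
  while (Num) { // pop digits back out, least significant first
    Out += Charset[Num % CL];
    Num /= CL;
  }
  assert(Out == "bac"); // the original string, reversed
}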
Example #8
ConstantRange
ConstantRange::binaryAnd(const ConstantRange &Other) const {
  if (isEmptySet() || Other.isEmptySet())
    return ConstantRange(getBitWidth(), /*isFullSet=*/false);

  // TODO: replace this with something less conservative

  APInt umin = APIntOps::umin(Other.getUnsignedMax(), getUnsignedMax());
  if (umin.isAllOnesValue())
    return ConstantRange(getBitWidth(), /*isFullSet=*/true);
  return ConstantRange(APInt::getNullValue(getBitWidth()), umin + 1);
}
Example #9
static SILInstruction *constantFoldIntrinsic(BuiltinInst *BI,
                                             llvm::Intrinsic::ID ID,
                                             Optional<bool> &ResultsInError) {
  switch (ID) {
  default: break;
  case llvm::Intrinsic::expect: {
    // An expect of an integral constant is the constant itself.
    assert(BI->getArguments().size() == 2 && "Expect should have 2 args.");
    auto *Op1 = dyn_cast<IntegerLiteralInst>(BI->getArguments()[0]);
    if (!Op1)
      return nullptr;
    return Op1;
  }

  case llvm::Intrinsic::ctlz: {
    assert(BI->getArguments().size() == 2 && "Ctlz should have 2 args.");
    OperandValueArrayRef Args = BI->getArguments();

    // Fold for integer constant arguments.
    auto *LHS = dyn_cast<IntegerLiteralInst>(Args[0]);
    if (!LHS) {
      return nullptr;
    }
    APInt LHSI = LHS->getValue();
    unsigned LZ = 0;
    // Check corner-case of source == zero
    if (LHSI == 0) {
      auto *RHS = dyn_cast<IntegerLiteralInst>(Args[1]);
      if (!RHS || RHS->getValue() != 0) {
        // Undefined
        return nullptr;
      }
      LZ = LHSI.getBitWidth();
    } else {
      LZ = LHSI.countLeadingZeros();
    }
    APInt LZAsAPInt = APInt(LHSI.getBitWidth(), LZ);
    SILBuilderWithScope B(BI);
    return B.createIntegerLiteral(BI->getLoc(), LHS->getType(), LZAsAPInt);
  }

  case llvm::Intrinsic::sadd_with_overflow:
  case llvm::Intrinsic::uadd_with_overflow:
  case llvm::Intrinsic::ssub_with_overflow:
  case llvm::Intrinsic::usub_with_overflow:
  case llvm::Intrinsic::smul_with_overflow:
  case llvm::Intrinsic::umul_with_overflow:
    return constantFoldBinaryWithOverflow(BI, ID,
                                          /* ReportOverflow */ false,
                                          ResultsInError);
  }
  return nullptr;
}
Example #10
ConstantRange
ConstantRange::binaryOr(const ConstantRange &Other) const {
  if (isEmptySet() || Other.isEmptySet())
    return ConstantRange(getBitWidth(), /*isFullSet=*/false);

  // TODO: replace this with something less conservative

  APInt umax = APIntOps::umax(getUnsignedMin(), Other.getUnsignedMin());
  if (umax.isMinValue())
    return ConstantRange(getBitWidth(), /*isFullSet=*/true);
  return ConstantRange(umax, APInt::getNullValue(getBitWidth()));
}
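Both conservative bounds follow from two bitwise facts: x & y never exceeds the smaller operand, and x | y is never below the larger one. A quick exhaustive check over 4-bit values (an illustration, not part of the LLVM sources):

#include <cassert>

int main() {
  for (unsigned x = 0; x < 16; x++)
    for (unsigned y = 0; y < 16; y++) {
      assert((x & y) <= (x < y ? x : y)); // AND result <= umin(x, y)
      assert((x | y) >= (x > y ? x : y)); // OR result >= umax(x, y)
    }
}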
Example #11
/// signExtend - Return a new range in the specified integer type, which must
/// be strictly larger than the current type.  The returned range will
/// correspond to the possible range of values as if the source range had been
/// sign extended.
ConstantRange ConstantRange::signExtend(uint32_t DstTySize) const {
  unsigned SrcTySize = getBitWidth();
  assert(SrcTySize < DstTySize && "Not a value extension");
  if (isFullSet()) {
    return ConstantRange(APInt::getHighBitsSet(DstTySize,DstTySize-SrcTySize+1),
                         APInt::getLowBitsSet(DstTySize, SrcTySize-1) + 1);
  }

  APInt L = Lower; L.sext(DstTySize);
  APInt U = Upper; U.sext(DstTySize);
  return ConstantRange(L, U);
}
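For the full-set case the getHighBitsSet/getLowBitsSet pair describes exactly the signed range of the source type embedded in the wider type. Sign-extending a full i8 set to i16, for instance, yields [-128, 127], i.e. the wrapped unsigned range [0xFF80, 0x0080); a small exhaustive check with native integers:

#include <cassert>
#include <cstdint>

int main() {
  // getHighBitsSet(16, 16-8+1) == 0xFF80; getLowBitsSet(16, 8-1) + 1 == 0x0080.
  for (unsigned v = 0; v < 256; v++) {
    uint16_t Ext = (uint16_t)(int16_t)(int8_t)v; // sign-extend i8 -> i16
    assert(Ext >= 0xFF80 || Ext <= 0x007F);      // inside [0xFF80, 0x0080)
  }
}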
Example #12
static void EmitAPInt(SmallVectorImpl<uint64_t> &Vals,
                      unsigned &Code, unsigned &AbbrevToUse, const APInt &Val) {
  if (Val.getBitWidth() <= 64) {
    uint64_t V = Val.getSExtValue();
    emitSignedInt64(Vals, V);
    Code = naclbitc::CST_CODE_INTEGER;
    AbbrevToUse =
        Val == 0 ? CONSTANTS_INTEGER_ZERO_ABBREV : CONSTANTS_INTEGER_ABBREV;
  } else {
    report_fatal_error("Wide integers are not supported");
  }
}
Example #13
void MBlazeMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);

    MCOperand MCOp;
    switch (MO.getType()) {
    default: llvm_unreachable("unknown operand type");
    case MachineOperand::MO_Register:
      // Ignore all implicit register operands.
      if (MO.isImplicit()) continue;
      MCOp = MCOperand::CreateReg(MO.getReg());
      break;
    case MachineOperand::MO_Immediate:
      MCOp = MCOperand::CreateImm(MO.getImm());
      break;
    case MachineOperand::MO_MachineBasicBlock:
      MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
                         MO.getMBB()->getSymbol(), Ctx));
      break;
    case MachineOperand::MO_GlobalAddress:
      MCOp = LowerSymbolOperand(MO, GetGlobalAddressSymbol(MO));
      break;
    case MachineOperand::MO_ExternalSymbol:
      MCOp = LowerSymbolOperand(MO, GetExternalSymbolSymbol(MO));
      break;
    case MachineOperand::MO_JumpTableIndex:
      MCOp = LowerSymbolOperand(MO, GetJumpTableSymbol(MO));
      break;
    case MachineOperand::MO_ConstantPoolIndex:
      MCOp = LowerSymbolOperand(MO, GetConstantPoolIndexSymbol(MO));
      break;
    case MachineOperand::MO_BlockAddress:
      MCOp = LowerSymbolOperand(MO, GetBlockAddressSymbol(MO));
      break;
    case MachineOperand::MO_FPImmediate: {
      bool ignored;
      APFloat FVal = MO.getFPImm()->getValueAPF();
      FVal.convert(APFloat::IEEEsingle, APFloat::rmTowardZero, &ignored);

      APInt IVal = FVal.bitcastToAPInt();
      uint64_t Val = *IVal.getRawData();
      MCOp = MCOperand::CreateImm(Val);
      break;
    }
    case MachineOperand::MO_RegisterMask:
      continue;
    }

    OutMI.addOperand(MCOp);
  }
}
Example #14
/// \brief True if C1 is a multiple of C2. Quotient contains C1/C2.
static bool IsMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
                       bool IsSigned) {
  assert(C1.getBitWidth() == C2.getBitWidth() &&
         "Inconsistent width of constants!");

  APInt Remainder(C1.getBitWidth(), /*Val=*/0ULL, IsSigned);
  if (IsSigned)
    APInt::sdivrem(C1, C2, Quotient, Remainder);
  else
    APInt::udivrem(C1, C2, Quotient, Remainder);

  return Remainder.isMinValue();
}
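The divisibility test is just "remainder of C1/C2 is zero", with the quotient produced as a byproduct. A plain-integer sketch of the same check (illustrative values):

#include <cassert>
#include <cstdint>

int main() {
  int64_t C1 = -12, C2 = 4;
  int64_t Quotient = C1 / C2, Remainder = C1 % C2;
  assert(Remainder == 0 && Quotient == -3); // -12 is a signed multiple of 4
}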
Example #15
unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm.getBitWidth() <= 64 &&
      (isInt<32>(Imm.getSExtValue()) || isUInt<32>(Imm.getZExtValue())))
    return TCC_Basic;
  return 2 * TCC_Basic;
}
Example #16
/// Return true if it is OK to use SIToFPInst for an induction variable
/// with given initial and exit values.
static bool useSIToFPInst(ConstantFP &InitV, ConstantFP &ExitV,
                          uint64_t intIV, uint64_t intEV) {

  if (InitV.getValueAPF().isNegative() || ExitV.getValueAPF().isNegative())
    return true;

  // If the iteration range can be handled by SIToFPInst then use it.
  APInt Max = APInt::getSignedMaxValue(32);
  if (Max.getZExtValue() > static_cast<uint64_t>(abs64(intEV - intIV)))
    return true;

  return false;
}
Example #17
bool X86MCInstrAnalysis::clearsSuperRegisters(const MCRegisterInfo &MRI,
                                              const MCInst &Inst,
                                              APInt &Mask) const {
  const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
  unsigned NumDefs = Desc.getNumDefs();
  unsigned NumImplicitDefs = Desc.getNumImplicitDefs();
  assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
         "Unexpected number of bits in the mask!");

  bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
  bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
  bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;

  const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
  const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
  const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);

  auto ClearsSuperReg = [=](unsigned RegID) {
    // On X86-64, a general purpose integer register is viewed as a 64-bit
    // register internal to the processor.
    // An update to the lower 32 bits of a 64 bit integer register is
    // architecturally defined to zero extend the upper 32 bits.
    if (GR32RC.contains(RegID))
      return true;

    // Early exit if this instruction has no vex/evex/xop prefix.
    if (!HasEVEX && !HasVEX && !HasXOP)
      return false;

    // All VEX and EVEX encoded instructions are defined to zero the high bits
    // of the destination register up to VLMAX (i.e. the maximum vector register
    // width pertaining to the instruction).
    // We assume the same behavior for XOP instructions too.
    return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
  };

  Mask.clearAllBits();
  for (unsigned I = 0, E = NumDefs; I < E; ++I) {
    const MCOperand &Op = Inst.getOperand(I);
    if (ClearsSuperReg(Op.getReg()))
      Mask.setBit(I);
  }

  for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
    const MCPhysReg Reg = Desc.getImplicitDefs()[I];
    if (ClearsSuperReg(Reg))
      Mask.setBit(NumDefs + I);
  }

  return Mask.getBoolValue();
}
Example #18
/// lowerSDIV - Given an SDiv expressing a divide by constant,
/// replace it by multiplying by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
bool lowerSDiv(Instruction *inst) const {
  ConstantInt *Op1 = dyn_cast<ConstantInt>(inst->getOperand(1));
  // Bail out unless we divide a 32-bit value by a non-power-of-2 constant.
  if (!Op1 || inst->getType()->getPrimitiveSizeInBits() != 32 ||
          Op1->getValue().isPowerOf2()) {
      return false;
  }

  BasicBlock::iterator ii(inst);
  Value *Op0 = inst->getOperand(0);
  APInt d = Op1->getValue();

  APInt::ms magics = Op1->getValue().magic();

  IntegerType * type64 = IntegerType::get(Mod->getContext(), 64);

  Instruction *sext = CastInst::CreateSExtOrBitCast(Op0, type64, "", inst);
  APInt m = APInt(64, magics.m.getSExtValue());
  Constant *magicNum = ConstantInt::get(type64, m);

  Instruction *magInst = BinaryOperator::CreateNSWMul(sext, magicNum, "", inst);

  APInt ap = APInt(64, 32);
  Constant *movHiConst = ConstantInt::get(type64, ap);
  Instruction *movHi = BinaryOperator::Create(Instruction::AShr, magInst, 
    movHiConst, "", inst);
  Instruction *trunc = CastInst::CreateTruncOrBitCast(movHi, inst->getType(),
    "", inst);
  if (d.isStrictlyPositive() && magics.m.isNegative()) {
    trunc = BinaryOperator::Create(Instruction::Add, trunc, Op0, "", inst);
  } else if (d.isNegative() && magics.m.isStrictlyPositive()) {
    trunc = BinaryOperator::Create(Instruction::Sub, trunc, Op0, "", inst);
  }
  if (magics.s > 0) {
    APInt apS = APInt(32, magics.s);
    Constant *magicShift = ConstantInt::get(inst->getType(), apS);
    trunc = BinaryOperator::Create(Instruction::AShr, trunc,
      magicShift, "", inst);
  }

  APInt ap31 = APInt(32, 31);
  Constant *thirtyOne = ConstantInt::get(inst->getType(), ap31);
  // get sign bit
  Instruction *sign = BinaryOperator::Create(Instruction::LShr, trunc,
    thirtyOne, "", inst);
  
  Instruction *result = BinaryOperator::Create(Instruction::Add, trunc, sign,
    "");
  ReplaceInstWithInst(inst->getParent()->getInstList(), ii, result);
  return true;
}
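A worked instance of the emitted sequence for d == 7 with native integers. The magic pair below (M = 0x92492493, s = 2) is the standard Hacker's Delight result that APInt::magic() computes for 7; since M is negative and d is positive, the dividend is added back before the final shifts, exactly as the code above does.

#include <cassert>
#include <cstdint>

int main() {
  const int32_t M = (int32_t)0x92492493; // magics.m for d == 7
  const int32_t s = 2;                   // magics.s for d == 7
  for (int32_t n : {-100, -7, -1, 0, 1, 6, 7, 100, 2147483647}) {
    int32_t q = (int32_t)(((int64_t)M * n) >> 32); // sext, mul, "movHi"
    q += n;                 // d > 0 && M < 0: add the dividend back
    q >>= s;                // AShr by magics.s
    q += (uint32_t)q >> 31; // add the sign bit to round toward zero
    assert(q == n / 7);
  }
}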
Example #19
void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
    GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());

  Type *I8PtrTy =
      Builder.getInt8PtrTy(Variadic->getType()->getPointerAddressSpace());
  Value *ResultPtr = Variadic->getOperand(0);
  if (ResultPtr->getType() != I8PtrTy)
    ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);

  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create an ugly GEP for each sequential index. We don't create GEPs for
  // structure indices, as they are accumulated in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL->getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ugly GEP with a single index for each index.
      ResultPtr =
          Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Idx, "uglygep");
    }
  }

  // Create a GEP with the constant offset index.
  if (AccumulativeByteOffset != 0) {
    Value *Offset = ConstantInt::get(IntPtrTy, AccumulativeByteOffset);
    ResultPtr =
        Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Offset, "uglygep");
  }
  if (ResultPtr->getType() != Variadic->getType())
    ResultPtr = Builder.CreateBitCast(ResultPtr, Variadic->getType());

  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}
Example #20
/// truncate - Return a new range in the specified integer type, which must be
/// strictly smaller than the current type.  The returned range will
/// correspond to the possible range of values as if the source range had been
/// truncated to the specified type.
ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
  assert(getBitWidth() > DstTySize && "Not a value truncation");
  if (isEmptySet())
    return ConstantRange(DstTySize, /*isFullSet=*/false);
  if (isFullSet())
    return ConstantRange(DstTySize, /*isFullSet=*/true);

  APInt MaxValue = APInt::getMaxValue(DstTySize).zext(getBitWidth());
  APInt MaxBitValue(getBitWidth(), 0);
  MaxBitValue.setBit(DstTySize);

  APInt LowerDiv(Lower), UpperDiv(Upper);
  ConstantRange Union(DstTySize, /*isFullSet=*/false);

  // Analyze wrapped sets in their two parts: [0, Upper) \/ [Lower, MaxValue]
  // We use the non-wrapped set code to analyze the [Lower, MaxValue) part, and
  // then we do the union with [MaxValue, Upper)
  if (isWrappedSet()) {
    // If Upper is at least MaxValue, it covers the whole truncated range.
    if (Upper.uge(MaxValue))
      return ConstantRange(DstTySize, /*isFullSet=*/true);

    Union = ConstantRange(APInt::getMaxValue(DstTySize),Upper.trunc(DstTySize));
    UpperDiv = APInt::getMaxValue(getBitWidth());

    // Union covers the MaxValue case, so return if the remaining range is just
    // MaxValue.
    if (LowerDiv == UpperDiv)
      return Union;
  }

  // Chop off the most significant bits that are past the destination bitwidth.
  if (LowerDiv.uge(MaxValue)) {
    APInt Div(getBitWidth(), 0);
    APInt::udivrem(LowerDiv, MaxBitValue, Div, LowerDiv);
    UpperDiv = UpperDiv - MaxBitValue * Div;
  }

  if (UpperDiv.ule(MaxValue))
    return ConstantRange(LowerDiv.trunc(DstTySize),
                         UpperDiv.trunc(DstTySize)).unionWith(Union);

  // The truncated value wraps around. Check if we can do better than the full
  // set.
  APInt UpperModulo = UpperDiv - MaxBitValue;
  if (UpperModulo.ult(LowerDiv))
    return ConstantRange(LowerDiv.trunc(DstTySize),
                         UpperModulo.trunc(DstTySize)).unionWith(Union);

  return ConstantRange(DstTySize, /*isFullSet=*/true);
}
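A concrete instance of the chopping logic: truncating the i16 range [250, 260) to i8 keeps 250..255 as-is and wraps 256..259 to 0..3, producing the wrapped 8-bit range [250, 4). A quick check with native integers:

#include <cassert>
#include <cstdint>

int main() {
  for (uint16_t v = 250; v < 260; v++) {
    uint8_t t = (uint8_t)v;    // truncate i16 -> i8
    assert(t >= 250 || t < 4); // inside the wrapped range [250, 4)
  }
}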
Example #21
ConstantRange
ConstantRange::makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
                                          const APInt &C, unsigned NoWrapKind) {
  typedef OverflowingBinaryOperator OBO;

  // Computes the intersection of CR0 and CR1.  It is different from
  // intersectWith in that the ConstantRange returned will only contain elements
  // in both CR0 and CR1 (i.e. SubsetIntersect(X, Y) is a *subset*, proper or
  // not, of both X and Y).
  auto SubsetIntersect =
      [](const ConstantRange &CR0, const ConstantRange &CR1) {
    return CR0.inverse().unionWith(CR1.inverse()).inverse();
  };

  assert(BinOp >= Instruction::BinaryOpsBegin &&
         BinOp < Instruction::BinaryOpsEnd && "Binary operators only!");

  assert((NoWrapKind == OBO::NoSignedWrap ||
          NoWrapKind == OBO::NoUnsignedWrap ||
          NoWrapKind == (OBO::NoUnsignedWrap | OBO::NoSignedWrap)) &&
         "NoWrapKind invalid!");

  unsigned BitWidth = C.getBitWidth();
  if (BinOp != Instruction::Add)
    // Conservative answer: empty set
    return ConstantRange(BitWidth, false);

  if (C.isMinValue())
    // Full set: nothing signed / unsigned wraps when added to 0.
    return ConstantRange(BitWidth);

  ConstantRange Result(BitWidth);

  if (NoWrapKind & OBO::NoUnsignedWrap)
    Result = SubsetIntersect(Result,
                             ConstantRange(APInt::getNullValue(BitWidth), -C));

  if (NoWrapKind & OBO::NoSignedWrap) {
    if (C.isStrictlyPositive())
      Result = SubsetIntersect(
          Result, ConstantRange(APInt::getSignedMinValue(BitWidth),
                                APInt::getSignedMinValue(BitWidth) - C));
    else
      Result = SubsetIntersect(
          Result, ConstantRange(APInt::getSignedMinValue(BitWidth) - C,
                                APInt::getSignedMinValue(BitWidth)));
  }

  return Result;
}
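For the NoUnsignedWrap half this computes [0, -C): exactly the x for which x + C does not wrap at the given bit width. A small exhaustive check at 8 bits (illustrative, using plain integers):

#include <cassert>
#include <cstdint>

int main() {
  const unsigned C = 10;
  for (unsigned x = 0; x < 256; x++) {
    bool NoWrap = x + C <= 255;           // exact unsigned-wrap condition
    bool InRegion = x < (uint8_t)(0 - C); // x < -C (mod 2^8), i.e. x < 246
    assert(InRegion == NoWrap);
  }
}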
Example #22
/// \brief Compute the size of the object pointed by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of allocas,
/// byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                         const TargetLibraryInfo *TLI, bool RoundToAlign) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), RoundToAlign);
  SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
  if (!Visitor.bothKnown(Data))
    return false;

  APInt ObjSize = Data.first, Offset = Data.second;
  // check for overflow
  if (Offset.slt(0) || ObjSize.ult(Offset))
    Size = 0;
  else
    Size = (ObjSize - Offset).getZExtValue();
  return true;
}
Example #23
void DwarfExpression::addUnsignedConstant(const APInt &Value) {
  unsigned Size = Value.getBitWidth();
  const uint64_t *Data = Value.getRawData();

  // Chop it up into 64-bit pieces, because that's the maximum that
  // addUnsignedConstant takes.
  unsigned Offset = 0;
  while (Offset < Size) {
    addUnsignedConstant(*Data++);
    if (Offset == 0 && Size <= 64)
      break;
    addOpPiece(std::min(Size-Offset, 64u), Offset);
    Offset += 64;
  }
}
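A sketch of the chopping loop with a printf standing in for the DWARF emitter, assuming a hypothetical 100-bit constant stored little-endian in 64-bit words the way getRawData() returns it:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t Words[2] = {0x1122334455667788ULL, 0xABCDEULL};
  unsigned Size = 100, Offset = 0, W = 0;
  while (Offset < Size) {
    // Each piece covers at most 64 bits, like addOpPiece above.
    unsigned PieceBits = std::min(Size - Offset, 64u);
    printf("piece at bit %u, %u bits: 0x%llx\n", Offset, PieceBits,
           (unsigned long long)Words[W++]);
    Offset += 64;
  }
}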
Example #24
void APNumericStorage::setIntValue(ASTContext &C, const APInt &Val) {
  if (hasAllocation())
    C.Deallocate(pVal);

  BitWidth = Val.getBitWidth();
  unsigned NumWords = Val.getNumWords();
  const uint64_t* Words = Val.getRawData();
  if (NumWords > 1) {
    pVal = new (C) uint64_t[NumWords];
    std::copy(Words, Words + NumWords, pVal);
  } else if (NumWords == 1)
    VAL = Words[0];
  else
    VAL = 0;
}
Example #25
/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  if (!TD)
    return false;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = TD->getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, TD->getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}
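The accumulated value is the ordinary byte offset a constant GEP denotes: field offsets for struct indices plus index times allocation size for sequential indices. A native-C++ check of that arithmetic (the struct and indices are illustrative):

#include <cassert>
#include <cstddef>
#include <cstdint>

struct S { int32_t A; int64_t B[4]; };

int main() {
  // "&P->B[2]" accumulates offsetof(S, B) for the struct index plus
  // 2 * sizeof(int64_t) for the array index.
  S Obj;
  char *Base = (char *)&Obj;
  char *Addr = (char *)&Obj.B[2];
  assert((size_t)(Addr - Base) == offsetof(S, B) + 2 * sizeof(int64_t));
}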
Example #26
/// subtract - Subtract the specified constant from the endpoints of this
/// constant range.
ConstantRange ConstantRange::subtract(const APInt &Val) const {
  assert(Val.getBitWidth() == getBitWidth() && "Wrong bit width");
  // If the set is empty or full, don't modify the endpoints.
  if (Lower == Upper) 
    return *this;
  return ConstantRange(Lower - Val, Upper - Val);
}
Example #27
void
AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
    const SDValue Op,
    APInt &KnownZero,
    APInt &KnownOne,
    const SelectionDAG &DAG,
    unsigned Depth) const
{
  APInt KnownZero2;
  APInt KnownOne2;
  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything
  switch (Op.getOpcode()) {
    default: break;
    case ISD::SELECT_CC:
             DAG.ComputeMaskedBits(
                 Op.getOperand(1),
                 KnownZero,
                 KnownOne,
                 Depth + 1
                 );
             DAG.ComputeMaskedBits(
                 Op.getOperand(0),
                 KnownZero2,
                 KnownOne2,
                 Depth + 1
                 );
             assert((KnownZero & KnownOne) == 0
                 && "Bits known to be one AND zero?");
             assert((KnownZero2 & KnownOne2) == 0
                 && "Bits known to be one AND zero?");
             // Only known if known in both the LHS and RHS
             KnownOne &= KnownOne2;
             KnownZero &= KnownZero2;
             break;
  }
}
Example #28
static bool isAligned(const Value *Base, APInt Offset, unsigned Align,
                      const DataLayout &DL) {
    APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));

    if (!BaseAlign) {
        Type *Ty = Base->getType()->getPointerElementType();
        if (!Ty->isSized())
            return false;
        BaseAlign = DL.getABITypeAlignment(Ty);
    }

    APInt Alignment(Offset.getBitWidth(), Align);

    assert(Alignment.isPowerOf2() && "must be a power of 2!");
    return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1));
}
Example #29
/// Check if a checked trunc instruction can overflow.
/// Returns false if it can be proven that no overflow can happen.
/// Otherwise returns true.
static bool checkTruncOverflow(BuiltinInst *BI) {
  SILValue Left, Right;
  if (match(BI, m_CheckedTrunc(m_And(m_SILValue(Left),
                               m_SILValue(Right))))) {
    // [US]ToSCheckedTrunc(And(x, mask)) cannot overflow
    // if mask has the following properties:
    // Only the first (N-1) bits are allowed to be set, where N is the width
    // of the trunc result type.
    //
    // [US]ToUCheckedTrunc(And(x, mask)) cannot overflow
    // if mask has the following properties:
    // Only the first N bits are allowed to be set, where N is the width
    // of the trunc result type.
    if (auto BITy = BI->getType().
                        getTupleElementType(0).
                        getAs<BuiltinIntegerType>()) {
      unsigned Width = BITy->getFixedWidth();

      switch (BI->getBuiltinInfo().ID) {
      case BuiltinValueKind::SToSCheckedTrunc:
      case BuiltinValueKind::UToSCheckedTrunc:
        // If it is a trunc to a signed value
        // then sign bit should not be set to avoid overflows.
        --Width;
        break;
      default:
        break;
      }

      if (auto *ILLeft = dyn_cast<IntegerLiteralInst>(Left)) {
        APInt Value = ILLeft->getValue();
        if (Value.isIntN(Width)) {
          return false;
        }
      }

      if (auto *ILRight = dyn_cast<IntegerLiteralInst>(Right)) {
        APInt Value = ILRight->getValue();
        if (Value.isIntN(Width)) {
          return false;
        }
      }
    }
  }
  return true;
}
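The mask reasoning in checkTruncOverflow at work: with N == 8, a mask whose set bits all lie in the low N-1 bits pins And(x, mask) into [0, 127], so a signed checked truncation to i8 cannot overflow. An exhaustive check over all i16 inputs:

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned x = 0; x <= 0xFFFF; x++) {
    uint16_t Masked = (uint16_t)x & 0x7F; // mask sets only the low 7 bits
    assert(Masked <= INT8_MAX);           // always representable as int8_t
  }
}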
Example #30
void
SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
                                               int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  const DataLayout &DL = Variadic->getModule()->getDataLayout();
  Type *IntPtrTy = DL.getIntPtrType(Variadic->getType());

  Value *ResultPtr = Builder.CreatePtrToInt(Variadic->getOperand(0), IntPtrTy);
  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create ADD/SHL/MUL arithmetic operations for each sequential index. We
  // don't create arithmetic for structure indices, as they are accumulated
  // in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL.getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ADD for each index.
      ResultPtr = Builder.CreateAdd(ResultPtr, Idx);
    }
  }

  // Create an ADD for the constant offset index.
  if (AccumulativeByteOffset != 0) {
    ResultPtr = Builder.CreateAdd(
        ResultPtr, ConstantInt::get(IntPtrTy, AccumulativeByteOffset));
  }

  ResultPtr = Builder.CreateIntToPtr(ResultPtr, Variadic->getType());
  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}