Example #1
/// subtract - Subtract the specified constant from the endpoints of this
/// constant range.
ConstantRange ConstantRange::subtract(const APInt &Val) const {
  assert(Val.getBitWidth() == getBitWidth() && "Wrong bit width");
  // If the set is empty or full, don't modify the endpoints.
  if (Lower == Upper) 
    return *this;
  return ConstantRange(Lower - Val, Upper - Val);
}
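A minimal usage sketch (hypothetical values; the ConstantRange header path varies across LLVM versions). APInt arithmetic is modular at the range's bit width, so the endpoints wrap correctly:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include <cassert>

using namespace llvm;

void subtractExample() {
  // Shift the half-open range [10, 20) down by 3, giving [7, 17).
  ConstantRange CR(APInt(32, 10), APInt(32, 20));
  ConstantRange Shifted = CR.subtract(APInt(32, 3));
  assert(Shifted.getLower() == 7 && Shifted.getUpper() == 17);
}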
void AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
    const SDValue Op, APInt &KnownZero, APInt &KnownOne,
    const SelectionDAG &DAG, unsigned Depth) const {
  APInt KnownZero2;
  APInt KnownOne2;
  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.
  switch (Op.getOpcode()) {
  default:
    break;
  case ISD::SELECT_CC:
    DAG.ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth + 1);
    // Pass the incremented depth here too, so the recursion stays bounded.
    DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth + 1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    // A bit is known only if it is known in both the LHS and the RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  }
}
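The final intersection is the heart of this hook: a bit is known for the select only if both arms agree on it. A standalone sketch of the same mask algebra with hypothetical masks:

#include "llvm/ADT/APInt.h"
#include <cassert>

using namespace llvm;

void knownBitsIntersect() {
  APInt ZeroL(8, 0xF0), OneL(8, 0x01); // arm 1: high nibble zero, bit 0 one
  APInt ZeroR(8, 0xF1), OneR(8, 0x00); // arm 2: bit 0 known zero instead
  APInt Zero = ZeroL & ZeroR;          // 0xF0: only the common zeros survive
  APInt One  = OneL & OneR;            // 0x00: the arms disagree on bit 0
  assert((Zero & One) == 0 && "Bits known to be one AND zero?");
}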
Example #3
/// \brief Finds and combines constants that can be easily rematerialized with
/// an add from a common base constant.
void ConstantHoisting::FindBaseConstants() {
  // Sort the constants by value and type. This invalidates the mapping.
  std::sort(ConstantMap.begin(), ConstantMap.end(), ConstantMapLessThan);

  // Simple linear scan through the sorted constant map for viable merge
  // candidates.
  ConstantMapType::iterator MinValItr = ConstantMap.begin();
  for (ConstantMapType::iterator I = llvm::next(ConstantMap.begin()),
       E = ConstantMap.end(); I != E; ++I) {
    if (MinValItr->first->getType() == I->first->getType()) {
      // Check if the constant is in range of an add with immediate.
      APInt Diff = I->first->getValue() - MinValItr->first->getValue();
      if ((Diff.getBitWidth() <= 64) &&
          TTI->isLegalAddImmediate(Diff.getSExtValue()))
        continue;
    }
    // We now have either a different constant type or a constant that is no
    // longer in range of an add with immediate.
    FindAndMakeBaseConstant(MinValItr, I);
    // Start a new base constant search.
    MinValItr = I;
  }
  // Finalize the last base constant search.
  FindAndMakeBaseConstant(MinValItr, ConstantMap.end());
}
int SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // There is no cost model yet for operations on integers wider than 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    // Constants loaded via lgfi.
    if (isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llilf.
    if (isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llihf:
    if ((Imm.getZExtValue() & 0xffffffff) == 0)
      return TTI::TCC_Basic;

    return 2 * TTI::TCC_Basic;
  }

  return 4 * TTI::TCC_Basic;
}
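A sketch of how those materialization tiers classify a value (hypothetical helper; it assumes the immediate fits in 64 bits, which the guards above establish before calling getSExtValue/getZExtValue):

#include "llvm/ADT/APInt.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

// Number of instructions needed to materialize Imm, mirroring the tiers above.
unsigned numInsnsForImm(const APInt &Imm) {
  if (isInt<32>(Imm.getSExtValue()))          // lgfi
    return 1;
  if (isUInt<32>(Imm.getZExtValue()))         // llilf
    return 1;
  if ((Imm.getZExtValue() & 0xffffffff) == 0) // llihf
    return 1;
  return 2;                                   // needs a pair of instructions
}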
Example #5
/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  if (!DL)
    return false;

  unsigned IntPtrWidth = DL->getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL->getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL->getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}
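The scaling step on the last line is the subtle part: each index is sign extended (or truncated) to pointer width before a modular multiply, matching getelementptr semantics. Isolated as a hypothetical helper:

#include "llvm/ADT/APInt.h"

using namespace llvm;

// index * sizeof(element) at pointer width; overflow wraps silently, just as
// it does for the getelementptr arithmetic being modeled.
APInt scaledIndex(const APInt &Index, uint64_t ElemSize, unsigned PtrWidth) {
  APInt TypeSize(PtrWidth, ElemSize);
  return Index.sextOrTrunc(PtrWidth) * TypeSize;
}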
Example #6
static bool isAligned(const Value *Base, APInt Offset, unsigned Align,
                      const DataLayout &DL) {
    APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));

    if (!BaseAlign) {
        Type *Ty = Base->getType()->getPointerElementType();
        if (!Ty->isSized())
            return false;
        BaseAlign = DL.getABITypeAlignment(Ty);
    }

    APInt Alignment(Offset.getBitWidth(), Align);

    assert(Alignment.isPowerOf2() && "must be a power of 2!");
    return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1));
}
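The last line is the usual power-of-two trick: Alignment - 1 is a mask of the low bits, which must all be clear. As a standalone sketch (hypothetical helper, same power-of-two precondition):

#include "llvm/ADT/APInt.h"
#include <cassert>

using namespace llvm;

bool offsetIsAligned(const APInt &Offset, uint64_t Align) {
  APInt A(Offset.getBitWidth(), Align);
  assert(A.isPowerOf2() && "must be a power of 2!");
  // For a power of two, A - 1 masks exactly the bits below the alignment.
  return (Offset & (A - 1)) == 0;
}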
int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}
Example #8
/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with.  This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V) {
  LLVMContext &Context = V->getContext();
  
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;
  
  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable".  An important case is 0.0. 
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(Context));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(Context));
    // Don't handle long double formats, which have strange constraints.
  }
  
  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
      APInt Val = CI->getValue();
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth() / 2;
        // With the current APInt API, trunc returns the narrowed value
        // rather than modifying it in place.
        APInt Val2 = Val.lshr(NextWidth).trunc(NextWidth);
        Val = Val.trunc(NextWidth);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(Context, Val);
    }
  }
  
  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}
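The halving loop is the interesting part. Under the current APInt API, where trunc returns the narrowed value instead of mutating in place, it can be written as below (hypothetical helper; assumes the width is a power of two and at least 8):

#include "llvm/ADT/APInt.h"

using namespace llvm;

bool isSplatByte(APInt Val) {
  while (Val.getBitWidth() != 8) {
    unsigned Next = Val.getBitWidth() / 2;
    // A repeated byte means the two halves are equal at every level.
    APInt Hi = Val.lshr(Next).trunc(Next);
    Val = Val.trunc(Next);
    if (Val != Hi)
      return false; // e.g. rejects i16 0x1234
  }
  return true; // e.g. accepts i16 0xF0F0
}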
static std::string APIntToHexString(const APInt &AI) {
  unsigned Width = (AI.getBitWidth() / 8) * 2;
  std::string HexString = utohexstr(AI.getLimitedValue(), /*LowerCase=*/true);
  unsigned Size = HexString.size();
  assert(Width >= Size && "hex string is too large!");
  HexString.insert(HexString.begin(), Width - Size, '0');

  return HexString;
}
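For example (hypothetical call, assuming the helper above is in scope): a 32-bit value always yields eight hex digits, left-padded with zeros.

#include "llvm/ADT/APInt.h"
#include <cassert>

using namespace llvm;

void hexExample() {
  // 0xbeef needs only four digits; the insert pads it to the full width.
  assert(APIntToHexString(APInt(32, 0xbeef)) == "0000beef");
}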
Example #10
SymbolicValue SymbolicValue::getInteger(const APInt &value,
                                        ASTContext &astContext) {
  // In the common case, we can form an inline representation.
  unsigned numWords = value.getNumWords();
  if (numWords == 1)
    return getInteger(value.getRawData()[0], value.getBitWidth());

  // Copy the integers from the APInt into the bump pointer.
  auto *words = astContext.Allocate<uint64_t>(numWords).data();
  std::uninitialized_copy(value.getRawData(), value.getRawData() + numWords,
                          words);

  SymbolicValue result;
  result.representationKind = RK_Integer;
  result.value.integer = words;
  result.auxInfo.integerBitwidth = value.getBitWidth();
  return result;
}
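The inverse direction is a one-liner, which is why storing the raw words plus the bit width is enough. A sketch (hypothetical helper):

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"

using namespace llvm;

// Rebuild the APInt from the words and bit width saved above.
APInt rebuildInteger(const uint64_t *Words, unsigned NumWords,
                     unsigned BitWidth) {
  return APInt(BitWidth, ArrayRef<uint64_t>(Words, NumWords));
}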
Example #11
/// Adds an edge from V_from to V_to with weight value
void ABCD::InequalityGraph::addEdge(Value *V_to, Value *V_from,
                                    APInt value, bool upper) {
  assert(V_from->getType() == V_to->getType());
  assert(cast<IntegerType>(V_from->getType())->getBitWidth() ==
         value.getBitWidth());

  DenseMap<Value *, SmallPtrSet<Edge *, 16> >::iterator from;
  from = addNode(V_from);
  from->second.insert(new Edge(V_to, value, upper));
}
Example #12
llvm::Constant *irgen::emitConstantInt(IRGenModule &IGM,
                                       IntegerLiteralInst *ILI) {
  APInt value = ILI->getValue();
  BuiltinIntegerWidth width
    = ILI->getType().castTo<BuiltinIntegerType>()->getWidth();

  // The value may need truncation if its type had an abstract size.
  if (!width.isFixedWidth()) {
    assert(width.isPointerWidth() && "impossible width value");

    unsigned pointerWidth = IGM.getPointerSize().getValueInBits();
    assert(pointerWidth <= value.getBitWidth()
           && "lost precision at AST/SIL level?!");
    if (pointerWidth < value.getBitWidth())
      value = value.trunc(pointerWidth);
  }

  return llvm::ConstantInt::get(IGM.LLVMContext, value);
}
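The guard before the trunc matters: in some LLVM versions APInt::trunc asserts a strictly smaller target width. Isolated as a hypothetical helper:

#include "llvm/ADT/APInt.h"
#include <cassert>

using namespace llvm;

APInt toPointerWidth(const APInt &V, unsigned PtrWidth) {
  assert(PtrWidth <= V.getBitWidth() && "lost precision at AST/SIL level?!");
  // Only truncate when the width actually shrinks.
  return PtrWidth < V.getBitWidth() ? V.trunc(PtrWidth) : V;
}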
Example #13
/// Extract all of the characters from the number \p Num one by one and
/// insert them into the string builder \p SB.
static void DecodeFixedWidth(APInt &Num, std::string &SB) {
  uint64_t CL = Huffman::CharsetLength;
  assert(Num.getBitWidth() > 8 &&
         "Not enough bits for arithmetic on this alphabet");

  // Try to decode eight numbers at once. It is much faster to work with
  // local 64-bit numbers than with APInt. In this loop we try to extract
  // eight characters at once and process them using a local 64-bit number.
  // This code assumes the worst-case scenario where our alphabet is full
  // 8-bit ASCII. It is possible to improve this code by packing one or two
  // more characters into the 64-bit local variable.
  uint64_t CL8 = CL * CL * CL * CL * CL * CL * CL * CL;
  while (Num.ugt(CL8)) {
    unsigned BW = Num.getBitWidth();
    APInt C = APInt(BW, CL8);
    APInt Quotient(1, 0), Remainder(1, 0);
    APInt::udivrem(Num, C, Quotient, Remainder);

    // Try to reduce the bitwidth of the APInt after the division. This can
    // accelerate the division operation in future iterations because the
    // number becomes smaller (fewer bits) with each iteration. However,
    // we can't reduce the number to something too small because we still
    // need to be able to perform the "mod charset_length" operation.
    Num = Quotient.zextOrTrunc(std::max(Quotient.getActiveBits(), 64u));
    uint64_t Tail = Remainder.getZExtValue();
    for (int i = 0; i < 8; i++) {
      SB += Huffman::Charset[Tail % CL];
      Tail = Tail / CL;
    }
  }

  // Pop characters out of the APInt one by one.
  while (Num.getBoolValue()) {
    unsigned BW = Num.getBitWidth();

    APInt C = APInt(BW, CL);
    APInt Quotient(1, 0), Remainder(1, 0);
    APInt::udivrem(Num, C, Quotient, Remainder);
    Num = Quotient;
    SB += Huffman::Charset[Remainder.getZExtValue()];
  }
}
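udivrem, used by both loops, produces quotient and remainder from a single division, which is exactly the expensive step the eight-at-a-time batching amortizes. A small sketch with hypothetical values:

#include "llvm/ADT/APInt.h"
#include <cassert>

using namespace llvm;

void udivremExample() {
  APInt Num(128, 1000);
  APInt Den(128, 64);
  APInt Q(1, 0), R(1, 0); // resized by udivrem to match the operands
  APInt::udivrem(Num, Den, Q, R);
  assert(Q == 15 && R == 40); // 1000 == 15 * 64 + 40
}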
Example #14
unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                               Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default: return TCC_Free;
  case Instruction::GetElementPtr:
    if (Idx == 0)
      return 2 * TCC_Basic;
    return TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if ((Idx == ImmIdx) &&
      Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
    return TCC_Free;

  return X86TTI::getIntImmCost(Imm, Ty);
}
static void EmitAPInt(SmallVectorImpl<uint64_t> &Vals,
                      unsigned &Code, unsigned &AbbrevToUse, const APInt &Val) {
  if (Val.getBitWidth() <= 64) {
    uint64_t V = Val.getSExtValue();
    emitSignedInt64(Vals, V);
    Code = naclbitc::CST_CODE_INTEGER;
    AbbrevToUse =
        Val == 0 ? CONSTANTS_INTEGER_ZERO_ABBREV : CONSTANTS_INTEGER_ABBREV;
  } else {
    report_fatal_error("Wide integers are not supported");
  }
}
Example #16
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue()) {
    if (KnownDerefBytes.uge(Size))
      if (!CheckForNonNull || isKnownNonNullAt(V, CtxI, DT))
        return isAligned(V, Align, DL);
  }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    return Visited.insert(Base).second &&
           isDereferenceableAndAlignedPointer(Base, Align, Offset + Size, DL,
                                              CtxI, DT, Visited);
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(
        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  // If we don't know, assume the worst.
  return false;
}
unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm.getBitWidth() <= 64 &&
      (isInt<32>(Imm.getSExtValue()) || isUInt<32>(Imm.getZExtValue())))
    return TCC_Basic;
  else
    return 2 * TCC_Basic;
}
int AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}
Example #19
bool X86MCInstrAnalysis::clearsSuperRegisters(const MCRegisterInfo &MRI,
                                              const MCInst &Inst,
                                              APInt &Mask) const {
  const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
  unsigned NumDefs = Desc.getNumDefs();
  unsigned NumImplicitDefs = Desc.getNumImplicitDefs();
  assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
         "Unexpected number of bits in the mask!");

  bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
  bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
  bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;

  const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
  const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
  const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);

  auto ClearsSuperReg = [=](unsigned RegID) {
    // On X86-64, a general purpose integer register is viewed as a 64-bit
    // register internal to the processor.
    // An update to the lower 32 bits of a 64-bit integer register is
    // architecturally defined to zero extend the upper 32 bits.
    if (GR32RC.contains(RegID))
      return true;

    // Early exit if this instruction has no vex/evex/xop prefix.
    if (!HasEVEX && !HasVEX && !HasXOP)
      return false;

    // All VEX and EVEX encoded instructions are defined to zero the high bits
    // of the destination register up to VLMAX (i.e. the maximum vector register
    // width pertaining to the instruction).
    // We assume the same behavior for XOP instructions too.
    return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
  };

  Mask.clearAllBits();
  for (unsigned I = 0, E = NumDefs; I < E; ++I) {
    const MCOperand &Op = Inst.getOperand(I);
    if (ClearsSuperReg(Op.getReg()))
      Mask.setBit(I);
  }

  for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
    const MCPhysReg Reg = Desc.getImplicitDefs()[I];
    if (ClearsSuperReg(Reg))
      Mask.setBit(NumDefs + I);
  }

  return Mask.getBoolValue();
}
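The mask protocol is one bit per definition (explicit defs first, then implicit ones), set when that def zeroes its super-register; getBoolValue() then reports whether any def does. A sketch of the bookkeeping with hypothetical positions:

#include "llvm/ADT/APInt.h"
#include <cassert>

using namespace llvm;

void maskExample() {
  APInt Mask(4, 0); // four definitions
  Mask.setBit(0);   // def 0 clears its super-register
  Mask.setBit(2);   // so does def 2
  assert(Mask == 5 && Mask.getBoolValue()); // 0b0101: "some def clears"
}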
Example #20
ConstantRange
ConstantRange::makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
                                          const APInt &C, unsigned NoWrapKind) {
  typedef OverflowingBinaryOperator OBO;

  // Computes the intersection of CR0 and CR1.  It is different from
  // intersectWith in that the ConstantRange returned will only contain elements
  // in both CR0 and CR1 (i.e. SubsetIntersect(X, Y) is a *subset*, proper or
  // not, of both X and Y).
  auto SubsetIntersect =
      [](const ConstantRange &CR0, const ConstantRange &CR1) {
    return CR0.inverse().unionWith(CR1.inverse()).inverse();
  };

  assert(BinOp >= Instruction::BinaryOpsBegin &&
         BinOp < Instruction::BinaryOpsEnd && "Binary operators only!");

  assert((NoWrapKind == OBO::NoSignedWrap ||
          NoWrapKind == OBO::NoUnsignedWrap ||
          NoWrapKind == (OBO::NoUnsignedWrap | OBO::NoSignedWrap)) &&
         "NoWrapKind invalid!");

  unsigned BitWidth = C.getBitWidth();
  if (BinOp != Instruction::Add)
    // Conservative answer: empty set
    return ConstantRange(BitWidth, false);

  if (C.isMinValue())
    // Full set: nothing signed / unsigned wraps when added to 0.
    return ConstantRange(BitWidth);

  ConstantRange Result(BitWidth);

  if (NoWrapKind & OBO::NoUnsignedWrap)
    Result = SubsetIntersect(Result,
                             ConstantRange(APInt::getNullValue(BitWidth), -C));

  if (NoWrapKind & OBO::NoSignedWrap) {
    if (C.isStrictlyPositive())
      Result = SubsetIntersect(
          Result, ConstantRange(APInt::getSignedMinValue(BitWidth),
                                APInt::getSignedMinValue(BitWidth) - C));
    else
      Result = SubsetIntersect(
          Result, ConstantRange(APInt::getSignedMinValue(BitWidth) - C,
                                APInt::getSignedMinValue(BitWidth)));
  }

  return Result;
}
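For the unsigned case this yields [0, -C): x + C wraps unsigned exactly when x >= 2^BW - C. A sketch with hypothetical values (APInt::getNullValue is spelled getZero in newer LLVM):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include <cassert>

using namespace llvm;

void nuwRegionExample() {
  APInt C(8, 10);
  // -C is 2^8 - 10 = 246 in modular arithmetic, so the region is [0, 246).
  ConstantRange NUW(APInt::getNullValue(8), -C);
  assert(NUW.contains(APInt(8, 245)) && !NUW.contains(APInt(8, 246)));
}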
Example #21
void DwarfExpression::addUnsignedConstant(const APInt &Value) {
  unsigned Size = Value.getBitWidth();
  const uint64_t *Data = Value.getRawData();

  // Chop it up into 64-bit pieces, because that's the maximum that
  // addUnsignedConstant takes.
  unsigned Offset = 0;
  while (Offset < Size) {
    addUnsignedConstant(*Data++);
    if (Offset == 0 && Size <= 64)
      break;
    addOpPiece(std::min(Size-Offset, 64u), Offset);
    Offset += 64;
  }
}
Example #22
void APNumericStorage::setIntValue(ASTContext &C, const APInt &Val) {
  if (hasAllocation())
    C.Deallocate(pVal);

  BitWidth = Val.getBitWidth();
  unsigned NumWords = Val.getNumWords();
  const uint64_t* Words = Val.getRawData();
  if (NumWords > 1) {
    pVal = new (C) uint64_t[NumWords];
    std::copy(Words, Words + NumWords, pVal);
  } else if (NumWords == 1)
    VAL = Words[0];
  else
    VAL = 0;
}
Example #23
bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
  APInt KnownZero;
  APInt KnownOne;
  CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);

  assert(Op.getValueType() == MVT::i32);

  // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
  // i32.  These smaller types are legal to use with the i24 instructions.
  if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
       Op.getOpcode() == ISD::ANY_EXTEND ||
       ISD::isEXTLoad(Op.getNode())) {
    U24 = SimplifyI24(Op);
    return true;
  }
  return false;
}
Example #24
static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset,
        Type *Ty, const DataLayout &DL,
        const Instruction *CtxI,
        const DominatorTree *DT,
        const TargetLibraryInfo *TLI) {
    assert(Offset.isNonNegative() && "offset can't be negative");
    assert(Ty->isSized() && "must be sized");

    APInt DerefBytes(Offset.getBitWidth(), 0);
    bool CheckForNonNull = false;
    if (const Argument *A = dyn_cast<Argument>(BV)) {
        DerefBytes = A->getDereferenceableBytes();
        if (!DerefBytes.getBoolValue()) {
            DerefBytes = A->getDereferenceableOrNullBytes();
            CheckForNonNull = true;
        }
    } else if (auto CS = ImmutableCallSite(BV)) {
        DerefBytes = CS.getDereferenceableBytes(0);
        if (!DerefBytes.getBoolValue()) {
            DerefBytes = CS.getDereferenceableOrNullBytes(0);
            CheckForNonNull = true;
        }
    } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) {
        if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
            ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
            DerefBytes = CI->getLimitedValue();
        }
        if (!DerefBytes.getBoolValue()) {
            if (MDNode *MD =
                        LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
                ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
                DerefBytes = CI->getLimitedValue();
            }
            CheckForNonNull = true;
        }
    }

    if (DerefBytes.getBoolValue())
        if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
            if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI))
                return true;

    return false;
}
// Select constant vector splats whose value only has a consecutive sequence
// of right-most bits set (e.g. 0b00...0011...11).
//
// In addition to the requirements of selectVSplat(), this function returns
// true and sets Imm if:
// * The splat value is the same width as the elements of the vector
// * The splat value is a consecutive sequence of right-most bits.
//
// This function looks through ISD::BITCAST nodes.
// TODO: This might not be appropriate for big-endian MSA since BITCAST is
//       sometimes a shuffle in big-endian mode.
bool MipsSEDAGToDAGISel::selectVSplatMaskR(SDValue N, SDValue &Imm) const {
  APInt ImmValue;
  EVT EltTy = N->getValueType(0).getVectorElementType();

  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0);

  if (selectVSplat(N.getNode(), ImmValue) &&
      ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
    // Extract the run of set bits starting with bit zero, and test that the
    // result is the same as the original value.
    if (ImmValue == (ImmValue & ~(ImmValue + 1))) {
      Imm = CurDAG->getTargetConstant(ImmValue.countPopulation(), EltTy);
      return true;
    }
  }

  return false;
}
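The bit trick in the middle is worth spelling out: adding one to a run of right-most set bits clears the entire run, so V & ~(V + 1) == V holds exactly for such masks. As a sketch:

#include "llvm/ADT/APInt.h"

using namespace llvm;

bool isLowBitsMask(const APInt &V) {
  // 0b0111 + 1 = 0b1000, and 0b0111 & ~0b1000 == 0b0111: accepted.
  // 0b0101 + 1 = 0b0110, and 0b0101 & ~0b0110 == 0b0001: rejected.
  return V == (V & ~(V + 1));
}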
// Select constant vector splats whose value is a power of 2.
//
// In addition to the requirements of selectVSplat(), this function returns
// true and sets Imm if:
// * The splat value is the same width as the elements of the vector
// * The splat value is a power of two.
//
// This function looks through ISD::BITCAST nodes.
// TODO: This might not be appropriate for big-endian MSA since BITCAST is
//       sometimes a shuffle in big-endian mode.
bool MipsSEDAGToDAGISel::selectVSplatUimmPow2(SDValue N, SDValue &Imm) const {
  APInt ImmValue;
  EVT EltTy = N->getValueType(0).getVectorElementType();

  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0);

  if (selectVSplat(N.getNode(), ImmValue) &&
      ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
    int32_t Log2 = ImmValue.exactLogBase2();

    if (Log2 != -1) {
      Imm = CurDAG->getTargetConstant(Log2, EltTy);
      return true;
    }
  }

  return false;
}
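exactLogBase2 folds the power-of-two test and the logarithm into one call, returning -1 unless exactly one bit is set. For example (hypothetical values):

#include "llvm/ADT/APInt.h"
#include <cassert>

using namespace llvm;

void exactLogExample() {
  assert(APInt(16, 64).exactLogBase2() == 6);  // a single set bit
  assert(APInt(16, 66).exactLogBase2() == -1); // two set bits: not a power
}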
unsigned X86TTI::getIntImmCost(unsigned Opcode, const APInt &Imm,
                               Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (Opcode) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    if (Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TCC_Free;
    else
      return X86TTI::getIntImmCost(Imm, Ty);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    return X86TTI::getIntImmCost(Imm, Ty);
  }
  return TargetTransformInfo::getIntImmCost(Opcode, Imm, Ty);
}
// Select constant vector splats.
//
// In addition to the requirements of selectVSplat(), this function returns
// true and sets Imm if:
// * The splat value is the same width as the elements of the vector
// * The splat value fits in an integer with the specified signed-ness and
//   width.
//
// This function looks through ISD::BITCAST nodes.
// TODO: This might not be appropriate for big-endian MSA since BITCAST is
//       sometimes a shuffle in big-endian mode.
//
// It's worth noting that this function is not used as part of the selection
// of ldi.[bhwd] since it does not permit using the wrong-typed ldi.[bhwd]
// instruction to achieve the desired bit pattern. ldi.[bhwd] is selected in
// MipsSEDAGToDAGISel::selectNode.
bool MipsSEDAGToDAGISel::
selectVSplatCommon(SDValue N, SDValue &Imm, bool Signed,
                   unsigned ImmBitSize) const {
  APInt ImmValue;
  EVT EltTy = N->getValueType(0).getVectorElementType();

  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0);

  if (selectVSplat(N.getNode(), ImmValue) &&
      ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
    if (( Signed && ImmValue.isSignedIntN(ImmBitSize)) ||
        (!Signed && ImmValue.isIntN(ImmBitSize))) {
      Imm = CurDAG->getTargetConstant(ImmValue, EltTy);
      return true;
    }
  }

  return false;
}
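The signedness distinction comes down to isSignedIntN versus isIntN, which matters for negative splat values. A sketch with hypothetical values:

#include "llvm/ADT/APInt.h"
#include <cassert>

using namespace llvm;

void fitsExample() {
  APInt V(32, -5, /*isSigned=*/true);
  assert(V.isSignedIntN(5)); // -5 fits a signed 5-bit immediate [-16, 15]
  assert(!V.isIntN(5));      // its unsigned pattern 0xFFFFFFFB does not
}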
Example #29
/// addConstantValue - Add constant value entry in variable DIE.
bool CompileUnit::addConstantValue(DIE *Die, const ConstantInt *CI,
                                   bool Unsigned) {
  unsigned CIBitWidth = CI->getBitWidth();
  if (CIBitWidth <= 64) {
    unsigned form = 0;
    switch (CIBitWidth) {
    case 8: form = dwarf::DW_FORM_data1; break;
    case 16: form = dwarf::DW_FORM_data2; break;
    case 32: form = dwarf::DW_FORM_data4; break;
    case 64: form = dwarf::DW_FORM_data8; break;
    default: 
      form = Unsigned ? dwarf::DW_FORM_udata : dwarf::DW_FORM_sdata;
    }
    if (Unsigned)
      addUInt(Die, dwarf::DW_AT_const_value, form, CI->getZExtValue());
    else
      addSInt(Die, dwarf::DW_AT_const_value, form, CI->getSExtValue());
    return true;
  }

  DIEBlock *Block = new (DIEValueAllocator) DIEBlock();

  // Get the raw data form of the large APInt.
  const APInt Val = CI->getValue();
  const char *Ptr = (const char*)Val.getRawData();

  int NumBytes = Val.getBitWidth() / 8; // 8 bits per byte.
  bool LittleEndian = Asm->getTargetData().isLittleEndian();
  int Incr = (LittleEndian ? 1 : -1);
  int Start = (LittleEndian ? 0 : NumBytes - 1);
  int Stop = (LittleEndian ? NumBytes : -1);

  // Output the constant to DWARF one byte at a time.
  for (; Start != Stop; Start += Incr)
    addUInt(Block, 0, dwarf::DW_FORM_data1,
            (unsigned char)0xFF & Ptr[Start]);

  addBlock(Die, dwarf::DW_AT_const_value, 0, Block);
  return true;
}
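getRawData() exposes the value as host-endian 64-bit words, which is why the loop above only has to pick a direction. A sketch of reading one byte (hypothetical helper; valid for I < BitWidth/8 on a little-endian host):

#include "llvm/ADT/APInt.h"

using namespace llvm;

unsigned char byteAt(const APInt &Val, unsigned I) {
  const char *Ptr = reinterpret_cast<const char *>(Val.getRawData());
  return static_cast<unsigned char>(Ptr[I]);
}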
/// Emit a diagnostic for `poundAssert` builtins whose condition is
/// false or whose condition cannot be evaluated.
static void diagnosePoundAssert(const SILInstruction *I,
                                SILModule &M,
                                ConstExprEvaluator &constantEvaluator) {
  auto *builtinInst = dyn_cast<BuiltinInst>(I);
  if (!builtinInst ||
      builtinInst->getBuiltinKind() != BuiltinValueKind::PoundAssert)
    return;

  SmallVector<SymbolicValue, 1> values;
  constantEvaluator.computeConstantValues({builtinInst->getArguments()[0]},
                                          values);
  SymbolicValue value = values[0];
  if (!value.isConstant()) {
    diagnose(M.getASTContext(), I->getLoc().getSourceLoc(),
             diag::pound_assert_condition_not_constant);

    // If we have more specific information about what went wrong, emit
    // notes.
    if (value.getKind() == SymbolicValue::Unknown)
      value.emitUnknownDiagnosticNotes(builtinInst->getLoc());
    return;
  }
  assert(value.getKind() == SymbolicValue::Integer &&
         "sema prevents non-integer #assert condition");

  APInt intValue = value.getIntegerValue();
  assert(intValue.getBitWidth() == 1 &&
         "sema prevents non-int1 #assert condition");
  if (intValue.isNullValue()) {
    auto *message = cast<StringLiteralInst>(builtinInst->getArguments()[1]);
    StringRef messageValue = message->getValue();
    if (messageValue.empty())
      messageValue = "assertion failed";
    diagnose(M.getASTContext(), I->getLoc().getSourceLoc(),
             diag::pound_assert_failure, messageValue);
    return;
  }
}