/// truncate - Return a new range in the specified integer type, which must be
/// strictly smaller than the current type.  The returned range will
/// correspond to the possible range of values as if the source range had been
/// truncated to the specified type.
ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
  unsigned SrcTySize = getBitWidth();
  assert(SrcTySize > DstTySize && "Not a value truncation");
  APInt Size(APInt::getLowBitsSet(SrcTySize, DstTySize));
  if (isFullSet() || getSetSize().ugt(Size))
    return ConstantRange(DstTySize);

  APInt L = Lower; L.trunc(DstTySize);
  APInt U = Upper; U.trunc(DstTySize);
  return ConstantRange(L, U);
}
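The set-size guard above is the whole safety argument: once the source range contains more than 2^DstTySize values, every truncated bit pattern is reachable, so only the full set is sound. A minimal standalone check of that fact in plain C++ (deliberately avoiding the APInt API; the 16-to-8-bit widths and the [0, 300) range are illustrative assumptions):

#include <cstdint>
#include <iostream>
#include <set>

int main() {
  // Hypothetical source range [0, 300) in a 16-bit type: 300 > 2^8 values.
  std::set<uint8_t> seen;
  for (uint16_t v = 0; v < 300; ++v)
    seen.insert(static_cast<uint8_t>(v)); // truncate to the low 8 bits
  // Every 8-bit pattern occurs, so the truncated range must be the full set.
  std::cout << "distinct truncated values: " << seen.size() << "\n"; // 256
}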
APInt swift::constantFoldCast(APInt val, const BuiltinInfo &BI) {
  // Get the cast result.
  Type SrcTy = BI.Types[0];
  Type DestTy = BI.Types.size() == 2 ? BI.Types[1] : Type();
  uint32_t SrcBitWidth =
    SrcTy->castTo<BuiltinIntegerType>()->getGreatestWidth();
  uint32_t DestBitWidth =
    DestTy->castTo<BuiltinIntegerType>()->getGreatestWidth();

  if (SrcBitWidth == DestBitWidth)
    return val;

  switch (BI.ID) {
  default: llvm_unreachable("Invalid case.");
  case BuiltinValueKind::Trunc:
  case BuiltinValueKind::TruncOrBitCast:
    return val.trunc(DestBitWidth);
  case BuiltinValueKind::ZExt:
  case BuiltinValueKind::ZExtOrBitCast:
    return val.zext(DestBitWidth);
  case BuiltinValueKind::SExt:
  case BuiltinValueKind::SExtOrBitCast:
    return val.sext(DestBitWidth);
  }
}
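As a reminder of the semantics being folded, here is a plain-C++ sketch of what Trunc, ZExt, and SExt do to the same bits (standard integer conversions stand in for the APInt calls; the 16/8-bit widths are illustrative):

#include <cstdint>
#include <iostream>

int main() {
  uint16_t src = 0xFF80;                    // -128 when read as signed i16
  uint8_t  tr  = static_cast<uint8_t>(src); // Trunc: keep the low 8 bits -> 0x80
  uint16_t ze  = tr;                        // ZExt: high bits filled with 0 -> 0x0080
  uint16_t se  = static_cast<uint16_t>(     // SExt: high bits copy the sign bit
      static_cast<int16_t>(static_cast<int8_t>(tr)));  // -> 0xFF80
  std::cout << std::hex << ze << " " << se << "\n";    // 80 ff80
}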
/// truncate - Return a new range in the specified integer type, which must be
/// strictly smaller than the current type.  The returned range will
/// correspond to the possible range of values as if the source range had been
/// truncated to the specified type.
ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
  assert(getBitWidth() > DstTySize && "Not a value truncation");
  if (isEmptySet())
    return ConstantRange(DstTySize, /*isFullSet=*/false);
  if (isFullSet())
    return ConstantRange(DstTySize, /*isFullSet=*/true);

  APInt MaxValue = APInt::getMaxValue(DstTySize).zext(getBitWidth());
  APInt MaxBitValue(getBitWidth(), 0);
  MaxBitValue.setBit(DstTySize);

  APInt LowerDiv(Lower), UpperDiv(Upper);
  ConstantRange Union(DstTySize, /*isFullSet=*/false);

  // Analyze wrapped sets in their two parts: [0, Upper) \/ [Lower, MaxValue]
  // We use the non-wrapped set code to analyze the [Lower, MaxValue) part, and
  // then we do the union with [MaxValue, Upper)
  if (isWrappedSet()) {
    // If Upper is greater than MaxValue, it covers the whole truncated range.
    if (Upper.uge(MaxValue))
      return ConstantRange(DstTySize, /*isFullSet=*/true);

    Union = ConstantRange(APInt::getMaxValue(DstTySize),
                          Upper.trunc(DstTySize));
    UpperDiv = APInt::getMaxValue(getBitWidth());

    // Union covers the MaxValue case, so return if the remaining range is just
    // MaxValue.
    if (LowerDiv == UpperDiv)
      return Union;
  }

  // Chop off the most significant bits that are past the destination bitwidth.
  if (LowerDiv.uge(MaxValue)) {
    APInt Div(getBitWidth(), 0);
    APInt::udivrem(LowerDiv, MaxBitValue, Div, LowerDiv);
    UpperDiv = UpperDiv - MaxBitValue * Div;
  }

  if (UpperDiv.ule(MaxValue))
    return ConstantRange(LowerDiv.trunc(DstTySize),
                         UpperDiv.trunc(DstTySize)).unionWith(Union);

  // The truncated value wraps around. Check if we can do better than fullset.
  APInt UpperModulo = UpperDiv - MaxBitValue;
  if (UpperModulo.ult(LowerDiv))
    return ConstantRange(LowerDiv.trunc(DstTySize),
                         UpperModulo.trunc(DstTySize)).unionWith(Union);

  return ConstantRange(DstTySize, /*isFullSet=*/true);
}
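The wrapped-set branch above can be sanity-checked by brute force. A standalone plain-C++ check (the 16-bit wrapped range [0xFFFE, 0x0003) is an illustrative choice): truncating it to 8 bits should yield exactly the wrapped range [0xFE, 0x03), not the full set.

#include <cstdint>
#include <iostream>
#include <set>

int main() {
  std::set<uint8_t> seen;
  for (uint16_t v = 0xFFFE; v != 0x0003; ++v) // unsigned wrap through 0
    seen.insert(static_cast<uint8_t>(v));
  for (unsigned b : seen)
    std::cout << std::hex << b << " ";        // 0 1 2 fe ff
  std::cout << "\n";
}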
/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with.  This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V) {
  LLVMContext &Context = V->getContext();

  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable".  An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(Context));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(Context));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2 = Val.lshr(NextWidth);
        Val2.trunc(Val.getBitWidth()/2);
        Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(Context, Val);
    }
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}
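The recursive halving test reduces to: a value is a repeated byte iff, at every level, its top half equals its bottom half. A self-contained plain-C++ version over uint64_t (a sketch of the same idea, not the APInt code above):

#include <cstdint>
#include <iostream>

// Returns true and sets Byte if Val, of Width bits (a power of two, >= 8,
// <= 64), is a splat of a single byte.
static bool isBytewise(uint64_t Val, unsigned Width, uint8_t &Byte) {
  while (Width != 8) {
    unsigned Next = Width / 2;
    uint64_t Mask = (1ull << Next) - 1;   // Next <= 32, so the shift is safe
    uint64_t Lo = Val & Mask;
    uint64_t Hi = (Val >> Next) & Mask;
    if (Lo != Hi)
      return false;                       // halves differ: not a repeated byte
    Val = Lo;
    Width = Next;
  }
  Byte = static_cast<uint8_t>(Val);
  return true;
}

int main() {
  uint8_t B;
  std::cout << isBytewise(0xF0F0, 16, B)                // 1 (byte 0xF0)
            << " " << isBytewise(0x1234, 16, B) << "\n"; // 0
}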
llvm::Constant *irgen::emitConstantInt(IRGenModule &IGM,
                                       IntegerLiteralInst *ILI) {
  APInt value = ILI->getValue();
  BuiltinIntegerWidth width =
      ILI->getType().castTo<BuiltinIntegerType>()->getWidth();

  // The value may need truncation if its type had an abstract size.
  if (!width.isFixedWidth()) {
    assert(width.isPointerWidth() && "impossible width value");
    unsigned pointerWidth = IGM.getPointerSize().getValueInBits();
    assert(pointerWidth <= value.getBitWidth() &&
           "lost precision at AST/SIL level?!");
    if (pointerWidth < value.getBitWidth())
      value = value.trunc(pointerWidth);
  }

  return llvm::ConstantInt::get(IGM.LLVMContext, value);
}
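The interesting case is an abstract-width literal that carries more bits at the AST level than the target pointer holds; the assert guarantees the high bits are redundant, so the truncation is lossless. A trivial plain-C++ picture of that step (the 32-bit pointer width is a hypothetical target):

#include <cstdint>
#include <iostream>

int main() {
  uint64_t astValue = 0xDEADBEEF; // literal carried at a wider width, high bits zero
  uint32_t emitted = static_cast<uint32_t>(astValue); // trunc to pointer width
  std::cout << std::hex << emitted << "\n"; // deadbeef: no precision lost
}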
void BDCE::determineLiveOperandBits(const Instruction *UserI,
                                    const Instruction *I, unsigned OperandNo,
                                    const APInt &AOut, APInt &AB,
                                    APInt &KnownZero, APInt &KnownOne,
                                    APInt &KnownZero2, APInt &KnownOne2) {
  unsigned BitWidth = AB.getBitWidth();

  // We're called once per operand, but for some instructions, we need to
  // compute known bits of both operands in order to determine the live bits
  // of either (when both operands are instructions themselves). We don't,
  // however, want to do this twice, so we cache the result in APInts that
  // live in the caller. For the two-relevant-operands case, both operand
  // values are provided here.
  auto ComputeKnownBits =
      [&](unsigned BitWidth, const Value *V1, const Value *V2) {
        const DataLayout &DL = I->getModule()->getDataLayout();
        KnownZero = APInt(BitWidth, 0);
        KnownOne = APInt(BitWidth, 0);
        computeKnownBits(const_cast<Value *>(V1), KnownZero, KnownOne, DL, 0,
                         AC, UserI, DT);

        if (V2) {
          KnownZero2 = APInt(BitWidth, 0);
          KnownOne2 = APInt(BitWidth, 0);
          computeKnownBits(const_cast<Value *>(V2), KnownZero2, KnownOne2, DL,
                           0, AC, UserI, DT);
        }
      };

  switch (UserI->getOpcode()) {
  default: break;
  case Instruction::Call:
  case Instruction::Invoke:
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(UserI))
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap:
        // The alive bits of the input are the swapped alive bits of
        // the output.
        AB = AOut.byteSwap();
        break;
      case Intrinsic::ctlz:
        if (OperandNo == 0) {
          // We need some output bits, so we need all bits of the
          // input to the left of, and including, the leftmost bit
          // known to be one.
          ComputeKnownBits(BitWidth, I, nullptr);
          AB = APInt::getHighBitsSet(BitWidth,
                 std::min(BitWidth, KnownOne.countLeadingZeros()+1));
        }
        break;
      case Intrinsic::cttz:
        if (OperandNo == 0) {
          // We need some output bits, so we need all bits of the
          // input to the right of, and including, the rightmost bit
          // known to be one.
          ComputeKnownBits(BitWidth, I, nullptr);
          AB = APInt::getLowBitsSet(BitWidth,
                 std::min(BitWidth, KnownOne.countTrailingZeros()+1));
        }
        break;
      }
    break;
  case Instruction::Add:
  case Instruction::Sub:
    // Find the highest live output bit. We don't need any more input
    // bits than that (adds, and thus subtracts, ripple only to the
    // left).
    AB = APInt::getLowBitsSet(BitWidth, AOut.getActiveBits());
    break;
  case Instruction::Shl:
    if (OperandNo == 0)
      if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1))) {
        uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
        AB = AOut.lshr(ShiftAmt);

        // If the shift is nuw/nsw, then the high bits are not dead
        // (because we've promised that they *must* be zero).
        const ShlOperator *S = cast<ShlOperator>(UserI);
        if (S->hasNoSignedWrap())
          AB |= APInt::getHighBitsSet(BitWidth, ShiftAmt+1);
        else if (S->hasNoUnsignedWrap())
          AB |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
      }
    break;
  case Instruction::LShr:
    if (OperandNo == 0)
      if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1))) {
        uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
        AB = AOut.shl(ShiftAmt);

        // If the shift is exact, then the low bits are not dead
        // (they must be zero).
        if (cast<LShrOperator>(UserI)->isExact())
          AB |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
      }
    break;
  case Instruction::AShr:
    if (OperandNo == 0)
      if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1))) {
        uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
        AB = AOut.shl(ShiftAmt);
        // Because the high input bit is replicated into the
        // high-order bits of the result, if we need any of those
        // bits, then we must keep the highest input bit.
        if ((AOut & APInt::getHighBitsSet(BitWidth, ShiftAmt))
            .getBoolValue())
          AB.setBit(BitWidth-1);

        // If the shift is exact, then the low bits are not dead
        // (they must be zero).
        if (cast<AShrOperator>(UserI)->isExact())
          AB |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
      }
    break;
  case Instruction::And:
    AB = AOut;

    // For bits that are known zero, the corresponding bits in the
    // other operand are dead (unless they're both zero, in which
    // case they can't both be dead, so just mark the LHS bits as
    // dead).
    if (OperandNo == 0) {
      ComputeKnownBits(BitWidth, I, UserI->getOperand(1));
      AB &= ~KnownZero2;
    } else {
      if (!isa<Instruction>(UserI->getOperand(0)))
        ComputeKnownBits(BitWidth, UserI->getOperand(0), I);
      AB &= ~(KnownZero & ~KnownZero2);
    }
    break;
  case Instruction::Or:
    AB = AOut;

    // For bits that are known one, the corresponding bits in the
    // other operand are dead (unless they're both one, in which
    // case they can't both be dead, so just mark the LHS bits as
    // dead).
    if (OperandNo == 0) {
      ComputeKnownBits(BitWidth, I, UserI->getOperand(1));
      AB &= ~KnownOne2;
    } else {
      if (!isa<Instruction>(UserI->getOperand(0)))
        ComputeKnownBits(BitWidth, UserI->getOperand(0), I);
      AB &= ~(KnownOne & ~KnownOne2);
    }
    break;
  case Instruction::Xor:
  case Instruction::PHI:
    AB = AOut;
    break;
  case Instruction::Trunc:
    AB = AOut.zext(BitWidth);
    break;
  case Instruction::ZExt:
    AB = AOut.trunc(BitWidth);
    break;
  case Instruction::SExt:
    AB = AOut.trunc(BitWidth);
    // Because the high input bit is replicated into the
    // high-order bits of the result, if we need any of those
    // bits, then we must keep the highest input bit.
    if ((AOut & APInt::getHighBitsSet(AOut.getBitWidth(),
                                      AOut.getBitWidth() - BitWidth))
        .getBoolValue())
      AB.setBit(BitWidth-1);
    break;
  case Instruction::Select:
    if (OperandNo != 0)
      AB = AOut;
    break;
  }
}
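The shift rules above are easiest to see on a concrete mask. A plain-C++ spot check of the shl case (8-bit masks chosen for illustration): output bit i of shl x, 3 comes from input bit i-3, so the live input bits are the live output bits shifted right.

#include <cstdint>
#include <iostream>

int main() {
  uint8_t AOut = 0xF0;            // the caller only needs the top four bits
  unsigned ShiftAmt = 3;
  uint8_t AB = AOut >> ShiftAmt;  // live input bits for shl x, 3
  std::cout << std::hex << unsigned(AB) << "\n"; // 1e
}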
/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bit sets.  This code only analyzes bits in Mask, in order to short-circuit
/// processing.
/// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero.  If we don't change it to zero, other code could
/// optimize based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
                             APInt &KnownZero, APInt &KnownOne,
                             TargetData *TD, unsigned Depth) {
  const unsigned MaxDepth = 6;
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Mask.getBitWidth();
  assert((V->getType()->isInteger() || isa<PointerType>(V->getType())) &&
         "Not integer or pointer type!");
  assert((!TD || TD->getTypeSizeInBits(V->getType()) == BitWidth) &&
         (!isa<IntegerType>(V->getType()) ||
          V->getType()->getPrimitiveSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "V, Mask, KnownOne and KnownZero should have same BitWidth");

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // We know all of the bits for a constant!
    KnownOne = CI->getValue() & Mask;
    KnownZero = ~KnownOne & Mask;
    return;
  }
  // Null is all-zeros.
  if (isa<ConstantPointerNull>(V)) {
    KnownOne.clear();
    KnownZero = Mask;
    return;
  }
  // The address of an aligned GlobalValue has trailing zeros.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    unsigned Align = GV->getAlignment();
    if (Align == 0 && TD && GV->getType()->getElementType()->isSized())
      Align = TD->getPrefTypeAlignment(GV->getType()->getElementType());
    if (Align > 0)
      KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
                                              CountTrailingZeros_32(Align));
    else
      KnownZero.clear();
    KnownOne.clear();
    return;
  }

  KnownZero.clear(); KnownOne.clear();   // Start out not knowing anything.

  if (Depth == MaxDepth || Mask == 0)
    return;  // Limit search depth.

  User *I = dyn_cast<User>(V);
  if (!I) return;

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (getOpcode(I)) {
  default: break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD,
                      Depth+1);
    APInt Mask2(Mask & ~KnownZero);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    return;
  }
  case Instruction::Or: {
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD,
                      Depth+1);
    APInt Mask2(Mask & ~KnownOne);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    return;
  }
  case Instruction::Xor: {
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD,
                      Depth+1);
    ComputeMaskedBits(I->getOperand(0), Mask, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case Instruction::Mul: {
    APInt Mask2 = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero, KnownOne, TD,
                      Depth+1);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
    KnownOne.clear();
    unsigned TrailZ = KnownZero.countTrailingOnes() +
                      KnownZero2.countTrailingOnes();
    unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                              KnownZero2.countLeadingOnes(),
                              BitWidth) - BitWidth;

    TrailZ = std::min(TrailZ, BitWidth);
    LeadZ = std::min(LeadZ, BitWidth);
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                APInt::getHighBitsSet(BitWidth, LeadZ);
    KnownZero &= Mask;
    return;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(0), AllOnes, KnownZero2, KnownOne2, TD,
                      Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clear();
    KnownZero2.clear();
    ComputeMaskedBits(I->getOperand(1), AllOnes, KnownZero2, KnownOne2, TD,
                      Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
    return;
  }
  case Instruction::Select:
    ComputeMaskedBits(I->getOperand(2), Mask, KnownZero, KnownOne, TD,
                      Depth+1);
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    return; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // We can't handle these if we don't know the pointer size.
    if (!TD) return;
    // FALL THROUGH and handle them the same as zext/trunc.
  case Instruction::ZExt:
  case Instruction::Trunc: {
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    const Type *SrcTy = I->getOperand(0)->getType();
    unsigned SrcBitWidth = TD ?
      TD->getTypeSizeInBits(SrcTy) :
      SrcTy->getPrimitiveSizeInBits();
    APInt MaskIn(Mask);
    MaskIn.zextOrTrunc(SrcBitWidth);
    KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne.zextOrTrunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
                      Depth+1);
    KnownZero.zextOrTrunc(BitWidth);
    KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::BitCast: {
    const Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isInteger() || isa<PointerType>(SrcTy)) {
      ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, TD,
                        Depth+1);
      return;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    const IntegerType *SrcTy = cast<IntegerType>(I->getOperand(0)->getType());
    unsigned SrcBitWidth = SrcTy->getBitWidth();

    APInt MaskIn(Mask);
    MaskIn.trunc(SrcBitWidth);
    KnownZero.trunc(SrcBitWidth);
    KnownOne.trunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero.zext(BitWidth);
    KnownOne.zext(BitWidth);

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero[SrcBitWidth-1])             // Input sign bit known zero
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    else if (KnownOne[SrcBitWidth-1])         // Input sign bit known set
      KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::Shl:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
      APInt Mask2(Mask.lshr(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= ShiftAmt;
      KnownOne  <<= ShiftAmt;
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
      return;
    }
    break;
  case Instruction::LShr:
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Unsigned shift right.
      APInt Mask2(Mask.shl(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);

      // high bits known zero.
      KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
      return;
    }
    break;
  case Instruction::AShr:
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Signed shift right.
      APInt Mask2(Mask.shl(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);

      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      if (KnownZero[BitWidth-ShiftAmt-1])      // New bits are known zero.
        KnownZero |= HighBits;
      else if (KnownOne[BitWidth-ShiftAmt-1])  // New bits are known one.
        KnownOne |= HighBits;
      return;
    }
    break;
  case Instruction::Sub: {
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(I->getOperand(0))) {
      // We know that the top bits of C-X are clear if X contains less bits
      // than C (i.e. no wrap-around can happen).  For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (!CLHS->getValue().isNegative()) {
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        ComputeMaskedBits(I->getOperand(1), MaskV, KnownZero2, KnownOne2,
                          TD, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
        }
      }
    }
  }
  // fall through
  case Instruction::Add: {
    // If one of the operands has trailing zeros, then the bits that the
    // other operand has in those bit positions will be preserved in the
    // result. For an add, this works with either operand. For a subtract,
    // this only works if the known zeros are in the right operand.
    APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
    APInt Mask2 = APInt::getLowBitsSet(BitWidth,
                                       BitWidth - Mask.countLeadingZeros());
    ComputeMaskedBits(I->getOperand(0), Mask2, LHSKnownZero, LHSKnownOne, TD,
                      Depth+1);
    assert((LHSKnownZero & LHSKnownOne) == 0 &&
           "Bits known to be one AND zero?");
    unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();

    ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();

    // Determine which operand has more trailing zeros, and use that
    // many bits from the other operand.
    if (LHSKnownZeroOut > RHSKnownZeroOut) {
      if (getOpcode(I) == Instruction::Add) {
        APInt Mask = APInt::getLowBitsSet(BitWidth, LHSKnownZeroOut);
        KnownZero |= KnownZero2 & Mask;
        KnownOne  |= KnownOne2 & Mask;
      } else {
        // If the known zeros are in the left operand for a subtract,
        // fall back to the minimum known zeros in both operands.
        KnownZero |= APInt::getLowBitsSet(BitWidth,
                                          std::min(LHSKnownZeroOut,
                                                   RHSKnownZeroOut));
      }
    } else if (RHSKnownZeroOut >= LHSKnownZeroOut) {
      APInt Mask = APInt::getLowBitsSet(BitWidth, RHSKnownZeroOut);
      KnownZero |= LHSKnownZero & Mask;
      KnownOne  |= LHSKnownOne & Mask;
    }
    return;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2() || (-RA).isPowerOf2()) {
        APInt LowBits = RA.isStrictlyPositive() ? (RA - 1) : ~RA;
        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
        ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                          Depth+1);

        // If the sign bit of the first operand is zero, the sign bit of
        // the result is zero. If the first operand has no one bits below
        // the second operand's single 1 bit, its sign will be zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero2 |= ~LowBits;

        KnownZero |= KnownZero2 & Mask;

        assert((KnownZero & KnownOne) == 0 &&
               "Bits known to be one AND zero?");
      }
    }
    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        APInt Mask2 = LowBits & Mask;
        KnownZero |= ~LowBits & Mask;
        ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                          Depth+1);
        assert((KnownZero & KnownOne) == 0 &&
               "Bits known to be one AND zero?");
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(0), AllOnes, KnownZero, KnownOne,
                      TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), AllOnes, KnownZero2, KnownOne2,
                      TD, Depth+1);

    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clear();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
    break;
  }

  case Instruction::Alloca:
  case Instruction::Malloc: {
    AllocationInst *AI = cast<AllocationInst>(V);
    unsigned Align = AI->getAlignment();
    if (Align == 0 && TD) {
      if (isa<AllocaInst>(AI))
        Align = TD->getABITypeAlignment(AI->getType()->getElementType());
      else if (isa<MallocInst>(AI)) {
        // Malloc returns maximally aligned memory.
        Align = TD->getABITypeAlignment(AI->getType()->getElementType());
        Align = std::max(Align,
                         (unsigned)TD->getABITypeAlignment(Type::DoubleTy));
        Align = std::max(Align,
                         (unsigned)TD->getABITypeAlignment(Type::Int64Ty));
      }
    }

    if (Align > 0)
      KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
                                              CountTrailingZeros_32(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    APInt LocalMask = APInt::getAllOnesValue(BitWidth);
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    ComputeMaskedBits(I->getOperand(0), LocalMask,
                      LocalKnownZero, LocalKnownOne, TD, Depth+1);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        // Handle struct member offset arithmetic.
        if (!TD) return;
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min(TrailZ, CountTrailingZeros_64(Offset));
      } else {
        // Handle array index arithmetic.
        const Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) return;
        unsigned GEPOpiBits = Index->getType()->getPrimitiveSizeInBits();
        uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
        LocalMask = APInt::getAllOnesValue(GEPOpiBits);
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        ComputeMaskedBits(Index, LocalMask,
                          LocalKnownZero, LocalKnownOne, TD, Depth+1);
        TrailZ = std::min(TrailZ,
                          unsigned(CountTrailingZeros_64(TypeSize) +
                                   LocalKnownZero.countTrailingOnes()));
      }
    }

    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) & Mask;
    break;
  }
  case Instruction::PHI: {
    PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        User *LU = dyn_cast<User>(L);
        if (!LU)
          continue;
        unsigned Opcode = getOpcode(LU);
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          APInt Mask2 = APInt::getAllOnesValue(BitWidth);
          ComputeMaskedBits(R, Mask2, KnownZero2, KnownOne2, TD, Depth+1);
          Mask2 = APInt::getLowBitsSet(BitWidth,
                                       KnownZero2.countTrailingOnes());

          // We need to take the minimum number of known bits
          APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
          ComputeMaskedBits(L, Mask2, KnownZero3, KnownOne3, TD, Depth+1);

          KnownZero = Mask &
                      APInt::getLowBitsSet(BitWidth,
                        std::min(KnownZero2.countTrailingOnes(),
                                 KnownZero3.countTrailingOnes()));
          break;
        }
      }
    }

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getAllOnesValue(BitWidth);
      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
        // Skip direct self references.
        if (P->getIncomingValue(i) == P) continue;

        KnownZero2 = APInt(BitWidth, 0);
        KnownOne2 = APInt(BitWidth, 0);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        ComputeMaskedBits(P->getIncomingValue(i), KnownZero | KnownOne,
                          KnownZero2, KnownOne2, TD, MaxDepth-1);
        KnownZero &= KnownZero2;
        KnownOne &= KnownOne2;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!KnownZero && !KnownOne)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::ctpop:
      case Intrinsic::ctlz:
      case Intrinsic::cttz: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      }
    }
    break;
  }
}
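For intuition, the And rule at the top of the switch can be replayed by hand with fixed masks. A plain-C++ sketch (8-bit masks and arbitrary illustrative knowledge): a result bit is known one only when known one in both operands, and known zero when known zero in either.

#include <cstdint>
#include <iostream>

int main() {
  // Per-operand knowledge as (KnownZero, KnownOne) masks over 8 bits.
  uint8_t KZa = 0xF0, KOa = 0x01; // a looks like 0000???1
  uint8_t KZb = 0x0C, KOb = 0x01; // b looks like ????00?1
  uint8_t KnownOne  = KOa & KOb;  // one in both operands
  uint8_t KnownZero = KZa | KZb;  // zero in either operand
  std::cout << std::hex << unsigned(KnownZero) << " "
            << unsigned(KnownOne) << "\n"; // fc 1
}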
SymbolicValue
ConstExprFunctionState::computeConstantValueBuiltin(BuiltinInst *inst) {
  const BuiltinInfo &builtin = inst->getBuiltinInfo();

  // Handle various cases in groups.
  auto unknownResult = [&]() -> SymbolicValue {
    return evaluator.getUnknown(SILValue(inst), UnknownReason::Default);
  };

  // Unary operations.
  if (inst->getNumOperands() == 1) {
    auto operand = getConstantValue(inst->getOperand(0));
    // TODO: Could add a "value used here" sort of diagnostic.
    if (!operand.isConstant())
      return operand;

    // TODO: SUCheckedConversion/USCheckedConversion

    // Implement support for s_to_s_checked_trunc_Int2048_Int64 and other
    // checking integer truncates.  These produce a tuple of the result value
    // and an overflow bit.
    //
    // TODO: We can/should diagnose statically detectable integer overflow
    // errors and subsume the ConstantFolding.cpp mandatory SIL pass.
    auto IntCheckedTruncFn = [&](bool srcSigned,
                                 bool dstSigned) -> SymbolicValue {
      if (operand.getKind() != SymbolicValue::Integer)
        return unknownResult();

      auto operandVal = operand.getIntegerValue();
      uint32_t srcBitWidth = operandVal.getBitWidth();
      auto dstBitWidth =
          builtin.Types[1]->castTo<BuiltinIntegerType>()->getGreatestWidth();

      APInt result = operandVal.trunc(dstBitWidth);

      // Compute the overflow by re-extending the value back to its source
      // and checking for loss of value.
      APInt reextended =
          dstSigned ? result.sext(srcBitWidth) : result.zext(srcBitWidth);
      bool overflowed = (operandVal != reextended);

      if (!srcSigned && dstSigned)
        overflowed |= result.isSignBitSet();

      if (overflowed)
        return evaluator.getUnknown(SILValue(inst), UnknownReason::Overflow);

      auto &astContext = evaluator.getASTContext();
      // Build the symbolic value result for our truncated value.
      return SymbolicValue::getAggregate(
          {SymbolicValue::getInteger(result, astContext),
           SymbolicValue::getInteger(APInt(1, overflowed), astContext)},
          astContext);
    };

    switch (builtin.ID) {
    default:
      break;
    case BuiltinValueKind::SToSCheckedTrunc:
      return IntCheckedTruncFn(true, true);
    case BuiltinValueKind::UToSCheckedTrunc:
      return IntCheckedTruncFn(false, true);
    case BuiltinValueKind::SToUCheckedTrunc:
      return IntCheckedTruncFn(true, false);
    case BuiltinValueKind::UToUCheckedTrunc:
      return IntCheckedTruncFn(false, false);

    case BuiltinValueKind::Trunc:
    case BuiltinValueKind::TruncOrBitCast:
    case BuiltinValueKind::ZExt:
    case BuiltinValueKind::ZExtOrBitCast:
    case BuiltinValueKind::SExt:
    case BuiltinValueKind::SExtOrBitCast: {
      if (operand.getKind() != SymbolicValue::Integer)
        return unknownResult();

      unsigned destBitWidth =
          inst->getType().castTo<BuiltinIntegerType>()->getGreatestWidth();

      APInt result = operand.getIntegerValue();
      if (result.getBitWidth() != destBitWidth) {
        switch (builtin.ID) {
        default:
          assert(0 && "Unknown case");
        case BuiltinValueKind::Trunc:
        case BuiltinValueKind::TruncOrBitCast:
          result = result.trunc(destBitWidth);
          break;
        case BuiltinValueKind::ZExt:
        case BuiltinValueKind::ZExtOrBitCast:
          result = result.zext(destBitWidth);
          break;
        case BuiltinValueKind::SExt:
        case BuiltinValueKind::SExtOrBitCast:
          result = result.sext(destBitWidth);
          break;
        }
      }
      return SymbolicValue::getInteger(result, evaluator.getASTContext());
    }
    }
  }

  // Binary operations.
  if (inst->getNumOperands() == 2) {
    auto operand0 = getConstantValue(inst->getOperand(0));
    auto operand1 = getConstantValue(inst->getOperand(1));
    if (!operand0.isConstant())
      return operand0;
    if (!operand1.isConstant())
      return operand1;

    auto constFoldIntCompare =
        [&](const std::function<bool(const APInt &, const APInt &)> &fn)
            -> SymbolicValue {
      if (operand0.getKind() != SymbolicValue::Integer ||
          operand1.getKind() != SymbolicValue::Integer)
        return unknownResult();

      auto result = fn(operand0.getIntegerValue(),
                       operand1.getIntegerValue());
      return SymbolicValue::getInteger(APInt(1, result),
                                       evaluator.getASTContext());
    };

#define REQUIRE_KIND(KIND)                                   \
  if (operand0.getKind() != SymbolicValue::KIND ||           \
      operand1.getKind() != SymbolicValue::KIND)             \
    return unknownResult();

    switch (builtin.ID) {
    default:
      break;
#define INT_BINOP(OPCODE, EXPR)                                          \
  case BuiltinValueKind::OPCODE: {                                       \
    REQUIRE_KIND(Integer)                                                \
    auto l = operand0.getIntegerValue(), r = operand1.getIntegerValue(); \
    return SymbolicValue::getInteger((EXPR), evaluator.getASTContext()); \
  }
      INT_BINOP(Add, l + r)
      INT_BINOP(And, l & r)
      INT_BINOP(AShr, l.ashr(r))
      INT_BINOP(LShr, l.lshr(r))
      INT_BINOP(Or, l | r)
      INT_BINOP(Mul, l * r)
      INT_BINOP(SDiv, l.sdiv(r))
      INT_BINOP(Shl, l << r)
      INT_BINOP(SRem, l.srem(r))
      INT_BINOP(Sub, l - r)
      INT_BINOP(UDiv, l.udiv(r))
      INT_BINOP(URem, l.urem(r))
      INT_BINOP(Xor, l ^ r)
#undef INT_BINOP

#define INT_COMPARE(OPCODE, EXPR)                                        \
  case BuiltinValueKind::OPCODE:                                         \
    REQUIRE_KIND(Integer)                                                \
    return constFoldIntCompare(                                          \
        [&](const APInt &l, const APInt &r) -> bool { return (EXPR); })
      INT_COMPARE(ICMP_EQ, l == r);
      INT_COMPARE(ICMP_NE, l != r);
      INT_COMPARE(ICMP_SLT, l.slt(r));
      INT_COMPARE(ICMP_SGT, l.sgt(r));
      INT_COMPARE(ICMP_SLE, l.sle(r));
      INT_COMPARE(ICMP_SGE, l.sge(r));
      INT_COMPARE(ICMP_ULT, l.ult(r));
      INT_COMPARE(ICMP_UGT, l.ugt(r));
      INT_COMPARE(ICMP_ULE, l.ule(r));
      INT_COMPARE(ICMP_UGE, l.uge(r));
#undef INT_COMPARE
#undef REQUIRE_KIND
    }
  }

  // Three operand builtins.
  if (inst->getNumOperands() == 3) {
    auto operand0 = getConstantValue(inst->getOperand(0));
    auto operand1 = getConstantValue(inst->getOperand(1));
    auto operand2 = getConstantValue(inst->getOperand(2));
    if (!operand0.isConstant())
      return operand0;
    if (!operand1.isConstant())
      return operand1;
    if (!operand2.isConstant())
      return operand2;

    // Overflowing integer operations like sadd_with_overflow take three
    // operands: the last one is a "should report overflow" bit.
    auto constFoldIntOverflow =
        [&](const std::function<APInt(const APInt &, const APInt &, bool &)>
                &fn) -> SymbolicValue {
      if (operand0.getKind() != SymbolicValue::Integer ||
          operand1.getKind() != SymbolicValue::Integer ||
          operand2.getKind() != SymbolicValue::Integer)
        return unknownResult();

      auto l = operand0.getIntegerValue(), r = operand1.getIntegerValue();
      bool overflowed = false;
      auto result = fn(l, r, overflowed);

      // Return a statically diagnosed overflow if the operation is supposed
      // to trap on overflow.
      if (overflowed && !operand2.getIntegerValue().isNullValue())
        return evaluator.getUnknown(SILValue(inst), UnknownReason::Overflow);

      auto &astContext = evaluator.getASTContext();
      // Build the symbolic value result for our normal and overflow bit.
      return SymbolicValue::getAggregate(
          {SymbolicValue::getInteger(result, astContext),
           SymbolicValue::getInteger(APInt(1, overflowed), astContext)},
          astContext);
    };

    switch (builtin.ID) {
    default:
      break;
#define INT_OVERFLOW(OPCODE, METHOD)                                     \
  case BuiltinValueKind::OPCODE:                                         \
    return constFoldIntOverflow(                                         \
        [&](const APInt &l, const APInt &r, bool &overflowed) -> APInt { \
          return l.METHOD(r, overflowed);                                \
        })
      INT_OVERFLOW(SAddOver, sadd_ov);
      INT_OVERFLOW(UAddOver, uadd_ov);
      INT_OVERFLOW(SSubOver, ssub_ov);
      INT_OVERFLOW(USubOver, usub_ov);
      INT_OVERFLOW(SMulOver, smul_ov);
      INT_OVERFLOW(UMulOver, umul_ov);
#undef INT_OVERFLOW
    }
  }

  LLVM_DEBUG(llvm::dbgs() << "ConstExpr Unknown Builtin: " << *inst << "\n");

  // Otherwise, we don't know how to handle this builtin.
  return unknownResult();
}
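IntCheckedTruncFn's overflow test is just a round trip: truncate, re-extend with the destination's signedness, and compare. A standalone plain-C++ version of the signed-to-signed case (int16_t to int8_t stands in for the SIL builtin; narrowing signed conversions are modular, which C++20 guarantees):

#include <cstdint>
#include <iostream>

static bool sToSTruncOverflows(int16_t src) {
  int8_t truncated = static_cast<int8_t>(src); // trunc to 8 bits
  int16_t reextended = truncated;              // sext back to 16 bits
  return reextended != src;                    // true if value was lost
}

int main() {
  std::cout << sToSTruncOverflows(100)        // 0: fits in i8
            << " " << sToSTruncOverflows(300) // 1: overflows
            << "\n";
}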
static SILInstruction *
constantFoldAndCheckIntegerConversions(BuiltinInst *BI,
                                       const BuiltinInfo &Builtin,
                                       Optional<bool> &ResultsInError) {
  assert(Builtin.ID == BuiltinValueKind::SToSCheckedTrunc ||
         Builtin.ID == BuiltinValueKind::UToUCheckedTrunc ||
         Builtin.ID == BuiltinValueKind::SToUCheckedTrunc ||
         Builtin.ID == BuiltinValueKind::UToSCheckedTrunc ||
         Builtin.ID == BuiltinValueKind::SUCheckedConversion ||
         Builtin.ID == BuiltinValueKind::USCheckedConversion);

  // Check if we are converting a constant integer.
  OperandValueArrayRef Args = BI->getArguments();
  auto *V = dyn_cast<IntegerLiteralInst>(Args[0]);
  if (!V)
    return nullptr;
  APInt SrcVal = V->getValue();

  // Get source type and bit width.
  Type SrcTy = Builtin.Types[0];
  uint32_t SrcBitWidth =
    Builtin.Types[0]->castTo<BuiltinIntegerType>()->getGreatestWidth();

  // Compute the destination (for SrcBitWidth < DestBitWidth) and enough info
  // to check for overflow.
  APInt Result;
  bool OverflowError;
  Type DstTy;

  // Process conversions signed <-> unsigned for same size integers.
  if (Builtin.ID == BuiltinValueKind::SUCheckedConversion ||
      Builtin.ID == BuiltinValueKind::USCheckedConversion) {
    DstTy = SrcTy;
    Result = SrcVal;
    // Report an error if the sign bit is set.
    OverflowError = SrcVal.isNegative();

  // Process the signed-to-signed, signed-to-unsigned, and
  // unsigned-to-unsigned truncations.
  } else if (Builtin.ID != BuiltinValueKind::UToSCheckedTrunc) {
    assert(Builtin.Types.size() == 2);
    DstTy = Builtin.Types[1];
    uint32_t DstBitWidth =
      DstTy->castTo<BuiltinIntegerType>()->getGreatestWidth();

    //     Result = trunc_IntFrom_IntTo(Val)
    //   For signed destination:
    //     sext_IntFrom(Result) == Val ? Result : overflow_error
    //   For unsigned destination:
    //     zext_IntFrom(Result) == Val ? Result : overflow_error
    Result = SrcVal.trunc(DstBitWidth);
    // Get the signedness of the destination.
    bool Signed = (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc);
    APInt Ext = Signed ? Result.sext(SrcBitWidth) : Result.zext(SrcBitWidth);
    OverflowError = (SrcVal != Ext);

  // Process truncation from unsigned to signed.
  } else {
    assert(Builtin.Types.size() == 2);
    DstTy = Builtin.Types[1];
    uint32_t DstBitWidth =
      Builtin.Types[1]->castTo<BuiltinIntegerType>()->getGreatestWidth();
    // Compute the destination (for SrcBitWidth < DestBitWidth):
    //     Result = trunc_IntTo(Val)
    //     Trunc  = trunc_'IntTo-1bit'(Val)
    //     zext_IntFrom(Trunc) == Val ? Result : overflow_error
    Result = SrcVal.trunc(DstBitWidth);
    APInt TruncVal = SrcVal.trunc(DstBitWidth - 1);
    OverflowError = (SrcVal != TruncVal.zext(SrcBitWidth));
  }

  // Check for overflow.
  if (OverflowError) {
    // If we are not asked to emit overflow diagnostics, just return nullptr
    // on overflow.
    if (!ResultsInError.hasValue())
      return nullptr;

    SILLocation Loc = BI->getLoc();
    SILModule &M = BI->getModule();
    const ApplyExpr *CE = Loc.getAsASTNode<ApplyExpr>();
    Type UserSrcTy;
    Type UserDstTy;
    // Primitive heuristics to get the user-written type.
    // Eventually we might be able to use SILLocation (when it contains info
    // about inlined call chains).
    if (CE) {
      if (const TupleType *RTy = CE->getArg()->getType()->getAs<TupleType>()) {
        if (RTy->getNumElements() == 1) {
          UserSrcTy = RTy->getElementType(0);
          UserDstTy = CE->getType();
        }
      } else {
        UserSrcTy = CE->getArg()->getType();
        UserDstTy = CE->getType();
      }
    }

    // Assume that we are converting from a literal if the Source size is
    // 2048. Is there a better way to identify conversions from literals?
    bool Literal = (SrcBitWidth == 2048);

    // FIXME: This will prevent hard error in cases the error is coming
    // from ObjC interoperability code. Currently, we treat NSUInteger as
    // Int.
    if (Loc.getSourceLoc().isInvalid()) {
      // Emit the appropriate warning diagnostic and set ResultsInError.
      if (Literal)
        diagnose(M.getASTContext(), Loc.getSourceLoc(),
                 diag::integer_literal_overflow_warn,
                 UserDstTy.isNull() ? DstTy : UserDstTy);
      else
        diagnose(M.getASTContext(), Loc.getSourceLoc(),
                 diag::integer_conversion_overflow_warn,
                 UserSrcTy.isNull() ? SrcTy : UserSrcTy,
                 UserDstTy.isNull() ? DstTy : UserDstTy);
      ResultsInError = Optional<bool>(true);
      return nullptr;
    }

    // Otherwise report the overflow error.
    if (Literal) {
      bool SrcTySigned, DstTySigned;
      std::tie(SrcTySigned, DstTySigned) = getTypeSignedness(Builtin);
      SmallString<10> SrcAsString;
      SrcVal.toString(SrcAsString, /*radix*/10, SrcTySigned);

      // Try to print user-visible types if they are available.
      if (!UserDstTy.isNull()) {
        auto diagID = diag::integer_literal_overflow;

        // If this is a negative literal in an unsigned type, use a specific
        // diagnostic.
        if (SrcTySigned && !DstTySigned && SrcVal.isNegative())
          diagID = diag::negative_integer_literal_overflow_unsigned;

        diagnose(M.getASTContext(), Loc.getSourceLoc(),
                 diagID, UserDstTy, SrcAsString);

      // Otherwise, print the Builtin Types.
      } else {
        bool SrcTySigned, DstTySigned;
        std::tie(SrcTySigned, DstTySigned) = getTypeSignedness(Builtin);
        diagnose(M.getASTContext(), Loc.getSourceLoc(),
                 diag::integer_literal_overflow_builtin_types,
                 DstTySigned, DstTy, SrcAsString);
      }
    } else {
      if (Builtin.ID == BuiltinValueKind::SUCheckedConversion) {
        diagnose(M.getASTContext(), Loc.getSourceLoc(),
                 diag::integer_conversion_sign_error,
                 UserDstTy.isNull() ? DstTy : UserDstTy);
      } else {
        // Try to print user-visible types if they are available.
        if (!UserSrcTy.isNull()) {
          diagnose(M.getASTContext(), Loc.getSourceLoc(),
                   diag::integer_conversion_overflow,
                   UserSrcTy, UserDstTy);

        // Otherwise, print the Builtin Types.
        } else {
          // Since builtin types are sign-agnostic, print the signedness
          // separately.
          bool SrcTySigned, DstTySigned;
          std::tie(SrcTySigned, DstTySigned) = getTypeSignedness(Builtin);
          diagnose(M.getASTContext(), Loc.getSourceLoc(),
                   diag::integer_conversion_overflow_builtin_types,
                   SrcTySigned, SrcTy, DstTySigned, DstTy);
        }
      }
    }

    ResultsInError = Optional<bool>(true);
    return nullptr;
  }

  // The call to the builtin should be replaced with the constant value.
  return constructResultWithOverflowTuple(BI, Result, false);
}
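The UToSCheckedTrunc branch encodes "an unsigned source fits a signed destination iff it fits in DstBitWidth - 1 bits", which the trunc-to-one-less-bit-and-zext round trip tests. A plain-C++ check of that rule (widths chosen for illustration):

#include <cstdint>
#include <iostream>

static bool uToSTruncOverflows(uint16_t src, unsigned dstBits) {
  // Keep dstBits - 1 low bits (the trunc), then compare against the source
  // (the zext): any lost high bit means the signed result would be wrong.
  uint16_t kept = src & static_cast<uint16_t>((1u << (dstBits - 1)) - 1);
  return kept != src;
}

int main() {
  std::cout << uToSTruncOverflows(127, 8)        // 0: 127 <= INT8_MAX
            << " " << uToSTruncOverflows(128, 8) // 1: 128 > INT8_MAX
            << "\n";
}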