/// signExtend - Return a new range in the specified integer type, which must
/// be strictly larger than the current type. The returned range will
/// correspond to the possible range of values as if the source range had been
/// sign extended.
ConstantRange ConstantRange::signExtend(uint32_t DstTySize) const {
  unsigned SrcTySize = getBitWidth();
  assert(SrcTySize < DstTySize && "Not a value extension");
  if (isFullSet()) {
    return ConstantRange(APInt::getHighBitsSet(DstTySize,
                                               DstTySize - SrcTySize + 1),
                         APInt::getLowBitsSet(DstTySize, SrcTySize - 1) + 1);
  }

  APInt L = Lower;
  L.sext(DstTySize);
  APInt U = Upper;
  U.sext(DstTySize);
  return ConstantRange(L, U);
}
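Below is a minimal standalone sketch (not part of the snippet above) of how the routine behaves, assuming a recent LLVM where ConstantRange::getFull exists and APInt::sext returns the extended value; the driver itself is hypothetical.

// Hypothetical driver exercising ConstantRange::signExtend; assumes a recent
// LLVM API (ConstantRange::getFull, value-returning APInt::sext).
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  // The i8 range [-16, 16), stored as the bit patterns [0xF0, 0x10).
  ConstantRange CR(APInt(8, 0xF0), APInt(8, 0x10));

  // Sign extending to i16 preserves the signed values: still [-16, 16).
  CR.signExtend(16).print(outs());
  outs() << "\n";

  // A full i8 set sign-extends to the signed i8 range [-128, 128) in i16.
  ConstantRange::getFull(8).signExtend(16).print(outs());
  outs() << "\n";
  return 0;
}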
int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}
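The expression (BitSize + 63) & ~0x3fU rounds the bit width up to the next multiple of 64 before the constant is split. A minimal sketch of the same chunking on a 96-bit immediate, assuming LLVM's APInt API; the driver is hypothetical and not part of the TTI code.

// Hypothetical driver illustrating the sign-extend-and-chunk step above.
#include "llvm/ADT/APInt.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  // A 96-bit immediate; 96 is not a multiple of 64, so it is sign-extended
  // to (96 + 63) & ~0x3f == 128 bits before being split.
  APInt Imm(96, -42, /*isSigned=*/true);
  unsigned BitSize = Imm.getBitWidth();

  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Walk the 64-bit chunks exactly like the cost model does.
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    int64_t Val = ImmVal.ashr(ShiftVal).sextOrTrunc(64).getSExtValue();
    outs() << "chunk at bit " << ShiftVal << ": " << Val << "\n";
  }
  return 0;
}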
APInt swift::constantFoldCast(APInt val, const BuiltinInfo &BI) {
  // Get the cast result.
  Type SrcTy = BI.Types[0];
  Type DestTy = BI.Types.size() == 2 ? BI.Types[1] : Type();
  uint32_t SrcBitWidth =
      SrcTy->castTo<BuiltinIntegerType>()->getGreatestWidth();
  uint32_t DestBitWidth =
      DestTy->castTo<BuiltinIntegerType>()->getGreatestWidth();

  if (SrcBitWidth == DestBitWidth)
    return val;

  switch (BI.ID) {
  default:
    llvm_unreachable("Invalid case.");
  case BuiltinValueKind::Trunc:
  case BuiltinValueKind::TruncOrBitCast:
    return val.trunc(DestBitWidth);
  case BuiltinValueKind::ZExt:
  case BuiltinValueKind::ZExtOrBitCast:
    return val.zext(DestBitWidth);
  case BuiltinValueKind::SExt:
  case BuiltinValueKind::SExtOrBitCast:
    return val.sext(DestBitWidth);
  }
}
/// MultiplyOverflows - True if the multiply cannot be expressed in an int
/// this size.
static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) {
  uint32_t W = C1->getBitWidth();
  APInt LHSExt = C1->getValue(), RHSExt = C2->getValue();
  if (sign) {
    LHSExt = LHSExt.sext(W * 2);
    RHSExt = RHSExt.sext(W * 2);
  } else {
    LHSExt = LHSExt.zext(W * 2);
    RHSExt = RHSExt.zext(W * 2);
  }

  APInt MulExt = LHSExt * RHSExt;

  if (!sign)
    return MulExt.ugt(APInt::getLowBitsSet(W * 2, W));

  APInt Min = APInt::getSignedMinValue(W).sext(W * 2);
  APInt Max = APInt::getSignedMaxValue(W).sext(W * 2);
  return MulExt.slt(Min) || MulExt.sgt(Max);
}
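A small sketch of the same widen-then-compare check on plain APInt values, assuming LLVM's APInt API; the multiplyOverflows helper and the driver are hypothetical, not InstCombine code.

// Hypothetical rewrite of the check on raw APInt values: widen both operands
// to 2*W bits, multiply, and test whether the product fits in W bits.
#include "llvm/ADT/APInt.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static bool multiplyOverflows(const APInt &A, const APInt &B, bool Signed) {
  unsigned W = A.getBitWidth();
  APInt LHSExt = Signed ? A.sext(W * 2) : A.zext(W * 2);
  APInt RHSExt = Signed ? B.sext(W * 2) : B.zext(W * 2);
  APInt MulExt = LHSExt * RHSExt;

  if (!Signed)
    return MulExt.ugt(APInt::getLowBitsSet(W * 2, W));

  APInt Min = APInt::getSignedMinValue(W).sext(W * 2);
  APInt Max = APInt::getSignedMaxValue(W).sext(W * 2);
  return MulExt.slt(Min) || MulExt.sgt(Max);
}

int main() {
  // 100 * 2 == 200 overflows a signed i8 (max 127) but not an unsigned i8.
  APInt A(8, 100), B(8, 2);
  outs() << "signed: " << (multiplyOverflows(A, B, true) ? "overflow" : "ok") << "\n";
  outs() << "unsigned: " << (multiplyOverflows(A, B, false) ? "overflow" : "ok") << "\n";
  return 0;
}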
/// \brief Calculate the cost of materializing the given constant.
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}
SymbolicValue
ConstExprFunctionState::computeConstantValueBuiltin(BuiltinInst *inst) {
  const BuiltinInfo &builtin = inst->getBuiltinInfo();

  // Handle various cases in groups.
  auto unknownResult = [&]() -> SymbolicValue {
    return evaluator.getUnknown(SILValue(inst), UnknownReason::Default);
  };

  // Unary operations.
  if (inst->getNumOperands() == 1) {
    auto operand = getConstantValue(inst->getOperand(0));
    // TODO: Could add a "value used here" sort of diagnostic.
    if (!operand.isConstant())
      return operand;

    // TODO: SUCheckedConversion/USCheckedConversion

    // Implement support for s_to_s_checked_trunc_Int2048_Int64 and other
    // checking integer truncates.  These produce a tuple of the result value
    // and an overflow bit.
    //
    // TODO: We can/should diagnose statically detectable integer overflow
    // errors and subsume the ConstantFolding.cpp mandatory SIL pass.
    auto IntCheckedTruncFn = [&](bool srcSigned,
                                 bool dstSigned) -> SymbolicValue {
      if (operand.getKind() != SymbolicValue::Integer)
        return unknownResult();

      auto operandVal = operand.getIntegerValue();
      uint32_t srcBitWidth = operandVal.getBitWidth();
      auto dstBitWidth =
          builtin.Types[1]->castTo<BuiltinIntegerType>()->getGreatestWidth();

      APInt result = operandVal.trunc(dstBitWidth);

      // Compute the overflow by re-extending the value back to its source and
      // checking for loss of value.
      APInt reextended =
          dstSigned ? result.sext(srcBitWidth) : result.zext(srcBitWidth);
      bool overflowed = (operandVal != reextended);

      if (!srcSigned && dstSigned)
        overflowed |= result.isSignBitSet();

      if (overflowed)
        return evaluator.getUnknown(SILValue(inst), UnknownReason::Overflow);

      auto &astContext = evaluator.getASTContext();
      // Build the Symbolic value result for our truncated value.
      return SymbolicValue::getAggregate(
          {SymbolicValue::getInteger(result, astContext),
           SymbolicValue::getInteger(APInt(1, overflowed), astContext)},
          astContext);
    };

    switch (builtin.ID) {
    default:
      break;
    case BuiltinValueKind::SToSCheckedTrunc:
      return IntCheckedTruncFn(true, true);
    case BuiltinValueKind::UToSCheckedTrunc:
      return IntCheckedTruncFn(false, true);
    case BuiltinValueKind::SToUCheckedTrunc:
      return IntCheckedTruncFn(true, false);
    case BuiltinValueKind::UToUCheckedTrunc:
      return IntCheckedTruncFn(false, false);

    case BuiltinValueKind::Trunc:
    case BuiltinValueKind::TruncOrBitCast:
    case BuiltinValueKind::ZExt:
    case BuiltinValueKind::ZExtOrBitCast:
    case BuiltinValueKind::SExt:
    case BuiltinValueKind::SExtOrBitCast: {
      if (operand.getKind() != SymbolicValue::Integer)
        return unknownResult();

      unsigned destBitWidth =
          inst->getType().castTo<BuiltinIntegerType>()->getGreatestWidth();

      APInt result = operand.getIntegerValue();
      if (result.getBitWidth() != destBitWidth) {
        switch (builtin.ID) {
        default:
          assert(0 && "Unknown case");
        case BuiltinValueKind::Trunc:
        case BuiltinValueKind::TruncOrBitCast:
          result = result.trunc(destBitWidth);
          break;
        case BuiltinValueKind::ZExt:
        case BuiltinValueKind::ZExtOrBitCast:
          result = result.zext(destBitWidth);
          break;
        case BuiltinValueKind::SExt:
        case BuiltinValueKind::SExtOrBitCast:
          result = result.sext(destBitWidth);
          break;
        }
      }
      return SymbolicValue::getInteger(result, evaluator.getASTContext());
    }
    }
  }

  // Binary operations.
  if (inst->getNumOperands() == 2) {
    auto operand0 = getConstantValue(inst->getOperand(0));
    auto operand1 = getConstantValue(inst->getOperand(1));
    if (!operand0.isConstant())
      return operand0;
    if (!operand1.isConstant())
      return operand1;

    auto constFoldIntCompare =
        [&](const std::function<bool(const APInt &, const APInt &)> &fn)
        -> SymbolicValue {
      if (operand0.getKind() != SymbolicValue::Integer ||
          operand1.getKind() != SymbolicValue::Integer)
        return unknownResult();

      auto result = fn(operand0.getIntegerValue(), operand1.getIntegerValue());
      return SymbolicValue::getInteger(APInt(1, result),
                                       evaluator.getASTContext());
    };

#define REQUIRE_KIND(KIND)                                                     \
  if (operand0.getKind() != SymbolicValue::KIND ||                            \
      operand1.getKind() != SymbolicValue::KIND)                              \
    return unknownResult();

    switch (builtin.ID) {
    default:
      break;
#define INT_BINOP(OPCODE, EXPR)                                                \
  case BuiltinValueKind::OPCODE: {                                            \
    REQUIRE_KIND(Integer)                                                      \
    auto l = operand0.getIntegerValue(), r = operand1.getIntegerValue();       \
    return SymbolicValue::getInteger((EXPR), evaluator.getASTContext());       \
  }
      INT_BINOP(Add, l + r)
      INT_BINOP(And, l & r)
      INT_BINOP(AShr, l.ashr(r))
      INT_BINOP(LShr, l.lshr(r))
      INT_BINOP(Or, l | r)
      INT_BINOP(Mul, l * r)
      INT_BINOP(SDiv, l.sdiv(r))
      INT_BINOP(Shl, l << r)
      INT_BINOP(SRem, l.srem(r))
      INT_BINOP(Sub, l - r)
      INT_BINOP(UDiv, l.udiv(r))
      INT_BINOP(URem, l.urem(r))
      INT_BINOP(Xor, l ^ r)
#undef INT_BINOP

#define INT_COMPARE(OPCODE, EXPR)                                              \
  case BuiltinValueKind::OPCODE:                                               \
    REQUIRE_KIND(Integer)                                                      \
    return constFoldIntCompare(                                                \
        [&](const APInt &l, const APInt &r) -> bool { return (EXPR); })
      INT_COMPARE(ICMP_EQ, l == r);
      INT_COMPARE(ICMP_NE, l != r);
      INT_COMPARE(ICMP_SLT, l.slt(r));
      INT_COMPARE(ICMP_SGT, l.sgt(r));
      INT_COMPARE(ICMP_SLE, l.sle(r));
      INT_COMPARE(ICMP_SGE, l.sge(r));
      INT_COMPARE(ICMP_ULT, l.ult(r));
      INT_COMPARE(ICMP_UGT, l.ugt(r));
      INT_COMPARE(ICMP_ULE, l.ule(r));
      INT_COMPARE(ICMP_UGE, l.uge(r));
#undef INT_COMPARE
#undef REQUIRE_KIND
    }
  }

  // Three operand builtins.
  if (inst->getNumOperands() == 3) {
    auto operand0 = getConstantValue(inst->getOperand(0));
    auto operand1 = getConstantValue(inst->getOperand(1));
    auto operand2 = getConstantValue(inst->getOperand(2));
    if (!operand0.isConstant())
      return operand0;
    if (!operand1.isConstant())
      return operand1;
    if (!operand2.isConstant())
      return operand2;

    // Overflowing integer operations like sadd_with_overflow take three
    // operands: the last one is a "should report overflow" bit.
    auto constFoldIntOverflow =
        [&](const std::function<APInt(const APInt &, const APInt &, bool &)>
                &fn) -> SymbolicValue {
      if (operand0.getKind() != SymbolicValue::Integer ||
          operand1.getKind() != SymbolicValue::Integer ||
          operand2.getKind() != SymbolicValue::Integer)
        return unknownResult();

      auto l = operand0.getIntegerValue(), r = operand1.getIntegerValue();
      bool overflowed = false;
      auto result = fn(l, r, overflowed);

      // Return a statically diagnosed overflow if the operation is supposed to
      // trap on overflow.
      if (overflowed && !operand2.getIntegerValue().isNullValue())
        return evaluator.getUnknown(SILValue(inst), UnknownReason::Overflow);

      auto &astContext = evaluator.getASTContext();
      // Build the Symbolic value result for our normal and overflow bit.
      return SymbolicValue::getAggregate(
          {SymbolicValue::getInteger(result, astContext),
           SymbolicValue::getInteger(APInt(1, overflowed), astContext)},
          astContext);
    };

    switch (builtin.ID) {
    default:
      break;
#define INT_OVERFLOW(OPCODE, METHOD)                                           \
  case BuiltinValueKind::OPCODE:                                               \
    return constFoldIntOverflow(                                               \
        [&](const APInt &l, const APInt &r, bool &overflowed) -> APInt {       \
          return l.METHOD(r, overflowed);                                      \
        })
      INT_OVERFLOW(SAddOver, sadd_ov);
      INT_OVERFLOW(UAddOver, uadd_ov);
      INT_OVERFLOW(SSubOver, ssub_ov);
      INT_OVERFLOW(USubOver, usub_ov);
      INT_OVERFLOW(SMulOver, smul_ov);
      INT_OVERFLOW(UMulOver, umul_ov);
#undef INT_OVERFLOW
    }
  }

  LLVM_DEBUG(llvm::dbgs() << "ConstExpr Unknown Builtin: " << *inst << "\n");

  // Otherwise, we don't know how to handle this builtin.
  return unknownResult();
}
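The checked truncations above detect overflow by truncating, re-extending with the destination's signedness, and comparing against the original value; the three-operand builtins lean on APInt's *_ov helpers instead. A standalone sketch of both checks, assuming LLVM's APInt API; the driver is hypothetical.

// Hypothetical driver showing the two overflow-detection idioms used above.
#include "llvm/ADT/APInt.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  // Checked truncation: i32 300 does not fit into an i8, signed or unsigned.
  APInt wide(32, 300);
  APInt narrow = wide.trunc(8);
  bool signedOverflow = (narrow.sext(32) != wide);   // re-extend and compare
  bool unsignedOverflow = (narrow.zext(32) != wide);
  outs() << "s_to_s trunc overflows: " << (signedOverflow ? "yes" : "no") << "\n";
  outs() << "u_to_u trunc overflows: " << (unsignedOverflow ? "yes" : "no") << "\n";

  // Overflowing arithmetic: the *_ov helpers report overflow via a bool.
  bool overflowed = false;
  APInt sum = APInt(8, 100).sadd_ov(APInt(8, 100), overflowed);
  (void)sum;
  outs() << "100 + 100 in i8 overflows: " << (overflowed ? "yes" : "no") << "\n";
  return 0;
}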
// A helper function that unifies the bitwidth of A and B.
static void unifyBitWidth(APInt &A, APInt &B) {
  if (A.getBitWidth() < B.getBitWidth())
    A = A.sext(B.getBitWidth());
  else if (A.getBitWidth() > B.getBitWidth())
    B = B.sext(A.getBitWidth());
}
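A quick sketch of what the helper does: the narrower of the two values is sign-extended to the wider width so the pair can then be compared or combined directly. The driver below is hypothetical and simply restates the helper so it compiles standalone.

// Hypothetical standalone check of unifyBitWidth's behavior.
#include "llvm/ADT/APInt.h"
#include <cassert>

using namespace llvm;

static void unifyBitWidth(APInt &A, APInt &B) {
  if (A.getBitWidth() < B.getBitWidth())
    A = A.sext(B.getBitWidth());
  else if (A.getBitWidth() > B.getBitWidth())
    B = B.sext(A.getBitWidth());
}

int main() {
  APInt A(8, -5, /*isSigned=*/true);  // i8 -5
  APInt B(32, 7);                     // i32 7
  unifyBitWidth(A, B);                // A becomes i32 -5; B is unchanged
  assert(A.getBitWidth() == 32 && B.getBitWidth() == 32);
  assert(A.getSExtValue() == -5);
  return 0;
}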
static SILInstruction *
constantFoldAndCheckIntegerConversions(BuiltinInst *BI,
                                       const BuiltinInfo &Builtin,
                                       Optional<bool> &ResultsInError) {
  assert(Builtin.ID == BuiltinValueKind::SToSCheckedTrunc ||
         Builtin.ID == BuiltinValueKind::UToUCheckedTrunc ||
         Builtin.ID == BuiltinValueKind::SToUCheckedTrunc ||
         Builtin.ID == BuiltinValueKind::UToSCheckedTrunc ||
         Builtin.ID == BuiltinValueKind::SUCheckedConversion ||
         Builtin.ID == BuiltinValueKind::USCheckedConversion);

  // Check if we are converting a constant integer.
  OperandValueArrayRef Args = BI->getArguments();
  auto *V = dyn_cast<IntegerLiteralInst>(Args[0]);
  if (!V)
    return nullptr;
  APInt SrcVal = V->getValue();

  // Get source type and bit width.
  Type SrcTy = Builtin.Types[0];
  uint32_t SrcBitWidth =
      Builtin.Types[0]->castTo<BuiltinIntegerType>()->getGreatestWidth();

  // Compute the destination (for SrcBitWidth < DestBitWidth) and enough info
  // to check for overflow.
  APInt Result;
  bool OverflowError;
  Type DstTy;

  // Process conversions signed <-> unsigned for same size integers.
  if (Builtin.ID == BuiltinValueKind::SUCheckedConversion ||
      Builtin.ID == BuiltinValueKind::USCheckedConversion) {
    DstTy = SrcTy;
    Result = SrcVal;
    // Report an error if the sign bit is set.
    OverflowError = SrcVal.isNegative();

  // Process all truncations except unsigned-to-signed.
  } else if (Builtin.ID != BuiltinValueKind::UToSCheckedTrunc) {
    assert(Builtin.Types.size() == 2);
    DstTy = Builtin.Types[1];
    uint32_t DstBitWidth =
        DstTy->castTo<BuiltinIntegerType>()->getGreatestWidth();
    // Result = trunc_IntFrom_IntTo(Val)
    // For signed destination:
    //   sext_IntFrom(Result) == Val ? Result : overflow_error
    // For unsigned destination:
    //   zext_IntFrom(Result) == Val ? Result : overflow_error
    Result = SrcVal.trunc(DstBitWidth);
    // Get the signedness of the destination.
    bool Signed = (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc);
    APInt Ext = Signed ? Result.sext(SrcBitWidth) : Result.zext(SrcBitWidth);
    OverflowError = (SrcVal != Ext);

  // Process the remaining case: truncation from unsigned to signed.
  } else {
    assert(Builtin.Types.size() == 2);
    DstTy = Builtin.Types[1];
    uint32_t DstBitWidth =
        Builtin.Types[1]->castTo<BuiltinIntegerType>()->getGreatestWidth();
    // Compute the destination (for SrcBitWidth < DestBitWidth):
    //   Result = trunc_IntTo(Val)
    //   Trunc = trunc_'IntTo-1bit'(Val)
    //   zext_IntFrom(Trunc) == Val ? Result : overflow_error
    Result = SrcVal.trunc(DstBitWidth);
    APInt TruncVal = SrcVal.trunc(DstBitWidth - 1);
    OverflowError = (SrcVal != TruncVal.zext(SrcBitWidth));
  }

  // Check for overflow.
  if (OverflowError) {
    // If we are not asked to emit overflow diagnostics, just return nullptr on
    // overflow.
    if (!ResultsInError.hasValue())
      return nullptr;

    SILLocation Loc = BI->getLoc();
    SILModule &M = BI->getModule();
    const ApplyExpr *CE = Loc.getAsASTNode<ApplyExpr>();
    Type UserSrcTy;
    Type UserDstTy;
    // Primitive heuristics to get the user-written type.
    // Eventually we might be able to use SILLocation (when it contains info
    // about inlined call chains).
    if (CE) {
      if (const TupleType *RTy = CE->getArg()->getType()->getAs<TupleType>()) {
        if (RTy->getNumElements() == 1) {
          UserSrcTy = RTy->getElementType(0);
          UserDstTy = CE->getType();
        }
      } else {
        UserSrcTy = CE->getArg()->getType();
        UserDstTy = CE->getType();
      }
    }

    // Assume that we are converting from a literal if the source size is
    // 2048. Is there a better way to identify conversions from literals?
    bool Literal = (SrcBitWidth == 2048);

    // FIXME: This will prevent a hard error in cases where the error is coming
    // from ObjC interoperability code.
    // Currently, we treat NSUInteger as Int.
    if (Loc.getSourceLoc().isInvalid()) {
      // Emit the appropriate warning diagnostic and set ResultsInError.
      if (Literal)
        diagnose(M.getASTContext(), Loc.getSourceLoc(),
                 diag::integer_literal_overflow_warn,
                 UserDstTy.isNull() ? DstTy : UserDstTy);
      else
        diagnose(M.getASTContext(), Loc.getSourceLoc(),
                 diag::integer_conversion_overflow_warn,
                 UserSrcTy.isNull() ? SrcTy : UserSrcTy,
                 UserDstTy.isNull() ? DstTy : UserDstTy);
      ResultsInError = Optional<bool>(true);
      return nullptr;
    }

    // Otherwise report the overflow error.
    if (Literal) {
      bool SrcTySigned, DstTySigned;
      std::tie(SrcTySigned, DstTySigned) = getTypeSignedness(Builtin);
      SmallString<10> SrcAsString;
      SrcVal.toString(SrcAsString, /*radix*/ 10, SrcTySigned);

      // Try to print user-visible types if they are available.
      if (!UserDstTy.isNull()) {
        auto diagID = diag::integer_literal_overflow;

        // If this is a negative literal in an unsigned type, use a specific
        // diagnostic.
        if (SrcTySigned && !DstTySigned && SrcVal.isNegative())
          diagID = diag::negative_integer_literal_overflow_unsigned;

        diagnose(M.getASTContext(), Loc.getSourceLoc(), diagID,
                 UserDstTy, SrcAsString);

      // Otherwise, print the builtin types.
      } else {
        bool SrcTySigned, DstTySigned;
        std::tie(SrcTySigned, DstTySigned) = getTypeSignedness(Builtin);
        diagnose(M.getASTContext(), Loc.getSourceLoc(),
                 diag::integer_literal_overflow_builtin_types,
                 DstTySigned, DstTy, SrcAsString);
      }
    } else {
      if (Builtin.ID == BuiltinValueKind::SUCheckedConversion) {
        diagnose(M.getASTContext(), Loc.getSourceLoc(),
                 diag::integer_conversion_sign_error,
                 UserDstTy.isNull() ? DstTy : UserDstTy);
      } else {
        // Try to print user-visible types if they are available.
        if (!UserSrcTy.isNull()) {
          diagnose(M.getASTContext(), Loc.getSourceLoc(),
                   diag::integer_conversion_overflow,
                   UserSrcTy, UserDstTy);

        // Otherwise, print the builtin types.
        } else {
          // Since builtin types are sign-agnostic, print the signedness
          // separately.
          bool SrcTySigned, DstTySigned;
          std::tie(SrcTySigned, DstTySigned) = getTypeSignedness(Builtin);
          diagnose(M.getASTContext(), Loc.getSourceLoc(),
                   diag::integer_conversion_overflow_builtin_types,
                   SrcTySigned, SrcTy, DstTySigned, DstTy);
        }
      }
    }

    ResultsInError = Optional<bool>(true);
    return nullptr;
  }

  // The call to the builtin should be replaced with the constant value.
  return constructResultWithOverflowTuple(BI, Result, false);
}
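For the unsigned-to-signed truncation, the routine checks that the source value survives a round trip through DstBitWidth - 1 bits, since the destination's sign bit must stay clear. A small standalone sketch of that check, assuming LLVM's APInt API; the uToSTruncOverflows helper and driver are hypothetical.

// Hypothetical mirror of the UToSCheckedTrunc overflow test above.
#include "llvm/ADT/APInt.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static bool uToSTruncOverflows(const APInt &SrcVal, unsigned DstBits) {
  unsigned SrcBits = SrcVal.getBitWidth();
  // The value fits in the signed destination only if truncating to
  // DstBits - 1 bits and zero-extending back loses nothing.
  APInt TruncVal = SrcVal.trunc(DstBits - 1);
  return SrcVal != TruncVal.zext(SrcBits);
}

int main() {
  // In a u16 -> s8 truncation, 127 fits but 128 would set the sign bit.
  outs() << "127: " << (uToSTruncOverflows(APInt(16, 127), 8) ? "overflow" : "ok") << "\n";
  outs() << "128: " << (uToSTruncOverflows(APInt(16, 128), 8) ? "overflow" : "ok") << "\n";
  return 0;
}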