// Constants smaller than 256 fit in the immediate field of // Thumb1 instructions so we return a zero cost and 1 otherwise. int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty) { if (Imm.isNonNegative() && Imm.getLimitedValue() < 256) return 0; return 1; }
/// We do not support symbolic projections yet, only 32-bit unsigned integers. bool swift::getIntegerIndex(SILValue IndexVal, unsigned &IndexConst) { if (auto *IndexLiteral = dyn_cast<IntegerLiteralInst>(IndexVal)) { APInt ConstInt = IndexLiteral->getValue(); // IntegerLiterals are signed. if (ConstInt.isIntN(32) && ConstInt.isNonNegative()) { IndexConst = (unsigned)ConstInt.getSExtValue(); return true; } } return false; }
/// Returns true when the access of type \p Ty at byte offset \p Offset from
/// \p BV is proven dereferenceable purely from dereferenceability
/// attributes/metadata attached to \p BV (argument attributes, call-site
/// return attributes, or load metadata).
static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset,
                                           Type *Ty, const DataLayout &DL,
                                           const Instruction *CtxI,
                                           const DominatorTree *DT,
                                           const TargetLibraryInfo *TLI) {
  assert(Offset.isNonNegative() && "offset can't be negative");
  assert(Ty->isSized() && "must be sized");

  // Number of bytes known dereferenceable from BV (0 = unknown).
  APInt DerefBytes(Offset.getBitWidth(), 0);
  // Set when the byte count came from a "..._or_null" attribute/metadata;
  // in that case we must additionally prove the pointer is non-null.
  bool CheckForNonNull = false;
  if (const Argument *A = dyn_cast<Argument>(BV)) {
    DerefBytes = A->getDereferenceableBytes();
    if (!DerefBytes.getBoolValue()) {
      // Fall back to dereferenceable_or_null; non-null must then be checked.
      DerefBytes = A->getDereferenceableOrNullBytes();
      CheckForNonNull = true;
    }
  } else if (auto CS = ImmutableCallSite(BV)) {
    // Attributes on the call's return value (index 0).
    DerefBytes = CS.getDereferenceableBytes(0);
    if (!DerefBytes.getBoolValue()) {
      DerefBytes = CS.getDereferenceableOrNullBytes(0);
      CheckForNonNull = true;
    }
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) {
    // !dereferenceable / !dereferenceable_or_null metadata carry the byte
    // count as their first operand.
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      DerefBytes = CI->getLimitedValue();
    }
    if (!DerefBytes.getBoolValue()) {
      if (MDNode *MD =
              LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
        DerefBytes = CI->getLimitedValue();
      }
      CheckForNonNull = true;
    }
  }

  if (DerefBytes.getBoolValue())
    // The accessed range [Offset, Offset + store size) must lie entirely
    // within the known-dereferenceable region.
    if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
      if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI))
        return true;

  return false;
}
// Emit LLVM IR for one Julia intrinsic `f` applied to up to three already
// unboxed/untyped argument values. `newtyp`, when set, reports a result type
// different from the input type (e.g. Bool for comparisons, a tuple type for
// checked arithmetic). `xtyp` is the Julia-level type of the first argument,
// used to build the result tuple type of the checked intrinsics.
static Value *emit_untyped_intrinsic(jl_codectx_t &ctx, intrinsic f,
                                     Value **argvalues, size_t nargs,
                                     jl_datatype_t **newtyp, jl_value_t *xtyp)
{
    // Up to three operands; missing ones are NULL (each case below only
    // touches the operands its arity guarantees).
    Value *x = nargs > 0 ? argvalues[0] : NULL;
    Value *y = nargs > 1 ? argvalues[1] : NULL;
    Value *z = nargs > 2 ? argvalues[2] : NULL;
    Type *t = x->getType();

    switch (f) {
    // --- plain integer arithmetic ---
    case neg_int: return ctx.builder.CreateNeg(x);
    case add_int: return ctx.builder.CreateAdd(x, y);
    case sub_int: return ctx.builder.CreateSub(x, y);
    case mul_int: return ctx.builder.CreateMul(x, y);
    case sdiv_int: return ctx.builder.CreateSDiv(x, y);
    case udiv_int: return ctx.builder.CreateUDiv(x, y);
    case srem_int: return ctx.builder.CreateSRem(x, y);
    case urem_int: return ctx.builder.CreateURem(x, y);

    // LLVM will not fold ptrtoint+arithmetic+inttoptr to GEP. The reason for this
    // has to do with alias analysis. When adding two integers, either one of them
    // could be the pointer base. With getelementptr, it is clear which of the
    // operands is the pointer base. We also have this information at the julia
    // level. Thus, to not lose information, we need to have a separate intrinsic
    // for pointer arithmetic which lowers to getelementptr.
    case add_ptr: {
        return ctx.builder.CreatePtrToInt(
            ctx.builder.CreateGEP(T_int8,
                ctx.builder.CreateIntToPtr(x, T_pint8), y), t);
    }
    case sub_ptr: {
        // Pointer subtraction is a GEP by the negated byte offset.
        return ctx.builder.CreatePtrToInt(
            ctx.builder.CreateGEP(T_int8,
                ctx.builder.CreateIntToPtr(x, T_pint8),
                ctx.builder.CreateNeg(y)), t);
    }

    // --- floating point arithmetic ---
    // Implements IEEE negate. See issue #7868
    // (0.0 - x is not IEEE negate because it fails to flip the sign of 0.0;
    // -0.0 - x is).
    case neg_float: return math_builder(ctx)().CreateFSub(ConstantFP::get(t, -0.0), x);
    case neg_float_fast: return math_builder(ctx, true)().CreateFNeg(x);
    case add_float: return math_builder(ctx)().CreateFAdd(x, y);
    case sub_float: return math_builder(ctx)().CreateFSub(x, y);
    case mul_float: return math_builder(ctx)().CreateFMul(x, y);
    case div_float: return math_builder(ctx)().CreateFDiv(x, y);
    case rem_float: return math_builder(ctx)().CreateFRem(x, y);
    // "_fast" variants carry fast-math flags via math_builder(ctx, true).
    case add_float_fast: return math_builder(ctx, true)().CreateFAdd(x, y);
    case sub_float_fast: return math_builder(ctx, true)().CreateFSub(x, y);
    case mul_float_fast: return math_builder(ctx, true)().CreateFMul(x, y);
    case div_float_fast: return math_builder(ctx, true)().CreateFDiv(x, y);
    case rem_float_fast: return math_builder(ctx, true)().CreateFRem(x, y);
    case fma_float: {
        assert(y->getType() == x->getType());
        assert(z->getType() == y->getType());
        Value *fmaintr = Intrinsic::getDeclaration(jl_Module, Intrinsic::fma,
                                                   makeArrayRef(t));
        return ctx.builder.CreateCall(fmaintr, {x, y, z});
    }
    case muladd_float: {
#if JL_LLVM_VERSION >= 50000
        // LLVM 5.0 can create FMA in the backend for contractable fmul and fadd
        // Emitting fmul and fadd here since they are easier for other LLVM passes to
        // optimize.
        auto mathb = math_builder(ctx, false, true);
        return mathb().CreateFAdd(mathb().CreateFMul(x, y), z);
#else
        assert(y->getType() == x->getType());
        assert(z->getType() == y->getType());
        Value *muladdintr = Intrinsic::getDeclaration(jl_Module, Intrinsic::fmuladd,
                                                      makeArrayRef(t));
        return ctx.builder.CreateCall(muladdintr, {x, y, z});
#endif
    }

    // --- overflow-checked integer arithmetic ---
    // These lower to the llvm *.with.overflow intrinsics and return a
    // (value, overflowed::Bool) tuple; `newtyp` is set accordingly.
    case checked_sadd_int:
    case checked_uadd_int:
    case checked_ssub_int:
    case checked_usub_int:
    case checked_smul_int:
    case checked_umul_int: {
        assert(x->getType() == y->getType());
        // Map the Julia intrinsic to the corresponding LLVM overflow intrinsic.
        Intrinsic::ID intr_id =
            (f == checked_sadd_int ?
             Intrinsic::sadd_with_overflow :
             (f == checked_uadd_int ?
              Intrinsic::uadd_with_overflow :
              (f == checked_ssub_int ?
               Intrinsic::ssub_with_overflow :
               (f == checked_usub_int ?
                Intrinsic::usub_with_overflow :
                (f == checked_smul_int ?
                 Intrinsic::smul_with_overflow :
                 Intrinsic::umul_with_overflow)))));
        Value *intr = Intrinsic::getDeclaration(jl_Module, intr_id, makeArrayRef(t));
        Value *res = ctx.builder.CreateCall(intr, {x, y});
        Value *val = ctx.builder.CreateExtractValue(res, ArrayRef<unsigned>(0));
        Value *obit = ctx.builder.CreateExtractValue(res, ArrayRef<unsigned>(1));
        // Widen the i1 overflow flag to i8 to match Julia's Bool layout.
        Value *obyte = ctx.builder.CreateZExt(obit, T_int8);

        // Build the Julia tuple type {xtyp, Bool} for the result.
        jl_value_t *params[2];
        params[0] = xtyp;
        params[1] = (jl_value_t*)jl_bool_type;
        jl_datatype_t *tuptyp = jl_apply_tuple_type_v(params, 2);
        *newtyp = tuptyp;

        // Assemble the LLVM aggregate {val, overflowed}.
        Value *tupval;
        tupval = UndefValue::get(julia_type_to_llvm((jl_value_t*)tuptyp));
        tupval = ctx.builder.CreateInsertValue(tupval, val, ArrayRef<unsigned>(0));
        tupval = ctx.builder.CreateInsertValue(tupval, obyte, ArrayRef<unsigned>(1));
        return tupval;
    }

    // --- checked division / remainder (raise DivideError instead of UB) ---
    case checked_sdiv_int: {
        // typemin = 1 << (nbits - 1), the most negative value of t.
        Value *typemin = ctx.builder.CreateShl(ConstantInt::get(t, 1),
                                               t->getPrimitiveSizeInBits() - 1);
        // Raise unless y != 0 && !(y == -1 && x == typemin); both cases are
        // undefined behavior for LLVM's sdiv.
        raise_exception_unless(ctx,
                ctx.builder.CreateAnd(
                    ctx.builder.CreateICmpNE(y, ConstantInt::get(t, 0)),
                    ctx.builder.CreateOr(
                        ctx.builder.CreateICmpNE(y, ConstantInt::get(t, -1, true)),
                        ctx.builder.CreateICmpNE(x, typemin))),
                literal_pointer_val(ctx, jl_diverror_exception));
        return ctx.builder.CreateSDiv(x, y);
    }
    case checked_udiv_int:
        // Unsigned division only traps on y == 0.
        raise_exception_unless(ctx,
                ctx.builder.CreateICmpNE(y, ConstantInt::get(t, 0)),
                literal_pointer_val(ctx, jl_diverror_exception));
        return ctx.builder.CreateUDiv(x, y);
    case checked_srem_int:
        return emit_checked_srem_int(ctx, x, y);
    case checked_urem_int:
        raise_exception_unless(ctx,
                ctx.builder.CreateICmpNE(y, ConstantInt::get(t, 0)),
                literal_pointer_val(ctx, jl_diverror_exception));
        return ctx.builder.CreateURem(x, y);

    // --- comparisons (result type is Bool) ---
    case eq_int: *newtyp = jl_bool_type; return ctx.builder.CreateICmpEQ(x, y);
    case ne_int: *newtyp = jl_bool_type; return ctx.builder.CreateICmpNE(x, y);
    case slt_int: *newtyp = jl_bool_type; return ctx.builder.CreateICmpSLT(x, y);
    case ult_int: *newtyp = jl_bool_type; return ctx.builder.CreateICmpULT(x, y);
    case sle_int: *newtyp = jl_bool_type; return ctx.builder.CreateICmpSLE(x, y);
    case ule_int: *newtyp = jl_bool_type; return ctx.builder.CreateICmpULE(x, y);

    // Float comparisons: ordered except ne (UNE), so any comparison with a
    // NaN operand yields false for eq/lt/le and true for ne.
    case eq_float: *newtyp = jl_bool_type; return math_builder(ctx)().CreateFCmpOEQ(x, y);
    case ne_float: *newtyp = jl_bool_type; return math_builder(ctx)().CreateFCmpUNE(x, y);
    case lt_float: *newtyp = jl_bool_type; return math_builder(ctx)().CreateFCmpOLT(x, y);
    case le_float: *newtyp = jl_bool_type; return math_builder(ctx)().CreateFCmpOLE(x, y);
    case eq_float_fast: *newtyp = jl_bool_type; return math_builder(ctx, true)().CreateFCmpOEQ(x, y);
    case ne_float_fast: *newtyp = jl_bool_type; return math_builder(ctx, true)().CreateFCmpUNE(x, y);
    case lt_float_fast: *newtyp = jl_bool_type; return math_builder(ctx, true)().CreateFCmpOLT(x, y);
    case le_float_fast: *newtyp = jl_bool_type; return math_builder(ctx, true)().CreateFCmpOLE(x, y);

    case fpiseq: {
        // Bitwise float equality, except all NaNs compare equal to each
        // other: (isnan(x) && isnan(y)) || bits(x) == bits(y).
        *newtyp = jl_bool_type;
        Type *it = INTT(t);
        Value *xi = ctx.builder.CreateBitCast(x, it);
        Value *yi = ctx.builder.CreateBitCast(y, it);
        return ctx.builder.CreateOr(ctx.builder.CreateAnd(ctx.builder.CreateFCmpUNO(x, x),
                                                          ctx.builder.CreateFCmpUNO(y, y)),
                                    ctx.builder.CreateICmpEQ(xi, yi));
    }
    case fpislt: {
        // Total-order "less than" on floats via their bit patterns:
        // a non-NaN is less than any NaN; for ordered pairs, compare the
        // integer bits signed when x >= 0 and unsigned (reversed) when x < 0,
        // since negative floats order inversely to their bit patterns.
        *newtyp = jl_bool_type;
        Type *it = INTT(t);
        Value *xi = ctx.builder.CreateBitCast(x, it);
        Value *yi = ctx.builder.CreateBitCast(y, it);
        return ctx.builder.CreateOr(
            ctx.builder.CreateAnd(
                ctx.builder.CreateFCmpORD(x, x),
                ctx.builder.CreateFCmpUNO(y, y)),
            ctx.builder.CreateAnd(
                ctx.builder.CreateFCmpORD(x, y),
                ctx.builder.CreateOr(
                    ctx.builder.CreateAnd(
                        ctx.builder.CreateICmpSGE(xi, ConstantInt::get(it, 0)),
                        ctx.builder.CreateICmpSLT(xi, yi)),
                    ctx.builder.CreateAnd(
                        ctx.builder.CreateICmpSLT(xi, ConstantInt::get(it, 0)),
                        ctx.builder.CreateICmpUGT(xi, yi)))));
    }

    // --- bitwise operations ---
    case and_int: return ctx.builder.CreateAnd(x, y);
    case or_int: return ctx.builder.CreateOr(x, y);
    case xor_int: return ctx.builder.CreateXor(x, y);

    // Shifts: LLVM's shift by >= bitwidth is undefined, so select a defined
    // result (0, or the sign-fill for ashr) when the count is out of range.
    case shl_int:
        return ctx.builder.CreateSelect(
                ctx.builder.CreateICmpUGE(y, ConstantInt::get(y->getType(),
                                                              t->getPrimitiveSizeInBits())),
                ConstantInt::get(t, 0),
                ctx.builder.CreateShl(x, uint_cnvt(ctx, t, y)));
    case lshr_int:
        return ctx.builder.CreateSelect(
                ctx.builder.CreateICmpUGE(y, ConstantInt::get(y->getType(),
                                                              t->getPrimitiveSizeInBits())),
                ConstantInt::get(t, 0),
                ctx.builder.CreateLShr(x, uint_cnvt(ctx, t, y)));
    case ashr_int:
        // Oversized arithmetic shift yields all sign bits (shift by nbits-1).
        return ctx.builder.CreateSelect(
                ctx.builder.CreateICmpUGE(y, ConstantInt::get(y->getType(),
                                                              t->getPrimitiveSizeInBits())),
                ctx.builder.CreateAShr(x, ConstantInt::get(t, t->getPrimitiveSizeInBits() - 1)),
                ctx.builder.CreateAShr(x, uint_cnvt(ctx, t, y)));

    case bswap_int: {
        Value *bswapintr = Intrinsic::getDeclaration(jl_Module, Intrinsic::bswap,
                                                     makeArrayRef(t));
        return ctx.builder.CreateCall(bswapintr, x);
    }
    case ctpop_int: {
        Value *ctpopintr = Intrinsic::getDeclaration(jl_Module, Intrinsic::ctpop,
                                                     makeArrayRef(t));
        return ctx.builder.CreateCall(ctpopintr, x);
    }
    case ctlz_int: {
        Value *ctlz = Intrinsic::getDeclaration(jl_Module, Intrinsic::ctlz,
                                                makeArrayRef(t));
        // Second operand i1 false: a zero input is defined (returns nbits).
        y = ConstantInt::get(T_int1, 0);
        return ctx.builder.CreateCall(ctlz, {x, y});
    }
    case cttz_int: {
        Value *cttz = Intrinsic::getDeclaration(jl_Module, Intrinsic::cttz,
                                                makeArrayRef(t));
        y = ConstantInt::get(T_int1, 0);
        return ctx.builder.CreateCall(cttz, {x, y});
    }

    case abs_float: {
        Value *absintr = Intrinsic::getDeclaration(jl_Module, Intrinsic::fabs,
                                                   makeArrayRef(t));
        return ctx.builder.CreateCall(absintr, x);
    }
    case copysign_float: {
        // Operate on the integer bit pattern: keep x's magnitude bits and
        // y's sign bit. (t is an integer type here — note cast<IntegerType>.)
        Value *bits = ctx.builder.CreateBitCast(x, t);
        Value *sbits = ctx.builder.CreateBitCast(y, t);
        unsigned nb = cast<IntegerType>(t)->getBitWidth();
        APInt notsignbit = APInt::getSignedMaxValue(nb); // all bits but the sign
        APInt signbit0(nb, 0);
        signbit0.setBit(nb - 1);                         // just the sign bit
        return ctx.builder.CreateOr(
                    ctx.builder.CreateAnd(bits, ConstantInt::get(t, notsignbit)),
                    ctx.builder.CreateAnd(sbits, ConstantInt::get(t, signbit0)));
    }
    case flipsign_int: {
        // flipsign(x, y) = y >= 0 ? x : -x. Constant-fold when possible.
        ConstantInt *cx = dyn_cast<ConstantInt>(x);
        ConstantInt *cy = dyn_cast<ConstantInt>(y);
        if (cx && cy) {
            APInt ix = cx->getValue();
            APInt iy = cy->getValue();
            return ConstantInt::get(t, iy.isNonNegative() ? ix : -ix);
        }
        if (cy) {
            APInt iy = cy->getValue();
            return iy.isNonNegative() ? x : ctx.builder.CreateSub(ConstantInt::get(t, 0), x);
        }
        // Branch-free form: tmp = y >> (nbits-1) (all 0s or all 1s), then
        // (x + tmp) ^ tmp negates x exactly when y is negative.
        Value *tmp = ctx.builder.CreateAShr(y, ConstantInt::get(t, cast<IntegerType>(t)->getBitWidth() - 1));
        return ctx.builder.CreateXor(ctx.builder.CreateAdd(x, tmp), tmp);
    }

    // --- float rounding / sqrt via LLVM intrinsics ---
    case ceil_llvm: {
        Value *ceilintr = Intrinsic::getDeclaration(jl_Module, Intrinsic::ceil,
                                                    makeArrayRef(t));
        return ctx.builder.CreateCall(ceilintr, x);
    }
    case floor_llvm: {
        Value *floorintr = Intrinsic::getDeclaration(jl_Module, Intrinsic::floor,
                                                     makeArrayRef(t));
        return ctx.builder.CreateCall(floorintr, x);
    }
    case trunc_llvm: {
        Value *truncintr = Intrinsic::getDeclaration(jl_Module, Intrinsic::trunc,
                                                     makeArrayRef(t));
        return ctx.builder.CreateCall(truncintr, x);
    }
    case rint_llvm: {
        Value *rintintr = Intrinsic::getDeclaration(jl_Module, Intrinsic::rint,
                                                    makeArrayRef(t));
        return ctx.builder.CreateCall(rintintr, x);
    }
    case sqrt_llvm: {
        Value *sqrtintr = Intrinsic::getDeclaration(jl_Module, Intrinsic::sqrt,
                                                    makeArrayRef(t));
        return ctx.builder.CreateCall(sqrtintr, x);
    }

    default:
        assert(0 && "invalid intrinsic");
        abort();
    }
    assert(0 && "unreachable");
}