void NVPTXFloatMCExpr::PrintImpl(raw_ostream &OS) const {
  bool Ignored;
  unsigned NumHex;
  APFloat APF = getAPFloat();

  switch (Kind) {
  default: llvm_unreachable("Invalid kind!");
  case VK_NVPTX_SINGLE_PREC_FLOAT:
    OS << "0f";
    NumHex = 8;
    APF.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &Ignored);
    break;
  case VK_NVPTX_DOUBLE_PREC_FLOAT:
    OS << "0d";
    NumHex = 16;
    APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &Ignored);
    break;
  }

  APInt API = APF.bitcastToAPInt();
  std::string HexStr(utohexstr(API.getZExtValue()));
  if (HexStr.length() < NumHex)
    OS << std::string(NumHex - HexStr.length(), '0');
  OS << utohexstr(API.getZExtValue());
}

int SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    // Constants loaded via lgfi.
    if (isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llilf.
    if (isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llihf:
    if ((Imm.getZExtValue() & 0xffffffff) == 0)
      return TTI::TCC_Basic;

    return 2 * TTI::TCC_Basic;
  }

  return 4 * TTI::TCC_Basic;
}

void NVPTXFloatMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
  bool Ignored;
  unsigned NumHex;
  APFloat APF = getAPFloat();

  switch (Kind) {
  default: llvm_unreachable("Invalid kind!");
  case VK_NVPTX_HALF_PREC_FLOAT:
    // ptxas does not have a way to specify half-precision floats.
    // Instead we have to print and load fp16 constants as .b16
    OS << "0x";
    NumHex = 4;
    APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
    break;
  case VK_NVPTX_SINGLE_PREC_FLOAT:
    OS << "0f";
    NumHex = 8;
    APF.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, &Ignored);
    break;
  case VK_NVPTX_DOUBLE_PREC_FLOAT:
    OS << "0d";
    NumHex = 16;
    APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Ignored);
    break;
  }

  APInt API = APF.bitcastToAPInt();
  std::string HexStr(utohexstr(API.getZExtValue()));
  if (HexStr.length() < NumHex)
    OS << std::string(NumHex - HexStr.length(), '0');
  OS << utohexstr(API.getZExtValue());
}

int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}

void PTXInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
                                  raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(OpNo);
  if (Op.isImm()) {
    O << Op.getImm();
  } else if (Op.isFPImm()) {
    double Imm = Op.getFPImm();
    APFloat FPImm(Imm);
    APInt FPIntImm = FPImm.bitcastToAPInt();
    O << "0D";
    // PTX requires us to output the full 64 bits, even if the number is zero.
    if (FPIntImm.getZExtValue() > 0) {
      O << FPIntImm.toString(16, false);
    } else {
      O << "0000000000000000";
    }
  } else if (Op.isReg()) {
    printRegName(O, Op.getReg());
  } else {
    assert(Op.isExpr() && "unknown operand kind in printOperand");
    const MCExpr *Expr = Op.getExpr();
    if (const MCSymbolRefExpr *SymRefExpr = dyn_cast<MCSymbolRefExpr>(Expr)) {
      const MCSymbol &Sym = SymRefExpr->getSymbol();
      O << Sym.getName();
    } else {
      O << *Op.getExpr();
    }
  }
}

int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1, any i8 imm cost 1.
  if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
    return 1;
  if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constantpool.
  return 3;
}

unsigned ARMTTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Bits > 32)
    return 4;

  int32_t SImmVal = Imm.getSExtValue();
  uint32_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  } else if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  } else /*Thumb1*/ {
    if (SImmVal >= 0 && SImmVal < 256)
      return 1;
    if ((~ZImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
      return 2;
    // Load from constantpool.
    return 3;
  }
  return 2;
}

unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm.getBitWidth() <= 64 &&
      (isInt<32>(Imm.getSExtValue()) || isUInt<32>(Imm.getZExtValue())))
    return TCC_Basic;
  else
    return 2 * TCC_Basic;
}

/// Return true if it is OK to use SIToFPInst for an induction variable
/// with given initial and exit values.
static bool useSIToFPInst(ConstantFP &InitV, ConstantFP &ExitV,
                          uint64_t intIV, uint64_t intEV) {
  if (InitV.getValueAPF().isNegative() || ExitV.getValueAPF().isNegative())
    return true;

  // If the iteration range can be handled by SIToFPInst then use it.
  APInt Max = APInt::getSignedMaxValue(32);
  if (Max.getZExtValue() > static_cast<uint64_t>(abs64(intEV - intIV)))
    return true;

  return false;
}

int SystemZTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    // These get expanded to include a normal addition/subtraction.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    // These get expanded to include a normal multiplication.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}

error_code Archive::Child::getName(StringRef &Result) const {
  StringRef name = ToHeader(Data.data())->getName();
  // Check if it's a special name.
  if (name[0] == '/') {
    if (name.size() == 1) { // Linker member.
      Result = name;
      return object_error::success;
    }
    if (name.size() == 2 && name[1] == '/') { // String table.
      Result = name;
      return object_error::success;
    }
    // It's a long name.
    // Get the offset.
    APInt offset;
    name.substr(1).getAsInteger(10, offset);
    const char *addr = Parent->StringTable->Data.begin()
                       + sizeof(ArchiveMemberHeader)
                       + offset.getZExtValue();
    // Verify it.
    if (Parent->StringTable == Parent->end_children()
        || addr < (Parent->StringTable->Data.begin()
                   + sizeof(ArchiveMemberHeader))
        || addr > (Parent->StringTable->Data.begin()
                   + sizeof(ArchiveMemberHeader)
                   + Parent->StringTable->getSize()))
      return object_error::parse_failed;
    Result = addr;
    return object_error::success;
  } else if (name.startswith("#1/")) {
    APInt name_size;
    name.substr(3).getAsInteger(10, name_size);
    Result = Data.substr(0, name_size.getZExtValue());
    return object_error::success;
  }
  // It's a simple name.
  if (name[name.size() - 1] == '/')
    Result = name.substr(0, name.size() - 1);
  else
    Result = name;
  return object_error::success;
}

bool PointsToNodeFactory::matchGEPNode(const GEPOperator *I,
                                       const PointsToNode *N) const {
  if (const GEPPointsToNode *GEPNode = dyn_cast<GEPPointsToNode>(N)) {
    auto GEPNodeI = GEPNode->indices.begin(), GEPNodeE = GEPNode->indices.end();
    for (auto Index = I->idx_begin(), E = I->idx_end(); Index != E;
         ++Index, ++GEPNodeI) {
      if (GEPNodeI == GEPNodeE)
        return false;

      ConstantInt *Int = cast<ConstantInt>(Index);
      APInt a = Int->getValue(), b = *GEPNodeI;
      // We can't compare a and b directly here because they might have
      // different bitwidths, so we assume that the values fit into 64
      // bits and compare the zero-extended 64-bit values.
      if (a.getZExtValue() != b.getZExtValue())
        return false;
    }
    return GEPNodeI == GEPNodeE;
  }
  return false;
}

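// Illustrative aside, not part of matchGEPNode's source: APInt::operator==
// asserts that both operands have the same bit width, which is why the loop
// above compares zero-extended 64-bit values instead. The helper below is a
// hypothetical sketch of that comparison, assuming both indices have at most
// 64 active bits.
#include "llvm/ADT/APInt.h"

static bool sameIndexValue(const llvm::APInt &A, const llvm::APInt &B) {
  // e.g. an i32 5 and an i64 5 compare equal here, whereas A == B would
  // assert on the mismatched bit widths.
  return A.getZExtValue() == B.getZExtValue();
}
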
static std::string toString(const APFloat &FP) {
  // Print NaNs with custom payloads specially.
  if (FP.isNaN() &&
      !FP.bitwiseIsEqual(APFloat::getQNaN(FP.getSemantics())) &&
      !FP.bitwiseIsEqual(
          APFloat::getQNaN(FP.getSemantics(), /*Negative=*/true))) {
    APInt AI = FP.bitcastToAPInt();
    return std::string(AI.isNegative() ? "-" : "") + "nan:0x" +
           utohexstr(AI.getZExtValue() & (AI.getBitWidth() == 32
                                              ? INT64_C(0x007fffff)
                                              : INT64_C(0x000fffffffffffff)),
                     /*LowerCase=*/true);
  }

  // Use C99's hexadecimal floating-point representation.
  static const size_t BufBytes = 128;
  char buf[BufBytes];
  auto Written = FP.convertToHexString(
      buf, /*hexDigits=*/0, /*upperCase=*/false, APFloat::rmNearestTiesToEven);
  (void)Written;
  assert(Written != 0);
  assert(Written < BufBytes);
  return buf;
}

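// A hypothetical usage sketch for the helper above (not from the original
// file): it builds a single-precision NaN whose payload bits are 0x400001, so
// the custom-payload branch should yield "nan:0x400001"; an ordinary value
// such as 1.5f would instead go through convertToHexString(). Assumes a
// recent LLVM where APFloat::IEEEsingle() is an accessor function.
#include "llvm/ADT/APFloat.h"

static std::string nanExample() {
  llvm::APInt Bits(32, 0x7fc00001ULL);
  llvm::APFloat NaN(llvm::APFloat::IEEEsingle(), Bits);
  return toString(NaN); // expected: "nan:0x400001"
}
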
int SystemZTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    if (Idx == 0 && Imm.getBitWidth() <= 64) {
      // Any 8-bit immediate store can be implemented via mvi.
      if (BitSize == 8)
        return TTI::TCC_Free;
      // 16-bit immediate values can be stored via mvhhi/mvhi/mvghi.
      if (isInt<16>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::ICmp:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Comparisons against signed 32-bit immediates implemented via cgfi.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
      // Comparisons against unsigned 32-bit immediates implemented via clgfi.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Add:
  case Instruction::Sub:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use algfi/slgfi to add/subtract 32-bit unsigned immediates.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Or their negation, by swapping addition vs. subtraction.
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Mul:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use msgfi to multiply by 32-bit signed immediates.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Or:
  case Instruction::Xor:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Masks supported by oilf/xilf.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Masks supported by oihf/xihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0)
        return TTI::TCC_Free;
    }
    break;
  case Instruction::And:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Any 32-bit AND operation can be implemented via nilf.
      if (BitSize <= 32)
        return TTI::TCC_Free;
      // 64-bit masks supported by nilf.
      if (isUInt<32>(~Imm.getZExtValue()))
        return TTI::TCC_Free;
      // 64-bit masks supported by nilh.
      if ((Imm.getZExtValue() & 0xffffffff) == 0xffffffff)
        return TTI::TCC_Free;
      // Some 64-bit AND operations can be implemented via risbg.
      const SystemZInstrInfo *TII = ST->getInstrInfo();
      unsigned Start, End;
      if (TII->isRxSBGMask(Imm.getZExtValue(), BitSize, Start, End))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    // Always return TCC_Free for the shift value of a shift instruction.
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}

void ELFWriter::EmitGlobalConstant(const Constant *CV, ELFSection &GblS) {
  const TargetData *TD = TM.getTargetData();
  unsigned Size = TD->getTypeAllocSize(CV->getType());

  if (const ConstantArray *CVA = dyn_cast<ConstantArray>(CV)) {
    for (unsigned i = 0, e = CVA->getNumOperands(); i != e; ++i)
      EmitGlobalConstant(CVA->getOperand(i), GblS);
    return;
  } else if (isa<ConstantAggregateZero>(CV)) {
    GblS.emitZeros(Size);
    return;
  } else if (const ConstantStruct *CVS = dyn_cast<ConstantStruct>(CV)) {
    EmitGlobalConstantStruct(CVS, GblS);
    return;
  } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
    APInt Val = CFP->getValueAPF().bitcastToAPInt();
    if (CFP->getType()->isDoubleTy())
      GblS.emitWord64(Val.getZExtValue());
    else if (CFP->getType()->isFloatTy())
      GblS.emitWord32(Val.getZExtValue());
    else if (CFP->getType()->isX86_FP80Ty()) {
      unsigned PadSize = TD->getTypeAllocSize(CFP->getType()) -
                         TD->getTypeStoreSize(CFP->getType());
      GblS.emitWordFP80(Val.getRawData(), PadSize);
    } else if (CFP->getType()->isPPC_FP128Ty())
      llvm_unreachable("PPC_FP128Ty global emission not implemented");
    return;
  } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
    if (Size == 1)
      GblS.emitByte(CI->getZExtValue());
    else if (Size == 2)
      GblS.emitWord16(CI->getZExtValue());
    else if (Size == 4)
      GblS.emitWord32(CI->getZExtValue());
    else
      EmitGlobalConstantLargeInt(CI, GblS);
    return;
  } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(CV)) {
    const VectorType *PTy = CP->getType();
    for (unsigned I = 0, E = PTy->getNumElements(); I < E; ++I)
      EmitGlobalConstant(CP->getOperand(I), GblS);
    return;
  } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
    // Resolve a constant expression which returns a (Constant, Offset)
    // pair. If 'Res.first' is a GlobalValue, emit a relocation with
    // the offset 'Res.second', otherwise emit a global constant as is
    // always done for non-constant-expression types.
    CstExprResTy Res = ResolveConstantExpr(CE);
    const Constant *Op = Res.first;

    if (isa<GlobalValue>(Op))
      EmitGlobalDataRelocation(cast<const GlobalValue>(Op),
                               TD->getTypeAllocSize(Op->getType()),
                               GblS, Res.second);
    else
      EmitGlobalConstant(Op, GblS);
    return;
  } else if (CV->getType()->getTypeID() == Type::PointerTyID) {
    // Fill the data entry with zeros or emit a relocation entry.
    if (isa<ConstantPointerNull>(CV))
      GblS.emitZeros(Size);
    else
      EmitGlobalDataRelocation(cast<const GlobalValue>(CV), Size, GblS);
    return;
  } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
    // This is a constant address for a global variable or function and
    // therefore must be referenced using a relocation entry.
    EmitGlobalDataRelocation(GV, Size, GblS);
    return;
  }

  std::string msg;
  raw_string_ostream ErrorMsg(msg);
  ErrorMsg << "Constant unimp for type: " << *CV->getType();
  report_fatal_error(ErrorMsg.str());
}

int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    // Fallthrough
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
  if (RoundToAlign && Align)
    return APInt(IntTyBits, RoundUpToAlignment(Size.getZExtValue(), Align));
  return Size;
}

int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
  if (Options.RoundToAlign && Align)
    return APInt(IntTyBits, alignTo(Size.getZExtValue(), Align));
  return Size;
}

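// Illustrative aside (not part of the original file): both align() variants
// above rely on a round-up-to-multiple helper (RoundUpToAlignment in older
// LLVM, llvm::alignTo in newer releases). The hypothetical function below
// sketches that behaviour for a concrete size and alignment.
#include "llvm/Support/MathExtras.h"

static uint64_t alignedSizeExample() {
  // alignTo(10, 8) == 16 and alignTo(16, 8) == 16: sizes are rounded up to
  // the next multiple of the alignment, and exact multiples are unchanged.
  return llvm::alignTo(10, 8);
}
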
/// \brief Fold binary operations.
///
/// The list of operations we constant fold might not be complete. Start with
/// folding the operations used by the standard library.
static SILInstruction *constantFoldBinary(BuiltinInst *BI,
                                          BuiltinValueKind ID,
                                          Optional<bool> &ResultsInError) {
  switch (ID) {
  default:
    llvm_unreachable("Not all BUILTIN_BINARY_OPERATIONs are covered!");

  // Not supported yet (not easily computable for APInt).
  case BuiltinValueKind::ExactSDiv:
  case BuiltinValueKind::ExactUDiv:
    return nullptr;

  // Not supported now.
  case BuiltinValueKind::FRem:
    return nullptr;

  // Fold constant division operations and report div by zero.
  case BuiltinValueKind::SDiv:
  case BuiltinValueKind::SRem:
  case BuiltinValueKind::UDiv:
  case BuiltinValueKind::URem: {
    return constantFoldAndCheckDivision(BI, ID, ResultsInError);
  }

  // Are there valid uses for these in stdlib?
  case BuiltinValueKind::Add:
  case BuiltinValueKind::Mul:
  case BuiltinValueKind::Sub:
    return nullptr;

  case BuiltinValueKind::And:
  case BuiltinValueKind::AShr:
  case BuiltinValueKind::LShr:
  case BuiltinValueKind::Or:
  case BuiltinValueKind::Shl:
  case BuiltinValueKind::Xor: {
    OperandValueArrayRef Args = BI->getArguments();
    auto *LHS = dyn_cast<IntegerLiteralInst>(Args[0]);
    auto *RHS = dyn_cast<IntegerLiteralInst>(Args[1]);
    if (!RHS || !LHS)
      return nullptr;
    APInt LHSI = LHS->getValue();
    APInt RHSI = RHS->getValue();

    bool IsShift = ID == BuiltinValueKind::AShr ||
                   ID == BuiltinValueKind::LShr ||
                   ID == BuiltinValueKind::Shl;

    // Reject shifting all significant bits.
    if (IsShift && RHSI.getZExtValue() >= LHSI.getBitWidth()) {
      diagnose(BI->getModule().getASTContext(),
               RHS->getLoc().getSourceLoc(),
               diag::shifting_all_significant_bits);

      ResultsInError = Optional<bool>(true);
      return nullptr;
    }

    APInt ResI = constantFoldBitOperation(LHSI, RHSI, ID);
    // Add the literal instruction to represent the result.
    SILBuilderWithScope B(BI);
    return B.createIntegerLiteral(BI->getLoc(), BI->getType(), ResI);
  }

  case BuiltinValueKind::FAdd:
  case BuiltinValueKind::FDiv:
  case BuiltinValueKind::FMul:
  case BuiltinValueKind::FSub: {
    OperandValueArrayRef Args = BI->getArguments();
    auto *LHS = dyn_cast<FloatLiteralInst>(Args[0]);
    auto *RHS = dyn_cast<FloatLiteralInst>(Args[1]);
    if (!RHS || !LHS)
      return nullptr;
    APFloat LHSF = LHS->getValue();
    APFloat RHSF = RHS->getValue();
    switch (ID) {
    default:
      llvm_unreachable("Not all cases are covered!");
    case BuiltinValueKind::FAdd:
      LHSF.add(RHSF, APFloat::rmNearestTiesToEven);
      break;
    case BuiltinValueKind::FDiv:
      LHSF.divide(RHSF, APFloat::rmNearestTiesToEven);
      break;
    case BuiltinValueKind::FMul:
      LHSF.multiply(RHSF, APFloat::rmNearestTiesToEven);
      break;
    case BuiltinValueKind::FSub:
      LHSF.subtract(RHSF, APFloat::rmNearestTiesToEven);
      break;
    }

    // Add the literal instruction to represent the result.
    SILBuilderWithScope B(BI);
    return B.createFloatLiteral(BI->getLoc(), BI->getType(), LHSF);
  }
  }
}