bool MeasureMetric::runOnFunction(Function &F) {
  for (Function::iterator b = F.begin(), be = F.end(); b != be; ++b) {
    for (BasicBlock::iterator i = b->begin(), ie = b->end(); i != ie; ++i) {
      Instruction *inst = &*i;
      if (isa<CastInst>(inst)) {
        castInstCount++;
      } else if (FCmpInst *target = dyn_cast<FCmpInst>(inst)) {
        Value *val1 = target->getOperand(0);
        increaseCounter(val1->getType(), cmpOp);
      } else if (BinaryOperator *target = dyn_cast<BinaryOperator>(inst)) {
        Value *val1 = target->getOperand(0);
        increaseCounter(val1->getType(), arithOp);
      } else if (LoadInst *target = dyn_cast<LoadInst>(inst)) {
        Value *val1 = target->getPointerOperand();
        // Prefer cast<> over a C-style cast: a load's pointer operand always
        // has pointer type, and cast<> asserts that in debug builds.
        PointerType *pointerType = cast<PointerType>(val1->getType());
        increaseCounter(pointerType->getElementType(), loadOp);
      } else if (StoreInst *target = dyn_cast<StoreInst>(inst)) {
        Value *val1 = target->getOperand(0);
        increaseCounter(val1->getType(), storeOp);
      }
    }
  }
  return false;
}
bool MemorySafetyChecker::runOnModule(Module &m) {
  DataLayout *dataLayout = new DataLayout(&m);
  Function *memorySafetyFunction = m.getFunction(Naming::MEMORY_SAFETY_FUNCTION);
  assert(memorySafetyFunction != NULL && "Couldn't find memory safety function");
  for (auto &F : m) {
    if (!Naming::isSmackName(F.getName())) {
      for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
        Value *pointer = NULL;
        if (LoadInst *li = dyn_cast<LoadInst>(&*I)) {
          pointer = li->getPointerOperand();
        } else if (StoreInst *si = dyn_cast<StoreInst>(&*I)) {
          pointer = si->getPointerOperand();
        }
        if (pointer) {
          // Finding the exact type of the second argument to our memory
          // safety function.
          Type *sizeType = memorySafetyFunction->getFunctionType()->getParamType(1);
          PointerType *pointerType = cast<PointerType>(pointer->getType());
          uint64_t storeSize =
              dataLayout->getTypeStoreSize(pointerType->getPointerElementType());
          Value *size = ConstantInt::get(sizeType, storeSize);
          Type *voidPtrTy =
              PointerType::getUnqual(IntegerType::getInt8Ty(F.getContext()));
          CastInst *castPointer =
              CastInst::Create(Instruction::BitCast, pointer, voidPtrTy, "", &*I);
          Value *args[] = {castPointer, size};
          CallInst::Create(memorySafetyFunction, ArrayRef<Value *>(args, 2), "", &*I);
        }
      }
    }
  }
  return true;
}
CallInst *IRBuilderBase::CreateGCStatepoint(Value *ActualCallee,
                                            ArrayRef<Value *> CallArgs,
                                            ArrayRef<Value *> DeoptArgs,
                                            ArrayRef<Value *> GCArgs,
                                            const Twine &Name) {
  // Extract out the type of the callee.
  PointerType *FuncPtrType = cast<PointerType>(ActualCallee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = BB->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg).
  Type *ArgTypes[] = {FuncPtrType};
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint, ArgTypes);

  std::vector<llvm::Value *> args;
  args.push_back(ActualCallee);
  args.push_back(getInt32(CallArgs.size()));
  args.push_back(getInt32(0 /*unused*/));
  args.insert(args.end(), CallArgs.begin(), CallArgs.end());
  args.push_back(getInt32(DeoptArgs.size()));
  args.insert(args.end(), DeoptArgs.begin(), DeoptArgs.end());
  args.insert(args.end(), GCArgs.begin(), GCArgs.end());

  return createCallHelper(FnStatepoint, args, this, Name);
}
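// Illustrative only (not from the source): how a caller might wrap an
// ordinary call through the helper above. The names `B`, `M`, `Arg`, and
// `GCPtr` are assumptions for this sketch, as is the callee "foo".
static CallInst *wrapInStatepoint(IRBuilderBase &B, Module *M, Value *Arg,
                                  Value *GCPtr) {
  Value *Callee = M->getFunction("foo"); // some callable value
  Value *CallArgs[] = {Arg};             // arguments to the wrapped call
  Value *GCArgs[] = {GCPtr};             // pointers the GC may relocate
  return B.CreateGCStatepoint(Callee, CallArgs, /*DeoptArgs=*/{}, GCArgs,
                              "statepoint");
}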
void SelectionDAGBuilder::visitGCResult(const CallInst &CI) {
  // The result value of the gc_result is simply the result of the actual
  // call. We've already emitted this, so just grab the value.
  Instruction *I = cast<Instruction>(CI.getArgOperand(0));
  assert(isStatepoint(I) && "first argument must be a statepoint token");
  if (I->getParent() != CI.getParent()) {
    // The statepoint is in a different basic block, so we should have stored
    // the call result in a virtual register.
    // We cannot use the default getValue() functionality to copy the value
    // from this register because the statepoint and actual call return types
    // can be different, and getValue() would use CopyFromReg of the wrong
    // type, which is always i32 in our case.
    PointerType *CalleeType =
        cast<PointerType>(ImmutableStatepoint(I).getCalledValue()->getType());
    Type *RetTy =
        cast<FunctionType>(CalleeType->getElementType())->getReturnType();
    SDValue CopyFromReg = getCopyFromRegs(I, RetTy);
    assert(CopyFromReg.getNode());
    setValue(&CI, CopyFromReg);
  } else {
    setValue(&CI, getValue(I));
  }
}
bool GlobalMerge::doInitialization(Module &M) {
  DenseMap<unsigned, SmallVector<GlobalVariable *, 16>> Globals, ConstGlobals,
      BSSGlobals;
  const DataLayout *TD = TLI->getDataLayout();
  unsigned MaxOffset = TLI->getMaximalGlobalOffset();
  bool Changed = false;

  // Grab all non-const globals.
  for (Module::global_iterator I = M.global_begin(), E = M.global_end();
       I != E; ++I) {
    // Merging is safe for "normal" internal globals only.
    if (!I->hasLocalLinkage() || I->isThreadLocal() || I->hasSection())
      continue;

    PointerType *PT = dyn_cast<PointerType>(I->getType());
    assert(PT && "Global variable is not a pointer!");
    unsigned AddressSpace = PT->getAddressSpace();

    // Ignore fancy-aligned globals for now.
    unsigned Alignment = TD->getPreferredAlignment(I);
    Type *Ty = I->getType()->getElementType();
    if (Alignment > TD->getABITypeAlignment(Ty))
      continue;

    // Ignore all 'special' globals.
    if (I->getName().startswith("llvm.") || I->getName().startswith(".llvm."))
      continue;

    if (TD->getTypeAllocSize(Ty) < MaxOffset) {
      if (TargetLoweringObjectFile::getKindForGlobal(I, TLI->getTargetMachine())
              .isBSSLocal())
        BSSGlobals[AddressSpace].push_back(I);
      else if (I->isConstant())
        ConstGlobals[AddressSpace].push_back(I);
      else
        Globals[AddressSpace].push_back(I);
    }
  }

  for (DenseMap<unsigned, SmallVector<GlobalVariable *, 16>>::iterator
           I = Globals.begin(), E = Globals.end(); I != E; ++I)
    if (I->second.size() > 1)
      Changed |= doMerge(I->second, M, false, I->first);

  for (DenseMap<unsigned, SmallVector<GlobalVariable *, 16>>::iterator
           I = BSSGlobals.begin(), E = BSSGlobals.end(); I != E; ++I)
    if (I->second.size() > 1)
      Changed |= doMerge(I->second, M, false, I->first);

  // FIXME: This currently breaks the EH processing due to the way the
  // typeinfo detection works. We might want to detect the TIs and ignore
  // them in the future.
  // if (ConstGlobals.size() > 1)
  //   Changed |= doMerge(ConstGlobals, M, true);

  return Changed;
}
/* This routine extracts the filename of the file passed to the open call.
   It has been borrowed from the LLPE toolchain by Smowton. */
bool getConstantStringInfo(const Value *V, StringRef &Str) {
  // Look through bitcast instructions and GEPs.
  V = V->stripPointerCasts();

  // If the value is a GEP instruction or constant expression, treat it as an
  // offset into the underlying global.
  uint64_t StartIdx = 0;
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // Make sure the GEP has exactly three arguments.
    if (GEP->getNumOperands() != 3)
      return false;

    // Make sure the index-ee is a pointer to an array of i8.
    PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
    ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
    if (AT == 0 || !AT->getElementType()->isIntegerTy(8))
      return false;

    // Check to make sure that the first operand of the GEP is an integer and
    // has value 0 so that we are sure we're indexing into the initializer.
    const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
    if (FirstIdx == 0 || !FirstIdx->isZero())
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array. If this occurs, we can't say anything meaningful about
    // the string.
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;

    // Descend into the GEP's pointer operand so the checks below examine the
    // underlying global rather than the GEP itself.
    V = GEP->getOperand(0)->stripPointerCasts();
  }

  // The GEP instruction, constant or instruction, must reference a global
  // variable that is a constant and is initialized. The referenced constant
  // initializer is the array that we'll use for optimization.
  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return false;

  // Handle the all-zeros case.
  if (GV->getInitializer()->isNullValue()) {
    // This is a degenerate case. The initializer is constant zero, so the
    // length of the string must be zero.
    Str = "";
    return true;
  }

  // Must be a ConstantDataArray.
  const ConstantDataArray *Array =
      dyn_cast<ConstantDataArray>(GV->getInitializer());
  if (Array == 0 || !Array->isString())
    return false;

  // Start out with the entire array, then drop the leading StartIdx bytes
  // that the GEP skipped over.
  Str = Array->getAsString().substr(StartIdx);
  return true;
}
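// Hypothetical caller, matching the comment above: recover the filename
// argument of an open() call. `CI` (the CallInst for open) is an assumption
// for this sketch.
static void reportOpenedFile(const CallInst *CI) {
  StringRef Filename;
  if (getConstantStringInfo(CI->getArgOperand(0), Filename))
    errs() << "open() called on \"" << Filename << "\"\n";
}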
/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const DataLayout *DL) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  PointerType *DestTy = cast<PointerType>(CI->getType());
  Type *DestPTy = DestTy->getElementType();
  if (PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
        DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed. Check to
      // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
      // constants.
      if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Type *IdxTy = DL ? DL->getIntPtrType(SrcTy)
                             : Type::getInt64Ty(SrcTy->getContext());
            Value *Idx = Constant::getNullValue(IdxTy);
            Value *Idxs[2] = {Idx, Idx};
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getDataLayout() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
           SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is
          // then cast to a pointer; this pessimizes pointer analysis a lot.
          (SrcPTy->isPtrOrPtrVectorTy() ==
           LI.getType()->isPtrOrPtrVectorTy()) &&
          IC.getDataLayout()->getTypeSizeInBits(SrcPTy) ==
              IC.getDataLayout()->getTypeSizeInBits(DestPTy)) {
        // Okay, we are casting from one integer or pointer type to another of
        // the same size. Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
            IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        NewLoad->setAlignment(LI.getAlignment());
        NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}
Type *VectorBlockGenerator::getVectorPtrTy(const Value *Val, int Width) {
  PointerType *PointerTy = dyn_cast<PointerType>(Val->getType());
  assert(PointerTy && "PointerType expected");

  Type *ScalarType = PointerTy->getElementType();
  VectorType *VectorType = VectorType::get(ScalarType, Width);

  return PointerType::getUnqual(VectorType);
}
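// Illustrative only (names `BlockGen`, `Builder`, and `ScalarAddr` are
// assumptions): a vectorizing block generator would use the helper above to
// widen a scalar access pointer. A float* address and Width = 4 yield a
// <4 x float>* to cast the address to.
static Value *widenAccess(VectorBlockGenerator &BlockGen, IRBuilder<> &Builder,
                          Value *ScalarAddr) {
  Type *VecPtrTy = BlockGen.getVectorPtrTy(ScalarAddr, 4);
  return Builder.CreateBitCast(ScalarAddr, VecPtrTy, "vector_ptr");
}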
/// \brief Create a call to a Masked Store intrinsic.
/// \p Val   - data to be stored,
/// \p Ptr   - base pointer for the store
/// \p Align - alignment of the destination location
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           unsigned Align, Value *Mask) {
  PointerType *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = PtrTy->getElementType();
  assert(DataTy->isVectorTy() && "Ptr should point to a vector");
  Type *OverloadedTypes[] = {DataTy, PtrTy};
  Value *Ops[] = {Val, Ptr, getInt32(Align), Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}
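// Illustrative only: storing a <4 x float> value through a mask that keeps
// just the first two lanes. `B` (an IRBuilder), `Vec`, and `VecPtr` are
// assumptions for this sketch; the 16-byte alignment is likewise assumed.
static CallInst *storeLowLanes(IRBuilder<> &B, Value *Vec, Value *VecPtr) {
  Constant *MaskElts[] = {B.getTrue(), B.getTrue(), B.getFalse(),
                          B.getFalse()};
  Value *Mask = ConstantVector::get(MaskElts); // <i1 1, i1 1, i1 0, i1 0>
  return B.CreateMaskedStore(Vec, VecPtr, /*Align=*/16, Mask);
}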
AllocaInst *Variables::changeLocal(Value *value, ArrayType *newType) {
  AllocaInst *oldTarget = dyn_cast<AllocaInst>(value);
  PointerType *oldPointerType = dyn_cast<PointerType>(oldTarget->getType());
  ArrayType *oldType = dyn_cast<ArrayType>(oldPointerType->getElementType());
  AllocaInst *newTarget = NULL;

  errs() << "Changing the precision of variable \"" << oldTarget->getName()
         << "\" from " << *oldType << " to " << *newType << ".\n";

  if (newType->getElementType()->getTypeID() !=
      oldType->getElementType()->getTypeID()) {
    newTarget = new AllocaInst(newType, getInt32(1), "", oldTarget);

    // We are not calling getAlignment because in this case double requires
    // 16. Investigate further.
    unsigned alignment;
    switch (newType->getElementType()->getTypeID()) {
    case Type::FloatTyID:
      alignment = 4;
      break;
    case Type::DoubleTyID:
      alignment = 16;
      break;
    case Type::X86_FP80TyID:
      alignment = 16;
      break;
    default:
      alignment = 0;
    }
    newTarget->setAlignment(alignment); // depends on type? 8 for float? 16 for double?
    newTarget->takeName(oldTarget);

    // Iterate through the instructions that use the old AllocaInst.
    vector<Instruction *> erase;
    Value::use_iterator it = oldTarget->use_begin();
    for (; it != oldTarget->use_end(); it++) {
      bool is_erased = Transformer::transform(it, newTarget, oldTarget,
                                              newType, oldType, alignment);
      if (!is_erased)
        erase.push_back(dyn_cast<Instruction>(*it));
    }

    // Erase the uses of the old instruction.
    for (unsigned int i = 0; i < erase.size(); i++) {
      erase[i]->eraseFromParent();
    }

    // Erase the old instruction.
    // oldTarget->eraseFromParent();
  } else {
    errs() << "\tNo changes required.\n";
  }
  return newTarget;
}
bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (!TM || skipFunction(F))
    return false;

  FunctionType *FTy = F.getFunctionType();

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemAvailable = 0;
      DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                      "local memory disabled.\n");
      return false;
    }
  }

  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);
  LocalMemAvailable = ST.getLocalMemorySize();
  if (LocalMemAvailable == 0)
    return false;

  // Check how much local memory is being used by global objects.
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (User *U : GV.users()) {
      Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        LocalMemAvailable -=
            Mod->getDataLayout().getTypeAllocSize(GV.getValueType());
        break;
      }
    }
  }

  LocalMemAvailable = std::max(0, LocalMemAvailable);
  DEBUG(dbgs() << LocalMemAvailable << " bytes free in local memory.\n");

  BasicBlock &EntryBB = *F.begin();
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E;) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&*I);
    ++I;
    if (AI)
      handleAlloca(*AI);
  }

  return true;
}
SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
  // No interprocedural analysis is done at the moment.
  if (!A.hasByValAttr()) {
    ++ObjectVisitorArgument;
    return unknown();
  }
  PointerType *PT = cast<PointerType>(A.getType());
  APInt Size(IntTyBits, TD->getTypeAllocSize(PT->getElementType()));
  return std::make_pair(align(Size, A.getParamAlignment()), Zero);
}
Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
  PointerType *PT = cast<PointerType>(Ptr->getType());
  if (PT->getElementType()->isIntegerTy(8))
    return Ptr;

  // Otherwise, we need to insert a bitcast.
  PT = getInt8PtrTy(PT->getAddressSpace());
  BitCastInst *BCI = new BitCastInst(Ptr, PT, "");
  BB->getInstList().insert(InsertPt, BCI);
  SetInstDebugLocation(BCI);
  return BCI;
}
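// A minimal sketch (not part of IRBuilderBase) of the same canonicalization
// using only the public IRBuilder API: memory intrinsics expect an i8* in
// the pointer's original address space, so anything else is bitcast first.
static Value *castToInt8Ptr(IRBuilder<> &B, Value *Ptr) {
  PointerType *PT = cast<PointerType>(Ptr->getType());
  if (PT->getElementType()->isIntegerTy(8))
    return Ptr; // already i8* in the right address space
  return B.CreateBitCast(Ptr, B.getInt8PtrTy(PT->getAddressSpace()));
}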
/// \brief Create a call to a Masked Load intrinsic.
/// \p Ptr      - base pointer for the load
/// \p Align    - alignment of the source location
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  PointerType *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = PtrTy->getElementType();
  assert(DataTy->isVectorTy() && "Ptr should point to a vector");
  if (!PassThru)
    PassThru = UndefValue::get(DataTy);
  Type *OverloadedTypes[] = {DataTy, PtrTy};
  Value *Ops[] = {Ptr, getInt32(Align), Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops, OverloadedTypes,
                               Name);
}
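// Illustrative counterpart to the masked-store sketch above: load only the
// lanes selected by `Mask`, filling the masked-off lanes with zeros via the
// pass-through operand. `B`, `VecPtr`, and `Mask` are assumptions; `VecPtr`
// must point to a vector type.
static Value *loadSelectedLanes(IRBuilder<> &B, Value *VecPtr, Value *Mask) {
  Type *VecTy = cast<PointerType>(VecPtr->getType())->getElementType();
  Value *Zero = Constant::getNullValue(VecTy); // fills masked-off lanes
  return B.CreateMaskedLoad(VecPtr, /*Align=*/16, Mask, Zero, "masked.ld");
}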
QualType TypeContext::getPointerType(QualType ref) {
  assert(ref.isValid());
  for (unsigned i = 0; i < types.size(); i++) {
    Type *t = types[i];
    if (isa<PointerType>(t)) {
      PointerType *P = cast<PointerType>(t);
      if (P->getPointeeType() == ref)
        return t;
    }
  }
  Type *N = new PointerType(ref);
  if (ref->hasCanonicalType())
    N->setCanonicalType(N);
  return add(N);
}
bool AMDGPURewriteOutArguments::isOutArgumentCandidate(Argument &Arg) const {
  const unsigned MaxOutArgSizeBytes = 4 * MaxNumRetRegs;
  PointerType *ArgTy = dyn_cast<PointerType>(Arg.getType());

  // TODO: It might be useful for any out arguments, not just privates.
  if (!ArgTy || (ArgTy->getAddressSpace() != DL->getAllocaAddrSpace() &&
                 !AnyAddressSpace) ||
      Arg.hasByValAttr() || Arg.hasStructRetAttr() ||
      DL->getTypeStoreSize(ArgTy->getPointerElementType()) >
          MaxOutArgSizeBytes) {
    return false;
  }

  return checkArgumentUses(Arg);
}
bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  const FunctionType *FTy = F.getFunctionType();

  LocalMemAvailable = ST.getLocalMemorySize();

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    const Type *ParamTy = FTy->getParamType(i);
    if (ParamTy->isPointerTy() &&
        ParamTy->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemAvailable = 0;
      DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                      "local memory disabled.\n");
      break;
    }
  }

  if (LocalMemAvailable > 0) {
    // Check how much local memory is being used by global objects.
    for (Module::global_iterator I = Mod->global_begin(), E = Mod->global_end();
         I != E; ++I) {
      GlobalVariable *GV = I;
      PointerType *GVTy = GV->getType();
      if (GVTy->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
        continue;
      for (Value::use_iterator U = GV->use_begin(), UE = GV->use_end();
           U != UE; ++U) {
        Instruction *Use = dyn_cast<Instruction>(*U);
        if (!Use)
          continue;
        if (Use->getParent()->getParent() == &F)
          LocalMemAvailable -=
              Mod->getDataLayout()->getTypeAllocSize(GVTy->getElementType());
      }
    }
  }

  LocalMemAvailable = std::max(0, LocalMemAvailable);
  DEBUG(dbgs() << LocalMemAvailable << " bytes free in local memory.\n");

  visit(F);

  return false;
}
inline PointerType clone_ptr(PointerType p, user_function *udf,
                             expr::substitution_t &s) {
  return static_cast<PointerType>(p->clone(udf, s).release());
}
void CastSizeChecker::PreVisitCastExpr(CheckerContext &C, const CastExpr *CE) {
  const Expr *E = CE->getSubExpr();
  ASTContext &Ctx = C.getASTContext();
  QualType ToTy = Ctx.getCanonicalType(CE->getType());
  PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());

  if (!ToPTy)
    return;

  QualType ToPointeeTy = ToPTy->getPointeeType();

  const GRState *state = C.getState();
  const MemRegion *R = state->getSVal(E).getAsRegion();
  if (R == 0)
    return;

  const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
  if (SR == 0)
    return;

  ValueManager &ValMgr = C.getValueManager();
  SVal Extent = SR->getExtent(ValMgr);

  SValuator &SVator = ValMgr.getSValuator();
  const llvm::APSInt *ExtentInt = SVator.getKnownValue(state, Extent);
  if (!ExtentInt)
    return;

  CharUnits RegionSize = CharUnits::fromQuantity(ExtentInt->getSExtValue());
  CharUnits TypeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);

  // Ignore void, and a few other un-sizeable types.
  if (TypeSize.isZero())
    return;

  if (RegionSize % TypeSize != 0) {
    if (ExplodedNode *N = C.GenerateSink()) {
      if (!BT)
        BT = new BuiltinBug("Cast region with wrong size.",
                            "Cast a region whose size is not a multiple of"
                            " the destination type size.");
      RangedBugReport *R = new RangedBugReport(*BT, BT->getDescription(), N);
      R->addRange(CE->getSourceRange());
      C.EmitReport(R);
    }
  }
}
bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (!TM || F.hasFnAttribute(Attribute::OptimizeNone))
    return false;

  FunctionType *FTy = F.getFunctionType();

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemAvailable = 0;
      DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                      "local memory disabled.\n");
      return false;
    }
  }

  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);
  LocalMemAvailable = ST.getLocalMemorySize();
  if (LocalMemAvailable == 0)
    return false;

  // Check how much local memory is being used by global objects.
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (Use &U : GV.uses()) {
      Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F)
        LocalMemAvailable -=
            Mod->getDataLayout().getTypeAllocSize(GV.getValueType());
    }
  }

  LocalMemAvailable = std::max(0, LocalMemAvailable);
  DEBUG(dbgs() << LocalMemAvailable << " bytes free in local memory.\n");

  visit(F);

  return true;
}
TEST(AttrOrderTest, pointerAttributes4) {
  // func(restrict const int*)
  const char *s = "_Z4funcPrKi";

  FunctionDescriptor fd;
  RefParamType primitiveInt(new PrimitiveType(PRIMITIVE_INT));
  PointerType *ptrInt = new PointerType(primitiveInt);
  ptrInt->setQualifier(ATTR_RESTRICT, true);
  ptrInt->setQualifier(ATTR_CONST, true);
  RefParamType ptrIntRef(ptrInt);

  fd.name = "func";
  fd.parameters.push_back(ptrIntRef);
  std::string mangled = mangle(fd);

  ASSERT_STREQ(s, mangled.c_str());
}
static bool CanCheckValue(Value *V) {
  if (isa<Constant>(V))
    return false;

  Type *ValTy = V->getType();

  // Pointers are generally not valid to cross-check, since they may vary due
  // to randomization. Floating point values might not compare exactly
  // identical across variants, so ignore them for now. Thus, we're left with
  // integers.
  if (!ValTy->isIntegerTy())
    return false;

  Value *PointerOperand = nullptr;
  if (auto *I = dyn_cast<LoadInst>(V))
    PointerOperand = I->getPointerOperand();
  else if (auto *I = dyn_cast<VAArgInst>(V))
    PointerOperand = I->getPointerOperand();
  else if (auto *I = dyn_cast<AtomicCmpXchgInst>(V))
    PointerOperand = I->getPointerOperand();

  if (PointerOperand) {
    auto LoadedValue = PointerOperand->stripPointerCasts();

    // Check if we are loading a pointer that has been cast as an int.
    PointerType *OrigType = dyn_cast<PointerType>(LoadedValue->getType());
    if (OrigType && OrigType->getElementType()->isPointerTy())
      return false;

    // Check that we aren't loading a NoCrossCheck global.
    auto GV = dyn_cast<GlobalVariable>(LoadedValue);
    if (GV && GV->isNoCrossCheck())
      return false;
  }

  for (auto I = V->user_begin(), E = V->user_end(); I != E; I++) {
    auto CI = dyn_cast<CastInst>(*I);
    if (CI && !CI->isIntegerCast())
      return false;
  }

  if (HasTBAAPointerAccess(V))
    return false;

  return true;
}
void KisFavoriteResourceManager::removingResource(PointerType resource) {
  if (m_blockUpdates) {
    return;
  }
  if (m_favoritePresetsList.contains(resource.data())) {
    updateFavoritePresets();
  }
}
Type *IndexExpression::GetType() {
  Type *containerType = Container->GetType();
  if (typeid(*containerType) == typeid(ArrayType)) {
    ArrayType *arrayType = dynamic_cast<ArrayType *>(containerType);
    return arrayType->GetElementType();
  } else if (typeid(*containerType) == typeid(PointerType)) {
    PointerType *pointerType = dynamic_cast<PointerType *>(containerType);
    return pointerType->GetUnderlyingType();
  } else {
    abort();
  }
}
DataType *ExportPass::CloneDataType(DataType *t) {
  assert(t != NULL);
  PointerType *pointerClone = dynamic_cast<PointerType *>(t);
  ReferenceType *referenceClone = dynamic_cast<ReferenceType *>(t);
  ArrayType *arrayClone = dynamic_cast<ArrayType *>(t);
  if (pointerClone != NULL) {
    QualifiedType *refType =
        dynamic_cast<QualifiedType *>(pointerClone->get_reference_type());
    assert(refType != NULL);
    DataType *cloneType = CloneDataType(refType->get_base_type());
    assert(cloneType != NULL);
    return create_pointer_type(theEnv, IInteger(32), 0,
                               create_qualified_type(theEnv, cloneType));
  }
  if (referenceClone != NULL) {
    QualifiedType *refType =
        dynamic_cast<QualifiedType *>(referenceClone->get_reference_type());
    assert(refType != NULL);
    DataType *clonedType = CloneDataType(refType->get_base_type());
    return create_reference_type(theEnv, IInteger(32), 0,
                                 create_qualified_type(theEnv, clonedType));
  }
  if (arrayClone != NULL) {
    QualifiedType *elementType = arrayClone->get_element_type();
    DataType *internalType = CloneDataType(elementType->get_base_type());
    QualifiedType *finalQual = create_qualified_type(theEnv, internalType);
    return create_pointer_type(theEnv, IInteger(32), 0, finalQual);
  }
  return dynamic_cast<DataType *>(t->deep_clone());
}
void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getLoadStorePointerOperand(&I);
      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be
      // overly conservative. For full groups, wrapping should be ok since if
      // we would wrap around the address space we would do a memory access at
      // nullptr even without the transformation. The wrapping checks are
      // therefore deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      unsigned Align = getLoadStoreAlignment(&I);
      if (!Align)
        Align = DL.getABITypeAlignment(PtrTy->getElementType());

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
    }
}
Value *GenericToNVVM::getOrInsertCVTA(Module *M, Function *F,
                                      GlobalVariable *GV,
                                      IRBuilder<> &Builder) {
  PointerType *GVType = GV->getType();
  Value *CVTA = nullptr;

  // See if the address space conversion requires the operand to be bitcast
  // to i8 addrspace(n)* first.
  EVT ExtendedGVType = EVT::getEVT(GVType->getElementType(), true);
  if (!ExtendedGVType.isInteger() && !ExtendedGVType.isFloatingPoint()) {
    // A bitcast to i8 addrspace(n)* on the operand is needed.
    LLVMContext &Context = M->getContext();
    unsigned int AddrSpace = GVType->getAddressSpace();
    Type *DestTy = PointerType::get(Type::getInt8Ty(Context), AddrSpace);
    CVTA = Builder.CreateBitCast(GV, DestTy, "cvta");
    // Insert the address space conversion.
    Type *ResultType =
        PointerType::get(Type::getInt8Ty(Context), llvm::ADDRESS_SPACE_GENERIC);
    SmallVector<Type *, 2> ParamTypes;
    ParamTypes.push_back(ResultType);
    ParamTypes.push_back(DestTy);
    Function *CVTAFunction = Intrinsic::getDeclaration(
        M, Intrinsic::nvvm_ptr_global_to_gen, ParamTypes);
    CVTA = Builder.CreateCall(CVTAFunction, CVTA, "cvta");
    // Another bitcast from i8* to <the element type of GVType>* is required.
    DestTy =
        PointerType::get(GVType->getElementType(), llvm::ADDRESS_SPACE_GENERIC);
    CVTA = Builder.CreateBitCast(CVTA, DestTy, "cvta");
  } else {
    // A simple CVTA is enough.
    SmallVector<Type *, 2> ParamTypes;
    ParamTypes.push_back(PointerType::get(GVType->getElementType(),
                                          llvm::ADDRESS_SPACE_GENERIC));
    ParamTypes.push_back(GVType);
    Function *CVTAFunction = Intrinsic::getDeclaration(
        M, Intrinsic::nvvm_ptr_global_to_gen, ParamTypes);
    CVTA = Builder.CreateCall(CVTAFunction, GV, "cvta");
  }

  return CVTA;
}
static std::unique_ptr<StructInfo> getCpuArchStructInfo(Module *module) {
  GlobalVariable *env =
      module->getGlobalVariable("cpuarchstruct_type_anchor", false);
  assert(env);
  assert(env->getType() && env->getType()->isPointerTy());
  assert(env->getType()->getElementType() &&
         env->getType()->getElementType()->isPointerTy());
  PointerType *envDeref =
      dyn_cast<PointerType>(env->getType()->getElementType());
  assert(envDeref && envDeref->getElementType() &&
         envDeref->getElementType()->isStructTy());
  StructType *structType = dyn_cast<StructType>(envDeref->getElementType());
  assert(structType);

  NamedMDNode *mdCuNodes = module->getNamedMetadata("llvm.dbg.cu");
  if (!mdCuNodes) {
    return nullptr;
  }

  std::shared_ptr<DITypeIdentifierMap> typeIdentifierMap(
      new DITypeIdentifierMap(generateDITypeIdentifierMap(mdCuNodes)));
  DICompositeType *diStructType = nullptr;

  for (unsigned i = 0; i < mdCuNodes->getNumOperands() && !diStructType; ++i) {
    DICompileUnit diCu(mdCuNodes->getOperand(i));
    for (unsigned j = 0; j < diCu.getGlobalVariables().getNumElements(); ++j) {
      DIGlobalVariable diGlobalVar(diCu.getGlobalVariables().getElement(j));
      if (diGlobalVar.getName() != "cpuarchstruct_type_anchor") {
        continue;
      }

      assert(diGlobalVar.getType().isDerivedType());
      DIDerivedType diEnvPtrType(diGlobalVar.getType());
      assert(diEnvPtrType.getTypeDerivedFrom()
                 .resolve(*typeIdentifierMap)
                 .isCompositeType());
      return std::unique_ptr<StructInfo>(new StructInfo(
          module, structType,
          new DICompositeType(
              diEnvPtrType.getTypeDerivedFrom().resolve(*typeIdentifierMap)),
          typeIdentifierMap));
    }
  }

  llvm::errs() << "WARNING: Debug information for struct CPUArchState not found"
               << '\n';
  return nullptr;
}
TEST(MangleTest, vecAndVecPtr) {
  // "frexp(float2, __global int2*)"
  const char *s = "_Z5frexpDv2_fPU3AS1Dv2_i";

  FunctionDescriptor fd;
  RefParamType primitiveFloat(new PrimitiveType(PRIMITIVE_FLOAT));
  RefParamType vectorFloat(new VectorType(primitiveFloat, 2));
  RefParamType primitiveInt(new PrimitiveType(PRIMITIVE_INT));
  RefParamType vectorInt(new VectorType(primitiveInt, 2));
  PointerType *ptrInt = new PointerType(vectorInt);
  ptrInt->setAddressSpace(ATTR_GLOBAL);
  RefParamType ptrIntRef(ptrInt);

  fd.name = "frexp";
  fd.parameters.push_back(vectorFloat);
  fd.parameters.push_back(ptrIntRef);
  std::string mangled = mangle(fd);

  ASSERT_STREQ(s, mangled.c_str());
}
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, ArrayRef<Value *> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  // Extract out the type of the callee.
  PointerType *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = BB->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg).
  Function *FnStatepoint = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_statepoint, {FuncPtrType});

  std::vector<llvm::Value *> Args = getStatepointArgs(
      *this, ID, NumPatchBytes, ActualInvokee, InvokeArgs, DeoptArgs, GCArgs);
  return createInvokeHelper(FnStatepoint, NormalDest, UnwindDest, Args, this,
                            Name);
}