Example No. 1
bool MeasureMetric::runOnFunction(Function &F) {
	for (Function::iterator b = F.begin(), be = F.end(); b != be; b++) {
		for (BasicBlock::iterator i = b->begin(), ie = b->end(); i != ie; i++) {
			Instruction *inst = &*i;
			if (isa<CastInst>(inst)) {
				castInstCount++;
			} else if (FCmpInst* target = dyn_cast<FCmpInst>(inst)) {
				Value *val1 = target->getOperand(0);
				increaseCounter(val1->getType(), cmpOp);
			} else if (BinaryOperator *target = dyn_cast<BinaryOperator>(inst)) {
				Value *val1 = target->getOperand(0);
				increaseCounter(val1->getType(), arithOp);
			} else if (LoadInst *target = dyn_cast<LoadInst>(inst)) {
				Value *val1 = target->getPointerOperand();
				PointerType *pointerType = cast<PointerType>(val1->getType());
				increaseCounter(pointerType->getElementType(), loadOp);
			} else if (StoreInst* target = dyn_cast<StoreInst>(inst)) {
				Value *val1 = target->getOperand(0);
				increaseCounter(val1->getType(), storeOp);
			}
		}
	}

	return false;
}
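The helper increaseCounter and the cmpOp/arithOp/loadOp/storeOp tags used above are referenced but not shown in the snippet. A minimal sketch of what they might look like, assuming the pass simply keeps separate integer and floating-point counters per operation kind (all names below are hypothetical, not taken from the original pass):

// Hypothetical sketch only -- not the original pass's implementation.
#include "llvm/IR/Type.h"

enum OpKind { cmpOp, arithOp, loadOp, storeOp, numOpKinds };

static unsigned FloatOpCount[numOpKinds];
static unsigned IntOpCount[numOpKinds];

static void increaseCounter(llvm::Type *Ty, OpKind Kind) {
  // Bucket the operation by the type of its operand.
  if (Ty->isFloatingPointTy())
    ++FloatOpCount[Kind];
  else if (Ty->isIntegerTy())
    ++IntOpCount[Kind];
}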
Example No. 2
CallInst *IRBuilderBase::CreateGCStatepoint(Value *ActualCallee,
                                            ArrayRef<Value*> CallArgs,
                                            ArrayRef<Value*> DeoptArgs,
                                            ArrayRef<Value*> GCArgs,
                                            const Twine& Name) {
  // Extract out the type of the callee.
  PointerType *FuncPtrType = cast<PointerType>(ActualCallee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = BB->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Type *ArgTypes[] = { FuncPtrType };
  Function *FnStatepoint =
    Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                              ArgTypes);

  std::vector<llvm::Value *> args;
  args.push_back(ActualCallee);
  args.push_back(getInt32(CallArgs.size()));
  args.push_back(getInt32(0 /*unused*/));
  args.insert(args.end(), CallArgs.begin(), CallArgs.end());
  args.push_back(getInt32(DeoptArgs.size()));
  args.insert(args.end(), DeoptArgs.begin(), DeoptArgs.end());
  args.insert(args.end(), GCArgs.begin(), GCArgs.end());

  return createCallHelper(FnStatepoint, args, this, Name);
}
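A hedged usage sketch of the wrapper above: with an IRBuilder positioned at the desired safepoint, a call could be wrapped roughly as follows. The function name emitStatepoint and all parameter names are illustrative placeholders, not names from the original code.

// Illustrative only: wraps a call to Callee in a gc.statepoint at InsertBefore.
#include "llvm/IR/IRBuilder.h"

static llvm::CallInst *emitStatepoint(llvm::Instruction *InsertBefore,
                                      llvm::Value *Callee,
                                      llvm::ArrayRef<llvm::Value *> ActualArgs,
                                      llvm::ArrayRef<llvm::Value *> DeoptState) {
  llvm::IRBuilder<> Builder(InsertBefore);
  return Builder.CreateGCStatepoint(Callee, ActualArgs, DeoptState,
                                    /*GCArgs=*/llvm::ArrayRef<llvm::Value *>(),
                                    "statepoint");
}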
Example No. 3
void SelectionDAGBuilder::visitGCResult(const CallInst &CI) {
  // The result value of the gc_result is simply the result of the actual
  // call.  We've already emitted this, so just grab the value.
  Instruction *I = cast<Instruction>(CI.getArgOperand(0));
  assert(isStatepoint(I) && "first argument must be a statepoint token");

  if (I->getParent() != CI.getParent()) {
    // Statepoint is in different basic block so we should have stored call
    // result in a virtual register.
    // We can not use default getValue() functionality to copy value from this
    // register because statepoint and actual call return types can be
    // different, and getValue() will use CopyFromReg of the wrong type,
    // which is always i32 in our case.
    PointerType *CalleeType = cast<PointerType>(
        ImmutableStatepoint(I).getCalledValue()->getType());
    Type *RetTy =
        cast<FunctionType>(CalleeType->getElementType())->getReturnType();
    SDValue CopyFromReg = getCopyFromRegs(I, RetTy);

    assert(CopyFromReg.getNode());
    setValue(&CI, CopyFromReg);
  } else {
    setValue(&CI, getValue(I));
  }
}
Example No. 4
  /* This routine extracts the filename of the file passed to the open call.
     It has been borrowed from the LLPE toolchain by Smowton.
   */
  bool getConstantStringInfo(const Value *V, StringRef &Str) {

    // Look through bitcast instructions and geps.
    V = V->stripPointerCasts();
    // If the value is a GEP instruction or constant expression, treat it as an offset.
    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      // Make sure the GEP has exactly three arguments.
      if (GEP->getNumOperands() != 3)
        return false;

      // Make sure the index-ee is a pointer to array of i8.
      PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
      ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
      if (AT == 0 || !AT->getElementType()->isIntegerTy(8))
        return false;

      // Check to make sure that the first operand of the GEP is an integer and
      // has value 0 so that we are sure we're indexing into the initializer.
      const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
      if (FirstIdx == 0 || !FirstIdx->isZero())
        return false;

      // If the second index isn't a ConstantInt, then this is a variable index
      // into the array.  If this occurs, we can't say anything meaningful about
      // the string.
      uint64_t StartIdx = 0;
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
        StartIdx = CI->getZExtValue();
      else
        return false;

    }

    // The GEP instruction, constant or instruction, must reference a global
    // variable that is a constant and is initialized. The referenced constant
    // initializer is the array that we'll use for optimization.
    const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
    if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
      return false;

    // Handle the all-zeros case
    if (GV->getInitializer()->isNullValue()) {
      // This is a degenerate case. The initializer is constant zero so the
      // length of the string must be zero.
      Str = "";
      return true;
    }

    // Must be a Constant Array
    const ConstantDataArray *Array =
      dyn_cast<ConstantDataArray>(GV->getInitializer());

    if (Array == 0 || !Array->isString())
      return false;

    // Start out with the entire array in the StringRef.
    Str = Array->getAsString();
    return true;
  }
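A hedged usage sketch of the helper above, assuming OpenCall is a CallInst for an open() call whose first operand is the file-name pointer (the names are illustrative and already in scope):

// Illustrative only.
llvm::StringRef FileName;
if (getConstantStringInfo(OpenCall->getArgOperand(0), FileName))
  llvm::errs() << "open() target: " << FileName << "\n";
else
  llvm::errs() << "file name is not a constant string\n";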
Example No. 5
/// \brief Create a call to a Masked Store intrinsic.
/// \p Val   - data to be stored,
/// \p Ptr   - base pointer for the store
/// \p Align - alignment of the destination location
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           unsigned Align, Value *Mask) {
  PointerType *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = PtrTy->getElementType();
  assert(DataTy->isVectorTy() && "Ptr should point to a vector");
  Type *OverloadedTypes[] = { DataTy, PtrTy };
  Value *Ops[] = { Val, Ptr, getInt32(Align), Mask };
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}
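A hedged usage sketch of the builder API above: storing a vector only at lanes where a condition holds. Vec, VecPtr, CondMask, and InsertBefore are illustrative placeholders assumed to be in scope.

// Illustrative only: Vec is a <4 x float> value, VecPtr a <4 x float>*,
// and CondMask a <4 x i1> mask; the store happens only at enabled lanes.
llvm::IRBuilder<> Builder(InsertBefore);
Builder.CreateMaskedStore(Vec, VecPtr, /*Align=*/16, CondMask);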
Example No. 6
/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const DataLayout *DL) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  PointerType *DestTy = cast<PointerType>(CI->getType());
  Type *DestPTy = DestTy->getElementType();
  if (PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
         DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.
      if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Type *IdxTy = DL
                        ? DL->getIntPtrType(SrcTy)
                        : Type::getInt64Ty(SrcTy->getContext());
            Value *Idx = Constant::getNullValue(IdxTy);
            Value *Idxs[2] = { Idx, Idx };
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getDataLayout() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
            SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is then
          // casted to a pointer, this pessimizes pointer analysis a lot.
          (SrcPTy->isPtrOrPtrVectorTy() ==
           LI.getType()->isPtrOrPtrVectorTy()) &&
          IC.getDataLayout()->getTypeSizeInBits(SrcPTy) ==
               IC.getDataLayout()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        NewLoad->setAlignment(LI.getAlignment());
        NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}
Example No. 7
Type *VectorBlockGenerator::getVectorPtrTy(const Value *Val, int Width) {
  PointerType *PointerTy = dyn_cast<PointerType>(Val->getType());
  assert(PointerTy && "PointerType expected");

  Type *ScalarType = PointerTy->getElementType();
  VectorType *VectorType = VectorType::get(ScalarType, Width);

  return PointerType::getUnqual(VectorType);
}
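A hedged usage sketch of the function above (ScalarPtr is an illustrative placeholder assumed to be a Value of pointer type):

// Illustrative only: ScalarPtr is assumed to have type double*.
llvm::Type *VecPtrTy = getVectorPtrTy(ScalarPtr, /*Width=*/4);
// VecPtrTy is now "<4 x double>*" in address space 0 (getUnqual).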
Example No. 8
AllocaInst* Variables::changeLocal(Value* value, ArrayType* newType) {

  AllocaInst* oldTarget = dyn_cast<AllocaInst>(value);
  PointerType* oldPointerType = dyn_cast<PointerType>(oldTarget->getType());
  ArrayType* oldType = dyn_cast<ArrayType>(oldPointerType->getElementType());
  AllocaInst* newTarget = NULL;

  errs() << "Changing the precision of variable \"" << oldTarget->getName() << "\" from " << *oldType 
	 << " to " << *newType << ".\n";

  if (newType->getElementType()->getTypeID() != oldType->getElementType()->getTypeID()) {

    newTarget = new AllocaInst(newType, getInt32(1), "", oldTarget);
    
    // we are not calling getAlignment because in this case double requires 16. Investigate further.
    unsigned alignment;
    switch(newType->getElementType()->getTypeID()) {
    case Type::FloatTyID: 
      alignment = 4;
      break;
    case Type::DoubleTyID:
      alignment = 16;
      break;
    case Type::X86_FP80TyID:
      alignment = 16;
      break;
    default:
      alignment = 0;
    } 
    
    newTarget->setAlignment(alignment); // depends on type? 8 for float? 16 for double?
    newTarget->takeName(oldTarget);
    
    // iterating through instructions using old AllocaInst
    vector<Instruction*> erase;
    Value::use_iterator it = oldTarget->use_begin();
    for(; it != oldTarget->use_end(); it++) {
      bool is_erased = Transformer::transform(it, newTarget, oldTarget, newType, oldType, alignment);

      if (!is_erased)
        erase.push_back(dyn_cast<Instruction>(*it));
    }	  
    
    // erasing uses of old instructions
    for(unsigned int i = 0; i < erase.size(); i++) {
      erase[i]->eraseFromParent();
    }
    // erase old instruction
    //oldTarget->eraseFromParent();
  }
  else {
    errs() << "\tNo changes required.\n";    
  }

  return newTarget;
}
Example No. 9
void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getLoadStorePointerOperand(&I);
      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
      // conservative. For full groups, wrapping should be ok since if we would
      // wrap around the address space we would do a memory access at nullptr
      // even without the transformation. The wrapping checks are therefore
      // deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      unsigned Align = getLoadStoreAlignment(&I);
      if (!Align)
        Align = DL.getABITypeAlignment(PtrTy->getElementType());

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
    }
}
Example No. 10
SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
  // no interprocedural analysis is done at the moment
  if (!A.hasByValAttr()) {
    ++ObjectVisitorArgument;
    return unknown();
  }
  PointerType *PT = cast<PointerType>(A.getType());
  APInt Size(IntTyBits, TD->getTypeAllocSize(PT->getElementType()));
  return std::make_pair(align(Size, A.getParamAlignment()), Zero);
}
Example No. 11
Value *GenericToNVVM::getOrInsertCVTA(Module *M, Function *F,
                                      GlobalVariable *GV,
                                      IRBuilder<> &Builder) {
  PointerType *GVType = GV->getType();
  Value *CVTA = nullptr;

  // See if the address space conversion requires the operand to be bitcast
  // to i8 addrspace(n)* first.
  EVT ExtendedGVType = EVT::getEVT(GVType->getElementType(), true);
  if (!ExtendedGVType.isInteger() && !ExtendedGVType.isFloatingPoint()) {
    // A bitcast to i8 addrspace(n)* on the operand is needed.
    LLVMContext &Context = M->getContext();
    unsigned int AddrSpace = GVType->getAddressSpace();
    Type *DestTy = PointerType::get(Type::getInt8Ty(Context), AddrSpace);
    CVTA = Builder.CreateBitCast(GV, DestTy, "cvta");
    // Insert the address space conversion.
    Type *ResultType =
        PointerType::get(Type::getInt8Ty(Context), llvm::ADDRESS_SPACE_GENERIC);
    SmallVector<Type *, 2> ParamTypes;
    ParamTypes.push_back(ResultType);
    ParamTypes.push_back(DestTy);
    Function *CVTAFunction = Intrinsic::getDeclaration(
        M, Intrinsic::nvvm_ptr_global_to_gen, ParamTypes);
    CVTA = Builder.CreateCall(CVTAFunction, CVTA, "cvta");
    // Another bitcast from i8 * to <the element type of GVType> * is
    // required.
    DestTy =
        PointerType::get(GVType->getElementType(), llvm::ADDRESS_SPACE_GENERIC);
    CVTA = Builder.CreateBitCast(CVTA, DestTy, "cvta");
  } else {
    // A simple CVTA is enough.
    SmallVector<Type *, 2> ParamTypes;
    ParamTypes.push_back(PointerType::get(GVType->getElementType(),
                                          llvm::ADDRESS_SPACE_GENERIC));
    ParamTypes.push_back(GVType);
    Function *CVTAFunction = Intrinsic::getDeclaration(
        M, Intrinsic::nvvm_ptr_global_to_gen, ParamTypes);
    CVTA = Builder.CreateCall(CVTAFunction, GV, "cvta");
  }

  return CVTA;
}
Example No. 12
static std::unique_ptr<StructInfo> getCpuArchStructInfo(Module *module)
{
    GlobalVariable *env = module->getGlobalVariable("cpuarchstruct_type_anchor", false);
    assert(env);
    assert(env->getType() && env->getType()->isPointerTy());
    assert(env->getType()->getElementType() && env->getType()->getElementType()->isPointerTy());
    PointerType *envDeref = dyn_cast<PointerType>(env->getType()->getElementType());
    assert(envDeref && envDeref->getElementType() && envDeref->getElementType()->isStructTy());

    StructType *structType = dyn_cast<StructType>(envDeref->getElementType());
    assert(structType);

    NamedMDNode *mdCuNodes = module->getNamedMetadata("llvm.dbg.cu");
    if (!mdCuNodes) {
        return nullptr;
    }
    
    std::shared_ptr<DITypeIdentifierMap> typeIdentifierMap(new DITypeIdentifierMap(generateDITypeIdentifierMap(mdCuNodes)));
     

    DICompositeType *diStructType = nullptr;
    for ( unsigned i = 0; i < mdCuNodes->getNumOperands() && !diStructType; ++i )
    {
        DICompileUnit diCu(mdCuNodes->getOperand(i));

        for ( unsigned j = 0; j < diCu.getGlobalVariables().getNumElements(); ++j )
        {
            DIGlobalVariable diGlobalVar(diCu.getGlobalVariables().getElement(j));
            if (diGlobalVar.getName() != "cpuarchstruct_type_anchor")  {
                continue;
            }

            assert(diGlobalVar.getType().isDerivedType());
            DIDerivedType diEnvPtrType(diGlobalVar.getType());
            assert(diEnvPtrType.getTypeDerivedFrom().resolve(*typeIdentifierMap).isCompositeType());
            return std::unique_ptr<StructInfo>(new StructInfo(module, structType, new DICompositeType(diEnvPtrType.getTypeDerivedFrom().resolve(*typeIdentifierMap)), typeIdentifierMap));
        }
    }

    llvm::errs() << "WARNING: Debug information for struct CPUArchState not found" << '\n';
    return nullptr;
}
Example No. 13
Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
  PointerType *PT = cast<PointerType>(Ptr->getType());
  if (PT->getElementType()->isIntegerTy(8))
    return Ptr;
  
  // Otherwise, we need to insert a bitcast.
  PT = getInt8PtrTy(PT->getAddressSpace());
  BitCastInst *BCI = new BitCastInst(Ptr, PT, "");
  BB->getInstList().insert(InsertPt, BCI);
  SetInstDebugLocation(BCI);
  return BCI;
}
Example No. 14
// Decides whether V is an addrspacecast and shortcutting V in load/store is
// valid and beneficial.
static bool isEliminableAddrSpaceCast(Value *V) {
  // Returns false if V is not even an addrspacecast.
  Operator *Cast = dyn_cast<Operator>(V);
  if (Cast == nullptr || Cast->getOpcode() != Instruction::AddrSpaceCast)
    return false;

  Value *Src = Cast->getOperand(0);
  PointerType *SrcTy = cast<PointerType>(Src->getType());
  PointerType *DestTy = cast<PointerType>(Cast->getType());
  // TODO: For now, we only handle the case where the addrspacecast only changes
  // the address space but not the type. If the type also changes, we could
  // still get rid of the addrspacecast by adding an extra bitcast, but we
  // rarely see such scenarios.
  if (SrcTy->getElementType() != DestTy->getElementType())
    return false;

  // Checks whether the addrspacecast is from a non-generic address space to the
  // generic address space.
  return (SrcTy->getAddressSpace() != AddressSpace::ADDRESS_SPACE_GENERIC &&
          DestTy->getAddressSpace() == AddressSpace::ADDRESS_SPACE_GENERIC);
}
Example No. 15
/// \brief Create a call to a Masked Load intrinsic.
/// \p Ptr      - base pointer for the load
/// \p Align    - alignment of the source location
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  PointerType *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = PtrTy->getElementType();
  assert(DataTy->isVectorTy() && "Ptr should point to a vector");
  if (!PassThru)
    PassThru = UndefValue::get(DataTy);
  Type *OverloadedTypes[] = { DataTy, PtrTy };
  Value *Ops[] = { Ptr, getInt32(Align), Mask,  PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                               OverloadedTypes, Name);
}
Example No. 16
bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {

  const FunctionType *FTy = F.getFunctionType();

  LocalMemAvailable = ST.getLocalMemorySize();


  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    const Type *ParamTy = FTy->getParamType(i);
    if (ParamTy->isPointerTy() &&
        ParamTy->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemAvailable = 0;
      DEBUG(dbgs() << "Function has local memory argument.  Promoting to "
                      "local memory disabled.\n");
      break;
    }
  }

  if (LocalMemAvailable > 0) {
    // Check how much local memory is being used by global objects
    for (Module::global_iterator I = Mod->global_begin(),
                                 E = Mod->global_end(); I != E; ++I) {
      GlobalVariable *GV = I;
      PointerType *GVTy = GV->getType();
      if (GVTy->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
        continue;
      for (Value::use_iterator U = GV->use_begin(),
                               UE = GV->use_end(); U != UE; ++U) {
        Instruction *Use = dyn_cast<Instruction>(*U);
        if (!Use)
          continue;
        if (Use->getParent()->getParent() == &F)
          LocalMemAvailable -=
              Mod->getDataLayout()->getTypeAllocSize(GVTy->getElementType());
      }
    }
  }

  LocalMemAvailable = std::max(0, LocalMemAvailable);
  DEBUG(dbgs() << LocalMemAvailable << " bytes free in local memory.\n");

  visit(F);

  return false;
}
Example No. 17
static bool CanCheckValue(Value *V) {
  if (isa<Constant>(V))
    return false;

  Type *ValTy = V->getType();

  // Pointers are generally not valid to cross check, since they may vary due
  // to randomization. Floating point values might not compare exactly
  // identical across variants, so ignore for now. Thus, we're left with ints.
  if (!ValTy->isIntegerTy())
    return false;

  Value *PointerOperand = nullptr;
  if (auto *I = dyn_cast<LoadInst>(V))
    PointerOperand = I->getPointerOperand();
  else if (auto *I = dyn_cast<VAArgInst>(V))
    PointerOperand = I->getPointerOperand();
  else if (auto *I = dyn_cast<AtomicCmpXchgInst>(V))
    PointerOperand = I->getPointerOperand();

  if (PointerOperand) {
    auto LoadedValue = PointerOperand->stripPointerCasts();

    // Check if we are loading a pointer that has been cast as an int.
    PointerType *OrigType = dyn_cast<PointerType>(LoadedValue->getType());
    if (OrigType && OrigType->getElementType()->isPointerTy())
      return false;

    // Check that we aren't loading a NoCrossCheck global
    auto GV = dyn_cast<GlobalVariable>(LoadedValue);
    if (GV && GV->isNoCrossCheck())
      return false;
  }

  for (auto I = V->user_begin(), E = V->user_end(); I != E; I++ ) {
    auto CI = dyn_cast<CastInst>(*I);
    if (CI && !CI->isIntegerCast())
      return false;
  }

  if (HasTBAAPointerAccess(V))
    return false;

  return true;
}
Example No. 18
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, ArrayRef<Value *> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  // Extract out the type of the callee.
  PointerType *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = BB->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_statepoint, {FuncPtrType});

  std::vector<llvm::Value *> Args = getStatepointArgs(
      *this, ID, NumPatchBytes, ActualInvokee, InvokeArgs, DeoptArgs, GCArgs);
  return createInvokeHelper(FnStatepoint, NormalDest, UnwindDest, Args, this,
                            Name);
}
Example No. 19
void ForwardControlFlowIntegrity::updateIndirectCalls(Module &M,
                                                      CFITables &CFIT) {
  Type *Int64Ty = Type::getInt64Ty(M.getContext());
  for (Instruction *I : IndirectCalls) {
    CallSite CS(I);
    Value *CalledValue = CS.getCalledValue();

    // Get the function type for this call and look it up in the tables.
    Type *VTy = CalledValue->getType();
    PointerType *PTy = dyn_cast<PointerType>(VTy);
    Type *EltTy = PTy->getElementType();
    FunctionType *FunTy = dyn_cast<FunctionType>(EltTy);
    FunctionType *TransformedTy = JumpInstrTables::transformType(JTType, FunTy);
    ++NumCFIIndirectCalls;
    Constant *JumpTableStart = nullptr;
    Constant *JumpTableMask = nullptr;
    Constant *JumpTableSize = nullptr;

    // Some call sites have function types that don't correspond to any
    // address-taken function in the module. This happens when function pointers
    // are passed in from external code.
    auto it = CFIT.find(TransformedTy);
    if (it == CFIT.end()) {
      // In this case, make sure that the function pointer will change by
      // setting the mask and the start to be 0 so that the transformed
      // function is 0.
      JumpTableStart = ConstantInt::get(Int64Ty, 0);
      JumpTableMask = ConstantInt::get(Int64Ty, 0);
      JumpTableSize = ConstantInt::get(Int64Ty, 0);
    } else {
      JumpTableStart = it->second.StartValue;
      JumpTableMask = it->second.MaskValue;
      JumpTableSize = it->second.Size;
    }

    rewriteFunctionPointer(M, I, CalledValue, JumpTableStart, JumpTableMask,
                           JumpTableSize);
  }

  return;
}
Example No. 20
// =============================================================================
// If the function had a byval struct ptr arg, say foo(%struct.x* byval %d),
// then add the following instructions to the first basic block:
//
// %temp = alloca %struct.x, align 8
// %tempd = addrspacecast %struct.x* %d to %struct.x addrspace(101)*
// %tv = load %struct.x addrspace(101)* %tempd
// store %struct.x %tv, %struct.x* %temp, align 8
//
// The above code allocates some space in the stack and copies the incoming
// struct from param space to local space.
// Then replace all occurrences of %d by %temp.
// =============================================================================
void NVPTXLowerArgs::handleByValParam(Argument *Arg) {
  Function *Func = Arg->getParent();
  Instruction *FirstInst = &(Func->getEntryBlock().front());
  PointerType *PType = dyn_cast<PointerType>(Arg->getType());

  assert(PType && "Expecting pointer type in handleByValParam");

  Type *StructType = PType->getElementType();
  AllocaInst *AllocA = new AllocaInst(StructType, Arg->getName(), FirstInst);
  // Set the alignment to alignment of the byval parameter. This is because,
  // later load/stores assume that alignment, and we are going to replace
  // the use of the byval parameter with this alloca instruction.
  AllocA->setAlignment(Func->getParamAlignment(Arg->getArgNo() + 1));
  Arg->replaceAllUsesWith(AllocA);

  Value *ArgInParam = new AddrSpaceCastInst(
      Arg, PointerType::get(StructType, ADDRESS_SPACE_PARAM), Arg->getName(),
      FirstInst);
  LoadInst *LI = new LoadInst(ArgInParam, Arg->getName(), FirstInst);
  new StoreInst(LI, AllocA, FirstInst);
}
Example No. 21
static Value *getFieldAddress(IRBuilder<> &Builder, Value *Base,
                              uint32_t Offset, Type *FieldTy) {
  // The base value should be an i8* or i8[]*.
  assert(Base->getType()->isPointerTy());
  assert(Base->getType()->getPointerElementType()->isIntegerTy(8) ||
         Base->getType()
             ->getPointerElementType()
             ->getArrayElementType()
             ->isIntegerTy(8));

  Type *Int32Ty = Type::getInt32Ty(Builder.getContext());
  Value *Indices[] = {ConstantInt::get(Int32Ty, 0),
                      ConstantInt::get(Int32Ty, Offset)};
  Value *Address = Builder.CreateInBoundsGEP(Base, Indices);
  PointerType *AddressTy = cast<PointerType>(Address->getType());
  if (AddressTy->getElementType() != FieldTy) {
    AddressTy = PointerType::get(FieldTy, AddressTy->getAddressSpace());
    Address = Builder.CreatePointerCast(Address, AddressTy);
  }
  return Address;
}
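A hedged usage sketch of the helper above: reading a 32-bit field at byte offset 8 of an i8-based frame object. Builder and Frame are illustrative placeholders assumed to be in scope.

// Illustrative only: Frame is an i8* (or [N x i8]*) base pointer.
llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Builder.getContext());
llvm::Value *FieldAddr =
    getFieldAddress(Builder, Frame, /*Offset=*/8, Int32Ty);
llvm::Value *FieldVal = Builder.CreateLoad(FieldAddr, "field");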
Example No. 22
void NVPTXLowerStructArgs::handleParam(Argument *Arg) {
  Function *Func = Arg->getParent();
  Instruction *FirstInst = &(Func->getEntryBlock().front());
  PointerType *PType = dyn_cast<PointerType>(Arg->getType());

  assert(PType && "Expecting pointer type in handleParam");

  Type *StructType = PType->getElementType();
  AllocaInst *AllocA = new AllocaInst(StructType, Arg->getName(), FirstInst);

  /* Set the alignment to alignment of the byval parameter. This is because,
   * later load/stores assume that alignment, and we are going to replace
   * the use of the byval parameter with this alloca instruction.
   */
  AllocA->setAlignment(Func->getParamAlignment(Arg->getArgNo() + 1));

  Arg->replaceAllUsesWith(AllocA);

  // Get the cvt.gen.to.param intrinsic
  Type *CvtTypes[] = {
      Type::getInt8PtrTy(Func->getParent()->getContext(), ADDRESS_SPACE_PARAM),
      Type::getInt8PtrTy(Func->getParent()->getContext(),
                         ADDRESS_SPACE_GENERIC)};
  Function *CvtFunc = Intrinsic::getDeclaration(
      Func->getParent(), Intrinsic::nvvm_ptr_gen_to_param, CvtTypes);

  Value *BitcastArgs[] = {
      new BitCastInst(Arg, Type::getInt8PtrTy(Func->getParent()->getContext(),
                                              ADDRESS_SPACE_GENERIC),
                      Arg->getName(), FirstInst)};
  CallInst *CallCVT =
      CallInst::Create(CvtFunc, BitcastArgs, "cvt_to_param", FirstInst);

  BitCastInst *BitCast = new BitCastInst(
      CallCVT, PointerType::get(StructType, ADDRESS_SPACE_PARAM),
      Arg->getName(), FirstInst);
  LoadInst *LI = new LoadInst(BitCast, Arg->getName(), FirstInst);
  new StoreInst(LI, AllocA, FirstInst);
}
Example No. 23
unsigned
VectorProcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
{
	const Function *CalleeFn = 0;
	if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
		CalleeFn = dyn_cast<Function>(G->getGlobal());
	} else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
		const Function *Fn = DAG.getMachineFunction().getFunction();
		const Module *M = Fn->getParent();
		CalleeFn = M->getFunction(E->getSymbol());
	}

	if (!CalleeFn)
		return 0;

	assert(CalleeFn->hasStructRetAttr() &&
		 "Callee does not have the StructRet attribute.");

	PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
	Type *ElementTy = Ty->getElementType();
	return getDataLayout()->getTypeAllocSize(ElementTy);
}
Example No. 24
template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    ArrayRef<T1> TransitionArgs, ArrayRef<T2> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  // Extract out the type of the callee.
  PointerType *FuncPtrType = cast<PointerType>(ActualCallee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Type *ArgTypes[] = { FuncPtrType };
  Function *FnStatepoint =
    Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                              ArgTypes);

  std::vector<llvm::Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualCallee, Flags,
                        CallArgs, TransitionArgs, DeoptArgs, GCArgs);
  return createCallHelper(FnStatepoint, Args, Builder, Name);
}
Example No. 25
bool FixFunctionBitcasts::runOnModule(Module &M) {
  SmallVector<std::pair<Use *, Function *>, 0> Uses;

  // Collect all the places that need wrappers.
  for (Function &F : M)
    FindUses(&F, F, Uses);

  DenseMap<std::pair<Function *, FunctionType *>, Function *> Wrappers;

  for (auto &UseFunc : Uses) {
    Use *U = UseFunc.first;
    Function *F = UseFunc.second;
    PointerType *PTy = cast<PointerType>(U->get()->getType());
    FunctionType *Ty = dyn_cast<FunctionType>(PTy->getElementType());

    // If the function is casted to something like i8* as a "generic pointer"
    // to be later casted to something else, we can't generate a wrapper for it.
    // Just ignore such casts for now.
    if (!Ty)
      continue;

    auto Pair = Wrappers.insert(std::make_pair(std::make_pair(F, Ty), nullptr));
    if (Pair.second)
      Pair.first->second = CreateWrapper(F, Ty);

    Function *Wrapper = Pair.first->second;
    if (!Wrapper)
      continue;

    if (isa<Constant>(U->get()))
      U->get()->replaceAllUsesWith(Wrapper);
    else
      U->set(Wrapper);
  }

  return true;
}
Example No. 26
/// isSafeToLoadUnconditionally - Return true if we know that executing a load
/// from this value cannot trap.  If it is not obviously safe to load from the
/// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed.
bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
                                       unsigned Align, const DataLayout *TD) {
  int64_t ByteOffset = 0;
  Value *Base = V;
  Base = GetPointerBaseWithConstantOffset(V, ByteOffset, TD);

  if (ByteOffset < 0) // out of bounds
    return false;

  Type *BaseType = 0;
  unsigned BaseAlign = 0;
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
    // An alloca is safe to load from as long as it is suitably aligned.
    BaseType = AI->getAllocatedType();
    BaseAlign = AI->getAlignment();
  } else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
    // Global variables are safe to load from but their size cannot be
    // guaranteed if they are overridden.
    if (!GV->mayBeOverridden()) {
      BaseType = GV->getType()->getElementType();
      BaseAlign = GV->getAlignment();
    }
  }

  if (BaseType && BaseType->isSized()) {
    if (TD && BaseAlign == 0)
      BaseAlign = TD->getPrefTypeAlignment(BaseType);

    if (Align <= BaseAlign) {
      if (!TD)
        return true; // Loading directly from an alloca or global is OK.

      // Check if the load is within the bounds of the underlying object.
      PointerType *AddrTy = cast<PointerType>(V->getType());
      uint64_t LoadSize = TD->getTypeStoreSize(AddrTy->getElementType());
      if (ByteOffset + LoadSize <= TD->getTypeAllocSize(BaseType) &&
          (Align == 0 || (ByteOffset % Align) == 0))
        return true;
    }
  }

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to.  If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might do
    // a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<DbgInfoIntrinsic>(BBI))
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (AreEquivalentAddressValues(LI->getOperand(0), V)) return true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      if (AreEquivalentAddressValues(SI->getOperand(1), V)) return true;
    }
  }
  return false;
}
Example No. 27
GetElementPtrInst *
NaryReassociate::tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I,
                                          Value *LHS, Value *RHS,
                                          Type *IndexedType) {
  // Look for GEP's closest dominator that has the same SCEV as GEP except that
  // the I-th index is replaced with LHS.
  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(SE->getSCEV(*Index));
  // Replace the I-th index with LHS.
  IndexExprs[I] = SE->getSCEV(LHS);
  const SCEV *CandidateExpr = SE->getGEPExpr(
      GEP->getSourceElementType(), SE->getSCEV(GEP->getPointerOperand()),
      IndexExprs, GEP->isInBounds());

  auto *Candidate = findClosestMatchingDominator(CandidateExpr, GEP);
  if (Candidate == nullptr)
    return nullptr;

  PointerType *TypeOfCandidate = dyn_cast<PointerType>(Candidate->getType());
  // Pretty rare but theoretically possible when a numeric value happens to
  // share CandidateExpr.
  if (TypeOfCandidate == nullptr)
    return nullptr;

  // NewGEP = (char *)Candidate + RHS * sizeof(IndexedType)
  uint64_t IndexedSize = DL->getTypeAllocSize(IndexedType);
  Type *ElementType = TypeOfCandidate->getElementType();
  uint64_t ElementSize = DL->getTypeAllocSize(ElementType);
  // Another less rare case: because I is not necessarily the last index of the
  // GEP, the size of the type at the I-th index (IndexedSize) is not
  // necessarily divisible by ElementSize. For example,
  //
  // #pragma pack(1)
  // struct S {
  //   int a[3];
  //   int64 b[8];
  // };
  // #pragma pack()
  //
  // sizeof(S) = 100 is indivisible by sizeof(int64) = 8.
  //
  // TODO: bail out on this case for now. We could emit uglygep.
  if (IndexedSize % ElementSize != 0)
    return nullptr;

  // NewGEP = &Candidate[RHS * (sizeof(IndexedType) / sizeof(Candidate[0]))];
  IRBuilder<> Builder(GEP);
  Type *IntPtrTy = DL->getIntPtrType(TypeOfCandidate);
  if (RHS->getType() != IntPtrTy)
    RHS = Builder.CreateSExtOrTrunc(RHS, IntPtrTy);
  if (IndexedSize != ElementSize) {
    RHS = Builder.CreateMul(
        RHS, ConstantInt::get(IntPtrTy, IndexedSize / ElementSize));
  }
  GetElementPtrInst *NewGEP =
      cast<GetElementPtrInst>(Builder.CreateGEP(Candidate, RHS));
  NewGEP->setIsInBounds(GEP->isInBounds());
  NewGEP->takeName(GEP);
  return NewGEP;
}
Example No. 28
/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;
  Threshold += SingleBBBonus;

  // Perform some tweaks to the cost and threshold based on the direct
  // callsite information.

  // We want to more aggressively inline vector-dense kernels, so up the
  // threshold, and we'll lower it if the % of vector instructions gets too
  // low.
  assert(NumInstructions == 0);
  assert(NumVectorInstructions == 0);
  FiftyPercentVectorBonus = Threshold;
  TenPercentVectorBonus = Threshold / 2;

  // Give out bonuses per argument, as the instructions setting them up will
  // be gone after inlining.
  for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
    if (TD && CS.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
      unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
      unsigned PointerSize = TD->getPointerSizeInBits();
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost -= 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost -= InlineConstants::InstrCost;
    }
  }

  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically.
  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
    &F == CS.getCalledFunction();
  if (OnlyOneCallAndLocalLinkage)
    Cost += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn. As such,
  // there is little point in inlining this unless there is literally zero
  // cost.
  Instruction *Instr = CS.getInstruction();
  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Threshold = 1;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
    Threshold = 1;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (F.getCallingConv() == CallingConv::Cold)
    Cost += InlineConstants::ColdccPenalty;

  // Check if we're done. This can happen due to bonuses and penalties.
  if (Cost > Threshold)
    return false;

  if (F.empty())
    return true;

  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end();
       U != E; ++U) {
    CallSite Site(cast<Value>(*U));
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Track whether we've seen a return instruction. The first return
  // instruction is free, as at least one will usually disappear in inlining.
  bool HasReturn = false;

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, prioritizing for small iterations because we exit after
  // crossing our threshold, we use a small-size optimized SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                                  SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (Cost > (Threshold + VectorBonus))
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Handle the terminator cost here where we can track returns and other
    // function-wide constructs.
    TerminatorInst *TI = BB->getTerminator();

    // We never want to inline functions that contain an indirectbr.  This is
    // incorrect because all the blockaddress's (in static global initializers
    // for example) would be referring to the original function, and this
    // indirect jump would jump from the inlined copy of the function into the 
    // original function which is extremely undefined behavior.
    // FIXME: This logic isn't really right; we can safely inline functions
    // with indirectbr's as long as no other function or global references the
    // blockaddress of a block within the current function.  And as a QOI issue,
    // if someone is using a blockaddress without an indirectbr, and that
    // reference somehow ends up in another function or global, we probably
    // don't want to inline this function.
    if (isa<IndirectBrInst>(TI))
      return false;

    if (!HasReturn && isa<ReturnInst>(TI))
      HasReturn = true;
    else
      Cost += InlineConstants::InstrCost;

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail on out.
    if (!analyzeBlock(BB)) {
      if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
        return false;

      // If the caller is a recursive function then we don't want to inline
      // functions which allocate a lot of stack space because it would increase
      // the caller stack usage dramatically.
      if (IsCallerRecursive &&
          AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
        return false;

      break;
    }

    // Add in the live successors by first checking whether we have terminator
    // that may be simplified based on the values simplified by this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  // If this is a noduplicate call, we can still inline as long as 
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return false;

  Threshold += VectorBonus;

  return Cost < Threshold;
}
Example No. 29
/// \brief Check if executing a load of this pointer value cannot trap.
///
/// If DT is specified this method performs context-sensitive analysis.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
    const DataLayout &DL = ScanFrom->getModule()->getDataLayout();

    // Zero alignment means that the load has the ABI alignment for the target
    if (Align == 0)
        Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
    assert(isPowerOf2_32(Align));

    // If DT is not specified we can't make context-sensitive query
    const Instruction* CtxI = DT ? ScanFrom : nullptr;
    if (isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI))
        return true;

    int64_t ByteOffset = 0;
    Value *Base = V;
    Base = GetPointerBaseWithConstantOffset(V, ByteOffset, DL);

    if (ByteOffset < 0) // out of bounds
        return false;

    Type *BaseType = nullptr;
    unsigned BaseAlign = 0;
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
        // An alloca is safe to load from as long as it is suitably aligned.
        BaseType = AI->getAllocatedType();
        BaseAlign = AI->getAlignment();
    } else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
        // Global variables are not necessarily safe to load from if they are
        // interposed arbitrarily. Their size may change or they may be weak and
        // require a test to determine if they were in fact provided.
        if (!GV->isInterposable()) {
            BaseType = GV->getType()->getElementType();
            BaseAlign = GV->getAlignment();
        }
    }

    PointerType *AddrTy = cast<PointerType>(V->getType());
    uint64_t LoadSize = DL.getTypeStoreSize(AddrTy->getElementType());

    // If we found a base allocated type from either an alloca or global variable,
    // try to see if we are definitively within the allocated region. We need to
    // know the size of the base type and the loaded type to do anything in this
    // case.
    if (BaseType && BaseType->isSized()) {
        if (BaseAlign == 0)
            BaseAlign = DL.getPrefTypeAlignment(BaseType);

        if (Align <= BaseAlign) {
            // Check if the load is within the bounds of the underlying object.
            if (ByteOffset + LoadSize <= DL.getTypeAllocSize(BaseType) &&
                    ((ByteOffset % Align) == 0))
                return true;
        }
    }

    // Otherwise, be a little bit aggressive by scanning the local block where we
    // want to check to see if the pointer is already being loaded or stored
    // from/to.  If so, the previous load or store would have already trapped,
    // so there is no harm doing an extra load (also, CSE will later eliminate
    // the load entirely).
    BasicBlock::iterator BBI = ScanFrom->getIterator(),
                         E = ScanFrom->getParent()->begin();

    // We can at least always strip pointer casts even though we can't use the
    // base here.
    V = V->stripPointerCasts();

    while (BBI != E) {
        --BBI;

        // If we see a free or a call which may write to memory (i.e. which might do
        // a free) the pointer could be marked invalid.
        if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
                !isa<DbgInfoIntrinsic>(BBI))
            return false;

        Value *AccessedPtr;
        unsigned AccessedAlign;
        if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
            AccessedPtr = LI->getPointerOperand();
            AccessedAlign = LI->getAlignment();
        } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
            AccessedPtr = SI->getPointerOperand();
            AccessedAlign = SI->getAlignment();
        } else
            continue;

        Type *AccessedTy = AccessedPtr->getType()->getPointerElementType();
        if (AccessedAlign == 0)
            AccessedAlign = DL.getABITypeAlignment(AccessedTy);
        if (AccessedAlign < Align)
            continue;

        // Handle trivial cases.
        if (AccessedPtr == V)
            return true;

        if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
                LoadSize <= DL.getTypeStoreSize(AccessedTy))
            return true;
    }
    return false;
}
Example No. 30
/// getMallocAllocatedType - Returns the Type allocated by malloc call.
/// The Type depends on the number of bitcast uses of the malloc call:
///   0: PointerType is the malloc calls' return type.
///   1: PointerType is the bitcast's result type.
///  >1: Unique PointerType cannot be determined, return NULL.
Type *llvm::getMallocAllocatedType(const CallInst *CI,
                                   const TargetLibraryInfo *TLI) {
  PointerType *PT = getMallocType(CI, TLI);
  return PT ? PT->getElementType() : 0;
}
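A hedged usage sketch of the query above (CI and TLI are assumed to be the malloc CallInst and the TargetLibraryInfo already in scope):

// Illustrative only.
if (llvm::Type *AllocTy = llvm::getMallocAllocatedType(CI, TLI))
  llvm::errs() << "malloc allocates an object of type " << *AllocTy << "\n";
else
  llvm::errs() << "allocated type is ambiguous or unknown\n";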