Example #1
Value *VectorBlockGenerator::generateStrideOneLoad(
    ScopStmt &Stmt, const LoadInst *Load, VectorValueMapT &ScalarMaps,
    bool NegativeStride = false) {
  unsigned VectorWidth = getVectorWidth();
  const Value *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, VectorWidth);
  unsigned Offset = NegativeStride ? VectorWidth - 1 : 0;

  Value *NewPointer = generateLocationAccessed(Stmt, Load, Pointer, ScalarMaps[Offset],
                                               GlobalMaps[Offset], VLTS[Offset]);
  Value *VectorPtr =
      Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
  LoadInst *VecLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_vec_full");
  if (!Aligned)
    VecLoad->setAlignment(8);

  if (NegativeStride) {
    SmallVector<Constant *, 16> Indices;
    for (int i = VectorWidth - 1; i >= 0; i--)
      Indices.push_back(ConstantInt::get(Builder.getInt32Ty(), i));
    Constant *SV = llvm::ConstantVector::get(Indices);
    Value *RevVecLoad = Builder.CreateShuffleVector(
        VecLoad, VecLoad, SV, Load->getName() + "_reverse");
    return RevVecLoad;
  }

  return VecLoad;
}
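The negative-stride path above implements a reversed vector load as a forward wide load followed by a lane-reversing shufflevector. A minimal self-contained sketch of that idiom, assuming a similar LLVM 3.x-era IRBuilder API (the free-function packaging and names are illustrative, not Polly's):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Ptr is assumed to point at the lowest address of the Width elements.
static Value *loadReversedVector(IRBuilder<> &Builder, Value *Ptr,
                                 Type *ElemTy, unsigned Width) {
  Type *VecTy = VectorType::get(ElemTy, Width);
  Value *VecPtr =
      Builder.CreateBitCast(Ptr, VecTy->getPointerTo(), "vector_ptr");
  Value *Vec = Builder.CreateLoad(VecPtr, "vec_full");

  // Shuffling with the mask <Width-1, ..., 1, 0> reverses the lanes.
  SmallVector<Constant *, 16> Indices;
  for (int i = Width - 1; i >= 0; --i)
    Indices.push_back(ConstantInt::get(Builder.getInt32Ty(), i));
  return Builder.CreateShuffleVector(Vec, UndefValue::get(VecTy),
                                     ConstantVector::get(Indices), "reverse");
}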
Example #2
// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction*> &Local,
    SmallVectorImpl<Instruction*> &All) {
  SmallSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
       E = Local.rend(); It != E; ++It) {
    Instruction *I = *It;
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      WriteTargets.insert(Store->getPointerOperand());
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it can not race with any writes.
        continue;
      }
    }
    All.push_back(I);
  }
  Local.clear();
}
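A function like this expects its caller to feed it call-free windows of each basic block. A hedged sketch of such a driver loop (the real TSan pass tracks more state; the names here are illustrative):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static void collectMemoryAccesses(
    BasicBlock &BB, SmallVectorImpl<Instruction *> &All,
    void (*choose)(SmallVectorImpl<Instruction *> &Local,
                   SmallVectorImpl<Instruction *> &All)) {
  SmallVector<Instruction *, 8> Local;
  for (Instruction &I : BB) {
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      Local.push_back(&I);
    else if (isa<CallInst>(I) || isa<InvokeInst>(I))
      choose(Local, All); // a call ends the "no calls between" window
  }
  choose(Local, All); // flush whatever remains at the end of the block
}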
Example #3
      static bool hasPrivateLoadStore(Loop *L) {
        const std::vector<Loop*> subLoops = L->getSubLoops();
        std::set<BasicBlock*> subBlocks, blocks;

        for(auto l : subLoops)
          for(auto bb : l->getBlocks())
            subBlocks.insert(bb);
        for(auto bb : L->getBlocks())
          if (subBlocks.find(bb) == subBlocks.end())
            blocks.insert(bb);
        for(auto bb : blocks) {
          for (BasicBlock::iterator inst = bb->begin(), instE = bb->end(); inst != instE; ++inst) {
            unsigned addrSpace = -1;
            if (isa<LoadInst>(*inst)) {
              LoadInst *ld = cast<LoadInst>(&*inst);
              addrSpace = ld->getPointerAddressSpace();
            }
            else if (isa<StoreInst>(*inst)) {
              StoreInst *st = cast<StoreInst>(&*inst);
              addrSpace = st->getPointerAddressSpace();
            }
            if (addrSpace == 0)
              return true;
          }
        }
        return false;
      }
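The two set-building passes above just compute "blocks owned directly by L, excluding nested sub-loops". The same query can also be phrased through LoopInfo, which knows the innermost loop of every block; a hedged sketch:

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static bool hasPrivateLoadStore(Loop *L, LoopInfo &LI) {
  for (BasicBlock *BB : L->getBlocks()) {
    if (LI.getLoopFor(BB) != L) // block belongs to a nested sub-loop
      continue;
    for (Instruction &I : *BB) {
      if (LoadInst *LD = dyn_cast<LoadInst>(&I))
        if (LD->getPointerAddressSpace() == 0)
          return true;
      if (StoreInst *ST = dyn_cast<StoreInst>(&I))
        if (ST->getPointerAddressSpace() == 0)
          return true;
    }
  }
  return false;
}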
Example #4
void InstrumentMemoryAccesses::visitLoadInst(LoadInst &LI) {
  // Instrument a load instruction with a load check.
  Value *AccessSize = ConstantInt::get(SizeTy,
                                       TD->getTypeStoreSize(LI.getType()));
  instrument(LI.getPointerOperand(), AccessSize, LoadCheckFunction, LI);
  ++LoadsInstrumented;
}
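Only the load half of the pass appears in the source. A store-side counterpart would plausibly mirror it, sized by the stored value's type rather than the load's result type; a hypothetical sketch as a free function (StoreCheckFunction and the member wiring are assumed, so the pieces are passed in explicitly):

#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static void instrumentStore(StoreInst &SI, Type *SizeTy, const DataLayout &TD,
                            void (*instrument)(Value *Addr, Value *AccessSize,
                                               Instruction &InsertBefore)) {
  // A store is checked against the size of the value being written.
  Value *AccessSize = ConstantInt::get(
      SizeTy, TD.getTypeStoreSize(SI.getValueOperand()->getType()));
  instrument(SI.getPointerOperand(), AccessSize, SI);
}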
Example #5
void AArch64PromoteConstant::insertDefinitions(Function &F,
                                               GlobalVariable &PromotedGV,
                                               InsertionPoints &InsertPts) {
#ifndef NDEBUG
  // Do more checking for debug purposes.
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
#endif
  assert(!InsertPts.empty() && "Empty uses does not need a definition");

  for (const auto &IPI : InsertPts) {
    // Create the load of the global variable.
    IRBuilder<> Builder(IPI.first);
    LoadInst *LoadedCst = Builder.CreateLoad(&PromotedGV);
    DEBUG(dbgs() << "**********\n");
    DEBUG(dbgs() << "New def: ");
    DEBUG(LoadedCst->print(dbgs()));
    DEBUG(dbgs() << '\n');

    // Update the dominated uses.
    for (auto Use : IPI.second) {
#ifndef NDEBUG
      assert(DT.dominates(LoadedCst,
                          findInsertionPoint(*Use.first, Use.second)) &&
             "Inserted definition does not dominate all its uses!");
#endif
      DEBUG({
            dbgs() << "Use to update " << Use.second << ":";
            Use.first->print(dbgs());
            dbgs() << '\n';
            });
      Use.first->setOperand(Use.second, LoadedCst);
      ++NumPromotedUses;
    }
  }
}
Example #6
void DSGraphStats::visitLoad(LoadInst &LI) {
  if (isNodeForValueUntyped(LI.getOperand(0), 0, LI.getParent()->getParent())) {
    NumUntypedMemAccesses++;
  } else {
    NumTypedMemAccesses++;
  }
}
Example #7
void Interpreter::visitLoadInst(LoadInst &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
  GenericValue Result = LoadValueFromMemory(Ptr, I.getType());
  SetValue(&I, Result, SF);
}
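For contrast, the store path in the same interpreter is nearly symmetric: fetch the value and the pointer, then write through the pointer. Roughly (a from-memory sketch; see LLVM's Execution.cpp for the authoritative version):

void Interpreter::visitStoreInst(StoreInst &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Val = getOperandValue(I.getOperand(0), SF);
  GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
                     I.getOperand(0)->getType());
}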
Example #8
void WorklessInstrument::CreateIfElseBlock(Loop * pLoop, vector<BasicBlock *> & vecAdded)
{
	BasicBlock * pPreHeader = pLoop->getLoopPreheader();
	BasicBlock * pHeader = pLoop->getHeader();
	Function * pInnerFunction = pPreHeader->getParent();
	Module * pModule = pPreHeader->getParent()->getParent();

	BasicBlock * pElseBody = NULL;
	TerminatorInst * pTerminator = NULL;

	BranchInst * pBranch = NULL;
	LoadInst * pLoad1 = NULL;
	LoadInst * pLoad2 = NULL;
	LoadInst * pLoadnumGlobalCounter = NULL;
	BinaryOperator * pAddOne = NULL;
	StoreInst * pStoreNew = NULL;
	CmpInst * pCmp = NULL;
	CallInst * pCall = NULL;
	StoreInst * pStore = NULL;
	AttributeSet emptySet;

	pTerminator = pPreHeader->getTerminator();
	pLoadnumGlobalCounter = new LoadInst(this->numGlobalCounter, "", false, pTerminator);
	pLoadnumGlobalCounter->setAlignment(8);
	pAddOne = BinaryOperator::Create(Instruction::Add, pLoadnumGlobalCounter, this->ConstantLong1, "add", pTerminator);
	pStoreNew = new StoreInst(pAddOne, this->numGlobalCounter, false, pTerminator);
	pStoreNew->setAlignment(8);

	pElseBody = BasicBlock::Create(pModule->getContext(), ".else.body.CPI", pInnerFunction, 0);

	pLoad2 = new LoadInst(this->CURRENT_SAMPLE, "", false, pTerminator);
	pLoad2->setAlignment(8);
	pCmp = new ICmpInst(pTerminator, ICmpInst::ICMP_SLT, pAddOne, pLoad2, "");
	pBranch = BranchInst::Create(pHeader, pElseBody, pCmp );
	ReplaceInstWithInst(pTerminator, pBranch);

	pLoad1 = new LoadInst(this->SAMPLE_RATE, "", false, pElseBody);
	pCall = CallInst::Create(this->geo, pLoad1, "", pElseBody);
  	pCall->setCallingConv(CallingConv::C);
  	pCall->setTailCall(false);
  	pCall->setAttributes(emptySet);

  	CastInst * pCast = CastInst::CreateIntegerCast(pCall, this->LongType, true, "", pElseBody);
  	//pBinary = BinaryOperator::Create(Instruction::Add, pLoad2, pCast, "add", pIfBody);
  	pStore = new StoreInst(pCast, this->CURRENT_SAMPLE, false, pElseBody);
  	pStore->setAlignment(8);

  	pStore = new StoreInst(this->ConstantLong0, this->numGlobalCounter, false, pElseBody);
  	pStore->setAlignment(8);

  	pLoad1 = new LoadInst(this->numInstances, "", false, pElseBody);
  	pLoad1->setAlignment(8);
  	pAddOne = BinaryOperator::Create(Instruction::Add, pLoad1, this->ConstantLong1, "add", pElseBody);
	pStore = new StoreInst(pAddOne, this->numInstances, false, pElseBody);
	pStore->setAlignment(8);

	vecAdded.push_back(pPreHeader);
	vecAdded.push_back(pElseBody);
}
Example #9
extern "C" LLVMValueRef
LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMValueRef Source, const char *Name,
                        LLVMAtomicOrdering Order, unsigned Alignment) {
  LoadInst *LI = new LoadInst(unwrap(Source), 0);
  LI->setAtomic(fromRust(Order));
  LI->setAlignment(Alignment);
  return wrap(unwrap(B)->Insert(LI, Name));
}
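The fromRust() helper referenced above translates the binding's ordering enum into LLVM's. Its shape is the usual exhaustive switch; a hedged sketch written against the C API's LLVMAtomicOrdering (rustc's real binding defines its own LLVMRustAtomicOrdering with the same structure):

#include "llvm-c/Core.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/ErrorHandling.h"

static llvm::AtomicOrdering fromRust(LLVMAtomicOrdering Ordering) {
  switch (Ordering) {
  case LLVMAtomicOrderingNotAtomic:
    return llvm::AtomicOrdering::NotAtomic;
  case LLVMAtomicOrderingUnordered:
    return llvm::AtomicOrdering::Unordered;
  case LLVMAtomicOrderingMonotonic:
    return llvm::AtomicOrdering::Monotonic;
  case LLVMAtomicOrderingAcquire:
    return llvm::AtomicOrdering::Acquire;
  case LLVMAtomicOrderingRelease:
    return llvm::AtomicOrdering::Release;
  case LLVMAtomicOrderingAcquireRelease:
    return llvm::AtomicOrdering::AcquireRelease;
  case LLVMAtomicOrderingSequentiallyConsistent:
    return llvm::AtomicOrdering::SequentiallyConsistent;
  }
  llvm_unreachable("invalid atomic ordering");
}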
Example #10
void PropagateJuliaAddrspaces::visitLoadInst(LoadInst &LI) {
    unsigned AS = LI.getPointerAddressSpace();
    if (!isSpecialAS(AS))
        return;
    Value *Replacement = LiftPointer(LI.getPointerOperand(), LI.getType(), &LI);
    if (!Replacement)
        return;
    LI.setOperand(LoadInst::getPointerOperandIndex(), Replacement);
}
Example #11
/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const DataLayout *DL) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  PointerType *DestTy = cast<PointerType>(CI->getType());
  Type *DestPTy = DestTy->getElementType();
  if (PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
         DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.
      if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Type *IdxTy = DL
                        ? DL->getIntPtrType(SrcTy)
                        : Type::getInt64Ty(SrcTy->getContext());
            Value *Idx = Constant::getNullValue(IdxTy);
            Value *Idxs[2] = { Idx, Idx };
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getDataLayout() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
            SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is then
          // casted to a pointer, this pessimizes pointer analysis a lot.
          (SrcPTy->isPtrOrPtrVectorTy() ==
           LI.getType()->isPtrOrPtrVectorTy()) &&
          IC.getDataLayout()->getTypeSizeInBits(SrcPTy) ==
               IC.getDataLayout()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        NewLoad->setAlignment(LI.getAlignment());
        NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}
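Stripped of the array-GEP special case and the size/address-space guards, the core rewrite is small: load through the original pointer, then cast the loaded value. A hedged sketch against the same-era API (the full version above also carries over atomicity):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static Value *foldLoadOfCast(IRBuilder<> &Builder, LoadInst &LI,
                             Value *CastOp) {
  // Load through the original (uncast) pointer...
  LoadInst *NewLoad =
      Builder.CreateLoad(CastOp, LI.isVolatile(), LI.getName());
  NewLoad->setAlignment(LI.getAlignment());
  // ...then cast the loaded value instead of the pointer.
  return Builder.CreateBitCast(NewLoad, LI.getType());
}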
Example #12
bool AMDGPUCodeGenPrepare::canWidenScalarExtLoad(LoadInst &I) const {
  Type *Ty = I.getType();
  const DataLayout &DL = Mod->getDataLayout();
  int TySize = DL.getTypeSizeInBits(Ty);
  unsigned Align = I.getAlignment() ?
                   I.getAlignment() : DL.getABITypeAlignment(Ty);

  return I.isSimple() && TySize < 32 && Align >= 4 && DA->isUniform(&I);
}
Example #13
/// Move cond_fail down if it can potentially help register promotion later.
static bool sinkCondFail(SILLoop *Loop) {
  // Only handle innermost loops for now.
  if (!Loop->getSubLoops().empty())
    return false;

  bool Changed = false;
  for (auto *BB : Loop->getBlocks()) {
    // A list of CondFails that can be moved down.
    SmallVector<CondFailInst*, 4> CFs;
    // A pair of load and store that are independent of the CondFails and
    // can potentially access the same memory.
    LoadInst *LIOfPair = nullptr;
    bool foundPair = false;

    for (auto &Inst : *BB) {
      if (foundPair) {
        // Move CFs to right before Inst.
        for (unsigned I = 0, E = CFs.size(); I < E; I++) {
          DEBUG(llvm::dbgs() << "sinking cond_fail down ");
          DEBUG(CFs[I]->dump());
          DEBUG(llvm::dbgs() << "  before ");
          DEBUG(Inst.dump());
          CFs[I]->moveBefore(&Inst);
        }
        Changed = true;

        foundPair = false;
        LIOfPair = nullptr;
      }

      if (auto CF = dyn_cast<CondFailInst>(&Inst)) {
        CFs.push_back(CF);
      } else if (auto LI = dyn_cast<LoadInst>(&Inst)) {
        if (addressIndependent(LI->getOperand())) {
          LIOfPair = LI;
        } else {
          CFs.clear();
          LIOfPair = nullptr;
        }
      } else if (auto SI = dyn_cast<StoreInst>(&Inst)) {
        if (addressIndependent(SI->getDest())) {
          if (LIOfPair &&
              addressCanPairUp(SI->getDest(), LIOfPair->getOperand()))
            foundPair = true;
        } else {
          CFs.clear();
          LIOfPair = nullptr;
        }
      } else if (Inst.mayHaveSideEffects()) {
        CFs.clear();
        LIOfPair = nullptr;
      }
    }
  }
  return Changed;
}
Example #14
llvm::Value *StorageSoa::alignedArrayLoad(llvm::Value *val)
{
   VectorType  *vectorType = VectorType::get(Type::FloatTy, 4);
   PointerType *vectorPtr  = PointerType::get(vectorType, 0);

   CastInst *cast = new BitCastInst(val, vectorPtr, name("toVector"), m_block);
   LoadInst *load = new LoadInst(cast, name("alignLoad"), false, m_block);
   load->setAlignment(8);
   return load;
}
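Type::FloatTy is a very old static member that later became Type::getFloatTy(Context). The same helper against a 3.x-era API would read roughly as follows (hedged sketch; the m_block member and name() helper are replaced by plain parameters):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static Value *alignedArrayLoad(Value *Val, BasicBlock *Block) {
  LLVMContext &Ctx = Block->getContext();
  VectorType *VecTy = VectorType::get(Type::getFloatTy(Ctx), 4);
  PointerType *VecPtrTy = PointerType::get(VecTy, 0);

  Value *Cast = new BitCastInst(Val, VecPtrTy, "toVector", Block);
  LoadInst *Load = new LoadInst(Cast, "alignLoad", false, Block);
  Load->setAlignment(8); // matches the original's explicit 8-byte alignment
  return Load;
}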
Example #15
extern "C" LLVMValueRef LLVMBuildAtomicLoad(LLVMBuilderRef B,
                                            LLVMValueRef source,
                                            const char* Name,
                                            AtomicOrdering order,
                                            unsigned alignment) {
    LoadInst* li = new LoadInst(unwrap(source),0);
    li->setAtomic(order);
    li->setAlignment(alignment);
    return wrap(unwrap(B)->Insert(li, Name));
}
Example #16
extern "C" LLVMValueRef LLVMBuildAtomicLoad(LLVMBuilderRef B,
                                            LLVMValueRef source,
                                            const char* Name,
                                            AtomicOrdering order) {
    LoadInst* li = new LoadInst(unwrap(source),0);
    li->setVolatile(true);
    li->setAtomic(order);
    li->setAlignment(sizeof(intptr_t));
    return wrap(unwrap(B)->Insert(li, Name));
}
Example #17
bool AArch64PromoteConstant::insertDefinitions(
    Constant *Cst, InsertionPointsPerFunc &InsPtsPerFunc) {
  // We will create one global variable per Module.
  DenseMap<Module *, GlobalVariable *> ModuleToMergedGV;
  bool HasChanged = false;

  // Traverse all insertion points in all the functions.
  for (const auto &FctToInstPtsIt : InsPtsPerFunc) {
    const InsertionPoints &InsertPts = FctToInstPtsIt.second;
// Do more checking for debug purposes.
#ifndef NDEBUG
    DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(
                            *FctToInstPtsIt.first).getDomTree();
#endif
    assert(!InsertPts.empty() && "Empty uses does not need a definition");

    Module *M = FctToInstPtsIt.first->getParent();
    GlobalVariable *&PromotedGV = ModuleToMergedGV[M];
    if (!PromotedGV) {
      PromotedGV = new GlobalVariable(
          *M, Cst->getType(), true, GlobalValue::InternalLinkage, nullptr,
          "_PromotedConst", nullptr, GlobalVariable::NotThreadLocal);
      PromotedGV->setInitializer(Cst);
      DEBUG(dbgs() << "Global replacement: ");
      DEBUG(PromotedGV->print(dbgs()));
      DEBUG(dbgs() << '\n');
      ++NumPromoted;
      HasChanged = true;
    }

    for (const auto &IPI : InsertPts) {
      // Create the load of the global variable.
      IRBuilder<> Builder(IPI.first->getParent(), IPI.first);
      LoadInst *LoadedCst = Builder.CreateLoad(PromotedGV);
      DEBUG(dbgs() << "**********\n");
      DEBUG(dbgs() << "New def: ");
      DEBUG(LoadedCst->print(dbgs()));
      DEBUG(dbgs() << '\n');

      // Update the dominated uses.
      for (Use *Use : IPI.second) {
#ifndef NDEBUG
        assert(DT.dominates(LoadedCst, findInsertionPoint(*Use)) &&
               "Inserted definition does not dominate all its uses!");
#endif
        DEBUG(dbgs() << "Use to update " << Use->getOperandNo() << ":");
        DEBUG(Use->getUser()->print(dbgs()));
        DEBUG(dbgs() << '\n');
        Use->set(LoadedCst);
        ++NumPromotedUses;
      }
    }
  }
  return HasChanged;
}
Example #18
/// RewriteSingleStoreAlloca - If there is only a single store to this value,
/// replace any loads of it that are directly dominated by the definition with
/// the value stored.
void PromoteMem2Reg::RewriteSingleStoreAlloca(AllocaInst *AI,
                                              AllocaInfo &Info,
                                              LargeBlockInfo &LBI) {
  StoreInst *OnlyStore = Info.OnlyStore;
  bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
  BasicBlock *StoreBB = OnlyStore->getParent();
  int StoreIndex = -1;

  // Clear out UsingBlocks.  We will reconstruct it here if needed.
  Info.UsingBlocks.clear();
  
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E; ) {
    Instruction *UserInst = cast<Instruction>(*UI++);
    if (!isa<LoadInst>(UserInst)) {
      assert(UserInst == OnlyStore && "Should only have load/stores");
      continue;
    }
    LoadInst *LI = cast<LoadInst>(UserInst);
    
    // Okay, if we have a load from the alloca, we want to replace it with the
    // only value stored to the alloca.  We can do this if the value is
    // dominated by the store.  If not, we use the rest of the mem2reg machinery
    // to insert the phi nodes as needed.
    if (!StoringGlobalVal) {  // Non-instructions are always dominated.
      if (LI->getParent() == StoreBB) {
        // If we have a use that is in the same block as the store, compare the
        // indices of the two instructions to see which one came first.  If the
        // load came before the store, we can't handle it.
        if (StoreIndex == -1)
          StoreIndex = LBI.getInstructionIndex(OnlyStore);

        if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
          // Can't handle this load, bail out.
          Info.UsingBlocks.push_back(StoreBB);
          continue;
        }
        
      } else if (LI->getParent() != StoreBB &&
                 !dominates(StoreBB, LI->getParent())) {
        // If the load and store are in different blocks, use BB dominance to
        // check their relationships.  If the store doesn't dom the use, bail
        // out.
        Info.UsingBlocks.push_back(LI->getParent());
        continue;
      }
    }
    
    // Otherwise, we *can* safely rewrite this load.
    Value *ReplVal = OnlyStore->getOperand(0);
    // If the replacement value is the load, this must occur in unreachable
    // code.
    if (ReplVal == LI)
      ReplVal = UndefValue::get(LI->getType());
    LI->replaceAllUsesWith(ReplVal);
    if (AST && LI->getType()->isPointerTy())
      AST->deleteValue(LI);
    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }
}
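The same-block case hinges on an ordering test between two instructions; LargeBlockInfo caches instruction indices so repeated queries don't rescan the block. A naive, hedged equivalent of that ordering test (modern LLVM offers Instruction::comesBefore for this):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include <cassert>

using namespace llvm;

static bool comesBefore(const Instruction *A, const Instruction *B) {
  assert(A->getParent() == B->getParent() && "same-block query only");
  for (const Instruction &I : *A->getParent()) {
    if (&I == A)
      return true;  // reached A first: A precedes B
    if (&I == B)
      return false; // reached B first: B precedes A
  }
  return false; // not reached if both instructions are in the block
}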
Example #19
LLVMValueRef 
mono_llvm_build_aligned_load (LLVMBuilderRef builder, LLVMValueRef PointerVal,
							  const char *Name, gboolean is_volatile, int alignment)
{
	LoadInst *ins;

	ins = unwrap(builder)->CreateLoad(unwrap(PointerVal), is_volatile, Name);
	ins->setAlignment (alignment);

	return wrap(ins);
}
Example #20
	LoadInst* getLoopViLoad(Loop *L)
	{
    	AllocaInst* viAlloc = getLoopVi(L);
    	//Instruction* firstHeaderInstr = L->getHeader()->begin();
    	Instruction* firstHeaderInstr = L->getHeader()->getFirstNonPHI();

    	//If such load exists, return it and don't create a new one.
    	LoadInst* firstHeaderInstrLoad = dyn_cast<LoadInst>(firstHeaderInstr);
    	if(firstHeaderInstrLoad && firstHeaderInstrLoad->getPointerOperand() == viAlloc)
        	return firstHeaderInstrLoad;
    	return new LoadInst(viAlloc, viAlloc->getName() + ".load", firstHeaderInstr);
	}
Example #21
///   %res = load {atomic|volatile} T* %ptr memory_order, align sizeof(T)
/// becomes:
///   %res = call T @llvm.nacl.atomic.load.i<size>(%ptr, memory_order)
void AtomicVisitor::visitLoadInst(LoadInst &I) {
  return; // XXX EMSCRIPTEN
  if (I.isSimple())
    return;
  PointerHelper<LoadInst> PH(*this, I);
  const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
      findAtomicIntrinsic(I, Intrinsic::nacl_atomic_load, PH.PET);
  checkAlignment(I, I.getAlignment(), PH.BitSize / CHAR_BIT);
  Value *Args[] = {PH.P, freezeMemoryOrder(I, I.getOrdering())};
  replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET,
                                      Args);
}
Example #22
void smtit::performTest1() {

  for (Module::iterator FI = Mod->begin(), FE = Mod->end(); FI != FE; ++FI) {
    Function *Func = &*FI;
    // DEBUG(errs() << *Func << "\n");
    for (Function::iterator BI = Func->begin(), BE = Func->end(); BI != BE;
         ++BI) {
      BasicBlock *BB = &*BI;
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        Instruction *BBI = &*I;
        //if (true == isa<StoreInst>(BBI)) {
        if (true == isa<LoadInst>(BBI)) {
          LoadInst *li  = dyn_cast<LoadInst>(BBI);
          Value *ptrOp = li->getPointerOperand();
          DEBUG(errs() << *li << "\t Result Name: " << li->getName() << "\t Pointer Name: " << ptrOp->getName() << "\n");

          // DEBUG(errs() << "\tStore Instruction: " << *BBI << " \n");
          // DEBUG(errs() << "\t\tPointerType: " << isLLVMPAPtrTy(SI->getType())
          // << " \n");
          // Instruction* V = cast<Instruction>(SI->getOperand(1));
          // DEBUG(errs() << "\tOperand : " << *V << " \n");
          // DEBUG(errs() << "\t\tPointerType: " << isLLVMPAPtrTy(V->getType())
          // << " \n");
        } else if(true == isa<GetElementPtrInst>(BBI)) {
          GetElementPtrInst *gep  = dyn_cast<GetElementPtrInst>(BBI);
          DEBUG(errs() << *gep << "\t Result Name: " << gep->getName() << "\n");
          // DEBUG(errs() << "\tInstruction: " << *BBI << " \n");
          // DEBUG(errs() << "\t\tPointerType: " <<
          // isLLVMPAPtrTy(BBI->getType()) << " \n");
        }

        // For def-use chains: All the uses of the definition
        //DEBUG(errs() << *BBI << "\n");
        /*
        for (User *U : BBI->users()) {
          if (Instruction *Inst = dyn_cast<Instruction>(U)) {
            DEBUG(errs()<< " " <<  *Inst << "\n");
          }
        }

        for (Value::user_iterator i = BBI->user_begin(), e = BBI->user_end();
              i != e; ++i) {
          if (Instruction *user_inst = dyn_cast<Instruction>(*i)) {
            DEBUG(errs()<< " " << *user_inst << "\n");
          }
        }
        */
      }
    }
  }
}
Example #23
bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}
Example #24
LLVMValueRef LLVM_General_BuildLoad(
	LLVMBuilderRef b,
	LLVMValueRef p,
	unsigned align,
	LLVMBool isVolatile,
	LLVMAtomicOrdering atomicOrdering,
	LLVMSynchronizationScope synchScope,
	const char *name
) {
	LoadInst *i = unwrap(b)->CreateAlignedLoad(unwrap(p), align, isVolatile, name);
	i->setOrdering(unwrap(atomicOrdering));
	if (atomicOrdering != LLVMAtomicOrderingNotAtomic) i->setSynchScope(unwrap(synchScope));
	return wrap(i);
}
Example #25
bool CodeExtractor::isLegalToShrinkwrapLifetimeMarkers(
    Instruction *Addr) const {
  AllocaInst *AI = cast<AllocaInst>(Addr->stripInBoundsConstantOffsets());
  Function *Func = (*Blocks.begin())->getParent();
  for (BasicBlock &BB : *Func) {
    if (Blocks.count(&BB))
      continue;
    for (Instruction &II : BB) {

      if (isa<DbgInfoIntrinsic>(II))
        continue;

      unsigned Opcode = II.getOpcode();
      Value *MemAddr = nullptr;
      switch (Opcode) {
      case Instruction::Store:
      case Instruction::Load: {
        if (Opcode == Instruction::Store) {
          StoreInst *SI = cast<StoreInst>(&II);
          MemAddr = SI->getPointerOperand();
        } else {
          LoadInst *LI = cast<LoadInst>(&II);
          MemAddr = LI->getPointerOperand();
        }
        // Global variable can not be aliased with locals.
        if (dyn_cast<Constant>(MemAddr))
          break;
        Value *Base = MemAddr->stripInBoundsConstantOffsets();
        if (!dyn_cast<AllocaInst>(Base) || Base == AI)
          return false;
        break;
      }
      default: {
        IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(&II);
        if (IntrInst) {
          if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start ||
              IntrInst->getIntrinsicID() == Intrinsic::lifetime_end)
            break;
          return false;
        }
        // Treat all the other cases conservatively if it has side effects.
        if (II.mayHaveSideEffects())
          return false;
      }
      }
    }
  }

  return true;
}
Example #26
void LLSTDebuggingPass::insertLoadInstCheck(Function& F)
{
    Value* BrokenPointerMessage = m_builder->CreateGlobalStringPtr("\npointer is broken\n");

    InstructionVector Loads;
    for (Function::iterator BB = F.begin(); BB != F.end(); ++BB)
    {
        for(BasicBlock::iterator II = BB->begin(); II != BB->end(); ++II)
        {
            if (LoadInst* Load = dyn_cast<LoadInst>(II)) {
                Loads.push_back(Load);
            }
        }
    }

    for(std::size_t i = 0; i < Loads.size(); i++)
    {
        LoadInst* Load = dyn_cast<LoadInst>(Loads[i]);
        if (belongsToSmalltalkType( Load->getType() )) {

            //split BB right after load inst. The new BB contains code that will be executed if pointer is OK
            BasicBlock* PointerIsOkBB = Load->getParent()->splitBasicBlock(++( static_cast<BasicBlock::iterator>(Load) ));
            BasicBlock* PointerIsBrokenBB = BasicBlock::Create(m_module->getContext(), "", &F, PointerIsOkBB);
            BasicBlock* PointerIsNotSmallIntBB = BasicBlock::Create(m_module->getContext(), "", &F, PointerIsBrokenBB);

            Instruction* branchToPointerIsOkBB = ++( static_cast<BasicBlock::iterator>(Load) );
            //branchToPointerIsOkBB is created by splitBasicBlock() just after load inst
            //We force builder to insert instructions before branchToPointerIsOkBB
            m_builder->SetInsertPoint(branchToPointerIsOkBB);

            //If pointer to class is null, jump to PointerIsBroken, otherwise to PointerIsOkBB
            Value* objectPtr = m_builder->CreateBitCast( Load, m_baseTypes.object->getPointerTo());

            Value* isSmallInt = m_builder->CreateCall(isSmallInteger, objectPtr);
            m_builder->CreateCondBr(isSmallInt, PointerIsOkBB, PointerIsNotSmallIntBB);

            m_builder->SetInsertPoint(PointerIsNotSmallIntBB);
            Value* klassPtr = m_builder->CreateCall(getObjectClass, objectPtr);
            Value* pointerIsNull = m_builder->CreateICmpEQ(klassPtr, ConstantPointerNull::get(m_baseTypes.klass->getPointerTo()) );
            m_builder->CreateCondBr(pointerIsNull, PointerIsBrokenBB, PointerIsOkBB);

            branchToPointerIsOkBB->eraseFromParent(); //We don't need it anymore

            m_builder->SetInsertPoint(PointerIsBrokenBB);
            m_builder->CreateCall(_printf, BrokenPointerMessage);
            m_builder->CreateBr(PointerIsOkBB);
        }
    }
}
Example #27
void Closure::unpack_struct(Scope<Value *> &dst,
                            llvm::Type *
#if LLVM_VERSION >= 37
                            type
#endif
                            ,
                            Value *src,
                            IRBuilder<> *builder) {
    // type, type of src should be a pointer to a struct of the type returned by build_type
    int idx = 0;
    LLVMContext &context = builder->getContext();
    vector<string> nm = names();
    for (size_t i = 0; i < nm.size(); i++) {
#if LLVM_VERSION >= 37
        Value *ptr = builder->CreateConstInBoundsGEP2_32(type, src, 0, idx++);
#else
        Value *ptr = builder->CreateConstInBoundsGEP2_32(src, 0, idx++);
#endif
        LoadInst *load = builder->CreateLoad(ptr);
        if (load->getType()->isPointerTy()) {
            // Give it a unique type so that tbaa tells llvm that this can't alias anything
            LLVMMDNodeArgumentType md_args[] = {MDString::get(context, nm[i])};
            load->setMetadata("tbaa", MDNode::get(context, md_args));
        }
        dst.push(nm[i], load);
        load->setName(nm[i]);
    }
}
Example #28
//
// Method: runOnModule()
//
// Description:
//  Entry point for this LLVM pass. Search for insert/extractvalue instructions
//  that can be simplified.
//
// Inputs:
//  M - A reference to the LLVM module to transform.
//
// Outputs:
//  M - The transformed LLVM module.
//
// Return value:
// true  - The module was modified.
// false - The module was not modified.
//
bool SimplifyLoad::runOnModule(Module& M) {
  // Repeat till no change
  bool changed;
  do {
    changed = false;
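    // Note: as excerpted, nothing below ever sets 'changed' back to true or
    // increments numErased, so this do/while runs exactly once.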
    for (Module::iterator F = M.begin(); F != M.end(); ++F) {
      for (Function::iterator B = F->begin(), FE = F->end(); B != FE; ++B) {      
        for (BasicBlock::iterator I = B->begin(), BE = B->end(); I != BE;) {
          LoadInst *LI = dyn_cast<LoadInst>(I++);
          if(!LI)
            continue;
          if(LI->hasOneUse()) {
            if(CastInst *CI = dyn_cast<CastInst>(*(LI->use_begin()))) {
              if(LI->getType()->isPointerTy()) {
                if(ConstantExpr *CE = dyn_cast<ConstantExpr>(LI->getOperand(0))) {
                  if(const PointerType *PTy = dyn_cast<PointerType>(CE->getOperand(0)->getType()))
                    if(PTy->getElementType() == CI->getType()) {
                      LoadInst *LINew = new LoadInst(CE->getOperand(0), "", LI);
                      CI->replaceAllUsesWith(LINew);
                    }
                }
              }
            }
          }
        }
      }
    }
  } while(changed);
  return (numErased > 0);
}
Example #29
/// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
/// basic block as the load, unless conditions are unfavorable. This allows
/// SelectionDAG to fold the extend into the load.
///
bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
  // Look for a load being extended.
  LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
  if (!LI) return false;

  // If they're already in the same block, there's nothing to do.
  if (LI->getParent() == I->getParent())
    return false;

  // If the load has other users and the truncate is not free, this probably
  // isn't worthwhile.
  if (!LI->hasOneUse() &&
      TLI && !TLI->isTruncateFree(I->getType(), LI->getType()))
    return false;

  // Check whether the target supports casts folded into loads.
  unsigned LType;
  if (isa<ZExtInst>(I))
    LType = ISD::ZEXTLOAD;
  else {
    assert(isa<SExtInst>(I) && "Unexpected ext type!");
    LType = ISD::SEXTLOAD;
  }
  if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
    return false;

  // Move the extend into the same block as the load, so that SelectionDAG
  // can fold it.
  I->removeFromParent();
  I->insertAfter(LI);
  return true;
}
Example #30
        virtual bool runOnFunction(Function &F) {
            //F.dump();
            bool changed = false;
            for (inst_iterator inst_it = inst_begin(F), _inst_end = inst_end(F); inst_it != _inst_end; ++inst_it) {
                LoadInst *li = dyn_cast<LoadInst>(&*inst_it);
                if (!li) continue;

                ConstantExpr *ce = dyn_cast<ConstantExpr>(li->getOperand(0));
                // Not 100% sure what the isGEPWithNoNotionalOverIndexing() means, but
                // at least it checks if it's a gep:
                if (ce && ce->isGEPWithNoNotionalOverIndexing() && ce->getOperand(0)->getType() == g.llvm_flavor_type_ptr) {
                    changed = handleFlavor(li, ce);
                }

                GlobalVariable *gv = dyn_cast<GlobalVariable>(li->getOperand(0));
                if (!gv) continue;

                llvm::Type* gv_t = gv->getType();

                if (gv_t == g.llvm_bool_type_ptr->getPointerTo()) {
                    changed = handleBool(li, gv) || changed;
                    continue;
                }

                if (gv_t == g.llvm_class_type_ptr->getPointerTo()) {
                    changed = handleCls(li, gv) || changed;
                    continue;
                }
            }

            return changed;
        }