Example #1
IRAccess
TempScopInfo::buildIRAccess(Instruction *Inst, Loop *L, Region *R,
                            const ScopDetection::BoxedLoopsSetTy *BoxedLoops) {
  unsigned Size;
  Type *SizeType;
  enum IRAccess::TypeKind Type;

  if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    SizeType = Load->getType();
    Size = TD->getTypeStoreSize(SizeType);
    Type = IRAccess::READ;
  } else {
    StoreInst *Store = cast<StoreInst>(Inst);
    SizeType = Store->getValueOperand()->getType();
    Size = TD->getTypeStoreSize(SizeType);
    Type = IRAccess::MUST_WRITE;
  }

  const SCEV *AccessFunction = SE->getSCEVAtScope(getPointerOperand(*Inst), L);
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFunction));

  assert(BasePointer && "Could not find base pointer");
  AccessFunction = SE->getMinusSCEV(AccessFunction, BasePointer);

  auto AccItr = InsnToMemAcc.find(Inst);
  if (PollyDelinearize && AccItr != InsnToMemAcc.end())
    return IRAccess(Type, BasePointer->getValue(), AccessFunction, Size, true,
                    AccItr->second.DelinearizedSubscripts,
                    AccItr->second.Shape->DelinearizedSizes);

  // Check if the access depends on a loop contained in a non-affine subregion.
  bool isVariantInNonAffineLoop = false;
  if (BoxedLoops) {
    SetVector<const Loop *> Loops;
    findLoops(AccessFunction, Loops);
    for (const Loop *L : Loops)
      if (BoxedLoops->count(L))
        isVariantInNonAffineLoop = true;
  }

  bool IsAffine = !isVariantInNonAffineLoop &&
                  isAffineExpr(R, AccessFunction, *SE, BasePointer->getValue());

  SmallVector<const SCEV *, 4> Subscripts, Sizes;
  Subscripts.push_back(AccessFunction);
  Sizes.push_back(SE->getConstant(ZeroOffset->getType(), Size));

  if (!IsAffine && Type == IRAccess::MUST_WRITE)
    Type = IRAccess::MAY_WRITE;

  return IRAccess(Type, BasePointer->getValue(), AccessFunction, Size, IsAffine,
                  Subscripts, Sizes);
}
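For intuition, here is a hand-worked sketch (array, loop, and element type are illustrative, not taken from the source) of how the base pointer is split off the access function for a simple affine store:

  ; for (i = 0; i < n; i++) A[i] = ...;   with i32 elements
  ;   SCEV of the pointer:  {%A,+,4}<%for.body>
  ;   BasePointer:          %A
  ;   AccessFunction:       {0,+,4}<%for.body>   (after getMinusSCEV)
  ; The expression is affine, so Type stays IRAccess::MUST_WRITE.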
Example #2
extern "C" LLVMValueRef LLVMBuildAtomicStore(LLVMBuilderRef B,
                                             LLVMValueRef val,
                                             LLVMValueRef target,
                                             AtomicOrdering order,
                                             unsigned alignment) {
    StoreInst *si = new StoreInst(unwrap(val), unwrap(target));
    si->setVolatile(true); // this binding unconditionally marks the store volatile
    si->setAtomic(order);
    si->setAlignment(alignment);
    return wrap(unwrap(B)->Insert(si));
}
Example #3
LLVMValueRef 
mono_llvm_build_aligned_store (LLVMBuilderRef builder, LLVMValueRef Val, LLVMValueRef PointerVal,
							   gboolean is_volatile, int alignment)
{
	StoreInst *ins;

	ins = unwrap(builder)->CreateStore(unwrap(Val), unwrap(PointerVal), is_volatile);
	ins->setAlignment (alignment);

	return wrap (ins);
}
Example #4
void MutationGen::genSTDStore(Instruction * inst, StringRef fname, int index){
	StoreInst *st = cast<StoreInst>(inst);
	Type* t = st->getValueOperand()->getType();
	if(isSupportedType(t)){
		std::stringstream ss;
		ss<<"STD:"<<std::string(fname)<<":"<<index<< ":"<<inst->getOpcode()
			<< ":"<<0<<'\n';
		ofresult<<ss.str();
		ofresult.flush();
		muts_num++;
	}
}
Example #5
bool AMDGPURewriteOutArguments::checkArgumentUses(Value &Arg) const {
  const int MaxUses = 10;
  int UseCount = 0;

  for (Use &U : Arg.uses()) {
    StoreInst *SI = dyn_cast<StoreInst>(U.getUser());
    if (UseCount > MaxUses)
      return false;

    if (!SI) {
      auto *BCI = dyn_cast<BitCastInst>(U.getUser());
      if (!BCI || !BCI->hasOneUse())
        return false;

      // We don't handle multiple stores currently, so stores to aggregate
      // pointers aren't worth the trouble since they are canonically split up.
      Type *DestEltTy = BCI->getType()->getPointerElementType();
      if (DestEltTy->isAggregateType())
        return false;

      // We could handle these if we had a convenient way to bitcast between
      // them.
      Type *SrcEltTy = Arg.getType()->getPointerElementType();
      if (SrcEltTy->isArrayTy())
        return false;

      // Special case handle structs with single members. It is useful to handle
      // some casts between structs and non-structs, but we can't bitcast
      // directly between them. Blender uses some casts that look like
      // { <3 x float> }* to <4 x float>*
      if ((SrcEltTy->isStructTy() && (SrcEltTy->getStructNumElements() != 1)))
        return false;

      // Clang emits OpenCL 3-vector type accesses with a bitcast to the
      // equivalent 4-element vector and accesses that, and we're looking for
      // this pointer cast.
      if (DL->getTypeAllocSize(SrcEltTy) != DL->getTypeAllocSize(DestEltTy))
        return false;

      return checkArgumentUses(*BCI);
    }

    if (!SI->isSimple() ||
        U.getOperandNo() != StoreInst::getPointerOperandIndex())
      return false;

    ++UseCount;
  }

  // Skip unused arguments.
  return UseCount > 0;
}
Example #6
SILInstruction*
SILCombiner::visitAllocExistentialBoxInst(AllocExistentialBoxInst *AEBI) {

    // Optimize away the pattern below that happens when exceptions are created
    // and in some cases, due to inlining, are not needed.
    //
    //   %6 = alloc_existential_box $ErrorType, $ColorError
    //   %7 = enum $VendingMachineError, #ColorError.Red
    //   store %7 to %6#1 : $*ColorError
    //   debug_value %6#0 : $ErrorType
    //   strong_release %6#0 : $ErrorType

    StoreInst *SingleStore = nullptr;
    StrongReleaseInst *SingleRelease = nullptr;

    // For each user U of the alloc_existential_box...
    for (auto U : getNonDebugUses(*AEBI)) {
        // Record stores into the box.
        if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
            // If this is not the only store into the box then bail out.
            if (SingleStore) return nullptr;
            SingleStore = SI;
            continue;
        }

        // Record releases of the box.
        if (auto *RI = dyn_cast<StrongReleaseInst>(U->getUser())) {
            // If this is not the only release of the box then bail out.
            if (SingleRelease) return nullptr;
            SingleRelease = RI;
            continue;
        }

        // If there are other users to the box then bail out.
        return nullptr;
    }

    if (SingleStore && SingleRelease) {
        // Release the value that was stored into the existential box. The box
        // is going away so we need to release the stored value now.
        Builder.setInsertionPoint(SingleStore);
        Builder.createReleaseValue(AEBI->getLoc(), SingleStore->getSrc());

        // Erase the instruction that stores into the box and the release that
        // releases the box, and finally, erase the box itself.
        eraseInstFromFunction(*SingleRelease);
        eraseInstFromFunction(*SingleStore);
        return eraseInstFromFunction(*AEBI);
    }

    return nullptr;
}
Example #7
void PlayerInst::purchase_from_store(GameState* gs, const GameAction& action) {
	StoreInst* store = (StoreInst*)gs->get_instance(action.use_id);
	if (!store) {
		return;
	}
	LANARTS_ASSERT(dynamic_cast<StoreInst*>(gs->get_instance(action.use_id)));
	StoreInventory& inv = store->inventory();
	StoreItemSlot& slot = inv.get(action.use_id2);
	if (gold() >= slot.cost) {
		inventory().add(slot.item);
		gold() -= slot.cost;
		slot.item.clear();
	}
}
Example #8
bool CodeExtractor::isLegalToShrinkwrapLifetimeMarkers(
    Instruction *Addr) const {
  AllocaInst *AI = cast<AllocaInst>(Addr->stripInBoundsConstantOffsets());
  Function *Func = (*Blocks.begin())->getParent();
  for (BasicBlock &BB : *Func) {
    if (Blocks.count(&BB))
      continue;
    for (Instruction &II : BB) {

      if (isa<DbgInfoIntrinsic>(II))
        continue;

      unsigned Opcode = II.getOpcode();
      Value *MemAddr = nullptr;
      switch (Opcode) {
      case Instruction::Store:
      case Instruction::Load: {
        if (Opcode == Instruction::Store) {
          StoreInst *SI = cast<StoreInst>(&II);
          MemAddr = SI->getPointerOperand();
        } else {
          LoadInst *LI = cast<LoadInst>(&II);
          MemAddr = LI->getPointerOperand();
        }
        // Global variable can not be aliased with locals.
        if (isa<Constant>(MemAddr))
          break;
        Value *Base = MemAddr->stripInBoundsConstantOffsets();
        if (!isa<AllocaInst>(Base) || Base == AI)
          return false;
        break;
      }
      default: {
        IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(&II);
        if (IntrInst) {
          if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start ||
              IntrInst->getIntrinsicID() == Intrinsic::lifetime_end)
            break;
          return false;
        }
        // Treat all the other cases conservatively if it has side effects.
        if (II.mayHaveSideEffects())
          return false;
      }
      }
    }
  }

  return true;
}
Example #9
bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}
Example #10
// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value.
    StoreInst *SI = cast<StoreInst>(Inst);
    return SI->getPointerOperand() == User;
  }
  default:
    return false;
  }
}
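A hedged illustration of the Store case (IR hand-written for this note): the use must come through the pointer operand, so the second store below is rejected because the candidate pointer escapes as a stored value:

  store i32 %v, i32* %alloca        ; ok: %alloca is the pointer operand
  store i32* %alloca, i32** %slot   ; not ok: %alloca is the value being stored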
Example #11
LLVMValueRef LLVM_General_BuildStore(
	LLVMBuilderRef b,
	LLVMValueRef v,
	LLVMValueRef p,
	unsigned align,
	LLVMBool isVolatile,
	LLVMAtomicOrdering atomicOrdering,
	LLVMSynchronizationScope synchScope,
	const char *name
) {
	StoreInst *i = unwrap(b)->CreateAlignedStore(unwrap(v), unwrap(p), align, isVolatile);
	i->setName(name);
	i->setOrdering(unwrap(atomicOrdering));
	if (atomicOrdering != LLVMAtomicOrderingNotAtomic) i->setSynchScope(unwrap(synchScope));
	return wrap(i);
}
Example #12
/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}
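As a minimal standalone sketch, assuming an era-matching LLVM build (typed pointers, unsigned alignment argument on CreateAlignedStore), this is the same cast-the-pointer-not-the-value pattern outside of InstCombine:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
#include <iterator>
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  // void @f(i32* %p, float %v)
  auto *FTy = FunctionType::get(
      Type::getVoidTy(Ctx),
      {Type::getInt32PtrTy(Ctx), Type::getFloatTy(Ctx)}, false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
  auto *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> B(BB);
  Value *Ptr = &*F->arg_begin();
  Value *V = &*std::next(F->arg_begin());
  // Rewrite the pointer to match the value's type; the value (and thus the
  // store width) is left untouched.
  unsigned AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
  B.CreateAlignedStore(
      V, B.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)), 4);
  B.CreateRetVoid();
  M.print(errs(), nullptr);
  return 0;
}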
Example #13
/// \brief Check whether a loop instruction is safe for loop versioning.
/// Returns true if it is safe, false otherwise.
/// The checks are:
/// 1) All loads and stores in the loop body must be non-atomic and non-volatile.
/// 2) Function calls must be safe, i.e. they must not access memory.
/// 3) The loop body must not contain any instruction that may throw.
bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
  assert(I != nullptr && "Null instruction found!");
  // Check function call safety
  if (isa<CallInst>(I) && !AA->doesNotAccessMemory(CallSite(I))) {
    DEBUG(dbgs() << "    Unsafe call site found.\n");
    return false;
  }
  // Avoid loops with the possibility of a throw
  if (I->mayThrow()) {
    DEBUG(dbgs() << "    May throw instruction found in loop body\n");
    return false;
  }
  // If current instruction is load instructions
  // make sure it's a simple load (non atomic & non volatile)
  if (I->mayReadFromMemory()) {
    LoadInst *Ld = dyn_cast<LoadInst>(I);
    if (!Ld || !Ld->isSimple()) {
      DEBUG(dbgs() << "    Found a non-simple load.\n");
      return false;
    }
    LoadAndStoreCounter++;
    collectStridedAccess(Ld);
    Value *Ptr = Ld->getPointerOperand();
    // Check loop invariant.
    if (SE->isLoopInvariant(SE->getSCEV(Ptr), CurLoop))
      InvariantCounter++;
  }
  // If current instruction is store instruction
  // make sure it's a simple store (non atomic & non volatile)
  else if (I->mayWriteToMemory()) {
    StoreInst *St = dyn_cast<StoreInst>(I);
    if (!St || !St->isSimple()) {
      DEBUG(dbgs() << "    Found a non-simple store.\n");
      return false;
    }
    LoadAndStoreCounter++;
    collectStridedAccess(St);
    Value *Ptr = St->getPointerOperand();
    // Check loop invariant.
    if (SE->isLoopInvariant(SE->getSCEV(Ptr), CurLoop))
      InvariantCounter++;

    IsReadOnlyLoop = false;
  }
  return true;
}
Example #14
/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *Ptr = SI.getPointerOperand();
  Value *V = SI.getValueOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    StoreInst *NewStore = IC.Builder->CreateAlignedStore(
        V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
        SI.getAlignment());
    for (const auto &MDPair : MD) {
      unsigned ID = MDPair.first;
      MDNode *N = MDPair.second;
      // Note, essentially every kind of metadata should be preserved here! This
      // routine is supposed to clone a store instruction changing *only its
      // type*. The only metadata it makes sense to drop is metadata which is
      // invalidated when the pointer type changes. This should essentially
      // never be the case in LLVM, but we explicitly switch over only known
      // metadata to be conservatively correct. If you are adding metadata to
      // LLVM which pertains to stores, you almost certainly want to add it
      // here.
      switch (ID) {
      case LLVMContext::MD_dbg:
      case LLVMContext::MD_tbaa:
      case LLVMContext::MD_prof:
      case LLVMContext::MD_fpmath:
      case LLVMContext::MD_tbaa_struct:
      case LLVMContext::MD_alias_scope:
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_mem_parallel_loop_access:
      case LLVMContext::MD_nonnull:
        // All of these directly apply.
        NewStore->setMetadata(ID, N);
        break;

      case LLVMContext::MD_invariant_load:
      case LLVMContext::MD_range:
        break;
      }
    }
    return true;
  }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return false;
}
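A before/after IR sketch, hand-written for this note, of the bitcast fold above (typed-pointer syntax to match the era of this code):

  %f = bitcast i32 %x to float
  store float %f, float* %p
becomes:
  %p.i32 = bitcast float* %p to i32*
  store i32 %x, i32* %p.i32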
Example #15
bool SFVInstVisitor::visitStoreInst(StoreInst &SI) {
  if ((Callbacks & Visit::Stores) == 0) return false;

  if (LatticeValue *LV = getLatticeValueForField(SI.getOperand(1)))
    if (LV->visitStore(SI))
      return RemoveLatticeValueAtBottom(LV);

  return false;
}
Example #16
void TracingNoGiri::visitStoreInst(StoreInst &SI) {
  instrumentLock(&SI);

  // Cast the pointer into a void pointer type.
  Value * Pointer = SI.getPointerOperand();
  Pointer = castTo(Pointer, VoidPtrType, Pointer->getName(), &SI);
  // Get the size of the stored data.
  uint64_t size = TD->getTypeStoreSize(SI.getOperand(0)->getType());
  Value *StoreSize = ConstantInt::get(Int64Type, size);
  // Get the ID of the store instruction.
  Value *StoreID = ConstantInt::get(Int32Type, lsNumPass->getID(&SI));
  // Create the call to the run-time to record the store instruction.
  std::vector<Value *> args = make_vector<Value *>(StoreID, Pointer, StoreSize, 0);
  CallInst::Create(RecordStore, args, "", &SI);

  instrumentUnlock(&SI);
  ++NumStores; // Update statistics
}
Example #17
void GCInvariantVerifier::visitStoreInst(StoreInst &SI) {
    Type *VTy = SI.getValueOperand()->getType();
    if (VTy->isPointerTy()) {
        /* We currently don't obey this for arguments. That's ok - they're
           externally rooted. */
        if (!isa<Argument>(SI.getValueOperand())) {
            unsigned AS = cast<PointerType>(VTy)->getAddressSpace();
            Check(AS != AddressSpace::CalleeRooted &&
                  AS != AddressSpace::Derived,
                  "Illegal store of decayed value", &SI);
        }
    }
    VTy = SI.getPointerOperand()->getType();
    if (VTy->isPointerTy()) {
        unsigned AS = cast<PointerType>(VTy)->getAddressSpace();
        Check(AS != AddressSpace::CalleeRooted,
              "Illegal store to callee rooted value", &SI);
    }
}
Example #18
/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    combineStoreToNewValue(IC, SI, V);
    return true;
  }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return false;
}
Example #19
/*
 * Very sloppy implementation for quick prototyping
 * TODO: Assumption is that the first field contains the number of iterations -- if not, then modify source for now
 */
Value *HeteroOMPTransform::find_loop_upper_bound(Value *context) {

	for (Value::use_iterator i = context->use_begin(), e = context->use_end(); i != e; ++i) {
		Instruction *insn = dyn_cast<Instruction>(*i);
		GetElementPtrInst *GEP; StoreInst *SI;
		if ((GEP = dyn_cast<GetElementPtrInst>(insn)) && 
			isa<ConstantInt>(GEP->getOperand(2)) && 
			((cast<ConstantInt>(GEP->getOperand(2)))->equalsInt(0))) { /// README:NOTE THE ASSUMPTION THAT THE FIRST ELEMENT IN THE CONTEXT IS MAX ITERATION OF PARALLEL LOOP
				for (Value::use_iterator I = insn->use_begin(), E = insn->use_end(); I != E; ++I) {
					if ((SI = dyn_cast<StoreInst>(*I))) { 
						Value *op_0 = SI->getOperand(0);
						return op_0;
					}
				}
		}
	}
	return NULL;
}
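The IR shape this walker expects, sketched by hand (struct and value names are illustrative): a GEP of field 0 of the context, followed by a store whose stored operand is the trip count that gets returned:

  %gep = getelementptr inbounds %struct.ctx* %context, i32 0, i32 0
  store i32 %upper_bound, i32* %gep   ; find_loop_upper_bound returns %upper_bound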
Example #20
// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(Inst);
    // Currently we only handle the case where the pointer operand is a GEP,
    // so check for that.
    return isa<GetElementPtrInst>(LI->getPointerOperand()) && !LI->isVolatile();
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value, plus
    // since it should be canonical form, the User should be a GEP.
    StoreInst *SI = cast<StoreInst>(Inst);
    return (SI->getPointerOperand() == User) && isa<GetElementPtrInst>(User) && !SI->isVolatile();
  }
  default:
    return false;
  }
}
Example #21
/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth.  We used to not support floating point or vector
/// atomics in the IR at all.  The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store.  The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  IRBuilder<> Builder(SI);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
  NewSI->setAlignment(SI->getAlignment());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSynchScope());
  DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}
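A before/after sketch of the conversion (hand-written; type, ordering, and alignment chosen for illustration):

  store atomic float %v, float* %p seq_cst, align 4
becomes:
  %v.i = bitcast float %v to i32
  %p.i = bitcast float* %p to i32*
  store atomic i32 %v.i, i32* %p.i seq_cst, align 4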
Example #22
StructuredModuleEditor::ValueList StructuredModuleEditor::getUseChain(
		Value *V) {
	ValueList Vals;

	for (Value::use_iterator UI = V->use_begin(), UE = V->use_end(); UI != UE;
			++UI) {
		Value *ValueToPush;

		Instruction *Inst = dyn_cast<Instruction>(*UI);
		if (Inst && Inst->getOpcode() == Instruction::Store) {
			StoreInst *StInst = dyn_cast<StoreInst>(Inst);
			Value *Storee = StInst->getPointerOperand();
			ValueToPush = Storee;
		} else
			ValueToPush = *UI;

		Vals.push_back(ValueToPush);
	}

	return Vals;
}
Example #23
void TempScopInfo::buildAccessFunctions(Region &R, BasicBlock &BB) {
  AccFuncSetType Functions;

  for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I) {
    Instruction &Inst = *I;
    if (isa<LoadInst>(&Inst) || isa<StoreInst>(&Inst)) {
      unsigned Size;
      enum IRAccess::TypeKind Type;

      if (LoadInst *Load = dyn_cast<LoadInst>(&Inst)) {
        Size = TD->getTypeStoreSize(Load->getType());
        Type = IRAccess::READ;
      } else {
        StoreInst *Store = cast<StoreInst>(&Inst);
        Size = TD->getTypeStoreSize(Store->getValueOperand()->getType());
        Type = IRAccess::WRITE;
      }

      const SCEV *AccessFunction = SE->getSCEV(getPointerOperand(Inst));
      const SCEVUnknown *BasePointer =
          dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFunction));

      assert(BasePointer && "Could not find base pointer");
      AccessFunction = SE->getMinusSCEV(AccessFunction, BasePointer);

      bool IsAffine =
          isAffineExpr(&R, AccessFunction, *SE, BasePointer->getValue());

      Functions.push_back(
          std::make_pair(IRAccess(Type, BasePointer->getValue(), AccessFunction,
                                  Size, IsAffine), &Inst));
    }
  }

  if (Functions.empty())
    return;

  AccFuncSetType &Accs = AccFuncMap[&BB];
  Accs.insert(Accs.end(), Functions.begin(), Functions.end());
}
Example #24
void ExecutorUtil::checkStoreInst(Instruction *inst, 
                                  std::vector<GlobalSharedTaint> &glSet, 
                                  std::vector<GlobalSharedTaint> &sharedSet, 
                                  AliasAnalysis &AA, 
                                  RelFlowSet &flowSet) {
  bool relToShared = false;
  StoreInst *store = cast<StoreInst>(inst); // callers only pass store instructions
  Value *pointer = store->getPointerOperand();
 
  for (unsigned i = 0; i < sharedSet.size(); i++) {
    if (ExecutorUtil::findValueFromTaintSet(pointer, 
                                            sharedSet[i].instSet, 
                                            sharedSet[i].valueSet)) {
      // Related to shared
      if (Verbose > 0) {
        std::cout << "shared store inst: " << std::endl;
        inst->dump();
      }
      flowSet.sharedWriteVec.insert(sharedSet[i].gv);
      relToShared = true;
      break;
    }
  }

  if (!relToShared) {
    for (unsigned i = 0; i < glSet.size(); i++) {
      if (ExecutorUtil::findValueFromTaintSet(pointer,
                                              glSet[i].instSet, 
                                              glSet[i].valueSet)) {
        // Related to global 
        if (Verbose > 0) {
          std::cout << "global store inst: " << std::endl;
          inst->dump();
        }
        flowSet.globalWriteVec.insert(glSet[i].gv);
        break; 
      }
    }
  }
}
Example #25
void StoreVisitor::visitStoreInst(StoreInst &I) {
#ifdef MUT_DEBUG
    errs() << "[DEBUG] Found Fence Inst\n";
#endif

    if (onlyAtomic) {
        if (I.isAtomic()) {
            storeInsts.push_back(&I);
        }
    }
    else
        storeInsts.push_back(&I);
}
Example #26
void WorklessInstrument::CreateIfElseBlock(Loop * pLoop, vector<BasicBlock *> & vecAdded)
{
	BasicBlock * pPreHeader = pLoop->getLoopPreheader();
	BasicBlock * pHeader = pLoop->getHeader();
	Function * pInnerFunction = pPreHeader->getParent();
	Module * pModule = pPreHeader->getParent()->getParent();

	BasicBlock * pElseBody = NULL;
	TerminatorInst * pTerminator = NULL;

	BranchInst * pBranch = NULL;
	LoadInst * pLoad1 = NULL;
	LoadInst * pLoad2 = NULL;
	LoadInst * pLoadnumGlobalCounter = NULL;
	BinaryOperator * pAddOne = NULL;
	StoreInst * pStoreNew = NULL;
	CmpInst * pCmp = NULL;
	CallInst * pCall = NULL;
	StoreInst * pStore = NULL;
	AttributeSet emptySet;

	pTerminator = pPreHeader->getTerminator();
	pLoadnumGlobalCounter = new LoadInst(this->numGlobalCounter, "", false, pTerminator);
	pLoadnumGlobalCounter->setAlignment(8);
	pAddOne = BinaryOperator::Create(Instruction::Add, pLoadnumGlobalCounter, this->ConstantLong1, "add", pTerminator);
	pStoreNew = new StoreInst(pAddOne, this->numGlobalCounter, false, pTerminator);
	pStoreNew->setAlignment(8);

	pElseBody = BasicBlock::Create(pModule->getContext(), ".else.body.CPI", pInnerFunction, 0);

	pLoad2 = new LoadInst(this->CURRENT_SAMPLE, "", false, pTerminator);
	pLoad2->setAlignment(8);
	pCmp = new ICmpInst(pTerminator, ICmpInst::ICMP_SLT, pAddOne, pLoad2, "");
	pBranch = BranchInst::Create(pHeader, pElseBody, pCmp );
	ReplaceInstWithInst(pTerminator, pBranch);

	pLoad1 = new LoadInst(this->SAMPLE_RATE, "", false, pElseBody);
	pCall = CallInst::Create(this->geo, pLoad1, "", pElseBody);
  	pCall->setCallingConv(CallingConv::C);
  	pCall->setTailCall(false);
  	pCall->setAttributes(emptySet);

  	CastInst * pCast = CastInst::CreateIntegerCast(pCall, this->LongType, true, "", pElseBody);
  	//pBinary = BinaryOperator::Create(Instruction::Add, pLoad2, pCast, "add", pIfBody);
  	pStore = new StoreInst(pCast, this->CURRENT_SAMPLE, false, pElseBody);
  	pStore->setAlignment(8);

  	pStore = new StoreInst(this->ConstantLong0, this->numGlobalCounter, false, pElseBody);
  	pStore->setAlignment(8);

  	pLoad1 = new LoadInst(this->numInstances, "", false, pElseBody);
  	pLoad1->setAlignment(8);
  	pAddOne = BinaryOperator::Create(Instruction::Add, pLoad1, this->ConstantLong1, "add", pElseBody);
	pStore = new StoreInst(pAddOne, this->numInstances, false, pElseBody);
	pStore->setAlignment(8);

	vecAdded.push_back(pPreHeader);
	vecAdded.push_back(pElseBody);
}
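Reconstructed by hand from the calls above (the global names, the i64 types, and the sext are assumptions about this->LongType and the geo callee), the instrumented preheader and else block look roughly like:

preheader:
  %c    = load i64* @numGlobalCounter, align 8
  %add  = add i64 %c, 1
  store i64 %add, i64* @numGlobalCounter, align 8
  %cur  = load i64* @CURRENT_SAMPLE, align 8
  %cmp  = icmp slt i64 %add, %cur
  br i1 %cmp, label %header, label %.else.body.CPI

.else.body.CPI:
  %rate = load i64* @SAMPLE_RATE
  %geo  = call i32 @geo(i64 %rate)
  %ext  = sext i32 %geo to i64
  store i64 %ext, i64* @CURRENT_SAMPLE, align 8
  store i64 0, i64* @numGlobalCounter, align 8
  %n    = load i64* @numInstances, align 8
  %n1   = add i64 %n, 1
  store i64 %n1, i64* @numInstances, align 8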
Example #27
TEST(NousedTest, Linear) {
   const char Assembly[] = R"(
define void @a(){
entry:
   %0 = alloca i32
   store i32 1, i32* %0
   load i32* %0
   ret void
})";
   std::unique_ptr<Module> M = parseAssembly(Assembly);
   Function* F_a = M->getFunction("a");
   BasicBlock::iterator B_a_beg = F_a->begin()->begin();
   StoreInst* SI = cast<StoreInst>(std::next(B_a_beg));

   ResolveEngine RE;
   RE.addRule(RE.ibase_rule);
   RE.addRule(InitRule(RE.iuse_rule));

   Value* V;
   RE.resolve(&SI->getOperandUse(1), RE.findVisit(V));
   EXPECT_EQ(V, std::next(B_a_beg, 2));
}
Example #28
LLVMValueRef 
mono_llvm_build_store (LLVMBuilderRef builder, LLVMValueRef Val, LLVMValueRef PointerVal,
					  gboolean is_volatile, BarrierKind barrier)
{
	StoreInst *ins = unwrap(builder)->CreateStore(unwrap(Val), unwrap(PointerVal), is_volatile);

	switch (barrier) {
	case LLVM_BARRIER_NONE:
		break;
	case LLVM_BARRIER_REL:
		ins->setOrdering(Release);
		break;
	case LLVM_BARRIER_SEQ:
		ins->setOrdering(SequentiallyConsistent);
		break;
	default:
		g_assert_not_reached ();
		break;
	}

	return wrap(ins);
}
Example #29
void GraphBuilder::visitStoreInst(StoreInst &SI) {
  Type *StoredTy = SI.getOperand(0)->getType();
  DSNodeHandle Dest = getValueDest(SI.getOperand(1));
  if (Dest.isNull()) return;

  // Mark that the node is written to...
  Dest.getNode()->setModifiedMarker();

  // Ensure a type-record exists...
  Dest.getNode()->growSizeForType(StoredTy, Dest.getOffset());

  // Avoid adding edges from null, or processing non-"pointer" stores
  if (isa<PointerType>(StoredTy))
    Dest.addEdgeTo(getValueDest(SI.getOperand(0)));

  if(TypeInferenceOptimize)
    if(SI.getOperand(0)->hasOneUse())
      if(isa<LoadInst>(SI.getOperand(0))){
        ++NumIgnoredInst;
        return;
      }
  Dest.getNode()->mergeTypeInfo(StoredTy, Dest.getOffset());
}
Example #30
///   store {atomic|volatile} T %val, T* %ptr memory_order, align sizeof(T)
/// becomes:
///   call void @llvm.nacl.atomic.store.i<size>(%val, %ptr, memory_order)
void AtomicVisitor::visitStoreInst(StoreInst &I) {
  return; // XXX EMSCRIPTEN
  if (I.isSimple())
    return;
  PointerHelper<StoreInst> PH(*this, I);
  const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
      findAtomicIntrinsic(I, Intrinsic::nacl_atomic_store, PH.PET);
  checkAlignment(I, I.getAlignment(), PH.BitSize / CHAR_BIT);
  Value *V = I.getValueOperand();
  if (!V->getType()->isIntegerTy()) {
    // The store isn't of an integer type. We define atomics in terms of
    // integers, so bitcast the value to store to an integer of the
    // proper width.
    CastInst *Cast = createCast(I, V, Type::getIntNTy(C, PH.BitSize),
                                V->getName() + ".cast");
    Cast->setDebugLoc(I.getDebugLoc());
    V = Cast;
  }
  checkSizeMatchesType(I, PH.BitSize, V->getType());
  Value *Args[] = {V, PH.P, freezeMemoryOrder(I, I.getOrdering())};
  replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET,
                                      Args);
}