Example No. 1
void SSAUpdater::RewriteUseAfterInsertions(Use &U) {
  Instruction *User = cast<Instruction>(U.getUser());
  
  Value *V;
  if (PHINode *UserPN = dyn_cast<PHINode>(User))
    V = GetValueAtEndOfBlock(UserPN->getIncomingBlock(U));
  else
    V = GetValueAtEndOfBlock(User->getParent());
  
  U.set(V);
}
Example No. 2
// Replace direct callers of Old with New.
void MergeFunctions::replaceDirectCallers(Function *Old, Function *New) {
  Constant *BitcastNew = ConstantExpr::getBitCast(New, Old->getType());
  for (auto UI = Old->use_begin(), UE = Old->use_end(); UI != UE;) {
    Use *U = &*UI;
    ++UI;
    CallSite CS(U->getUser());
    if (CS && CS.isCallee(U)) {
      remove(CS.getInstruction()->getParent()->getParent());
      U->set(BitcastNew);
    }
  }
}
Example No. 3
void SSAUpdater::RewriteUse(Use &U) {
  Instruction *User = cast<Instruction>(U.getUser());

  Value *V;
  if (PHINode *UserPN = dyn_cast<PHINode>(User))
    V = GetValueAtEndOfBlock(UserPN->getIncomingBlock(U));
  else
    V = GetValueInMiddleOfBlock(User->getParent());

  // Notify users of the existing value that it is being replaced.
  Value *OldVal = U.get();
  if (OldVal != V && OldVal->hasValueHandle())
    ValueHandleBase::ValueIsRAUWd(OldVal, V);

  U.set(V);
}
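A minimal driving sketch for the rewrite entry points above (Orig, NewBB, and NewDef are hypothetical names, and this mirrors the pattern used in Example No. 6 below): seed the SSAUpdater with the definitions available per block, then rewrite each use, advancing the iterator before the rewrite because RewriteUse may edit the use list.

// Sketch only: rewrite every use of a hypothetical value Orig so it reads the
// definition NewDef that is available at the end of block NewBB.
SSAUpdater Updater;
Updater.Initialize(Orig->getType(), Orig->getName());
Updater.AddAvailableValue(NewBB, NewDef);
// (additional AddAvailableValue calls would register other defining blocks)
for (auto UI = Orig->use_begin(), UE = Orig->use_end(); UI != UE;) {
  Use &U = *UI++; // advance first: RewriteUse modifies this use
  Updater.RewriteUse(U);
}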
Example No. 4
void Value::reverseUseList() {
  if (!UseList || !UseList->Next)
    // No need to reverse 0 or 1 uses.
    return;

  Use *Head = UseList;
  Use *Current = UseList->Next;
  Head->Next = nullptr;
  while (Current) {
    Use *Next = Current->Next;
    Current->Next = Head;
    Head->setPrev(&Current->Next);
    Head = Current;
    Current = Next;
  }
  UseList = Head;
  Head->setPrev(&UseList);
}
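A small property check as a sketch (V is a hypothetical Value with several uses, and this assumes reverseUseList is callable from the test context): reversing the use list twice must restore the original order.

// Sketch only: double reversal of the use list is a no-op on the user order.
SmallVector<User *, 8> Before(V->user_begin(), V->user_end());
V->reverseUseList();
V->reverseUseList();
SmallVector<User *, 8> After(V->user_begin(), V->user_end());
assert(Before == After && "double reversal should restore the order");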
Example No. 5
/// Returns true if \p U is the pointer operand of a memory instruction with
/// a single pointer operand that can have its address space changed by simply
/// mutating the use to a new value.
static bool isSimplePointerUseValidToReplace(Use &U) {
  User *Inst = U.getUser();
  unsigned OpNo = U.getOperandNo();

  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return OpNo == LoadInst::getPointerOperandIndex() && !LI->isVolatile();

  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return OpNo == StoreInst::getPointerOperandIndex() && !SI->isVolatile();

  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
    return OpNo == AtomicRMWInst::getPointerOperandIndex() && !RMW->isVolatile();

  if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
    return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() &&
           !CmpX->isVolatile();
  }

  return false;
}
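A hedged caller-side sketch (OldPtr and NewPtr are hypothetical, and this is not the actual InferAddressSpaces rewrite): uses accepted by the predicate can be redirected in place; they are collected first so the use list is not walked while it is being edited.

// Sketch only: redirect the simple pointer uses of OldPtr to NewPtr, assumed
// to be an equivalent pointer in the desired address space.
SmallVector<Use *, 8> SimpleUses;
for (Use &U : OldPtr->uses())
  if (isSimplePointerUseValidToReplace(U))
    SimpleUses.push_back(&U);
for (Use *U : SimpleUses)
  U->set(NewPtr);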
Example No. 6
/// Handle a rare case where the disintegrated nodes' instructions
/// no longer dominate all their uses. Not sure if this is really necessary.
void StructurizeCFG::rebuildSSA() {
  SSAUpdater Updater;
  for (Region::block_iterator I = ParentRegion->block_begin(),
                              E = ParentRegion->block_end();
       I != E; ++I) {

    BasicBlock *BB = *I;
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
         II != IE; ++II) {

      bool Initialized = false;
      for (Use *I = &II->use_begin().getUse(), *Next; I; I = Next) {

        Next = I->getNext();

        Instruction *User = cast<Instruction>(I->getUser());
        if (User->getParent() == BB) {
          continue;

        } else if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
          if (UserPN->getIncomingBlock(*I) == BB)
            continue;
        }

        if (DT->dominates(II, User))
          continue;

        if (!Initialized) {
          Value *Undef = UndefValue::get(II->getType());
          Updater.Initialize(II->getType(), "");
          Updater.AddAvailableValue(&Func->getEntryBlock(), Undef);
          Updater.AddAvailableValue(BB, II);
          Initialized = true;
        }
        Updater.RewriteUseAfterInsertions(*I);
      }
    }
  }
}
Example No. 7
bool DominatorTree::dominates(const Instruction *Def,
                              const Use &U) const {
  Instruction *UserInst = cast<Instruction>(U.getUser());
  const BasicBlock *DefBB = Def->getParent();

  // Determine the block in which the use happens. PHI nodes use
  // their operands on edges; simulate this by thinking of the use
  // happening at the end of the predecessor block.
  const BasicBlock *UseBB;
  if (PHINode *PN = dyn_cast<PHINode>(UserInst))
    UseBB = PN->getIncomingBlock(U);
  else
    UseBB = UserInst->getParent();

  // Any unreachable use is dominated, even if Def == User.
  if (!isReachableFromEntry(UseBB))
    return true;

  // Unreachable definitions don't dominate anything.
  if (!isReachableFromEntry(DefBB))
    return false;

  // Invoke instructions define their return values on the edges
  // to their normal successors, so we have to handle them specially.
  // Among other things, this means they don't dominate anything in
  // their own block, except possibly a phi, so we don't need to
  // walk the block in any case.
  if (const InvokeInst *II = dyn_cast<InvokeInst>(Def)) {
    BasicBlock *NormalDest = II->getNormalDest();
    BasicBlockEdge E(DefBB, NormalDest);
    return dominates(E, U);
  }

  // If the def and use are in different blocks, do a simple CFG dominator
  // tree query.
  if (DefBB != UseBB)
    return dominates(DefBB, UseBB);

  // Ok, def and use are in the same block. If the def is an invoke, it
  // doesn't dominate anything in the block. If it's a PHI, it dominates
  // everything in the block.
  if (isa<PHINode>(UserInst))
    return true;

  // Otherwise, just loop through the basic block until we find Def or User.
  BasicBlock::const_iterator I = DefBB->begin();
  for (; &*I != Def && &*I != UserInst; ++I)
    /*empty*/;

  return &*I != UserInst;
}
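A short sketch of why the Use-based overload matters (P, PN, and NewDef are hypothetical): a PHI operand is used at the end of its incoming block, so a definition can dominate that particular use even when it does not dominate the PHI node itself.

// Sketch only: NewDef is an instruction in P, a predecessor of PN's block.
// DT.dominates(NewDef, PN) may be false, but every incoming use from P is
// dominated, so exactly those operands can be redirected safely.
for (Use &Op : PN->incoming_values())
  if (PN->getIncomingBlock(Op) == P && DT.dominates(NewDef, Op))
    Op.set(NewDef);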
Example No. 8
bool DominatorTree::isReachableFromEntry(const Use &U) const {
  Instruction *I = dyn_cast<Instruction>(U.getUser());

  // ConstantExprs aren't really reachable from the entry block, but they
  // don't need to be treated like unreachable code either.
  if (!I) return true;

  // PHI nodes use their operands on their incoming edges.
  if (PHINode *PN = dyn_cast<PHINode>(I))
    return isReachableFromEntry(PN->getIncomingBlock(U));

  // Everything else uses their operands in their own block.
  return isReachableFromEntry(I->getParent());
}
Example No. 9
bool FixFunctionBitcasts::runOnModule(Module &M) {
  SmallVector<std::pair<Use *, Function *>, 0> Uses;

  // Collect all the places that need wrappers.
  for (Function &F : M)
    FindUses(&F, F, Uses);

  DenseMap<std::pair<Function *, FunctionType *>, Function *> Wrappers;

  for (auto &UseFunc : Uses) {
    Use *U = UseFunc.first;
    Function *F = UseFunc.second;
    PointerType *PTy = cast<PointerType>(U->get()->getType());
    FunctionType *Ty = dyn_cast<FunctionType>(PTy->getElementType());

    // If the function is cast to something like i8* as a "generic pointer"
    // to be later cast to something else, we can't generate a wrapper for it.
    // Just ignore such casts for now.
    if (!Ty)
      continue;

    auto Pair = Wrappers.insert(std::make_pair(std::make_pair(F, Ty), nullptr));
    if (Pair.second)
      Pair.first->second = CreateWrapper(F, Ty);

    Function *Wrapper = Pair.first->second;
    if (!Wrapper)
      continue;

    if (isa<Constant>(U->get()))
      U->get()->replaceAllUsesWith(Wrapper);
    else
      U->set(Wrapper);
  }

  return true;
}
Example No. 10
bool DominatorTree::dominates(const BasicBlockEdge &BBE, const Use &U) const {
  Instruction *UserInst = cast<Instruction>(U.getUser());
  // A PHI at the end of the edge is dominated by it.
  PHINode *PN = dyn_cast<PHINode>(UserInst);
  if (PN && PN->getParent() == BBE.getEnd() &&
      PN->getIncomingBlock(U) == BBE.getStart())
    return true;

  // Otherwise use the edge-dominates-block query, which
  // handles the crazy critical edge cases properly.
  const BasicBlock *UseBB;
  if (PN)
    UseBB = PN->getIncomingBlock(U);
  else
    UseBB = UserInst->getParent();
  return dominates(BBE, UseBB);
}
Example No. 11
bool DominatorTree::dominates(const BasicBlockEdge &BBE, const Use &U) const {
  // Assert that we have a single edge. We could handle them by simply
  // returning false, but since isSingleEdge is linear on the number of
  // edges, the callers can normally handle them more efficiently.
  assert(BBE.isSingleEdge());

  Instruction *UserInst = cast<Instruction>(U.getUser());
  // A PHI at the end of the edge is dominated by it.
  PHINode *PN = dyn_cast<PHINode>(UserInst);
  if (PN && PN->getParent() == BBE.getEnd() &&
      PN->getIncomingBlock(U) == BBE.getStart())
    return true;

  // Otherwise use the edge-dominates-block query, which
  // handles the crazy critical edge cases properly.
  const BasicBlock *UseBB;
  if (PN)
    UseBB = PN->getIncomingBlock(U);
  else
    UseBB = UserInst->getParent();
  return dominates(BBE, UseBB);
}
Example No. 12
void Resolve::lookupReplaceUse(UnresolvedValue *V, Use &U, BasicBlock *Block) {
  auto Name = V->getName();
  auto K = V->getContext();
  if (auto S = K->Map.get(V, Block)) {
    /// %S = 2;
    ///  ^
    /// Came from here (MallocInst, Argument, or Prototype)
    ///
    /// Foo(%S);
    ///      ^
    ///  UnresolvedValue; replace with %Replacement
    if (auto M = dyn_cast<MallocInst>(S)) {
      if (isa<StoreInst>(U->getUser()))
        U.set(M);
      else {
        auto Replacement = LoadInst::get(M);
        Replacement->setSourceLocation(V->getSourceLocation());
        U.set(Replacement);
      }
    } else if (isa<BindInst>(S) || isa<Argument>(S)) {
      U.set(S);
    } else if (isa<Prototype>(S)) {
      auto Replacement = Pointer::get(S);
      Replacement->setSourceLocation(S->getSourceLocation());
      U.set(Replacement);
    }
  } else {
    /// %V was not seen earlier (%S not initialized)
    /// Only one possibility: %V(...)
    ///                        ^
    ///                Callee of CallInst
    auto SourceLoc = U->getSourceLocation();
    if (auto Inst = dyn_cast<CallInst>(U->getUser()))
      if (Inst->getCallee() == V) {
        DiagnosticPrinter(SourceLoc) << "unbound function " + Name;
        exit(1);
      }
    DiagnosticPrinter(SourceLoc) << "unbound symbol " + Name;
    exit(1);
  }
}
Example No. 13
void WinEHPrepare::replaceUseWithLoad(Value *V, Use &U, AllocaInst *&SpillSlot,
                                      DenseMap<BasicBlock *, Value *> &Loads,
                                      Function &F) {
  // Lazily create the spill slot.
  if (!SpillSlot)
    SpillSlot = new AllocaInst(V->getType(), DL->getAllocaAddrSpace(), nullptr,
                               Twine(V->getName(), ".wineh.spillslot"),
                               &F.getEntryBlock().front());

  auto *UsingInst = cast<Instruction>(U.getUser());
  if (auto *UsingPHI = dyn_cast<PHINode>(UsingInst)) {
    // If this is a PHI node, we can't insert a load of the value before
    // the use.  Instead insert the load in the predecessor block
    // corresponding to the incoming value.
    //
    // Note that if there are multiple edges from a basic block to this
    // PHI node, we cannot have multiple loads.  The problem is that
    // the resulting PHI node will have multiple values (from each load)
    // coming in from the same block, which is illegal SSA form.
    // For this reason, we keep track of and reuse loads we insert.
    BasicBlock *IncomingBlock = UsingPHI->getIncomingBlock(U);
    if (auto *CatchRet =
            dyn_cast<CatchReturnInst>(IncomingBlock->getTerminator())) {
      // Putting a load above a catchret and use on the phi would still leave
      // a cross-funclet def/use.  We need to split the edge, change the
      // catchret to target the new block, and put the load there.
      BasicBlock *PHIBlock = UsingInst->getParent();
      BasicBlock *NewBlock = SplitEdge(IncomingBlock, PHIBlock);
      // SplitEdge gives us:
      //   IncomingBlock:
      //     ...
      //     br label %NewBlock
      //   NewBlock:
      //     catchret label %PHIBlock
      // But we need:
      //   IncomingBlock:
      //     ...
      //     catchret label %NewBlock
      //   NewBlock:
      //     br label %PHIBlock
      // So move the terminators to each others' blocks and swap their
      // successors.
      BranchInst *Goto = cast<BranchInst>(IncomingBlock->getTerminator());
      Goto->removeFromParent();
      CatchRet->removeFromParent();
      IncomingBlock->getInstList().push_back(CatchRet);
      NewBlock->getInstList().push_back(Goto);
      Goto->setSuccessor(0, PHIBlock);
      CatchRet->setSuccessor(NewBlock);
      // Update the color mapping for the newly split edge.
      // Grab a reference to the ColorVector to be inserted before getting the
      // reference to the vector we are copying because inserting the new
      // element in BlockColors might cause the map to be reallocated.
      ColorVector &ColorsForNewBlock = BlockColors[NewBlock];
      ColorVector &ColorsForPHIBlock = BlockColors[PHIBlock];
      ColorsForNewBlock = ColorsForPHIBlock;
      for (BasicBlock *FuncletPad : ColorsForPHIBlock)
        FuncletBlocks[FuncletPad].push_back(NewBlock);
      // Treat the new block as incoming for load insertion.
      IncomingBlock = NewBlock;
    }
    Value *&Load = Loads[IncomingBlock];
    // Insert the load into the predecessor block
    if (!Load)
      Load = new LoadInst(SpillSlot, Twine(V->getName(), ".wineh.reload"),
                          /*Volatile=*/false, IncomingBlock->getTerminator());

    U.set(Load);
  } else {
    // Reload right before the old use.
    auto *Load = new LoadInst(SpillSlot, Twine(V->getName(), ".wineh.reload"),
                              /*Volatile=*/false, UsingInst);
    U.set(Load);
  }
}
Example No. 14
/// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
static Attribute::AttrKind
determinePointerReadAttrs(Argument *A,
                          const SmallPtrSet<Argument *, 8> &SCCNodes) {

  SmallVector<Use *, 32> Worklist;
  SmallSet<Use *, 32> Visited;

  // inalloca arguments are always clobbered by the call.
  if (A->hasInAllocaAttr())
    return Attribute::None;

  bool IsRead = false;
  // We don't need to track IsWritten. If A is written to, return immediately.

  for (Use &U : A->uses()) {
    Visited.insert(&U);
    Worklist.push_back(&U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      // The original value is not read/written via this if the new value isn't.
      for (Use &UU : I->uses())
        if (Visited.insert(&UU).second)
          Worklist.push_back(&UU);
      break;

    case Instruction::Call:
    case Instruction::Invoke: {
      bool Captures = true;

      if (I->getType()->isVoidTy())
        Captures = false;

      auto AddUsersToWorklistIfCapturing = [&] {
        if (Captures)
          for (Use &UU : I->uses())
            if (Visited.insert(&UU).second)
              Worklist.push_back(&UU);
      };

      CallSite CS(I);
      if (CS.doesNotAccessMemory()) {
        AddUsersToWorklistIfCapturing();
        continue;
      }

      Function *F = CS.getCalledFunction();
      if (!F) {
        if (CS.onlyReadsMemory()) {
          IsRead = true;
          AddUsersToWorklistIfCapturing();
          continue;
        }
        return Attribute::None;
      }

      // Note: the callee and the two successor blocks *follow* the argument
      // operands.  This means there is no need to adjust UseIndex to account
      // for these.

      unsigned UseIndex = std::distance(CS.arg_begin(), U);

      // U cannot be the callee operand use: since we're exploring the
      // transitive uses of an Argument, having such a use be a callee would
      // imply the CallSite is an indirect call or invoke; and we'd take the
      // early exit above.
      assert(UseIndex < CS.data_operands_size() &&
             "Data operand use expected!");

      bool IsOperandBundleUse = UseIndex >= CS.getNumArgOperands();

      if (UseIndex >= F->arg_size() && !IsOperandBundleUse) {
        assert(F->isVarArg() && "More params than args in non-varargs call");
        return Attribute::None;
      }

      Captures &= !CS.doesNotCapture(UseIndex);

      // Since the optimizer (by design) cannot see the data flow corresponding
      // to an operand bundle use, these cannot participate in the optimistic SCC
      // analysis.  Instead, we model the operand bundle uses as arguments in
      // call to a function external to the SCC.
      if (IsOperandBundleUse ||
          !SCCNodes.count(&*std::next(F->arg_begin(), UseIndex))) {

        // The accessors used on CallSite here do the right thing for calls and
        // invokes with operand bundles.

        if (!CS.onlyReadsMemory() && !CS.onlyReadsMemory(UseIndex))
          return Attribute::None;
        if (!CS.doesNotAccessMemory(UseIndex))
          IsRead = true;
      }

      AddUsersToWorklistIfCapturing();
      break;
    }

    case Instruction::Load:
      // A volatile load has side effects beyond what readonly can be relied
      // upon.
      if (cast<LoadInst>(I)->isVolatile())
        return Attribute::None;

      IsRead = true;
      break;

    case Instruction::ICmp:
    case Instruction::Ret:
      break;

    default:
      return Attribute::None;
    }
  }

  return IsRead ? Attribute::ReadOnly : Attribute::ReadNone;
}
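A minimal driver sketch (hypothetical, not the actual FunctionAttrs pass, and it assumes an Argument::addAttr overload that accepts an Attribute::AttrKind): the analysis result is applied to each pointer argument.

// Sketch only: F is a hypothetical Function and SCCNodes the set used above.
for (Argument &A : F.args()) {
  if (!A.getType()->isPointerTy())
    continue;
  Attribute::AttrKind K = determinePointerReadAttrs(&A, SCCNodes);
  if (K != Attribute::None)
    A.addAttr(K); // readonly or readnone on the argument
}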
Example No. 15
/*
  In addition to the original loop condition, insert one condition on the
  virtual iterator, given the linf and lsup bounds for the chunk; this
  condition exits the chunk and returns to the decision block to start a new
  chunk.
*/
BasicBlock *insertChunkCond(Loop *&L, LoopInfo *LI, Value *vi, Value *lsup,
                            BasicBlock *dcb, Value *vi_dcb_val,
                            PHINode *&phi_vi) {

  BasicBlock *H = L->getHeader();

  BasicBlock *newCond = BasicBlock::Create(
      H->getContext(), Twine("__kernel__" + H->getName().str() + "_viCond"),
      H->getParent(), H);

  std::vector<BasicBlock *> Lblocks = L->getBlocks();

  BasicBlock *exitBlock = BasicBlock::Create(
      H->getContext(), Twine(H->getName().str() + "_exitChunk"), H->getParent(),
      H);
  BranchInst::Create(dcb, exitBlock);

  phi_vi = PHINode::Create(Type::getInt64Ty(H->getContext()), 2, "vi_value",
                           newCond);
  phi_vi->addIncoming(vi_dcb_val, dcb);

  LoadInst *load_lsup = new LoadInst(lsup, "lsup_value", newCond);

  ICmpInst *cmp = new ICmpInst(*newCond, ICmpInst::ICMP_SLT, phi_vi, load_lsup,
                               vi->getName() + "_cmp");

  // Make sure all predecessors now go to our new condition
  std::vector<TerminatorInst *> termInstrs;
  BasicBlock *lp = L->getLoopPredecessor();

  for (auto it = pred_begin(H), end = pred_end(H); it != end; ++it) {
    if ((*it) == lp) {
      // Original entry should be redirected to dcb
      TerminatorInst *tinstr = (*it)->getTerminator();
      for (auto it = tinstr->op_begin(), end = tinstr->op_end(); it != end;
           ++it) {
        Use *use = &*it;
        if (use->get() == H) {
          use->set(dcb);
        }
      }
    } else {
      termInstrs.push_back((*it)->getTerminator());
    }
  }

  for (auto &tinstr : termInstrs) {
    for (auto it = tinstr->op_begin(), end = tinstr->op_end(); it != end;
         ++it) {
      Use *use = &*it;
      if (use->get() == H) {
        use->set(newCond);
      }
    }
  }

  BranchInst::Create(H, exitBlock, cmp, newCond);

  // update loop info
  if (L != LI->getLoopFor(newCond)) {
    L->addBasicBlockToLoop(newCond, *LI);
  }

  L->moveToHeader(newCond);

  Loop *Lp = L->getParentLoop();
  if (Lp)
    Lp->addBasicBlockToLoop(exitBlock, *LI);
  return newCond;
}
Example No. 16
IOCode Aa::get_io_code(Use &u)
{
	return get_io_code(u.getUser());
}
Example No. 17
void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) {
  assert(V->getType()->isPointerTy() && "Capture is for pointers only!");
  SmallVector<Use*, Threshold> Worklist;
  SmallSet<Use*, Threshold> Visited;
  int Count = 0;

  for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
       UI != UE; ++UI) {
    // If there are lots of uses, conservatively say that the value
    // is captured to avoid taking too much compile time.
    if (Count++ >= Threshold)
      return Tracker->tooManyUses();

    Use *U = &UI.getUse();
    if (!Tracker->shouldExplore(U)) continue;
    Visited.insert(U);
    Worklist.push_back(U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());
    V = U->get();

    switch (I->getOpcode()) {
    case Instruction::Call:
    case Instruction::Invoke: {
      CallSite CS(I);
      // Not captured if the callee is readonly, doesn't return a copy through
      // its return value and doesn't unwind (a readonly function can leak bits
      // by throwing an exception or not depending on the input value).
      if (CS.onlyReadsMemory() && CS.doesNotThrow() && I->getType()->isVoidTy())
        break;

      // Not captured if only passed via 'nocapture' arguments.  Note that
      // calling a function pointer does not in itself cause the pointer to
      // be captured.  This is a subtle point considering that (for example)
      // the callee might return its own address.  It is analogous to saying
      // that loading a value from a pointer does not cause the pointer to be
      // captured, even though the loaded value might be the pointer itself
      // (think of self-referential objects).
      CallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
      for (CallSite::arg_iterator A = B; A != E; ++A)
        if (A->get() == V && !CS.doesNotCapture(A - B))
          // The parameter is not marked 'nocapture' - captured.
          if (Tracker->captured(U))
            return;
      break;
    }
    case Instruction::Load:
      // Loading from a pointer does not cause it to be captured.
      break;
    case Instruction::VAArg:
      // "va-arg" from a pointer does not cause it to be captured.
      break;
    case Instruction::Store:
      if (V == I->getOperand(0))
        // Stored the pointer - conservatively assume it may be captured.
        if (Tracker->captured(U))
          return;
      // Storing to the pointee does not cause the pointer to be captured.
      break;
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
      // The original value is not captured via this if the new value isn't.
      for (Instruction::use_iterator UI = I->use_begin(), UE = I->use_end();
           UI != UE; ++UI) {
        Use *U = &UI.getUse();
        if (Visited.insert(U))
          if (Tracker->shouldExplore(U))
            Worklist.push_back(U);
      }
      break;
    case Instruction::ICmp:
      // Don't count comparisons of a no-alias return value against null as
      // captures. This allows us to ignore comparisons of malloc results
      // with null, for example.
      if (ConstantPointerNull *CPN =
          dyn_cast<ConstantPointerNull>(I->getOperand(1)))
        if (CPN->getType()->getAddressSpace() == 0)
          if (isNoAliasCall(V->stripPointerCastsSafe()))
            break;
      // Otherwise, be conservative. There are crazy ways to capture pointers
      // using comparisons.
      if (Tracker->captured(U))
        return;
      break;
    default:
      // Something else - be conservative and say it is captured.
      if (Tracker->captured(U))
        return;
      break;
    }
  }

  // All uses examined.
}
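A minimal CaptureTracker sketch (hypothetical, in the spirit of LLVM's SimpleCaptureTracker, assuming the member signatures implied by the calls above): it only records whether any use was reported as capturing.

// Sketch only: remember whether the traversal ever reported a capture.
struct EverCaptured : public CaptureTracker {
  bool Captured = false;
  void tooManyUses() override { Captured = true; }
  bool shouldExplore(Use *U) override { return true; }
  bool captured(Use *U) override {
    Captured = true;
    return true; // returning true stops the walk early
  }
};
// Usage: EverCaptured CT; PointerMayBeCaptured(Ptr, &CT); then read CT.Captured.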
Example No. 18
void CPFlowFunction::visitBranchInst(BranchInst &BI) {
  CPLatticePoint* result = new CPLatticePoint(*(info_in_casted.back()));
  info_in_casted.pop_back();
  BranchInst* current = &BI;

  if (BI.isConditional()) {
    Value* cond = BI.getCondition();
    if (isa<ICmpInst>(cond)) {
      std::pair<Use*, Use *> branches = helper::getOps(BI);
      Use* true_branch = branches.first;
      Use* false_branch = branches.second;

      ICmpInst* cmp = dyn_cast<ICmpInst>(cond);
      std::pair<Use*, Use *> operands = helper::getOps(*cmp);
      Use* rhs = operands.second;
      Use* lhs = operands.first;

      ConstantInt* rhs_const = NULL;
      ConstantInt* lhs_const = NULL;
      // get the rhs/lhs as a constant int
      if (isa<ConstantInt>(rhs->get())) {
        rhs_const = dyn_cast<ConstantInt>(rhs->get());
      } else if (result->representation.count(rhs->get()) > 0) {
        rhs_const = result->representation[rhs->get()];
      } else {
        rhs_const = ConstantInt::get(context, llvm::APInt(32, 0, true));
      }
      if (isa<ConstantInt>(lhs->get())) {
        lhs_const = dyn_cast<ConstantInt>(lhs->get());
      } else if (result->representation.count(lhs->get()) > 0) {
        lhs_const = result->representation[lhs->get()];
      } else {
        lhs_const = ConstantInt::get(context, llvm::APInt(32, 0, true));
      }

      // Create successors
      CPLatticePoint* true_branchCLP = new CPLatticePoint(false, false, std::map<Value*,ConstantInt*>(result->representation));
      CPLatticePoint* false_branchCLP = new CPLatticePoint(false, false, std::map<Value*,ConstantInt*>(result->representation));

      // get the predicate
      int predicate = 0;
      predicate = cmp->isSigned() ? cmp->getSignedPredicate() : cmp->getUnsignedPredicate();
      if (predicate == CmpInst::ICMP_EQ) {
        if (isa<ConstantInt>(lhs->get())) {
           true_branchCLP->representation[rhs->get()] = lhs_const;
        } else if (isa<ConstantInt>(rhs->get())) {
           true_branchCLP->representation[lhs->get()] = rhs_const;
        }
        out_map[true_branch->get()] = true_branchCLP;
        out_map[false_branch->get()] = false_branchCLP;
      } else if (predicate == CmpInst::ICMP_NE) {
        if (isa<ConstantInt>(lhs->get())) {
           false_branchCLP->representation[rhs->get()] = lhs_const;
        } else if (isa<ConstantInt>(rhs->get())) {
           false_branchCLP->representation[lhs->get()] = rhs_const;
        }
        out_map[true_branch->get()] = true_branchCLP;
        out_map[false_branch->get()] = false_branchCLP;
      } else {
        for (std::map<Value *, LatticePoint *>::iterator it=out_map.begin(); it != out_map.end(); ++it){
          Value* elm = it->first;
          out_map[elm] = new CPLatticePoint(*result);
        }
      }
    } else {
      for (std::map<Value *, LatticePoint *>::iterator it=out_map.begin(); it != out_map.end(); ++it){
        Value* elm = it->first;
        out_map[elm] = new CPLatticePoint(*result);
      }
    }
  } else {
    for (std::map<Value *, LatticePoint *>::iterator it=out_map.begin(); it != out_map.end(); ++it){
        Value* elm = it->first;
        out_map[elm] = new CPLatticePoint(*result);
    }
  }
}
Example No. 19
DDGraph::DDGraph(ResolveResult& RR,Value* root) 
{
   auto& r = get<0>(RR);
   auto& u = get<1>(RR);
   auto& c = get<2>(RR);
   for(auto I : r){
      this->insert(make_value(I,DDGNode::NORMAL));
   }
   for(auto I : u){
      this->insert(make_value(I,DDGNode::UNSOLVED));
   }
   for(auto& N : *this){
      auto found = c.find(N.first);
      DDGNode& node = N.second;
      if(node.flags() & DDGNode::UNSOLVED) continue;
      Use* implicity = (found == c.end())?nullptr:found->second;
      if(implicity){
         DDGValue& v = *this->find(implicity->getUser());
         DDGNode& to = v.second;
         node.impl().push_back(&v);
         ++v.second.ref_count;
         node.flags_ = DDGNode::IMPLICITY;
         if(auto CI = dyn_cast<CallInst>(implicity->getUser())){
            Instruction* NI = dyn_cast<Instruction>(N.first);
            if(NI && CI->getCalledFunction() != NI->getParent()->getParent()){
               Argument* arg = findCallInstArgument(implicity);
               if(!arg) continue;
               auto found = c.find(cast<Value>(arg));
               Use* link = (found==c.end())?nullptr:found->second;
               if(link){
                  node.load_tg_ = &*this->find(link->getUser());
                  //++node.load_tg_->second.ref_count; // shouldn't add ref count for load_tg
                  to.impl().push_back(node.load_inst());
                  to.flags_ = DDGNode::IMPLICITY;
               }
            }else{
               if(isa<AllocaInst>(implicity->get())){
                  auto found = c.find(implicity->get());
                  Use* link = (found==c.end())?nullptr:found->second;
                  if(link) node.load_tg_ = &*this->find(link->getUser());
               }else 
                  node.load_tg_ = &*this->find(implicity->get());
               to.impl().push_back(node.load_inst());
               to.flags_ = DDGNode::IMPLICITY;
            }
         }
      }
   }
   for(auto& N : *this){
      // For stability, make sure all implicit marking is finished before this point.
      auto found = c.find(N.first);
      if(found != c.end()) continue;

      Instruction* Inst = dyn_cast<llvm::Instruction>(N.first);
      DDGNode& node = N.second;
      if(!Inst) continue;
      if(isa<CallInst>(Inst) && (N.second.flags_ & DDGNode::IMPLICITY))
         continue; // an implicit CallInst can never be solved directly
      for(auto O = Inst->op_begin(),E=Inst->op_end();O!=E;++O){
         auto Target = this->find(*O);
         if(Target != this->end()){
            DDGValue* v = &*Target;
            ++v->second.ref_count;
            node.impl().push_back(v);
         }
      }
   }
   this->root = &*this->find(root);
}