Example #1
/// SplitLandingPadPreds - The landing pad needs to be extracted with the invoke
/// instruction. The critical edge breaker will refuse to break critical edges
/// to a landing pad. So do them here. After this method runs, all landing pads
/// should have only one predecessor.
void BlockExtractorPass::SplitLandingPadPreds(Function *F) {
  for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
    // An invoke is always a block terminator, so it is enough to inspect
    // each block's terminator here.
    InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator());
    if (!II) continue;
    BasicBlock *Parent = II->getParent();
    BasicBlock *LPad = II->getUnwindDest();

    // Look through the landing pad's predecessors. If one of them ends in an
    // 'invoke', then we want to split the landing pad.
    bool Split = false;
    for (pred_iterator
           PI = pred_begin(LPad), PE = pred_end(LPad); PI != PE; ++PI) {
      BasicBlock *BB = *PI;
      if (BB->isLandingPad() && BB != Parent &&
          isa<InvokeInst>(Parent->getTerminator())) {
        Split = true;
        break;
      }
    }

    if (!Split) continue;

    SmallVector<BasicBlock*, 2> NewBBs;
    SplitLandingPadPredecessors(LPad, Parent, ".1", ".2", NewBBs);
  }
}
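Because an invoke always terminates its basic block, scanning block terminators is enough to visit every invoke in a function. A minimal sketch of that scan (illustrative only; assumes the usual LLVM headers and a version new enough for range-based block iteration):
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Collect every invoke in F by looking only at block terminators.
static void collectInvokes(Function &F, SmallVectorImpl<InvokeInst *> &Out) {
  for (BasicBlock &BB : F)
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB.getTerminator()))
      Out.push_back(II);
}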
Example #2
// visitCallInst - This converts all LLVM call instructions into invoke
// instructions. The exception edge of the invoke goes to the "LongJmpBlkPre"
// block that grabs the exception and determines whether it is a longjmp
// exception or not.
void LowerSetJmp::visitCallInst(CallInst& CI)
{
  if (CI.getCalledFunction())
    if (!IsTransformableFunction(CI.getCalledFunction()->getName()) ||
        CI.getCalledFunction()->isIntrinsic()) return;

  BasicBlock* OldBB = CI.getParent();

  // If not reachable from a setjmp call, don't transform.
  if (!DFSBlocks.count(OldBB)) return;

  BasicBlock* NewBB = OldBB->splitBasicBlock(CI);
  assert(NewBB && "Couldn't split BB of \"call\" instruction!!");
  DFSBlocks.insert(NewBB);
  NewBB->setName("Call2Invoke");

  Function* Func = OldBB->getParent();

  // Construct the new "invoke" instruction.
  TerminatorInst* Term = OldBB->getTerminator();
  std::vector<Value*> Params(CI.op_begin() + 1, CI.op_end());
  InvokeInst* II =
    InvokeInst::Create(CI.getCalledValue(), NewBB, PrelimBBMap[Func],
                       Params.begin(), Params.end(), CI.getName(), Term);
  II->setCallingConv(CI.getCallingConv());
  II->setParamAttrs(CI.getParamAttrs());

  // Replace the old call inst with the invoke inst and remove the call.
  CI.replaceAllUsesWith(II);
  CI.getParent()->getInstList().erase(&CI);

  // The old terminator is useless now that we have the invoke inst.
  Term->getParent()->getInstList().erase(Term);
  ++CallsTransformed;
}
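The heart of this example (and of Examples 6, 9, 16 and 22 below) is the same three-step rewrite: split the block at the call, replace the branch that splitBasicBlock inserts with an invoke, and forward all uses of the call to the invoke. A stripped-down sketch of that pattern, using the 3.x-era API seen in these examples (the exact spelling of getCalledValue, CallSite headers, and splitBasicBlock arguments varies across LLVM versions):
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static InvokeInst *convertCallToInvoke(CallInst *CI, BasicBlock *UnwindDest) {
  BasicBlock *BB = CI->getParent();
  // Everything from the call onward moves into a new block, which becomes
  // the invoke's normal destination.
  BasicBlock *NormalDest = BB->splitBasicBlock(CI, CI->getName() + ".cont");
  // Drop the unconditional branch that splitBasicBlock appended; the invoke
  // will be the block's new terminator.
  BB->getTerminator()->eraseFromParent();

  CallSite CS(CI);
  SmallVector<Value *, 8> Args(CS.arg_begin(), CS.arg_end());
  InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), NormalDest,
                                      UnwindDest, Args, CI->getName(), BB);
  II->setCallingConv(CI->getCallingConv());
  II->setAttributes(CI->getAttributes());

  // Forward all uses of the old call to the invoke, then delete the call
  // (it now sits at the front of NormalDest).
  CI->replaceAllUsesWith(II);
  CI->eraseFromParent();
  return II;
}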
Example #3
void Preparer::expandCallSite(CallSite CS) {
  if (!CS.getCalledFunction()) return;
  Function *F = CS.getCalledFunction();
  if (!F->isVarArg()) return;
  vector<Value *> Args;
  for (CallSite::arg_iterator ArgI = CS.arg_begin();
      ArgI != CS.arg_end(); ArgI++) {
    Args.push_back(*ArgI);
  }
  Args.push_back(ConstantInt::get(
        IntegerType::get(CS.getInstruction()->getContext(), 8), 0));
  string InstName = "";
  if (CS.getInstruction()->getName() != "")
    InstName = CS.getInstruction()->getName().str() + ".padded";
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    CallInst *NewCI = CallInst::Create(F, Args, InstName, CI);
    NewCI->setAttributes(CI->getAttributes());
    CI->replaceAllUsesWith(NewCI);
    CI->eraseFromParent();
  } else if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
    InvokeInst *NewII = InvokeInst::Create(F,
        II->getNormalDest(), II->getUnwindDest(), Args, InstName, II);
    NewII->setAttributes(II->getAttributes());
    II->replaceAllUsesWith(NewII);
    II->eraseFromParent();
  }
}
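A hypothetical driver for the routine above, showing how it might be applied to a whole module. The loop advances its iterator before calling expandCallSite because the callee erases the original instruction (sketch only; names other than expandCallSite are illustrative, and the CallSite API matches the 3.x era used in the example):
#include "llvm/IR/CallSite.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Module.h"
using namespace llvm;

void Preparer::expandCallSites(Module &M) {
  for (Function &F : M) {
    for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E;) {
      Instruction *Inst = &*I++; // advance first: expandCallSite erases Inst
      CallSite CS(Inst);
      if (CS) // CS is non-null only for call and invoke instructions
        expandCallSite(CS);
    }
  }
}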
Example #4
/// Extracts the landing pads to make sure all of them have only one
/// predecessor.
void BlockExtractor::splitLandingPadPreds(Function &F) {
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      if (!isa<InvokeInst>(&I))
        continue;
      InvokeInst *II = cast<InvokeInst>(&I);
      BasicBlock *Parent = II->getParent();
      BasicBlock *LPad = II->getUnwindDest();

      // Look through the landing pad's predecessors. If one of them ends in an
      // 'invoke', then we want to split the landing pad.
      bool Split = false;
      for (auto PredBB : predecessors(LPad)) {
        if (PredBB->isLandingPad() && PredBB != Parent &&
            isa<InvokeInst>(Parent->getTerminator())) {
          Split = true;
          break;
        }
      }

      if (!Split)
        continue;

      SmallVector<BasicBlock *, 2> NewBBs;
      SplitLandingPadPredecessors(LPad, Parent, ".1", ".2", NewBBs);
    }
  }
}
Example #5
/*
 * Replace called function of a given call site.
 */
void DeadStoreEliminationPass::replaceCallingInst(Instruction* caller,
    Function* fn) {
  if (CallInst *callInst = dyn_cast<CallInst>(caller)) {
    callInst->setCalledFunction(fn);
  } else if (InvokeInst *invokeInst = dyn_cast<InvokeInst>(caller)) {
    invokeInst->setCalledFunction(fn);
  }
}
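Since CallSite abstracts over both call and invoke instructions, the same replacement can also be written without the explicit type dispatch (sketch; relies on the 3.x-era CallSite API used elsewhere in these examples, and the helper name is illustrative):
#include "llvm/IR/CallSite.h"
using namespace llvm;

// Point a call or invoke instruction at a different callee.
static void replaceCallee(Instruction *caller, Function *fn) {
  if (CallSite CS = CallSite(caller))
    CS.setCalledFunction(fn);
}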
Example #6
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes.  This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
///
/// Returns true to indicate that the next block should be skipped.
static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  LandingPadInst *LPI = Invoke.getLandingPadInst();

  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    if (LandingPadInst *L = dyn_cast<LandingPadInst>(I)) {
      unsigned NumClauses = LPI->getNumClauses();
      L->reserveClauses(NumClauses);
      for (unsigned i = 0; i != NumClauses; ++i)
        L->addClause(LPI->getClause(i));
    }

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction.  First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());
    
    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return false;
  }

  return false;
}
Example #7
void DeadStoreEliminationPass::runOverwrittenDeadStoreAnalysisOnFn(Function &F) {
  MDA       = &getAnalysis<MemoryDependenceAnalysis>(F);

  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    for (BasicBlock::iterator I = BB->begin(), IE = BB->end(); I != IE; ++I) {
      Instruction *inst = I;
      if (StoreInst* SI = dyn_cast<StoreInst>(inst)) {
        Value *ptr           = SI->getPointerOperand();
        MemDepResult mdr     = MDA->getDependency(inst);
        Instruction *depInst = mdr.getInst();
        if (depInst && (isa<CallInst>(depInst) || isa<InvokeInst>(depInst))) {
           Function *calledFn;

           if (CallInst* CI = dyn_cast<CallInst>(depInst)) {
             calledFn = CI->getCalledFunction();
           } else {
             InvokeInst *II = cast<InvokeInst>(depInst);
             calledFn = II->getCalledFunction();
           }
           if (!fnThatStoreOnArgs.count(calledFn)) continue;

           CallSite CS(depInst);

           CallSite::arg_iterator actualArgIter = CS.arg_begin();
           Function::arg_iterator formalArgIter = calledFn->arg_begin();
           int size = calledFn->arg_size();

           std::set<Value*> storedArgs = fnThatStoreOnArgs[calledFn];
           for (int i = 0; i < size; ++i, ++actualArgIter, ++formalArgIter) {
             Value *formalArg = formalArgIter;
             Value *actualArg = *actualArgIter;
             if (ptr == actualArg && storedArgs.count(formalArg)) {
               int64_t InstWriteOffset, DepWriteOffset;
               DEBUG(errs() << "  Verifying if store is completely overwritten.\n");
               AliasAnalysis::Location Loc(ptr, getPointerSize(ptr, *AA), NULL);
               AliasAnalysis::Location DepLoc(actualArg, getPointerSize(actualArg, *AA), NULL);
               OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA, DepWriteOffset, InstWriteOffset);
               if (OR == OverwriteComplete) {
                 DEBUG(errs() << "  Store on " << formalArg->getName() << " will be removed with cloning\n");
                 deadArguments[depInst].insert(formalArg);
               }
             }
           }
           if (deadArguments.count(depInst)) {
             fn2Clone[calledFn].push_back(depInst);
           }
        }
      }
    }
  }
}
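The if/else above that extracts the callee from either a CallInst or an InvokeInst is a common enough pattern that it can also be factored through CallSite (sketch, same-era API; the helper name is illustrative):
#include "llvm/IR/CallSite.h"
using namespace llvm;

// Returns the statically known callee of a call or invoke, or null for
// indirect calls and for instructions that are not call sites.
static Function *getDirectCallee(Instruction *I) {
  CallSite CS(I);
  return CS ? CS.getCalledFunction() : nullptr;
}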
Example #8
CallSite GNUstep::IMPCacher::SplitSend(CallSite msgSend)
{
    BasicBlock *lookupBB = msgSend->getParent();
    Function *F = lookupBB->getParent();
    Module *M = F->getParent();
    Function *send = M->getFunction("objc_msgSend");
    Function *send_stret = M->getFunction("objc_msgSend_stret");
    Function *send_fpret = M->getFunction("objc_msgSend_fpret");
    Value *self;
    Value *cmd;
    int selfIndex = 0;
    if ((msgSend.getCalledFunction() == send) ||
            (msgSend.getCalledFunction() == send_fpret)) {
        self = msgSend.getArgument(0);
        cmd = msgSend.getArgument(1);
    } else if (msgSend.getCalledFunction() == send_stret) {
        selfIndex = 1;
        self = msgSend.getArgument(1);
        cmd = msgSend.getArgument(2);
    } else {
        abort();
        return CallSite();
    }
    CGBuilder B(&F->getEntryBlock(), F->getEntryBlock().begin());
    Value *selfPtr = B.CreateAlloca(self->getType());
    B.SetInsertPoint(msgSend.getInstruction());
    B.CreateStore(self, selfPtr, true);
    LLVMType *impTy = msgSend.getCalledValue()->getType();
    LLVMType *slotTy = PointerType::getUnqual(StructType::get(PtrTy, PtrTy, PtrTy,
                       IntTy, impTy, PtrTy, NULL));
    Value *slot;
    Constant *lookupFn = M->getOrInsertFunction("objc_msg_lookup_sender",
                         slotTy, selfPtr->getType(), cmd->getType(), PtrTy, NULL);
    if (msgSend.isCall()) {
        slot = B.CreateCall3(lookupFn, selfPtr, cmd, Constant::getNullValue(PtrTy));
    } else {
        InvokeInst *inv = cast<InvokeInst>(msgSend.getInstruction());
        BasicBlock *callBB = SplitBlock(lookupBB, msgSend.getInstruction(), Owner);
        removeTerminator(lookupBB);
        B.SetInsertPoint(lookupBB);
        slot = B.CreateInvoke3(lookupFn, callBB, inv->getUnwindDest(), selfPtr, cmd,
                               Constant::getNullValue(PtrTy));
        addPredecssor(inv->getUnwindDest(), msgSend->getParent(), lookupBB);
        B.SetInsertPoint(msgSend.getInstruction());
    }
    Value *imp = B.CreateLoad(B.CreateStructGEP(slot, 4));
    msgSend.setArgument(selfIndex, B.CreateLoad(selfPtr, true));
    msgSend.setCalledFunction(imp);
    return CallSite(slot);
}
Example #9
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes.  This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
///
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   BasicBlock *InvokeDest,
                           const SmallVectorImpl<Value*> &InvokeDestPHIValues) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;
    
    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;
    
    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow())
      continue;
    
    // Convert this function call into an invoke instruction.
    // First, split the basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");
    
    // Next, create the new invoke instruction, inserting it at the end
    // of the old basic block.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                         InvokeArgs.begin(), InvokeArgs.end(),
                         CI->getName(), BB->getTerminator());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());
    
    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);
    
    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();
    Split->getInstList().pop_front();  // Delete the original call
    
    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
    unsigned i = 0;
    for (BasicBlock::iterator I = InvokeDest->begin();
         isa<PHINode>(I); ++I, ++i)
      cast<PHINode>(I)->addIncoming(InvokeDestPHIValues[i], BB);
    
    // This basic block is now complete, the caller will continue scanning the
    // next one.
    return;
  }
}
Example #10
// visitInvokeInst - Converting the "invoke" instruction is fairly
// straightforward. The old exception edge is replaced by a query asking
// whether this is a longjmp exception. If it is, control goes to the longjmp
// exception blocks. Otherwise, control is passed to the old exception block.
void LowerSetJmp::visitInvokeInst(InvokeInst& II)
{
  if (II.getCalledFunction())
    if (!IsTransformableFunction(II.getCalledFunction()->getName()) ||
        II.getCalledFunction()->isIntrinsic()) return;

  BasicBlock* BB = II.getParent();

  // If not reachable from a setjmp call, don't transform.
  if (!DFSBlocks.count(BB)) return;

  BasicBlock* ExceptBB = II.getUnwindDest();

  Function* Func = BB->getParent();
  BasicBlock* NewExceptBB = BasicBlock::Create(II.getContext(), 
                                               "InvokeExcept", Func);

  // If this is a longjmp exception, then branch to the preliminary BB of
  // the longjmp exception handling. Otherwise, go to the old exception.
  CallInst* IsLJExcept = CallInst::Create(IsLJException, "IsLJExcept",
                                          NewExceptBB);

  BranchInst::Create(PrelimBBMap[Func], ExceptBB, IsLJExcept, NewExceptBB);

  II.setUnwindDest(NewExceptBB);
  ++InvokesTransformed;
}
Example #11
extern "C" void LLVMRustMarkAllFunctionsNounwind(LLVMModuleRef M) {
  for (Module::iterator GV = unwrap(M)->begin(), E = unwrap(M)->end(); GV != E;
       ++GV) {
    GV->setDoesNotThrow();
    Function *F = dyn_cast<Function>(GV);
    if (F == nullptr)
      continue;

    for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) {
      for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ++I) {
        if (isa<InvokeInst>(I)) {
          InvokeInst *CI = cast<InvokeInst>(I);
          CI->setDoesNotThrow();
        }
      }
    }
  }
}
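Module::iterator only ever visits functions, so the dyn_cast<Function> above is a defensive check rather than a filter. With an LLVM new enough for range-based iteration, the same logic can be restated more compactly (sketch only, not a drop-in replacement; it assumes the same headers as the original file):
extern "C" void LLVMRustMarkAllFunctionsNounwindSketch(LLVMModuleRef M) {
  for (Function &F : *unwrap(M)) {
    F.setDoesNotThrow();
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (InvokeInst *II = dyn_cast<InvokeInst>(&I))
          II->setDoesNotThrow();
  }
}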
Example #12
void MemoryInstrumenter::instrumentPointerInstruction(Instruction *I) {
    BasicBlock::iterator Loc;
    if (isa<PHINode>(I)) {
        // Cannot insert hooks right after a PHI, because PHINodes have to be
        // grouped together.
        Loc = I->getParent()->getFirstNonPHI();
    } else if (!I->isTerminator()) {
        Loc = I;
        ++Loc;
    } else {
        assert(isa<InvokeInst>(I));
        InvokeInst *II = cast<InvokeInst>(I);
        BasicBlock *NormalDest = II->getNormalDest();
        // It's not always OK to insert HookTopLevel simply at the beginning of the
        // normal destination, because the normal destination may be shared by
        // multiple InvokeInsts. In that case, we will create a critical edge block,
        // and add the HookTopLevel over there.
        if (NormalDest->getUniquePredecessor()) {
            Loc = NormalDest->getFirstNonPHI();
        } else {
            BasicBlock *CritEdge = BasicBlock::Create(I->getContext(),
                                   "crit_edge",
                                   I->getParent()->getParent());
            Loc = BranchInst::Create(NormalDest, CritEdge);
            // Now that CritEdge becomes the new predecessor of NormalDest, replace
            // all phi uses of I->getParent() with CritEdge.
            for (auto J = NormalDest->begin();
                    NormalDest->getFirstNonPHI() != J;
                    ++J) {
                PHINode *Phi = cast<PHINode>(J);
                int i;
                while ((i = Phi->getBasicBlockIndex(I->getParent())) >= 0)
                    Phi->setIncomingBlock(i, CritEdge);
            }
            II->setNormalDest(CritEdge);
        }
    }
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
        instrumentPointer(I, LI->getPointerOperand(), Loc);
    else
        instrumentPointer(I, NULL, Loc);
}
Example #13
 void
 visitInvokeInst(InvokeInst &I)
 {
   Function* target = I.getCalledFunction();
   if (target == NULL) {
     anyUnknown = true;
     return;
   }
   if (isInternal(target)) {
     if (used != NULL) used->push(target);
   } else {
     interface->call(target->getName(), arg_begin(I), arg_end(I));
   }
   this->visitInstruction(I);
 }
Example #14
void Preparer::expandCallSite(CallSite CS) {
  // Skip the callsites that are not calling a va function.
  Value *Callee = CS.getCalledValue();
  FunctionType *CalleeType = cast<FunctionType>(
      cast<PointerType>(Callee->getType())->getElementType());
  if (!CalleeType->isVarArg()) {
    return;
  }

  vector<Value *> Args;
  for (CallSite::arg_iterator ArgI = CS.arg_begin();
      ArgI != CS.arg_end(); ArgI++) {
    Args.push_back(*ArgI);
  }
  Args.push_back(ConstantInt::get(
        IntegerType::get(CS.getInstruction()->getContext(), 8), 0));
  string InstName = "";
  if (CS.getInstruction()->getName() != "")
    InstName = CS.getInstruction()->getName().str() + ".padded";
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    CallInst *NewCI = CallInst::Create(Callee, Args, InstName, CI);
    NewCI->setAttributes(CI->getAttributes());
    CI->replaceAllUsesWith(NewCI);
    CI->eraseFromParent();
  } else if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
    InvokeInst *NewII = InvokeInst::Create(Callee,
                                           II->getNormalDest(),
                                           II->getUnwindDest(),
                                           Args,
                                           InstName,
                                           II);
    NewII->setAttributes(II->getAttributes());
    II->replaceAllUsesWith(NewII);
    II->eraseFromParent();
  }
}
Example #15
// First thing we need to do is scan the whole function for values that are
// live across unwind edges.  Each value that is live across an unwind edge
// we spill into a stack location, guaranteeing that there is nothing live
// across the unwind edge.  This process also splits all critical edges
// coming out of invoke's.
void LowerInvoke::
splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);
    SplitCriticalEdge(II, 1, this);
    assert(!isa<PHINode>(II->getNormalDest()) &&
           !isa<PHINode>(II->getUnwindDest()) &&
           "critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block.  This ensures that the argument
  // value itself cannot be live across the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
        isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    // This is always a no-op cast because we're casting AI to AI->getType() so
    // src and destination types are identical. BitCast is the only possibility.
    CastInst *NC = new BitCastInst(
      AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
    AI->replaceAllUsesWith(NC);
    // Normally it is forbidden to replace a CastInst's operand because it
    // could cause the opcode to reflect an illegal conversion. However, we're
    // replacing it here with the same value it was constructed with to simply
    // make NC its user.
    NC->setOperand(0, AI);
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle.  In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block.  Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      std::vector<Instruction*> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Scan all of the uses and see if the live range is live across an unwind
      // edge.  If we find a use live across an invoke edge, create an alloca
      // and spill the value.
      std::set<InvokeInst*> InvokesWithStoreInserted;

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}
Example #16
/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  std::vector<Value*> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to
  // keep track of which values came into them from this invoke, then remove
  // the entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  if (InlinedCodeInfo.ContainsCalls || InlinedCodeInfo.ContainsUnwinds) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      if (InlinedCodeInfo.ContainsCalls) {
        for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ){
          Instruction *I = BBI++;

          // We only need to check for function calls: inlined invoke
          // instructions require no special handling.
          if (!isa<CallInst>(I)) continue;
          CallInst *CI = cast<CallInst>(I);

          // If this call cannot unwind, don't convert it to an invoke.
          if (CI->doesNotThrow())
            continue;

          // Convert this function call into an invoke instruction.
          // First, split the basic block.
          BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

          // Next, create the new invoke instruction, inserting it at the end
          // of the old basic block.
          SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
          InvokeInst *II =
            InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                               InvokeArgs.begin(), InvokeArgs.end(),
                               CI->getName(), BB->getTerminator());
          II->setCallingConv(CI->getCallingConv());
          II->setAttributes(CI->getAttributes());

          // Make sure that anything using the call now uses the invoke!
          CI->replaceAllUsesWith(II);

          // Delete the unconditional branch inserted by splitBasicBlock
          BB->getInstList().pop_back();
          Split->getInstList().pop_front();  // Delete the original call

          // Update any PHI nodes in the exceptional block to indicate that
          // there is now a new entry in them.
          unsigned i = 0;
          for (BasicBlock::iterator I = InvokeDest->begin();
               isa<PHINode>(I); ++I, ++i) {
            PHINode *PN = cast<PHINode>(I);
            PN->addIncoming(InvokeDestPHIValues[i], BB);
          }

          // This basic block is now complete, start scanning the next one.
          break;
        }
      }

      if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
        // An UnwindInst requires special handling when it gets inlined into an
        // invoke site.  Once this happens, we know that the unwind would cause
        // a control transfer to the invoke exception destination, so we can
        // transform it into a direct branch to the exception destination.
        BranchInst::Create(InvokeDest, UI);

        // Delete the unwind instruction!
        UI->eraseFromParent();

        // Update any PHI nodes in the exceptional block to indicate that
        // there is now a new entry in them.
        unsigned i = 0;
        for (BasicBlock::iterator I = InvokeDest->begin();
             isa<PHINode>(I); ++I, ++i) {
          PHINode *PN = cast<PHINode>(I);
          PN->addIncoming(InvokeDestPHIValues[i], BB);
        }
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}
Example #17
bool CSDataRando::processCallSite(CallSite CS, FuncInfo &FI, PointerEquivalenceAnalysis &P, DSGraph *G) {
  bool IndirectCall = !isa<Function>(CS.getCalledValue()->stripPointerCasts());
  if (IndirectCall) { NumIndirectCalls++; }

  CallSite OriginalCS = originalCallSite(FI, CS);
  if (!DSA->canEncryptCall(OriginalCS)) {
    if (IndirectCall) { NumIndirectCantEncrypt++; }
    return false;
  }

  DSCallSite DSCS = G->getDSCallSiteForCallSite(OriginalCS);
  const Function *Callee = getEffectiveCallee(DSCS, FI, G);
  if (!Callee) {
    if (IndirectCall) { NumIndirectCantEncrypt++; }
    return false;
  }

  FuncInfo &CalleeInfo = FunctionInfo[Callee];
  Value *Clone = getCloneCalledValue(CS, CalleeInfo);
  if (!Clone || CalleeInfo.ArgNodes.empty()) {
    if (IndirectCall) { NumIndirectCantEncrypt++; }
    return false;
  }

  // We create a mapping between the formal argument nodes in the callee
  // function and the actual argument nodes in the caller function's graph.
  DSGraph::NodeMapTy NodeMap;
  DSGraph *CalleeG = DSA->getDSGraph(*Callee);

  // getArgNodesForCall places the return node and the VarArgs node in the
  // first two slots of the vector, followed by the nodes for the regular
  // pointer arguments.
  std::vector<DSNodeHandle> ArgNodes;
  getArgNodesForCall(CalleeG, DSCS, ArgNodes);

  // First the return value
  DSNodeHandle CalleeRetNode = ArgNodes[0];
  DSGraph::computeNodeMapping(CalleeRetNode, DSCS.getRetVal(), NodeMap);

  // Then VarArgs
  DSNodeHandle CalleeVarArgNode = ArgNodes[1];
  DSGraph::computeNodeMapping(CalleeVarArgNode, DSCS.getVAVal(), NodeMap);

  // And last the regular arguments.
  for (unsigned int i = 0; i < DSCS.getNumPtrArgs() && i + 2 < ArgNodes.size(); i++) {
    DSGraph::computeNodeMapping(ArgNodes[i + 2], DSCS.getPtrArg(i), NodeMap);
  }

  // Collect the arguments and masks to pass to call
  SmallVector<Value*, 8> Args;
  unsigned int i = 0;
  for (unsigned int e = CS.getFunctionType()->getNumParams(); i < e; i++) {
    Args.push_back(CS.getArgOperand(i));
  }

  for (const DSNode *N : CalleeInfo.ArgNodes) {
    Value *Mask = P.getMaskForNode(NodeMap[N]);
    Args.push_back(Mask);
  }

  // VarArgs go after masks
  for (unsigned int e = CS.arg_size(); i < e; i++) {
    Args.push_back(CS.getArgOperand(i));
  }

  // Do replacement
  Instruction *CI = CS.getInstruction();
  Value *Call;
  if (CS.isCall()) {
    Call = CallInst::Create(Clone, Args, "", CI);
  } else {
    InvokeInst *II = cast<InvokeInst>(CI);
    Call = InvokeInst::Create(Clone, II->getNormalDest(), II->getUnwindDest(), Args, "", II);
  }
  CallSite NewCS(Call);
  NewCS.setCallingConv(CS.getCallingConv());

  CI->replaceAllUsesWith(Call);
  P.replace(CI, Call);
  CI->eraseFromParent();

  return true;
}
Example #18
// Convert the given call to use normalized argument/return types.
template <class T> static bool ConvertCall(T *Call, Pass *P) {
  // Don't try to change calls to intrinsics.
  if (isa<IntrinsicInst>(Call))
    return false;
  FunctionType *FTy = cast<FunctionType>(
      Call->getCalledValue()->getType()->getPointerElementType());
  FunctionType *NFTy = NormalizeFunctionType(FTy);
  if (NFTy == FTy)
    return false; // No change needed.

  // Convert arguments.
  SmallVector<Value *, 8> Args;
  for (unsigned I = 0; I < Call->getNumArgOperands(); ++I) {
    Value *Arg = Call->getArgOperand(I);
    if (NFTy->getParamType(I) != FTy->getParamType(I)) {
      Instruction::CastOps CastType =
          Call->getAttributes().hasAttribute(I + 1, Attribute::SExt) ?
          Instruction::SExt : Instruction::ZExt;
      Arg = CopyDebug(CastInst::Create(CastType, Arg, NFTy->getParamType(I),
                                       "arg_ext", Call), Call);
    }
    Args.push_back(Arg);
  }
  Value *CastFunc =
    CopyDebug(new BitCastInst(Call->getCalledValue(), NFTy->getPointerTo(),
                              Call->getName() + ".arg_cast", Call), Call);
  Value *Result = NULL;
  if (CallInst *OldCall = dyn_cast<CallInst>(Call)) {
    CallInst *NewCall = CopyDebug(CallInst::Create(CastFunc, Args, "", OldCall),
                                  OldCall);
    NewCall->takeName(OldCall);
    NewCall->setAttributes(OldCall->getAttributes());
    NewCall->setCallingConv(OldCall->getCallingConv());
    NewCall->setTailCall(OldCall->isTailCall());
    Result = NewCall;

    if (FTy->getReturnType() != NFTy->getReturnType()) {
      Result = CopyDebug(new TruncInst(NewCall, FTy->getReturnType(),
                                       NewCall->getName() + ".ret_trunc", Call),
                         Call);
    }
  } else if (InvokeInst *OldInvoke = dyn_cast<InvokeInst>(Call)) {
    BasicBlock *Parent = OldInvoke->getParent();
    BasicBlock *NormalDest = OldInvoke->getNormalDest();
    BasicBlock *UnwindDest = OldInvoke->getUnwindDest();

    if (FTy->getReturnType() != NFTy->getReturnType()) {
      if (BasicBlock *SplitDest = SplitCriticalEdge(Parent, NormalDest)) {
        NormalDest = SplitDest;
      }
    }

    InvokeInst *New = CopyDebug(InvokeInst::Create(CastFunc, NormalDest,
                                                   UnwindDest, Args,
                                                   "", OldInvoke),
                                OldInvoke);
    New->takeName(OldInvoke);

    if (FTy->getReturnType() != NFTy->getReturnType()) {
      Result = CopyDebug(new TruncInst(New, FTy->getReturnType(),
                                       New->getName() + ".ret_trunc",
                                       NormalDest->getTerminator()),
                         OldInvoke);
    } else {
      Result = New;
    }

    New->setAttributes(OldInvoke->getAttributes());
    New->setCallingConv(OldInvoke->getCallingConv());
  }
  Call->replaceAllUsesWith(Result);
  Call->eraseFromParent();
  return true;
}
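A hypothetical dispatcher for the template above (illustrative only): since ConvertCall is templated over the instruction type, the caller would typically branch on call vs. invoke before instantiating it.
static bool ConvertInstruction(Instruction *Inst, Pass *P) {
  if (CallInst *Call = dyn_cast<CallInst>(Inst))
    return ConvertCall(Call, P);
  if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst))
    return ConvertCall(Invoke, P);
  return false; // not a call site; nothing to normalize
}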
Example #19
/// Replaces the given call site (Call or Invoke) with a gc.statepoint
/// intrinsic with an empty deoptimization arguments list.  This does
/// NOT do explicit relocation for GC support.
static Value *ReplaceWithStatepoint(const CallSite &CS, /* to replace */
                                    Pass *P) {
  BasicBlock *BB = CS.getInstruction()->getParent();
  Function *F = BB->getParent();
  Module *M = F->getParent();
  assert(M && "must be set");

  // TODO: technically, a pass is not allowed to get functions from within a
  // function pass since it might trigger a new function addition.  Refactor
  // this logic out to the initialization of the pass.  Doesn't appear to
  // matter in practice.

  // Fill in the one generic type'd argument (the function is also vararg)
  std::vector<Type *> argTypes;
  argTypes.push_back(CS.getCalledValue()->getType());

  Function *gc_statepoint_decl = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_statepoint, argTypes);

  // Then go ahead and use the builder to actually do the inserts.  We insert
  // immediately before the previous instruction under the assumption that all
  // arguments will be available here.  We can't insert afterwards since we may
  // be replacing a terminator.
  Instruction *insertBefore = CS.getInstruction();
  IRBuilder<> Builder(insertBefore);
  // First, create the statepoint (with all live ptrs as arguments).
  std::vector<llvm::Value *> args;
  // target, #args, unused, args
  Value *Target = CS.getCalledValue();
  args.push_back(Target);
  int callArgSize = CS.arg_size();
  args.push_back(
      ConstantInt::get(Type::getInt32Ty(M->getContext()), callArgSize));
  // TODO: add a 'Needs GC-rewrite' later flag
  args.push_back(ConstantInt::get(Type::getInt32Ty(M->getContext()), 0));

  // Copy all the arguments of the original call
  args.insert(args.end(), CS.arg_begin(), CS.arg_end());

  // Create the statepoint given all the arguments
  Instruction *token = nullptr;
  AttributeSet return_attributes;
  if (CS.isCall()) {
    CallInst *toReplace = cast<CallInst>(CS.getInstruction());
    CallInst *call =
        Builder.CreateCall(gc_statepoint_decl, args, "safepoint_token");
    call->setTailCall(toReplace->isTailCall());
    call->setCallingConv(toReplace->getCallingConv());

    // Before we have to worry about GC semantics, all attributes are legal
    AttributeSet new_attrs = toReplace->getAttributes();
    // In case we can handle this set of attributes, set up function attrs
    // directly on statepoint and return attrs later for gc_result intrinsic.
    call->setAttributes(new_attrs.getFnAttributes());
    return_attributes = new_attrs.getRetAttributes();
    // TODO: handle param attributes

    token = call;

    // Put the following gc_result and gc_relocate calls immediately after
    // the old call (which we're about to delete)
    BasicBlock::iterator next(toReplace);
    assert(BB->end() != next && "not a terminator, must have next");
    next++;
    Instruction *IP = &*(next);
    Builder.SetInsertPoint(IP);
    Builder.SetCurrentDebugLocation(IP->getDebugLoc());

  } else if (CS.isInvoke()) {
    InvokeInst *toReplace = cast<InvokeInst>(CS.getInstruction());

    // Insert the new invoke into the old block.  We'll remove the old one in a
    // moment at which point this will become the new terminator for the
    // original block.
    InvokeInst *invoke = InvokeInst::Create(
        gc_statepoint_decl, toReplace->getNormalDest(),
        toReplace->getUnwindDest(), args, "", toReplace->getParent());
    invoke->setCallingConv(toReplace->getCallingConv());

    // Currently we will fail on parameter attributes and on certain
    // function attributes.
    AttributeSet new_attrs = toReplace->getAttributes();
    // In case we can handle this set of attributes, set up function attrs
    // directly on statepoint and return attrs later for gc_result intrinsic.
    invoke->setAttributes(new_attrs.getFnAttributes());
    return_attributes = new_attrs.getRetAttributes();

    token = invoke;

    // We'll insert the gc.result into the normal block
    BasicBlock *normalDest = normalizeBBForInvokeSafepoint(
        toReplace->getNormalDest(), invoke->getParent());
    Instruction *IP = &*(normalDest->getFirstInsertionPt());
    Builder.SetInsertPoint(IP);
  } else {
    llvm_unreachable("unexpected type of CallSite");
  }
  assert(token);

  // Handle the return value of the original call - update all uses to use a
  // gc_result hanging off the statepoint node we just inserted

  // Only add the gc_result iff there is actually a used result
  if (!CS.getType()->isVoidTy() && !CS.getInstruction()->use_empty()) {
    Instruction *gc_result = nullptr;
    std::vector<Type *> types;     // one per 'any' type
    types.push_back(CS.getType()); // result type
    auto get_gc_result_id = [&](Type &Ty) {
      if (Ty.isIntegerTy()) {
        return Intrinsic::experimental_gc_result_int;
      } else if (Ty.isFloatingPointTy()) {
        return Intrinsic::experimental_gc_result_float;
      } else if (Ty.isPointerTy()) {
        return Intrinsic::experimental_gc_result_ptr;
      } else {
        llvm_unreachable("non java type encountered");
      }
    };
    Intrinsic::ID Id = get_gc_result_id(*CS.getType());
    Value *gc_result_func = Intrinsic::getDeclaration(M, Id, types);

    std::vector<Value *> args;
    args.push_back(token);
    gc_result = Builder.CreateCall(
        gc_result_func, args,
        CS.getInstruction()->hasName() ? CS.getInstruction()->getName() : "");

    cast<CallInst>(gc_result)->setAttributes(return_attributes);
    return gc_result;
  } else {
    // No return value for the call.
    return nullptr;
  }
}
Example #20
// Specialize F by replacing the arguments (keys) in replacements with the 
// constants (values).  Replace all calls to F that pass those constants with
// a call to the specialized function.  Returns the specialized function.
static Function* 
SpecializeFunction(Function* F, 
                   ValueMap<const Value*, Value*>& replacements) {
  // arg numbers of deleted arguments
  DenseMap<unsigned, const Argument*> deleted;
  for (ValueMap<const Value*, Value*>::iterator 
         repb = replacements.begin(), repe = replacements.end();
       repb != repe; ++repb) {
    Argument const *arg = cast<const Argument>(repb->first);
    deleted[arg->getArgNo()] = arg;
  }

  Function* NF = CloneFunction(F, replacements,
                               /*ModuleLevelChanges=*/false);
  NF->setLinkage(GlobalValue::InternalLinkage);
  F->getParent()->getFunctionList().push_back(NF);

  for (Value::use_iterator ii = F->use_begin(), ee = F->use_end(); 
       ii != ee; ) {
    Value::use_iterator i = ii;
    ++ii;
    User *U = *i;
    CallSite CS(U);
    if (CS) {
      if (CS.getCalledFunction() == F) {
        SmallVector<Value*, 6> args;
        // Assemble the non-specialized arguments for the updated callsite.
        // In the process, make sure that the specialized arguments are
        // constant and match the specialization.  If that's not the case,
        // this callsite needs to call the original or some other
        // specialization; don't change it here.
        CallSite::arg_iterator as = CS.arg_begin(), ae = CS.arg_end();
        for (CallSite::arg_iterator ai = as; ai != ae; ++ai) {
          DenseMap<unsigned, const Argument*>::iterator delit = deleted.find(
            std::distance(as, ai));
          if (delit == deleted.end())
            args.push_back(cast<Value>(ai));
          else {
            Constant *ci = dyn_cast<Constant>(ai);
            if (!(ci && ci == replacements[delit->second]))
              goto next_use;
          }
        }
        Value* NCall;
        if (CallInst *CI = dyn_cast<CallInst>(U)) {
          NCall = CallInst::Create(NF, args.begin(), args.end(), 
                                   CI->getName(), CI);
          cast<CallInst>(NCall)->setTailCall(CI->isTailCall());
          cast<CallInst>(NCall)->setCallingConv(CI->getCallingConv());
        } else {
          InvokeInst *II = cast<InvokeInst>(U);
          NCall = InvokeInst::Create(NF, II->getNormalDest(),
                                     II->getUnwindDest(),
                                     args.begin(), args.end(), 
                                     II->getName(), II);
          cast<InvokeInst>(NCall)->setCallingConv(II->getCallingConv());
        }
        CS.getInstruction()->replaceAllUsesWith(NCall);
        CS.getInstruction()->eraseFromParent();
        ++numReplaced;
      }
    }
    next_use:;
  }
  return NF;
}
Example #21
void AAAnalyzer::handle_inst(Instruction *inst, FunctionWrapper * parent_func) {
    //outs()<<*inst<<"\n"; outs().flush();
    switch (inst->getOpcode()) {
            // common/bitwise binary operations
            // Terminator instructions
        case Instruction::Ret:
        {
            ReturnInst* retInst = ((ReturnInst*) inst);
            if (retInst->getNumOperands() > 0 && !retInst->getOperandUse(0)->getType()->isVoidTy()) {
                parent_func->addRet(retInst->getOperandUse(0));
            }
        }
            break;
        case Instruction::Resume:
        {
            Value* resume = ((ResumeInst*) inst)->getOperand(0);
            parent_func->addResume(resume);
        }
            break;
        case Instruction::Switch:
        case Instruction::Br:
        case Instruction::IndirectBr:
        case Instruction::Unreachable:
            break;

            // vector operations
        case Instruction::ExtractElement:
        {
        }
            break;
        case Instruction::InsertElement:
        {
        }
            break;
        case Instruction::ShuffleVector:
        {
        }
            break;

            // aggregate operations
        case Instruction::ExtractValue:
        {
            Value * agg = ((ExtractValueInst*) inst)->getAggregateOperand();
            DyckVertex* aggV = wrapValue(agg);

            Type* aggTy = agg->getType();

            ArrayRef<unsigned> indices = ((ExtractValueInst*) inst)->getIndices();
            DyckVertex* currentStruct = aggV;

            for (unsigned int i = 0; i < indices.size(); i++) {
                if (isa<CompositeType>(aggTy) && aggTy->isSized()) {
                    if (!aggTy->isStructTy()) {
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);
#ifndef ARRAY_SIMPLIFIED
                        current = addPtrOffset(current, (int) indices[i] * dl.getTypeAllocSize(aggTy), dgraph);
#endif
                        if (i == indices.size() - 1) {
                            this->makeAlias(currentStruct, wrapValue(inst));
                        }
                    } else {
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);

                        if (i != indices.size() - 1) {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], NULL);
                        } else {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], wrapValue(inst));
                        }
                    }
                } else {
                    break;
                }
            }
        }
            break;
        case Instruction::InsertValue:
        {
            DyckVertex* resultV = wrapValue(inst);
            Value * agg = ((InsertValueInst*) inst)->getAggregateOperand();
            if (!isa<UndefValue>(agg)) {
                makeAlias(resultV, wrapValue(agg));
            }

            Value * val = ((InsertValueInst*) inst)->getInsertedValueOperand();
            DyckVertex* insertedVal = wrapValue(val);

            Type *aggTy = inst->getType();

            ArrayRef<unsigned> indices = ((InsertValueInst*) inst)->getIndices();

            DyckVertex* currentStruct = resultV;

            for (unsigned int i = 0; i < indices.size(); i++) {
                if (isa<CompositeType>(aggTy) && aggTy->isSized()) {
                    if (!aggTy->isStructTy()) {
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);
#ifndef ARRAY_SIMPLIFIED
                        current = addPtrOffset(current, (int) indices[i] * dl.getTypeAllocSize(aggTy), dgraph);
#endif
                        if (i == indices.size() - 1) {
                            this->makeAlias(currentStruct, insertedVal);
                        }
                    } else {
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);

                        if (i != indices.size() - 1) {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], NULL);
                        } else {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], insertedVal);
                        }
                    }
                } else {
                    break;
                }
            }
        }
            break;

            // memory accessing and addressing operations
        case Instruction::Alloca:
        {
        }
            break;
        case Instruction::Fence:
        {
        }
            break;
        case Instruction::AtomicCmpXchg:
        {
            Value * retXchg = inst;
            Value * ptrXchg = inst->getOperand(0);
            Value * newXchg = inst->getOperand(2);
            addPtrTo(wrapValue(ptrXchg), wrapValue(retXchg));
            addPtrTo(wrapValue(ptrXchg), wrapValue(newXchg));
        }
            break;
        case Instruction::AtomicRMW:
        {
            Value * retRmw = inst;
            Value * ptrRmw = ((AtomicRMWInst*) inst)->getPointerOperand();
            addPtrTo(wrapValue(ptrRmw), wrapValue(retRmw));

            switch (((AtomicRMWInst*) inst)->getOperation()) {
                case AtomicRMWInst::Max:
                case AtomicRMWInst::Min:
                case AtomicRMWInst::UMax:
                case AtomicRMWInst::UMin:
                case AtomicRMWInst::Xchg:
                {
                    Value * newRmw = ((AtomicRMWInst*) inst)->getValOperand();
                    addPtrTo(wrapValue(ptrRmw), wrapValue(newRmw));
                }
                    break;
                default:
                    //others are binary ops like add/sub/...
                    ///@TODO
                    break;
            }
        }
            break;
        case Instruction::Load:
        {
            Value *lval = inst;
            Value *ladd = inst->getOperand(0);
            addPtrTo(wrapValue(ladd), wrapValue(lval));
        }
            break;
        case Instruction::Store:
        {
            Value * sval = inst->getOperand(0);
            Value * sadd = inst->getOperand(1);
            addPtrTo(wrapValue(sadd), wrapValue(sval));
        }
            break;
        case Instruction::GetElementPtr:
        {
            makeAlias(wrapValue(inst), handle_gep((GEPOperator*) inst));
        }
            break;

            // conversion operations
        case Instruction::Trunc:
        case Instruction::ZExt:
        case Instruction::SExt:
        case Instruction::FPTrunc:
        case Instruction::FPExt:
        case Instruction::FPToUI:
        case Instruction::FPToSI:
        case Instruction::UIToFP:
        case Instruction::SIToFP:
        case Instruction::BitCast:
        case Instruction::PtrToInt:
        case Instruction::IntToPtr:
        {
            Value * itpv = inst->getOperand(0);
            makeAlias(wrapValue(inst), wrapValue(itpv));
        }
            break;

            // other operations
        case Instruction::Invoke: // invoke is a terminator instruction
        {
            InvokeInst * invoke = (InvokeInst*) inst;
            LandingPadInst* lpd = invoke->getLandingPadInst();
            parent_func->addLandingPad(invoke, lpd);

            Value * cv = invoke->getCalledValue();
            vector<Value*> args;
            for (unsigned i = 0; i < invoke->getNumArgOperands(); i++) {
                args.push_back(invoke->getArgOperand(i));
            }

            this->handle_invoke_call_inst(invoke, cv, &args, parent_func);
        }
            break;
        case Instruction::Call:
        {
            CallInst * callinst = (CallInst*) inst;

            if (callinst->isInlineAsm()) {
                break;
            }

            Value * cv = callinst->getCalledValue();
            vector<Value*> args;
            for (unsigned i = 0; i < callinst->getNumArgOperands(); i++) {
                args.push_back(callinst->getArgOperand(i));
            }

            this->handle_invoke_call_inst(callinst, cv, &args, parent_func);
        }
            break;
        case Instruction::PHI:
        {
            PHINode *phi = (PHINode *) inst;
            int nums = phi->getNumIncomingValues();
            for (int i = 0; i < nums; i++) {
                Value * p = phi->getIncomingValue(i);
                makeAlias(wrapValue(inst), wrapValue(p));
            }
        }
            break;
        case Instruction::Select:
        {
            Value *first = ((SelectInst*) inst)->getTrueValue();
            Value *second = ((SelectInst*) inst)->getFalseValue();
            makeAlias(wrapValue(inst), wrapValue(first));
            makeAlias(wrapValue(inst), wrapValue(second));
        }
            break;
        case Instruction::VAArg:
        {
            parent_func->addVAArg(inst);

            DyckVertex* vaarg = wrapValue(inst);
            Value * ptrVaarg = inst->getOperand(0);
            addPtrTo(wrapValue(ptrVaarg), vaarg);
        }
            break;
        case Instruction::LandingPad: // handled with invoke inst
        case Instruction::ICmp:
        case Instruction::FCmp:
        default:
            break;
    }
}
Example #22
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes.  This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
///
/// Returns true to indicate that the next block should be skipped.
static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;
    
    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;

    // LIBUNWIND: merge selector instructions.
    if (EHSelectorInst *Inner = dyn_cast<EHSelectorInst>(CI)) {
      EHSelectorInst *Outer = Invoke.getOuterSelector();
      if (!Outer) continue;

      bool innerIsOnlyCleanup = isCleanupOnlySelector(Inner);
      bool outerIsOnlyCleanup = isCleanupOnlySelector(Outer);

      // If both selectors contain only cleanups, we don't need to do
      // anything.  TODO: this is really just a very specific instance
      // of a much more general optimization.
      if (innerIsOnlyCleanup && outerIsOnlyCleanup) continue;

      // Otherwise, we just append the outer selector to the inner selector.
      SmallVector<Value*, 16> NewSelector;
      for (unsigned i = 0, e = Inner->getNumArgOperands(); i != e; ++i)
        NewSelector.push_back(Inner->getArgOperand(i));
      for (unsigned i = 2, e = Outer->getNumArgOperands(); i != e; ++i)
        NewSelector.push_back(Outer->getArgOperand(i));

      CallInst *NewInner =
        IRBuilder<>(Inner).CreateCall(Inner->getCalledValue(), NewSelector);
      // No need to copy attributes, calling convention, etc.
      NewInner->takeName(Inner);
      Inner->replaceAllUsesWith(NewInner);
      Inner->eraseFromParent();
      continue;
    }
    
    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow())
      continue;
    
    // Convert this function call into an invoke instruction.
    // First, split the basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();

    // LIBUNWIND: If this is a call to @llvm.eh.resume, just branch
    // directly to the new landing pad.
    if (Invoke.forwardEHResume(CI, BB)) {
      // TODO: 'Split' is now unreachable; clean it up.

      // We want to leave the original call intact so that the call
      // graph and other structures won't get misled.  We also have to
      // avoid processing the next block, or we'll iterate here forever.
      return true;
    }

    // Otherwise, create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split,
                         Invoke.getOuterUnwindDest(),
                         InvokeArgs, CI->getName(), BB);
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());
    
    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    Split->getInstList().pop_front();  // Delete the original call

    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return false;
  }

  return false;
}
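
For context, here is a minimal sketch of how the inliner might drive this helper over the blocks just cloned into the caller. The enclosing function name and the FirstNewBlock bound are assumptions for illustration (not the actual InlineFunction code), and it is written against the same LLVM vintage as the snippet above, where a Function::iterator converts implicitly to a BasicBlock*.

// Sketch only: walk the inlined blocks and honor the "skip the next block"
// request documented above.  FirstNewBlock is assumed to point at the first
// cloned block.
static void rewriteInlinedCallsSketch(Function &Caller,
                                      Function::iterator FirstNewBlock,
                                      InvokeInliningInfo &Invoke) {
  for (Function::iterator BB = FirstNewBlock, E = Caller.end(); BB != E; ++BB) {
    if (HandleCallsInBlockInlinedThroughInvoke(BB, Invoke)) {
      // forwardEHResume fired: the freshly split (now unreachable) block sits
      // right after BB, so advancing once more skips it safely.
      ++BB;
    }
  }
}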
Example No. 23
/// splitLiveRangesAcrossInvokes - Each value that is live across an unwind
/// edge is spilled into a stack location, guaranteeing that nothing is live
/// across the unwind edge.  This process also splits all critical edges
/// coming out of invokes.
/// FIXME: Move this function to a common utility file (Local.cpp?) so
/// both SjLj and LowerInvoke can use it.
void SjLjEHPass::
splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);

    // FIXME: New EH - This if-condition will be always true in the new scheme.
    if (II->getUnwindDest()->isLandingPad()) {
      SmallVector<BasicBlock*, 2> NewBBs;
      SplitLandingPadPredecessors(II->getUnwindDest(), II->getParent(),
                                  ".1", ".2", this, NewBBs);
      LPadSuccMap[II] = *succ_begin(NewBBs[0]);
    } else {
      SplitCriticalEdge(II, 1, this);
    }

    assert(!isa<PHINode>(II->getNormalDest()) &&
           !isa<PHINode>(II->getUnwindDest()) &&
           "Critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block.  This ensures that the argument
  // value itself cannot be live across the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
        isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    Type *Ty = AI->getType();
    // Aggregate types can't be cast, but are legal argument types, so we have
    // to handle them differently. We use an extract/insert pair as a
    // lightweight method to achieve the same goal.
    if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
      Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
      Instruction *NI = InsertValueInst::Create(AI, EI, 0);
      NI->insertAfter(EI);
      AI->replaceAllUsesWith(NI);
      // Set the operand of the instructions back to the AllocaInst.
      EI->setOperand(0, AI);
      NI->setOperand(0, AI);
    } else {
      // This is always a no-op cast because we're casting AI to AI->getType()
      // so src and destination types are identical. BitCast is the only
      // possibility.
      CastInst *NC = new BitCastInst(
        AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
      AI->replaceAllUsesWith(NC);
      // Set the operand of the cast instruction back to the AllocaInst.
      // Normally it's forbidden to replace a CastInst's operand because it
      // could cause the opcode to reflect an illegal conversion. However,
      // we're replacing it here with the same value it was constructed with.
      // We do this because the above replaceAllUsesWith() clobbered the
      // operand, but we want this one to remain.
      NC->setOperand(0, AI);
    }
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle.  In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block.  Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction*,16> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      // FIXME: Spilling this way is overkill, as it forces all uses of
      // the value to be reloaded from the stack slot, even those that aren't
      // in the unwind blocks. We should be more selective.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}
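
splitLiveRangesAcrossInvokes expects the caller to have collected the invokes first; note that it dereferences Invokes.back(), so it must only be called with a non-empty vector. A minimal sketch of that collection step, using only APIs already seen above (the helper name is hypothetical):

// Sketch only: gather every invoke in F so the spilling code above can see
// all unwind edges at once.
static void collectInvokesSketch(Function &F,
                                 SmallVector<InvokeInst*, 16> &Invokes) {
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator()))
      Invokes.push_back(II);
}

If Invokes ends up empty there is nothing live across an unwind edge and the pass can return early.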
Example No. 24
/// Replaces the given call site (Call or Invoke) with a gc.statepoint
/// intrinsic with an empty deoptimization arguments list.  This does
/// NOT do explicit relocation for GC support.
static Value *ReplaceWithStatepoint(const CallSite &CS, /* to replace */
                                    Pass *P) {
  assert(CS.getInstruction()->getParent()->getParent()->getParent() &&
         "must be set");

  // TODO: technically, a pass is not allowed to get functions from within a
  // function pass since it might trigger a new function addition.  Refactor
  // this logic out to the initialization of the pass.  Doesn't appear to
  // matter in practice.

  // Then go ahead and use the builder to actually do the inserts.  We insert
  // immediately before the previous instruction under the assumption that all
  // arguments will be available here.  We can't insert afterwards since we may
  // be replacing a terminator.
  IRBuilder<> Builder(CS.getInstruction());

  // Note: The gc args are not filled in at this time, that's handled by
  // RewriteStatepointsForGC (which is currently under review).

  // Create the statepoint given all the arguments
  Instruction *Token = nullptr;
  AttributeSet OriginalAttrs;

  if (CS.isCall()) {
    CallInst *ToReplace = cast<CallInst>(CS.getInstruction());
    CallInst *Call = Builder.CreateGCStatepointCall(
        CS.getCalledValue(), makeArrayRef(CS.arg_begin(), CS.arg_end()), None,
        None, "safepoint_token");
    Call->setTailCall(ToReplace->isTailCall());
    Call->setCallingConv(ToReplace->getCallingConv());

    // Before we have to worry about GC semantics, all attributes are legal
    // TODO: handle param attributes
    OriginalAttrs = ToReplace->getAttributes();

    // If we can handle this set of attributes, set the function attributes
    // directly on the statepoint and apply the return attributes to the
    // gc_result intrinsic later.
    Call->setAttributes(OriginalAttrs.getFnAttributes());

    Token = Call;

    // Put the following gc_result and gc_relocate calls immediately after the
    // old call (which we're about to delete).
    assert(ToReplace->getNextNode() && "not a terminator, must have next");
    Builder.SetInsertPoint(ToReplace->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getNextNode()->getDebugLoc());
  } else if (CS.isInvoke()) {
    InvokeInst *ToReplace = cast<InvokeInst>(CS.getInstruction());

    // Insert the new invoke into the old block.  We'll remove the old one in a
    // moment at which point this will become the new terminator for the
    // original block.
    Builder.SetInsertPoint(ToReplace->getParent());
    InvokeInst *Invoke = Builder.CreateGCStatepointInvoke(
        CS.getCalledValue(), ToReplace->getNormalDest(),
        ToReplace->getUnwindDest(), makeArrayRef(CS.arg_begin(), CS.arg_end()),
        Builder.getInt32(0), None, "safepoint_token");

    // Currently we will fail on parameter attributes and on certain
    // function attributes.
    OriginalAttrs = ToReplace->getAttributes();

    // If we can handle this set of attributes, set the function attributes
    // directly on the statepoint and apply the return attributes to the
    // gc_result intrinsic later.
    Invoke->setAttributes(OriginalAttrs.getFnAttributes());

    Token = Invoke;

    // We'll insert the gc.result into the normal block
    BasicBlock *NormalDest = normalizeBBForInvokeSafepoint(
        ToReplace->getNormalDest(), Invoke->getParent());
    Builder.SetInsertPoint(NormalDest->getFirstInsertionPt());
  } else {
    llvm_unreachable("unexpect type of CallSite");
  }
  assert(Token);

  // Handle the return value of the original call - update all uses to use a
  // gc_result hanging off the statepoint node we just inserted

  // Only add the gc_result iff there is actually a used result
  if (!CS.getType()->isVoidTy() && !CS.getInstruction()->use_empty()) {
    std::string TakenName =
        CS.getInstruction()->hasName() ? CS.getInstruction()->getName() : "";
    CallInst *GCResult = Builder.CreateGCResult(Token, CS.getType(), TakenName);
    GCResult->setAttributes(OriginalAttrs.getRetAttributes());
    return GCResult;
  } else {
    // No return value for the call.
    return nullptr;
  }
}
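
ReplaceWithStatepoint intentionally leaves the original call or invoke in place; rewiring its users and erasing it is the caller's job. A hedged sketch of that caller-side cleanup follows, where the ParsePointsNeeded worklist, the helper name, and the pass pointer are assumptions made for illustration rather than the actual PlaceSafepoints driver.

// Sketch only: finish the rewrite for each call site that needs a statepoint.
// ParsePointsNeeded and P are hypothetical names.
static void rewriteParsePointsSketch(std::vector<CallSite> &ParsePointsNeeded,
                                     Pass *P) {
  for (size_t i = 0; i < ParsePointsNeeded.size(); ++i) {
    CallSite &CS = ParsePointsNeeded[i];
    Value *GCResult = ReplaceWithStatepoint(CS, P);
    Instruction *Original = CS.getInstruction();
    if (GCResult)
      Original->replaceAllUsesWith(GCResult);   // users now read the gc_result
    Original->eraseFromParent();                // the statepoint replaces it
  }
}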
Example No. 25
/// Replaces the given call site (Call or Invoke) with a gc.statepoint
/// intrinsic with an empty deoptimization arguments list.  This does
/// NOT do explicit relocation for GC support.
static Value *ReplaceWithStatepoint(const CallSite &CS /* to replace */) {
  assert(CS.getInstruction()->getModule() && "must be set");

  // TODO: technically, a pass is not allowed to get functions from within a
  // function pass since it might trigger a new function addition.  Refactor
  // this logic out to the initialization of the pass.  Doesn't appear to
  // matter in practice.

  // Then go ahead and use the builder to actually do the inserts.  We insert
  // immediately before the previous instruction under the assumption that all
  // arguments will be available here.  We can't insert afterwards since we may
  // be replacing a terminator.
  IRBuilder<> Builder(CS.getInstruction());

  // Note: The gc args are not filled in at this time, that's handled by
  // RewriteStatepointsForGC (which is currently under review).

  // Create the statepoint given all the arguments
  Instruction *Token = nullptr;

  uint64_t ID;
  uint32_t NumPatchBytes;

  AttributeSet OriginalAttrs = CS.getAttributes();
  Attribute AttrID =
      OriginalAttrs.getAttribute(AttributeSet::FunctionIndex, "statepoint-id");
  Attribute AttrNumPatchBytes = OriginalAttrs.getAttribute(
      AttributeSet::FunctionIndex, "statepoint-num-patch-bytes");

  AttrBuilder AttrsToRemove;
  bool HasID = AttrID.isStringAttribute() &&
               !AttrID.getValueAsString().getAsInteger(10, ID);

  if (HasID)
    AttrsToRemove.addAttribute("statepoint-id");
  else
    ID = 0xABCDEF00;

  bool HasNumPatchBytes =
      AttrNumPatchBytes.isStringAttribute() &&
      !AttrNumPatchBytes.getValueAsString().getAsInteger(10, NumPatchBytes);

  if (HasNumPatchBytes)
    AttrsToRemove.addAttribute("statepoint-num-patch-bytes");
  else
    NumPatchBytes = 0;

  OriginalAttrs = OriginalAttrs.removeAttributes(
      CS.getInstruction()->getContext(), AttributeSet::FunctionIndex,
      AttrsToRemove);

  if (CS.isCall()) {
    CallInst *ToReplace = cast<CallInst>(CS.getInstruction());
    CallInst *Call = Builder.CreateGCStatepointCall(
        ID, NumPatchBytes, CS.getCalledValue(),
        makeArrayRef(CS.arg_begin(), CS.arg_end()), None, None,
        "safepoint_token");
    Call->setTailCall(ToReplace->isTailCall());
    Call->setCallingConv(ToReplace->getCallingConv());

    // If we can handle this set of attributes, set the function attributes
    // directly on the statepoint and apply the return attributes to the
    // gc_result intrinsic later.
    Call->setAttributes(OriginalAttrs.getFnAttributes());

    Token = Call;

    // Put the following gc_result and gc_relocate calls immediately after
    // the old call (which we're about to delete).
    assert(ToReplace->getNextNode() && "not a terminator, must have next");
    Builder.SetInsertPoint(ToReplace->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getNextNode()->getDebugLoc());
  } else if (CS.isInvoke()) {
    InvokeInst *ToReplace = cast<InvokeInst>(CS.getInstruction());

    // Insert the new invoke into the old block.  We'll remove the old one in a
    // moment at which point this will become the new terminator for the
    // original block.
    Builder.SetInsertPoint(ToReplace->getParent());
    InvokeInst *Invoke = Builder.CreateGCStatepointInvoke(
        ID, NumPatchBytes, CS.getCalledValue(), ToReplace->getNormalDest(),
        ToReplace->getUnwindDest(), makeArrayRef(CS.arg_begin(), CS.arg_end()),
        None, None, "safepoint_token");

    Invoke->setCallingConv(ToReplace->getCallingConv());

    // If we can handle this set of attributes, set the function attributes
    // directly on the statepoint and apply the return attributes to the
    // gc_result intrinsic later.
    Invoke->setAttributes(OriginalAttrs.getFnAttributes());

    Token = Invoke;

    // We'll insert the gc.result into the normal block
    BasicBlock *NormalDest = ToReplace->getNormalDest();
    // Cannot insert the gc.result if PHI nodes are present.
    // Such cases should have been removed before running this function.
    assert(!isa<PHINode>(NormalDest->begin()));
    Instruction *IP = &*(NormalDest->getFirstInsertionPt());
    Builder.SetInsertPoint(IP);
  } else {
    llvm_unreachable("unexpect type of CallSite");
  }
  assert(Token);

  // Handle the return value of the original call - update all uses to use a
  // gc_result hanging off the statepoint node we just inserted

  // Only add the gc_result iff there is actually a used result
  if (!CS.getType()->isVoidTy() && !CS.getInstruction()->use_empty()) {
    std::string TakenName =
        CS.getInstruction()->hasName() ? CS.getInstruction()->getName() : "";
    CallInst *GCResult = Builder.CreateGCResult(Token, CS.getType(), TakenName);
    GCResult->setAttributes(OriginalAttrs.getRetAttributes());
    return GCResult;
  } else {
    // No return value for the call.
    return nullptr;
  }
}
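
One subtle point above is the negation of getAsInteger(): StringRef::getAsInteger reports failure by returning true, so HasID and HasNumPatchBytes are true only when the string attribute parses cleanly as an integer. A tiny sketch of that contract in isolation (the helper name is an assumption; it assumes the usual LLVM StringRef header is available):

// Sketch only: returns true and sets ID only if S is a valid base-10 integer,
// mirroring how HasID is computed above.
static bool parseStatepointIDSketch(StringRef S, uint64_t &ID) {
  return !S.getAsInteger(10, ID);   // getAsInteger() == true means "failed"
}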
Example No. 26
void MemoryInstrumenter::instrumentMalloc(const CallSite &CS) {
    DataLayout &TD = getAnalysis<DataLayout>();
    TargetLibraryInfo& TLI = getAnalysis<TargetLibraryInfo>();

    Function *Callee = CS.getCalledFunction();
    assert(DynAAUtils::IsMalloc(Callee));

    Instruction *Ins = CS.getInstruction();

    // Calculate where to insert.
    // <Loc> will be the next instruction executed.
    BasicBlock::iterator Loc;
    if (!Ins->isTerminator()) {
        Loc = Ins;
        ++Loc;
    } else {
        assert(isa<InvokeInst>(Ins));
        InvokeInst *II = cast<InvokeInst>(Ins);
        assert(II->getNormalDest()->getUniquePredecessor());
        Loc = II->getNormalDest()->getFirstInsertionPt();
    }

    IRBuilder<> Builder(Loc);
    Value *Start = NULL;
    Value *Size = NULL;
    Value *Success = NULL; // Indicate whether the allocation succeeded.

    StringRef CalleeName = Callee->getName();
    if (CalleeName == "malloc" || CalleeName == "valloc") {
        Start = Ins;
        Size = UndefValue::get(LongType);
        Success = Builder.CreateICmpNE(Ins, ConstantPointerNull::get(CharStarType));
    } else if (CalleeName.startswith("_Zn")) {
        Start = Ins;
        Size = CS.getArgument(0);
    } else if (CalleeName == "calloc") {
        // calloc() takes two size_t arguments, i.e. two i64 values.
        // Therefore, there is no need to worry that Mul will have operands of
        // different types. Also, Size will always be of type i64.
        Start = Ins;
        assert(CS.getArgument(0)->getType() == LongType);
        assert(CS.getArgument(1)->getType() == LongType);
        Size = BinaryOperator::Create(Instruction::Mul,
                                      CS.getArgument(0),
                                      CS.getArgument(1),
                                      "",
                                      Loc);
        Success = Builder.CreateICmpNE(Ins, ConstantPointerNull::get(CharStarType));
    } else if (CalleeName == "memalign" || CalleeName == "realloc") {
        Start = Ins;
        Size = CS.getArgument(1);
        Success = Builder.CreateICmpNE(Ins, ConstantPointerNull::get(CharStarType));
    } else if (CalleeName == "strdup" || CalleeName == "__strdup") {
        Start = Ins;
        // Use strlen to compute the length of the allocated memory.
        Value *StrLen = EmitStrLen(Ins, Builder, &TD, &TLI);
        // size = strlen(result) + 1
        Size = Builder.CreateAdd(StrLen, ConstantInt::get(LongType, 1));
        Success = Builder.CreateICmpNE(Ins, ConstantPointerNull::get(CharStarType));
    } else if (CalleeName == "getline") {
        // getline(char **lineptr, size_t *n, FILE *stream)
        // start = *lineptr
        // size = *n
        // succ = (<rv> != -1)
        Start = Builder.CreateLoad(CS.getArgument(0));
        Size = Builder.CreateLoad(CS.getArgument(1));
        Success = Builder.CreateICmpNE(Ins, ConstantInt::get(Ins->getType(), -1));
    } else {
        assert(false && "Unhandled malloc function call");
    }

    //      start = malloc(size)
    //      if (success)
    //        HookMemAlloc
    // Loc:
    instrumentMemoryAllocation(Start, Size, Success, Loc);
}
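
instrumentMalloc only makes sense for call sites that DynAAUtils::IsMalloc recognizes (see the assert at the top). A minimal, explicitly hypothetical sketch of the dispatch a visitor in the same pass might perform before calling it; visitCallSite is an assumed method name, not necessarily part of the real MemoryInstrumenter:

// Sketch only: route only malloc-like call sites to instrumentMalloc.
void MemoryInstrumenter::visitCallSite(CallSite CS) {
  Function *Callee = CS.getCalledFunction();
  if (Callee && DynAAUtils::IsMalloc(Callee))
    instrumentMalloc(CS);
}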
Example No. 27
bool FuncAddrTaken::runOnModule(Module &M) {
  bool Changed = false;
  // add declaration of function __patch_at
  // declare void @__patch_at(void)
  FunctionType *FT = FunctionType::get(Type::getVoidTy(M.getContext()),
                                       false);
  Function* PatchAt = Function::Create(FT, Function::ExternalLinkage, "__patch_at", &M);
  if (PatchAt->getName() != "__patch_at") {
    PatchAt->eraseFromParent();
    return false;
  }
  Changed = true;

  // Simple optimization so that no function address will be taken twice
  // in the same basic block.
  std::map<BasicBlock*, std::set<std::string> > UniqPatchAt;

  // before each store instruction that manipulates a function, create a call
  // to __patch_at
  for (auto F = M.getFunctionList().begin(); F != M.getFunctionList().end(); F++) {
    for (auto BB = F->begin(); BB != F->end(); BB++) {
      for (auto MI = BB->begin(); MI != BB->end(); MI++) {
        if (isa<StoreInst>(MI)) {
          // check if the store inst moves a function to a variable
          Value *V = InnerMost(cast<StoreInst>(MI)->getValueOperand());
          addPatchAt(M, FT, V, MI, UniqPatchAt);
          if (isa<ConstantVector>(V)) {
            std::set<Value*> patched;
            for (unsigned i = 0; i < cast<ConstantVector>(V)->getNumOperands(); i++) {
              Value *VV = InnerMost(cast<ConstantVector>(V)->getOperand(i));
              if (patched.find(VV) == patched.end()) {
                addPatchAt(M, FT, VV, MI, UniqPatchAt);
                patched.insert(VV);
              }
            }
          } else if (isa<ConstantStruct>(V)) {
            std::set<Value*> patched;
            for (unsigned i = 0; i < cast<ConstantStruct>(V)->getNumOperands(); i++) {
              Value *VV = InnerMost(cast<ConstantStruct>(V)->getOperand(i));
              if (patched.find(VV) == patched.end()) {
                addPatchAt(M, FT, VV, MI, UniqPatchAt);
                patched.insert(VV);
              }
            }
          } else if (isa<ConstantArray>(V)) {
            std::set<Value*> patched;
            for (unsigned i = 0; i < cast<ConstantArray>(V)->getNumOperands(); i++) {
              Value *VV = InnerMost(cast<ConstantArray>(V)->getOperand(i));
              if (patched.find(VV) == patched.end()) {
                addPatchAt(M, FT, VV, MI, UniqPatchAt);
                patched.insert(VV);
              }
            }
          }
        } else if (isa<SelectInst>(MI)) {
          Value *V = InnerMost(cast<SelectInst>(MI)->getTrueValue());
          addPatchAt(M, FT, V, MI, UniqPatchAt);
          V = InnerMost(cast<SelectInst>(MI)->getFalseValue());
          addPatchAt(M, FT, V, MI, UniqPatchAt);
        } else if (isa<CallInst>(MI)) {
          CallInst* CI = cast<CallInst>(MI);
          for (unsigned i = 0; i < CI->getNumArgOperands(); i++) {
            Value *V = InnerMost(CI->getArgOperand(i));
            addPatchAt(M, FT, V, MI, UniqPatchAt);
          }
        } else if (isa<InvokeInst>(MI)) {
          InvokeInst* CI = cast<InvokeInst>(MI);
          for (unsigned i = 0; i < CI->getNumArgOperands(); i++) {
            Value *V = InnerMost(CI->getArgOperand(i));
            addPatchAt(M, FT, V, MI, UniqPatchAt);
          }
        } else if (isa<ReturnInst>(MI)) {
          Value *V = cast<ReturnInst>(MI)->getReturnValue();
          if (V) {
            V = InnerMost(V);
            addPatchAt(M, FT, V, MI, UniqPatchAt);
          }
        } else if (isa<PHINode>(MI)) {
          for (unsigned i = 0; i < cast<PHINode>(MI)->getNumIncomingValues(); i++) {
            Value *V = InnerMost(cast<PHINode>(MI)->getIncomingValue(i));
            BasicBlock* BB = cast<PHINode>(MI)->getIncomingBlock(i);
            // right before the last (maybe terminator) instruction.
            addPatchAt(M, FT, V, &(BB->back()), UniqPatchAt);
          }
        }
      }
    }
  }

  // TODO: separate the following virtual table traversal code into another pass
  for (auto G = M.getGlobalList().begin(); G != M.getGlobalList().end(); G++) {
    if (isa<GlobalVariable>(G) &&
        cast<GlobalVariable>(G)->hasInitializer()) {
      const GlobalVariable *GV = cast<GlobalVariable>(G);
      const Constant *C = GV->getInitializer();
      if (GV->hasName() && isa<ConstantArray>(C) && GV->getName().startswith("_ZTV")) {
        std::string VTName = CXXDemangledName(GV->getName().data());
        if (VTName.size() && VTName.find("vtable") == 0) {
          //llvm::errs() << VTName << "\n";
          VTName = VTName.substr(11); // skip past "vtable for "
          llvm::NamedMDNode* MD = M.getOrInsertNamedMetadata("MCFIVtable");
          std::string info = VTName;
          for (unsigned i = 0; i < cast<ConstantArray>(C)->getNumOperands(); i++) {
            Value *V = InnerMost(cast<ConstantArray>(C)->getOperand(i));
            if (isa<Function>(V) && cast<Function>(V)->hasName()) {
              //llvm::errs() << cast<Function>(V)->getName() << "\n";
              info += std::string("#") + cast<Function>(V)->getName().str();
            }
          }
          MD->addOperand(llvm::MDNode::get(M.getContext(),
                                           llvm::MDString::get(
                                             M.getContext(), info.c_str())));
        }
      }
    }
  }
  return Changed;
}
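
addPatchAt is used throughout runOnModule but is not part of this excerpt. The following is a plausible, explicitly hypothetical sketch of its shape, assuming it only patches direct function references and uses UniqPatchAt to avoid emitting the marker twice for the same function in one basic block; the real MCFI helper may differ.

// Sketch only: guess at the helper's shape.  The __patch_at marker takes no
// arguments (FT is void()), so the call merely marks the position before MI.
static void addPatchAt(Module &M, FunctionType *FT, Value *V, Instruction *MI,
                       std::map<BasicBlock*, std::set<std::string> > &UniqPatchAt) {
  Function *F = dyn_cast<Function>(V);
  if (!F || !F->hasName())
    return;                                   // only function addresses matter
  std::set<std::string> &Seen = UniqPatchAt[MI->getParent()];
  if (!Seen.insert(F->getName().str()).second)
    return;                                   // already patched in this block
  Constant *PatchAt = M.getOrInsertFunction("__patch_at", FT);
  CallInst::Create(PatchAt, "", MI);          // insert the marker before MI
}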
Example No. 28
bool ICFGBuilder::runOnModule(Module &M) {
  MicroBasicBlockBuilder &MBBB = getAnalysis<MicroBasicBlockBuilder>();

  forallbb(M, bb) {
    for (mbb_iterator mi = MBBB.begin(bb), E = MBBB.end(bb); mi != E; ++mi)
      getOrInsertMBB(mi);
  }

  forallbb(M, bb) {
    for (mbb_iterator mi = MBBB.begin(bb), E = MBBB.end(bb); mi != E; ++mi) {
      // The ICFG will not contain any inter-thread edges; they are also
      // difficult to handle. Where would the return edges go? They would be
      // expected to lead to the pthread_join sites.
      if (mi->end() != bb->end() && !is_pthread_create(mi->end())) {
        FPCallGraph &CG = getAnalysis<FPCallGraph>();
        FuncList callees = CG.getCalledFunctions(mi->end());
        bool calls_decl = false;
        for (size_t i = 0; i < callees.size(); ++i) {
          Function *callee = callees[i];
          if (callee->isDeclaration()) {
            calls_decl = true;
          } else {
            MicroBasicBlock *entry_mbb = MBBB.begin(callee->begin());
            addEdge(mi, entry_mbb);
          }
        }
        if (calls_decl) {
          mbb_iterator next_mbb = mi; ++next_mbb;
          addEdge(mi, next_mbb);
        }
      } else {
        for (succ_iterator si = succ_begin(bb); si != succ_end(bb); ++si) {
          MicroBasicBlock *succ_mbb = MBBB.begin(*si);
          addEdge(mi, succ_mbb);
        }
        TerminatorInst *ti = bb->getTerminator();
        if (is_ret(ti)) {
          FPCallGraph &CG = getAnalysis<FPCallGraph>();
          InstList call_sites = CG.getCallSites(bb->getParent());
          for (size_t i = 0; i < call_sites.size(); ++i) {
            Instruction *call_site = call_sites[i];
            // Ignore inter-thread edges. 
            if (is_pthread_create(call_site))
              continue;
            MicroBasicBlock *next_mbb;
            if (isa<CallInst>(call_site)) {
              BasicBlock::iterator next = call_site;
              ++next;
              next_mbb = MBBB.parent(next);
            } else {
              assert(isa<InvokeInst>(call_site));
              InvokeInst *inv = dyn_cast<InvokeInst>(call_site);
              if (isa<ReturnInst>(ti)) {
                next_mbb = MBBB.begin(inv->getNormalDest());
              } else {
                next_mbb = MBBB.begin(inv->getUnwindDest());
              }
            }
            addEdge(mi, next_mbb);
          }
        }
      }
    }
  }
  return false;
}
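
is_ret and is_pthread_create are used above but defined elsewhere in the project. The following are explicitly hypothetical one-liners with the meaning the surrounding code implies: is_ret must also accept the function's unwinding terminator, since the non-ReturnInst branch above targets the invoke's unwind destination.

// Sketch only: plausible definitions, not the project's actual helpers.
static bool is_ret(const Instruction *I) {
  return isa<ReturnInst>(I) || isa<ResumeInst>(I);
}

static bool is_pthread_create(const Instruction *I) {
  ImmutableCallSite CS(I);
  if (!CS.getInstruction())
    return false;                             // not a call site at all
  const Function *Callee = CS.getCalledFunction();
  return Callee && Callee->getName() == "pthread_create";
}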