///   fence memory_order
/// becomes:
///   call void @llvm.nacl.atomic.fence(memory_order)
/// and
///   call void asm sideeffect "", "~{memory}"()
///   fence seq_cst
///   call void asm sideeffect "", "~{memory}"()
/// becomes:
///   call void asm sideeffect "", "~{memory}"()
///   call void @llvm.nacl.atomic.fence.all()
///   call void asm sideeffect "", "~{memory}"()
/// Note that the assembly gets eliminated by the -remove-asm-memory pass.
void AtomicVisitor::visitFenceInst(FenceInst &I) {
  return; // XXX EMSCRIPTEN
  Type *T = Type::getInt32Ty(C); // Fences aren't overloaded on type.
  BasicBlock::InstListType &IL(I.getParent()->getInstList());
  bool isFirst = IL.empty() || &*I.getParent()->getInstList().begin() == &I;
  bool isLast = IL.empty() || &*I.getParent()->getInstList().rbegin() == &I;
  CallInst *PrevC = isFirst ? 0 : dyn_cast<CallInst>(I.getPrevNode());
  CallInst *NextC = isLast ? 0 : dyn_cast<CallInst>(I.getNextNode());

  if ((PrevC && PrevC->isInlineAsm() &&
       cast<InlineAsm>(PrevC->getCalledValue())->isAsmMemory()) &&
      (NextC && NextC->isInlineAsm() &&
       cast<InlineAsm>(NextC->getCalledValue())->isAsmMemory()) &&
      I.getOrdering() == SequentiallyConsistent) {
    const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
        findAtomicIntrinsic(I, Intrinsic::nacl_atomic_fence_all, T);
    replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T,
                                        ArrayRef<Value *>());
  } else {
    const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
        findAtomicIntrinsic(I, Intrinsic::nacl_atomic_fence, T);
    Value *Args[] = {freezeMemoryOrder(I, I.getOrdering())};
    replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T, Args);
  }
}
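A minimal source-level producer of the plain seq_cst fence handled by the else-branch above, assuming an ordinary C++11 toolchain (not the asm-wrapped NaCl pair): std::atomic_thread_fence lowers to an LLVM fence instruction, which this visitor then rewrites to the stable NaCl intrinsic.

// Sketch (assumption: plain C++11 fence, no surrounding asm memory clobbers).
#include <atomic>

void publish(int *data, std::atomic<int> &ready) {
  *data = 42;
  // Lowers to `fence seq_cst`, rewritten to @llvm.nacl.atomic.fence(...).
  std::atomic_thread_fence(std::memory_order_seq_cst);
  ready.store(1, std::memory_order_relaxed);
}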
Example #2
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes.  This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
///
/// Returns true to indicate that the next block should be skipped.
static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  LandingPadInst *LPI = Invoke.getLandingPadInst();

  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    if (LandingPadInst *L = dyn_cast<LandingPadInst>(I)) {
      unsigned NumClauses = LPI->getNumClauses();
      L->reserveClauses(NumClauses);
      for (unsigned i = 0; i != NumClauses; ++i)
        L->addClause(LPI->getClause(i));
    }

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction.  First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());
    
    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return false;
  }

  return false;
}
Example #3
Function* PrintCgTree::getFunction(Value *Call)
{
   if(Call==NULL) return NULL;
   CallInst* CI = dyn_cast<CallInst>(Call);
   if(CI==NULL) return NULL;
   return dyn_cast<Function>(castoff(CI->getCalledValue()));
}
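The castoff helper used above is not shown in this snippet; a minimal sketch of what it is assumed to do (peel casts off the called value until the underlying Function can be recovered) might look like this:

// Hypothetical sketch of castoff: strip cast constant expressions and cast
// instructions until the underlying value is reached.
static Value *castoff(Value *V)
{
   if(V==NULL) return NULL;
   if(ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if(CE->isCast())
         return castoff(CE->getOperand(0));
   if(CastInst *CI = dyn_cast<CastInst>(V))
      return castoff(CI->getOperand(0));
   return V;
}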
Example #4
// visitCallInst - This converts all LLVM call instructions into invoke
// instructions. The except part of the invoke goes to the "LongJmpBlkPre"
// that grabs the exception and proceeds to determine if it's a longjmp
// exception or not.
void LowerSetJmp::visitCallInst(CallInst& CI)
{
  if (CI.getCalledFunction())
    if (!IsTransformableFunction(CI.getCalledFunction()->getName()) ||
        CI.getCalledFunction()->isIntrinsic()) return;

  BasicBlock* OldBB = CI.getParent();

  // If not reachable from a setjmp call, don't transform.
  if (!DFSBlocks.count(OldBB)) return;

  BasicBlock* NewBB = OldBB->splitBasicBlock(CI);
  assert(NewBB && "Couldn't split BB of \"call\" instruction!!");
  DFSBlocks.insert(NewBB);
  NewBB->setName("Call2Invoke");

  Function* Func = OldBB->getParent();

  // Construct the new "invoke" instruction.
  TerminatorInst* Term = OldBB->getTerminator();
  std::vector<Value*> Params(CI.op_begin() + 1, CI.op_end());
  InvokeInst* II =
    InvokeInst::Create(CI.getCalledValue(), NewBB, PrelimBBMap[Func],
                       Params.begin(), Params.end(), CI.getName(), Term);
  II->setCallingConv(CI.getCallingConv());
  II->setParamAttrs(CI.getParamAttrs());

  // Replace the old call inst with the invoke inst and remove the call.
  CI.replaceAllUsesWith(II);
  CI.getParent()->getInstList().erase(&CI);

  // The old terminator is useless now that we have the invoke inst.
  Term->getParent()->getInstList().erase(Term);
  ++CallsTransformed;
}
Example #5
// FIXME: This should just be implemented as a patch to
// X86TargetAsmInfo.cpp, then everyone will benefit.
bool RaiseAsmPass::runOnInstruction(Module &M, Instruction *I) {
  // We can just raise inline assembler using calls
  CallInst *ci = dyn_cast<CallInst>(I);
  if (!ci)
    return false;

  InlineAsm *ia = dyn_cast<InlineAsm>(ci->getCalledValue());
  if (!ia)
    return false;

  // Try to use existing infrastructure
  if (!TLI)
    return false;

  if (TLI->ExpandInlineAsm(ci))
    return true;

  if (triple.getArch() == llvm::Triple::x86_64 &&
      triple.getOS() == llvm::Triple::Linux) {

    if (ia->getAsmString() == "" && ia->hasSideEffects()) {
#if LLVM_VERSION_CODE >= LLVM_VERSION(3, 3)
      IRBuilder<> Builder(I);
      Builder.CreateFence(llvm::SequentiallyConsistent);
#endif
      I->eraseFromParent();
      return true;
    }
  }

  return false;
}
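The empty, side-effecting inline assembly matched by this branch is the classic compiler barrier; a minimal reproducer, assuming GCC/Clang-style extended asm, is shown below. The pass replaces the barrier with an explicit sequentially consistent fence so the asm itself never has to be interpreted.

// Source-level pattern that produces: call void asm sideeffect "", "~{memory}"()
void barrier_example(int *p) {
  *p = 1;
  __asm__ __volatile__("" ::: "memory");  // empty asm string, memory clobber
  *p = 2;
}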
/**
 * removeUndefCalls -- remove calls with undef function
 *
 * These are irrelevant to the code, so may be removed completely.
 */
void FunctionStaticSlicer::removeUndefCalls(ModulePass *MP, Function &F) {
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E;) {
    CallInst *CI = dyn_cast<CallInst>(&*I);
    ++I;
    if (CI && isa<UndefValue>(CI->getCalledValue())) {
      CI->replaceAllUsesWith(UndefValue::get(CI->getType()));
      CI->eraseFromParent();
    }
  }
}
Example #7
Value *
ReAllocatorInfo::getAllocedPointer (Value * AllocSite) const {
  CallInst * CI = dyn_cast<CallInst>(AllocSite);

  Function * F  = dyn_cast<Function>(CI->getCalledValue()->stripPointerCasts());
  if (!F || F->getName() != allocCallName) 
    return NULL;

  CallSite CS(CI);
  return CS.getArgument(allocPtrOperand-1);
}
Example #8
Value *
SimpleAllocatorInfo::getFreedPointer(Value * FreeSite) const {
  CallInst * CI = dyn_cast<CallInst>(FreeSite);

  Function * F  = dyn_cast<Function>(CI->getCalledValue()->stripPointerCasts());
  if (!F || F->getName() != freeCallName) 
    return NULL;

  CallSite CS(CI);
  return CS.getArgument(freePtrOperand-1);
}
Example #9
bool LLVMReachingDefsAnalysis::handleUndefinedCall(LLVMNode *callNode,
                                                   DefMap *df)
{
    CallInst *CI = cast<CallInst>(callNode->getKey());
    Function *func
        = dyn_cast<Function>(CI->getCalledValue()->stripPointerCasts());

    if (func && func->isIntrinsic())
        return handleIntrinsicCall(callNode, CI, df);

    return handleUndefinedCall(callNode, CI, df);
}
void AsmDirectivesVisitor::visitCallInst(CallInst &CI) {
  if (!CI.isInlineAsm() ||
      !cast<InlineAsm>(CI.getCalledValue())->isAsmMemory())
    return;

  // In NaCl ``asm("":::"memory")`` always comes in pairs, straddling a
  // sequentially consistent fence. Other passes rewrite this fence to
  // an equivalent stable NaCl intrinsic, meaning that this assembly can
  // be removed.
  CI.eraseFromParent();
  ModifiedFunction = true;
}
Example #11
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes.  This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
///
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   BasicBlock *InvokeDest,
                           const SmallVectorImpl<Value*> &InvokeDestPHIValues) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;
    
    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;
    
    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow())
      continue;
    
    // Convert this function call into an invoke instruction.
    // First, split the basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");
    
    // Next, create the new invoke instruction, inserting it at the end
    // of the old basic block.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                         InvokeArgs.begin(), InvokeArgs.end(),
                         CI->getName(), BB->getTerminator());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());
    
    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);
    
    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();
    Split->getInstList().pop_front();  // Delete the original call
    
    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
    unsigned i = 0;
    for (BasicBlock::iterator I = InvokeDest->begin();
         isa<PHINode>(I); ++I, ++i)
      cast<PHINode>(I)->addIncoming(InvokeDestPHIValues[i], BB);
    
    // This basic block is now complete, the caller will continue scanning the
    // next one.
    return;
  }
}
Example #12
void llvm::computeUsesVAFloatArgument(const CallInst &I,
                                      MachineModuleInfo &MMI) {
  FunctionType *FT =
      cast<FunctionType>(I.getCalledValue()->getType()->getContainedType(0));
  if (FT->isVarArg() && !MMI.usesVAFloatArgument()) {
    for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
      Type *T = I.getArgOperand(i)->getType();
      for (auto i : post_order(T)) {
        if (i->isFloatingPointTy()) {
          MMI.setUsesVAFloatArgument(true);
          return;
        }
      }
    }
  }
}
Example #13
Value *
StringAllocatorInfo::getOrCreateAllocSize (Value * AllocSite) const {
  //
  // See if this is a call to the allocator.  If not, return NULL.
  //
  CallInst * CI = dyn_cast<CallInst>(AllocSite);
  if (!CI)
    return NULL;

  Function * F  = dyn_cast<Function>(CI->getCalledValue()->stripPointerCasts());
  if (!F || F->getName() != allocCallName) 
    return NULL;

  //
  // See if this call has an argument.  If not, ignore it.  We do this because
  // autoconf configure scripts will create calls to string functions with zero
  // arguments just to see if the function exists.
  //
  CallSite CS(CI);
  if (CS.arg_size() == 0)
    return NULL;

  //
  // Insert a call to strlen() to determine the length of the string that was
  // allocated.  Use a version of strlen() in the SAFECode library that can
  // handle NULL pointers.
  //
  Module * M = CI->getParent()->getParent()->getParent();
  Function * Strlen = M->getFunction ("nullstrlen");
  assert (Strlen && "No nullstrlen function in the module");
  BasicBlock::iterator InsertPt = CI;
  ++InsertPt;
  Instruction * Length =
    CallInst::Create (Strlen, CS.getInstruction(), "", InsertPt);

  //
  // The size of the allocation is the string length plus one.
  //
  IntegerType * LengthType = dyn_cast<IntegerType>(Length->getType());
  assert (LengthType && "nullstrlen doesn't return an integer?");
  Value * One = ConstantInt::get (LengthType, 1);
  Instruction * Size = BinaryOperator::Create (Instruction::Add, Length, One);
  Size->insertAfter (Length);
  return Size;
}
Example #14
/// ComputeUsesVAFloatArgument - Determine if any floating-point values are
/// being passed to this variadic function, and set the MachineModuleInfo's
/// usesVAFloatArgument flag if so. This flag is used to emit an undefined
/// reference to _fltused on Windows, which will link in MSVCRT's
/// floating-point support.
void llvm::ComputeUsesVAFloatArgument(const CallInst &I,
                                      MachineModuleInfo *MMI)
{
    FunctionType *FT = cast<FunctionType>(
                           I.getCalledValue()->getType()->getContainedType(0));
    if (FT->isVarArg() && !MMI->usesVAFloatArgument()) {
        for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
            Type* T = I.getArgOperand(i)->getType();
            for (po_iterator<Type*> i = po_begin(T), e = po_end(T);
                    i != e; ++i) {
                if (i->isFloatingPointTy()) {
                    MMI->setUsesVAFloatArgument(true);
                    return;
                }
            }
        }
    }
}
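A concrete trigger for the flag described above is any variadic call that receives a floating-point argument; compiled for Windows, the sketch below is the kind of code that forces the undefined reference to _fltused:

#include <cstdio>

int main()
{
    // A double flows into the variadic part of printf, so the walk over the
    // argument types finds a floating-point type and sets usesVAFloatArgument.
    std::printf("%f\n", 3.14);
    return 0;
}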
Example #15
void LLVMDefUseAnalysis::handleCallInst(LLVMNode *node)
{
    CallInst *CI = cast<CallInst>(node->getKey());

    if (CI->isInlineAsm()) {
        handleInlineAsm(node);
        return;
    }

    Function *func
        = dyn_cast<Function>(CI->getCalledValue()->stripPointerCasts());
    if (func) {
        if (func->isIntrinsic() && !isa<DbgInfoIntrinsic>(CI)) {
            handleIntrinsicCall(node, CI);
            return;
        }

        // for realloc, we need to make it data dependent on the
        // memory it reallocates, since that is the memory it copies
        if (func->size() == 0) {
            using analysis::AllocationFunction;
            auto type = _options.getAllocationFunction(func->getName());

            if (type == AllocationFunction::REALLOC) {
                addDataDependence(node, CI, CI->getOperand(0), Offset::UNKNOWN /* FIXME */);
            } else if (type == AllocationFunction::NONE) {
                handleUndefinedCall(node, CI);
            }// else {
             // we do not want to do anything for the memory
             // allocation functions
             // }

            // the function is undefined, so do not even try to
            // add the edges from return statements
            return;
        }
    }

    // add edges from the return nodes of subprocedure
    // to the call (if the call returns something)
    for (LLVMDependenceGraph *subgraph : node->getSubgraphs())
        addReturnEdge(node, subgraph);
}
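The special case for realloc above reflects the fact that the returned block may be a copy of the old one, so reads through the result depend on earlier stores through the argument; a small illustration:

#include <cstdlib>

int main()
{
    int *p = static_cast<int *>(std::malloc(4 * sizeof(int)));
    if (!p) return 1;
    p[0] = 7;                                  // store through the old block
    int *q = static_cast<int *>(std::realloc(p, 8 * sizeof(int)));
    if (!q) { std::free(p); return 1; }
    int v = q[0];                              // reads the (possibly copied) 7
    std::free(q);
    return v == 7 ? 0 : 1;
}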
Example #16
Value *
ArrayAllocatorInfo::getOrCreateAllocSize(Value * AllocSite) const {
  //
  // See if this is a call to the allocator.  If not, return NULL.
  //
  CallInst * CI = dyn_cast<CallInst>(AllocSite);
  if (!CI)
    return NULL;

  Function * F  = dyn_cast<Function>(CI->getCalledValue()->stripPointerCasts());
  if (!F || F->getName() != allocCallName) 
    return NULL;

  //
  // Insert a multiplication instruction to compute the size of the array
  // allocation.
  //
  CallSite CS(CI);
  return BinaryOperator::Create (BinaryOperator::Mul,
                                 CS.getArgument(allocSizeOperand - 1),
                                 CS.getArgument(allocNumOperand - 1),
                                 "size",
                                 CI);
}
Example #17
//
// Function: makeCStdLibCallsComplete()
//
// Description:
//  Fills in completeness information for all calls of a given CStdLib function
//  assumed to be of the form:
//
//   pool_X(POOL *p1, ..., POOL *pN, void *a1, ..., void *aN, ..., uint8_t c);
//
//  Specifically, this function assumes that there are as many pointer arguments
//  to check as there are initial pool arguments, and the pointer arguments
//  follow the pool arguments in corresponding order. Also, it is assumed that
//  the final argument to the function is a byte sized bit vector.
//
//  This function fills in this final byte with a constant value whose ith
//  bit is set exactly when the ith pointer argument is complete.
//
// Inputs:
//
//  F            - A pointer to the CStdLib function appearing in the module
//                 (non-null).
//  PoolArgs     - The number of initial pool arguments for which a
//                 corresponding pointer value requires a completeness check
//                 (required to be at most 8).
//
void
CompleteChecks::makeCStdLibCallsComplete(Function *F, unsigned PoolArgs) {
  assert(F != 0 && "Null function argument!");

  assert(PoolArgs <= 8 && \
    "Only up to 8 arguments are supported by CStdLib completeness checks!");

  Value::use_iterator U = F->use_begin();
  Value::use_iterator E = F->use_end();

  //
  // Hold the call instructions that need changing.
  //
  typedef std::pair<CallInst *, uint8_t> VectorReplacement;
  std::set<VectorReplacement> callsToChange;

  Type *int8ty = Type::getInt8Ty(F->getContext());
  FunctionType *F_type = F->getFunctionType();

  //
  // Verify the type of the function is as expected.
  //
  // There should be as many pointer parameters to check for completeness
  // as there are pool parameters. The last parameter should be a byte.
  //
  assert(F_type->getNumParams() >= PoolArgs * 2 && \
    "Not enough arguments to transformed CStdLib function call!");
  for (unsigned arg = PoolArgs; arg < PoolArgs * 2; ++arg)
    assert(isa<PointerType>(F_type->getParamType(arg)) && \
      "Expected pointer argument to function!");

  //
  // This is the position of the vector operand in the call.
  //
  unsigned vect_position = F_type->getNumParams();

  assert(F_type->getParamType(vect_position - 1) == int8ty && \
    "Last parameter to the function should be a byte!");

  //
  // Iterate over all calls of the function in the module, computing the
  // vectors for each call as it is found.
  //
  for (; U != E; ++U) {
    CallInst *CI;
    if ((CI = dyn_cast<CallInst>(*U)) && \
      CI->getCalledValue()->stripPointerCasts() == F) {

      uint8_t vector = 0x0;

      //
      // Get the parent function to which this instruction belongs.
      //
      Function *P = CI->getParent()->getParent();

      //
      // Iterate over the pointer arguments that need completeness checking
      // and build the completeness vector.
      //
      for (unsigned arg = 0; arg < PoolArgs; ++arg) {
        bool complete = true;
        //
        // Go past all the pool arguments to get the pointer to check.
        //
        Value *V = CI->getOperand(1 + PoolArgs + arg);

        //
        // Check for completeness of the pointer using DSA and 
        // set the bit in the vector accordingly.
        //
        DSNode *N;
        if ((N = getDSNodeHandle(V, P).getNode()) &&
              (N->isExternalNode() || N->isIncompleteNode() ||
               N->isUnknownNode()  || N->isIntToPtrNode()   ||
               N->isPtrToIntNode()) ) {
            complete = false;
        }

        if (complete)
          vector |= (1 << arg);
      }

      //
      // Add the instruction and vector to the set of instructions to change.
      //
      callsToChange.insert(VectorReplacement(CI, vector));
    }
  }


  //
  // Iterate over all call instructions that need changing, modifying the
  // final operand of the call to hold the bit vector value.
  //
  std::set<VectorReplacement>::iterator change = callsToChange.begin();
  std::set<VectorReplacement>::iterator change_end = callsToChange.end();

  while (change != change_end) {
    Constant *vect_value = ConstantInt::get(int8ty, change->second);
    change->first->setOperand(vect_position, vect_value);
    ++change;
  }

  return;

}
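As a hedged illustration of the byte written into the final operand: for a transformed CStdLib call with two pool/pointer pairs (for example a pool_strcpy-style call with destination and source pointers), bit 0 corresponds to the first checked pointer and bit 1 to the second, built exactly as in the loop above.

// Illustration only: completeness byte for a hypothetical 2-pointer call.
#include <cstdint>

uint8_t buildVector(bool firstComplete, bool secondComplete) {
  uint8_t vector = 0x0;
  if (firstComplete)  vector |= (1 << 0);  // first pointer judged complete by DSA
  if (secondComplete) vector |= (1 << 1);  // second pointer judged complete by DSA
  return vector;                           // e.g. 0x1 when only the first is complete
}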
bool LowerEmSetjmp::runOnModule(Module &M) {
  TheModule = &M;

  Function *Setjmp = TheModule->getFunction("setjmp");
  Function *Longjmp = TheModule->getFunction("longjmp");
  if (!Setjmp && !Longjmp) return false;

  Type *i32 = Type::getInt32Ty(M.getContext());
  Type *Void = Type::getVoidTy(M.getContext());

  // Add functions

  Function *EmSetjmp = NULL;

  if (Setjmp) {
    SmallVector<Type*, 2> EmSetjmpTypes;
    EmSetjmpTypes.push_back(Setjmp->getFunctionType()->getParamType(0));
    EmSetjmpTypes.push_back(i32); // extra param that says which setjmp in the function it is
    FunctionType *EmSetjmpFunc = FunctionType::get(i32, EmSetjmpTypes, false);
    EmSetjmp = Function::Create(EmSetjmpFunc, GlobalValue::ExternalLinkage, "emscripten_setjmp", TheModule);
  }

  Function *EmLongjmp = Longjmp ? Function::Create(Longjmp->getFunctionType(), GlobalValue::ExternalLinkage, "emscripten_longjmp", TheModule) : NULL;

  SmallVector<Type*, 1> IntArgTypes;
  IntArgTypes.push_back(i32);
  FunctionType *IntIntFunc = FunctionType::get(i32, IntArgTypes, false);

  Function *CheckLongjmp = Function::Create(IntIntFunc, GlobalValue::ExternalLinkage, "emscripten_check_longjmp", TheModule); // gets control flow

  Function *GetLongjmpResult = Function::Create(IntIntFunc, GlobalValue::ExternalLinkage, "emscripten_get_longjmp_result", TheModule); // gets int value longjmp'd

  FunctionType *VoidFunc = FunctionType::get(Void, false);
  Function *PrepSetjmp = Function::Create(VoidFunc, GlobalValue::ExternalLinkage, "emscripten_prep_setjmp", TheModule);

  Function *CleanupSetjmp = Function::Create(VoidFunc, GlobalValue::ExternalLinkage, "emscripten_cleanup_setjmp", TheModule);

  Function *PreInvoke = TheModule->getFunction("emscripten_preinvoke");
  if (!PreInvoke) PreInvoke = Function::Create(VoidFunc, GlobalValue::ExternalLinkage, "emscripten_preinvoke", TheModule);

  FunctionType *IntFunc = FunctionType::get(i32, false);
  Function *PostInvoke = TheModule->getFunction("emscripten_postinvoke");
  if (!PostInvoke) PostInvoke = Function::Create(IntFunc, GlobalValue::ExternalLinkage, "emscripten_postinvoke", TheModule);

  // Process all callers of setjmp and longjmp. Start with setjmp.

  typedef std::vector<PHINode*> Phis;
  typedef std::map<Function*, Phis> FunctionPhisMap;
  FunctionPhisMap SetjmpOutputPhis;
  std::vector<Instruction*> ToErase;

  if (Setjmp) {
    for (Instruction::user_iterator UI = Setjmp->user_begin(), UE = Setjmp->user_end(); UI != UE; ++UI) {
      User *U = *UI;
      if (CallInst *CI = dyn_cast<CallInst>(U)) {
        BasicBlock *SJBB = CI->getParent();
        // The tail is everything right after the call, and will be reached once when setjmp is
        // called, and later when longjmp returns to the setjmp
        BasicBlock *Tail = SplitBlock(SJBB, CI->getNextNode());
        // Add a phi to the tail, which will be the output of setjmp, which indicates if this is the
        // first call or a longjmp back. The phi directly uses the right value based on where we
        // arrive from
        PHINode *SetjmpOutput = PHINode::Create(i32, 2, "", Tail->getFirstNonPHI());
        SetjmpOutput->addIncoming(ConstantInt::get(i32, 0), SJBB); // setjmp initial call returns 0
        CI->replaceAllUsesWith(SetjmpOutput); // The proper output is now this, not the setjmp call itself
        // longjmps that return to this setjmp will add themselves to this phi
        Phis& P = SetjmpOutputPhis[SJBB->getParent()];
        P.push_back(SetjmpOutput);
        // fix call target
        SmallVector<Value *, 2> Args;
        Args.push_back(CI->getArgOperand(0));
        Args.push_back(ConstantInt::get(i32, P.size())); // our index in the function is our place in the array + 1
        CallInst::Create(EmSetjmp, Args, "", CI);
        ToErase.push_back(CI);
      } else {
        errs() << **UI << "\n";
        report_fatal_error("bad use of setjmp, should only call it");
      }
    }
  }

  // Update longjmp FIXME: we could avoid throwing in longjmp as an optimization when longjmping back into the current function perhaps?

  if (Longjmp) Longjmp->replaceAllUsesWith(EmLongjmp);

  // Update all setjmping functions

  for (FunctionPhisMap::iterator I = SetjmpOutputPhis.begin(); I != SetjmpOutputPhis.end(); I++) {
    Function *F = I->first;
    Phis& P = I->second;

    CallInst::Create(PrepSetjmp, "", F->begin()->begin());

    // Update each call that can longjmp so it can return to a setjmp where relevant

    for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ) {
      BasicBlock *BB = BBI++;
      for (BasicBlock::iterator Iter = BB->begin(), E = BB->end(); Iter != E; ) {
        Instruction *I = Iter++;
        CallInst *CI;
        if ((CI = dyn_cast<CallInst>(I))) {
          Value *V = CI->getCalledValue();
          if (V == PrepSetjmp || V == EmSetjmp || V == CheckLongjmp || V == GetLongjmpResult || V == PreInvoke || V == PostInvoke) continue;
          if (Function *CF = dyn_cast<Function>(V)) if (CF->isIntrinsic()) continue;
          // TODO: proper analysis of what can actually longjmp. Currently we assume anything but setjmp can.
          // This may longjmp, so we need to check if it did. Split at that point, and
          // envelop the call in pre/post invoke, if we need to
          CallInst *After;
          Instruction *Check = NULL;
          if (Iter != E && (After = dyn_cast<CallInst>(Iter)) && After->getCalledValue() == PostInvoke) {
            // use the pre|postinvoke that exceptions lowering already made
            Check = Iter++;
          }
          BasicBlock *Tail = SplitBlock(BB, Iter); // Iter already points to the next instruction, as we need
          TerminatorInst *TI = BB->getTerminator();
          if (!Check) {
            // no existing pre|postinvoke, create our own
            CallInst::Create(PreInvoke, "", CI);
            Check = CallInst::Create(PostInvoke, "", TI); // CI is at the end of the block

            // If we are calling a function that is noreturn, we must remove that attribute. The code we
            // insert here does expect it to return, after we catch the exception.
            if (CI->doesNotReturn()) {
              if (Function *F = dyn_cast<Function>(CI->getCalledValue())) {
                F->removeFnAttr(Attribute::NoReturn);
              }
              CI->setAttributes(CI->getAttributes().removeAttribute(TheModule->getContext(), AttributeSet::FunctionIndex, Attribute::NoReturn));
              assert(!CI->doesNotReturn());
            }
          }

          // We need to replace the terminator in Tail - SplitBlock makes BB go straight to Tail, we need to check if a longjmp occurred, and
          // go to the right setjmp-tail if so
          SmallVector<Value *, 1> Args;
          Args.push_back(Check);
          Instruction *LongjmpCheck = CallInst::Create(CheckLongjmp, Args, "", BB);
          Instruction *LongjmpResult = CallInst::Create(GetLongjmpResult, Args, "", BB);
          SwitchInst *SI = SwitchInst::Create(LongjmpCheck, Tail, 2, BB);
          // -1 means no longjmp happened, continue normally (will hit the default switch case). 0 means a longjmp
          // that is not ours to handle and needs a rethrow. Otherwise the value is the index in P plus 1 (to avoid 0).
          for (unsigned i = 0; i < P.size(); i++) {
            SI->addCase(cast<ConstantInt>(ConstantInt::get(i32, i+1)), P[i]->getParent());
            P[i]->addIncoming(LongjmpResult, BB);
          }
          ToErase.push_back(TI); // new terminator is now the switch

          // We are splitting the block here and must continue to find other calls in the block, which is now split,
          // so continue traversing in the Tail.
          BB = Tail;
          Iter = BB->begin();
          E = BB->end();
        } else if (InvokeInst *CI = dyn_cast<InvokeInst>(I)) { // XXX check if target is setjmp
          (void)CI;
          report_fatal_error("TODO: invoke inside setjmping functions");
        }
      }
    }

    // add a cleanup before each return
    for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ) {
      BasicBlock *BB = BBI++;
      TerminatorInst *TI = BB->getTerminator();
      if (isa<ReturnInst>(TI)) {
        CallInst::Create(CleanupSetjmp, "", TI);
      }
    }
  }

  for (unsigned i = 0; i < ToErase.size(); i++) {
    ToErase[i]->eraseFromParent();
  }

  // Finally, our modifications to the cfg can break dominance of SSA variables. For example,
  //   if (x()) { .. setjmp() .. }
  //   if (y()) { .. longjmp() .. }
  // We must split the longjmp block, and it can jump into the setjmp one. But that means that when
  // we split the setjmp block, its first part no longer dominates its second part - there is
  // a theoretically possible control flow path where x() is false, then y() is true and we
  // reach the second part of the setjmp block, without ever reaching the first part. So,
  // we recalculate regs vs. mem
  for (FunctionPhisMap::iterator I = SetjmpOutputPhis.begin(); I != SetjmpOutputPhis.end(); I++) {
    Function *F = I->first;
    doRegToMem(*F);
    doMemToReg(*F);
  }

  return true;
}
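For reference, a hedged sketch of the kind of input this pass rewrites: the setjmp call below is replaced with a call to emscripten_setjmp, longjmp users are redirected to emscripten_longjmp, and the call to may_longjmp is wrapped in pre/post-invoke plus the check-longjmp dispatch switch built above.

#include <csetjmp>
#include <cstdio>

static jmp_buf buf;

static void may_longjmp(int v) {
  if (v > 0)
    std::longjmp(buf, v);      // rewritten to call emscripten_longjmp
}

int main() {
  int r = setjmp(buf);         // rewritten to call emscripten_setjmp
  if (r == 0)
    may_longjmp(42);           // followed by the longjmp-check switch
  else
    std::printf("returned via longjmp: %d\n", r);
  return 0;
}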
Example #19
/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  std::vector<Value*> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to
  // keep track of which values came into them from this invoke, then remove
  // the entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  if (InlinedCodeInfo.ContainsCalls || InlinedCodeInfo.ContainsUnwinds) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      if (InlinedCodeInfo.ContainsCalls) {
        for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ){
          Instruction *I = BBI++;

          // We only need to check for function calls: inlined invoke
          // instructions require no special handling.
          if (!isa<CallInst>(I)) continue;
          CallInst *CI = cast<CallInst>(I);

          // If this call cannot unwind, don't convert it to an invoke.
          if (CI->doesNotThrow())
            continue;

          // Convert this function call into an invoke instruction.
          // First, split the basic block.
          BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

          // Next, create the new invoke instruction, inserting it at the end
          // of the old basic block.
          SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
          InvokeInst *II =
            InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                               InvokeArgs.begin(), InvokeArgs.end(),
                               CI->getName(), BB->getTerminator());
          II->setCallingConv(CI->getCallingConv());
          II->setAttributes(CI->getAttributes());

          // Make sure that anything using the call now uses the invoke!
          CI->replaceAllUsesWith(II);

          // Delete the unconditional branch inserted by splitBasicBlock
          BB->getInstList().pop_back();
          Split->getInstList().pop_front();  // Delete the original call

          // Update any PHI nodes in the exceptional block to indicate that
          // there is now a new entry in them.
          unsigned i = 0;
          for (BasicBlock::iterator I = InvokeDest->begin();
               isa<PHINode>(I); ++I, ++i) {
            PHINode *PN = cast<PHINode>(I);
            PN->addIncoming(InvokeDestPHIValues[i], BB);
          }

          // This basic block is now complete, start scanning the next one.
          break;
        }
      }

      if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
        // An UnwindInst requires special handling when it gets inlined into an
        // invoke site.  Once this happens, we know that the unwind would cause
        // a control transfer to the invoke exception destination, so we can
        // transform it into a direct branch to the exception destination.
        BranchInst::Create(InvokeDest, UI);

        // Delete the unwind instruction!
        UI->eraseFromParent();

        // Update any PHI nodes in the exceptional block to indicate that
        // there is now a new entry in them.
        unsigned i = 0;
        for (BasicBlock::iterator I = InvokeDest->begin();
             isa<PHINode>(I); ++I, ++i) {
          PHINode *PN = cast<PHINode>(I);
          PN->addIncoming(InvokeDestPHIValues[i], BB);
        }
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}
Example #20
void AAAnalyzer::handle_inst(Instruction *inst, FunctionWrapper * parent_func) {
    //outs()<<*inst<<"\n"; outs().flush();
    switch (inst->getOpcode()) {
            // common/bitwise binary operations
            // Terminator instructions
        case Instruction::Ret:
        {
            ReturnInst* retInst = ((ReturnInst*) inst);
            if (retInst->getNumOperands() > 0 && !retInst->getOperandUse(0)->getType()->isVoidTy()) {
                parent_func->addRet(retInst->getOperandUse(0));
            }
        }
            break;
        case Instruction::Resume:
        {
            Value* resume = ((ResumeInst*) inst)->getOperand(0);
            parent_func->addResume(resume);
        }
            break;
        case Instruction::Switch:
        case Instruction::Br:
        case Instruction::IndirectBr:
        case Instruction::Unreachable:
            break;

            // vector operations
        case Instruction::ExtractElement:
        {
        }
            break;
        case Instruction::InsertElement:
        {
        }
            break;
        case Instruction::ShuffleVector:
        {
        }
            break;

            // aggregate operations
        case Instruction::ExtractValue:
        {
            Value * agg = ((ExtractValueInst*) inst)->getAggregateOperand();
            DyckVertex* aggV = wrapValue(agg);

            Type* aggTy = agg->getType();

            ArrayRef<unsigned> indices = ((ExtractValueInst*) inst)->getIndices();
            DyckVertex* currentStruct = aggV;

            for (unsigned int i = 0; i < indices.size(); i++) {
                if (isa<CompositeType>(aggTy) && aggTy->isSized()) {
                    if (!aggTy->isStructTy()) {
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);
#ifndef ARRAY_SIMPLIFIED
                        current = addPtrOffset(current, (int) indices[i] * dl.getTypeAllocSize(aggTy), dgraph);
#endif
                        if (i == indices.size() - 1) {
                            this->makeAlias(currentStruct, wrapValue(inst));
                        }
                    } else {
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);

                        if (i != indices.size() - 1) {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], NULL);
                        } else {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], wrapValue(inst));
                        }
                    }
                } else {
                    break;
                }
            }
        }
            break;
        case Instruction::InsertValue:
        {
            DyckVertex* resultV = wrapValue(inst);
            Value * agg = ((InsertValueInst*) inst)->getAggregateOperand();
            if (!isa<UndefValue>(agg)) {
                makeAlias(resultV, wrapValue(agg));
            }

            Value * val = ((InsertValueInst*) inst)->getInsertedValueOperand();
            DyckVertex* insertedVal = wrapValue(val);

            Type *aggTy = inst->getType();

            ArrayRef<unsigned> indices = ((InsertValueInst*) inst)->getIndices();

            DyckVertex* currentStruct = resultV;

            for (unsigned int i = 0; i < indices.size(); i++) {
                if (isa<CompositeType>(aggTy) && aggTy->isSized()) {
                    if (!aggTy->isStructTy()) {
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);
#ifndef ARRAY_SIMPLIFIED
                        current = addPtrOffset(current, (int) indices[i] * dl.getTypeAllocSize(aggTy), dgraph);
#endif
                        if (i == indices.size() - 1) {
                            this->makeAlias(currentStruct, insertedVal);
                        }
                    } else {
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);

                        if (i != indices.size() - 1) {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], NULL);
                        } else {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], insertedVal);
                        }
                    }
                } else {
                    break;
                }
            }
        }
            break;

            // memory accessing and addressing operations
        case Instruction::Alloca:
        {
        }
            break;
        case Instruction::Fence:
        {
        }
            break;
        case Instruction::AtomicCmpXchg:
        {
            Value * retXchg = inst;
            Value * ptrXchg = inst->getOperand(0);
            Value * newXchg = inst->getOperand(2);
            addPtrTo(wrapValue(ptrXchg), wrapValue(retXchg));
            addPtrTo(wrapValue(ptrXchg), wrapValue(newXchg));
        }
            break;
        case Instruction::AtomicRMW:
        {
            Value * retRmw = inst;
            Value * ptrRmw = ((AtomicRMWInst*) inst)->getPointerOperand();
            addPtrTo(wrapValue(ptrRmw), wrapValue(retRmw));

            switch (((AtomicRMWInst*) inst)->getOperation()) {
                case AtomicRMWInst::Max:
                case AtomicRMWInst::Min:
                case AtomicRMWInst::UMax:
                case AtomicRMWInst::UMin:
                case AtomicRMWInst::Xchg:
                {
                    Value * newRmw = ((AtomicRMWInst*) inst)->getValOperand();
                    addPtrTo(wrapValue(ptrRmw), wrapValue(newRmw));
                }
                    break;
                default:
                    //others are binary ops like add/sub/...
                    ///@TODO
                    break;
            }
        }
            break;
        case Instruction::Load:
        {
            Value *lval = inst;
            Value *ladd = inst->getOperand(0);
            addPtrTo(wrapValue(ladd), wrapValue(lval));
        }
            break;
        case Instruction::Store:
        {
            Value * sval = inst->getOperand(0);
            Value * sadd = inst->getOperand(1);
            addPtrTo(wrapValue(sadd), wrapValue(sval));
        }
            break;
        case Instruction::GetElementPtr:
        {
            makeAlias(wrapValue(inst), handle_gep((GEPOperator*) inst));
        }
            break;

            // conversion operations
        case Instruction::Trunc:
        case Instruction::ZExt:
        case Instruction::SExt:
        case Instruction::FPTrunc:
        case Instruction::FPExt:
        case Instruction::FPToUI:
        case Instruction::FPToSI:
        case Instruction::UIToFP:
        case Instruction::SIToFP:
        case Instruction::BitCast:
        case Instruction::PtrToInt:
        case Instruction::IntToPtr:
        {
            Value * itpv = inst->getOperand(0);
            makeAlias(wrapValue(inst), wrapValue(itpv));
        }
            break;

            // other operations
        case Instruction::Invoke: // invoke is a terminal operation
        {
            InvokeInst * invoke = (InvokeInst*) inst;
            LandingPadInst* lpd = invoke->getLandingPadInst();
            parent_func->addLandingPad(invoke, lpd);

            Value * cv = invoke->getCalledValue();
            vector<Value*> args;
            for (unsigned i = 0; i < invoke->getNumArgOperands(); i++) {
                args.push_back(invoke->getArgOperand(i));
            }

            this->handle_invoke_call_inst(invoke, cv, &args, parent_func);
        }
            break;
        case Instruction::Call:
        {
            CallInst * callinst = (CallInst*) inst;

            if (callinst->isInlineAsm()) {
                break;
            }

            Value * cv = callinst->getCalledValue();
            vector<Value*> args;
            for (unsigned i = 0; i < callinst->getNumArgOperands(); i++) {
                args.push_back(callinst->getArgOperand(i));
            }

            this->handle_invoke_call_inst(callinst, cv, &args, parent_func);
        }
            break;
        case Instruction::PHI:
        {
            PHINode *phi = (PHINode *) inst;
            int nums = phi->getNumIncomingValues();
            for (int i = 0; i < nums; i++) {
                Value * p = phi->getIncomingValue(i);
                makeAlias(wrapValue(inst), wrapValue(p));
            }
        }
            break;
        case Instruction::Select:
        {
            Value *first = ((SelectInst*) inst)->getTrueValue();
            Value *second = ((SelectInst*) inst)->getFalseValue();
            makeAlias(wrapValue(inst), wrapValue(first));
            makeAlias(wrapValue(inst), wrapValue(second));
        }
            break;
        case Instruction::VAArg:
        {
            parent_func->addVAArg(inst);

            DyckVertex* vaarg = wrapValue(inst);
            Value * ptrVaarg = inst->getOperand(0);
            addPtrTo(wrapValue(ptrVaarg), vaarg);
        }
            break;
        case Instruction::LandingPad: // handled with invoke inst
        case Instruction::ICmp:
        case Instruction::FCmp:
        default:
            break;
    }
}
Example #21
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes.  This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
///
/// Returns true to indicate that the next block should be skipped.
static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;
    
    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;

    // LIBUNWIND: merge selector instructions.
    if (EHSelectorInst *Inner = dyn_cast<EHSelectorInst>(CI)) {
      EHSelectorInst *Outer = Invoke.getOuterSelector();
      if (!Outer) continue;

      bool innerIsOnlyCleanup = isCleanupOnlySelector(Inner);
      bool outerIsOnlyCleanup = isCleanupOnlySelector(Outer);

      // If both selectors contain only cleanups, we don't need to do
      // anything.  TODO: this is really just a very specific instance
      // of a much more general optimization.
      if (innerIsOnlyCleanup && outerIsOnlyCleanup) continue;

      // Otherwise, we just append the outer selector to the inner selector.
      SmallVector<Value*, 16> NewSelector;
      for (unsigned i = 0, e = Inner->getNumArgOperands(); i != e; ++i)
        NewSelector.push_back(Inner->getArgOperand(i));
      for (unsigned i = 2, e = Outer->getNumArgOperands(); i != e; ++i)
        NewSelector.push_back(Outer->getArgOperand(i));

      CallInst *NewInner =
        IRBuilder<>(Inner).CreateCall(Inner->getCalledValue(), NewSelector);
      // No need to copy attributes, calling convention, etc.
      NewInner->takeName(Inner);
      Inner->replaceAllUsesWith(NewInner);
      Inner->eraseFromParent();
      continue;
    }
    
    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow())
      continue;
    
    // Convert this function call into an invoke instruction.
    // First, split the basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();

    // LIBUNWIND: If this is a call to @llvm.eh.resume, just branch
    // directly to the new landing pad.
    if (Invoke.forwardEHResume(CI, BB)) {
      // TODO: 'Split' is now unreachable; clean it up.

      // We want to leave the original call intact so that the call
      // graph and other structures won't get misled.  We also have to
      // avoid processing the next block, or we'll iterate here forever.
      return true;
    }

    // Otherwise, create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split,
                         Invoke.getOuterUnwindDest(),
                         InvokeArgs, CI->getName(), BB);
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());
    
    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    Split->getInstList().pop_front();  // Delete the original call

    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return false;
  }

  return false;
}
Example #22
bool LoaderPass::runOnModule(Module &M) {
  ProfileInfoLoader PIL("profile-loader", Filename);

  EdgeInformation.clear();
  std::vector<uint64_t> Counters64 = PIL.getRawEdgeCounts();
  if (Counters64.size() > 0) {
    ReadCount = 0;
    std::vector<uint64_t>& Counters = Counters64;
    for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
      if (F->isDeclaration()) continue;
      DEBUG(dbgs() << "Working on " << F->getName() << "\n");
      readEdge(getEdge(0,&F->getEntryBlock()), Counters);
      for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
        TerminatorInst *TI = BB->getTerminator();
        for (unsigned s = 0, e = TI->getNumSuccessors(); s != e; ++s) {
          readEdge(getEdge(BB,TI->getSuccessor(s)), Counters);
        }
      }
    }
    if (ReadCount != Counters.size()) {
      errs() << "WARNING: profile information is inconsistent with "
             << "the current program!\n";
    }
    NumEdgesRead = ReadCount;
  }

#if 0
  std::vector<unsigned> Counters = PIL.getRawOptimalEdgeCounts();
  if (Counters.size() > 0) {
    ReadCount = 0;
    for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
      if (F->isDeclaration()) continue;
      DEBUG(dbgs() << "Working on " << F->getName() << "\n");
      readEdge(getEdge(0,&F->getEntryBlock()), Counters);
      for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
        TerminatorInst *TI = BB->getTerminator();
        if (TI->getNumSuccessors() == 0) {
          readEdge(getEdge(BB,0), Counters);
        }
        for (unsigned s = 0, e = TI->getNumSuccessors(); s != e; ++s) {
          readEdge(getEdge(BB,TI->getSuccessor(s)), Counters);
        }
      }
      while (SpanningTree.size() > 0) {

        unsigned size = SpanningTree.size();

        BBisUnvisited.clear();
        for (std::set<Edge>::iterator ei = SpanningTree.begin(),
             ee = SpanningTree.end(); ei != ee; ++ei) {
          BBisUnvisited.insert(ei->first);
          BBisUnvisited.insert(ei->second);
        }
        while (BBisUnvisited.size() > 0) {
          recurseBasicBlock(*BBisUnvisited.begin());
        }

        if (SpanningTree.size() == size) {
          DEBUG(dbgs()<<"{");
          for (std::set<Edge>::iterator ei = SpanningTree.begin(),
               ee = SpanningTree.end(); ei != ee; ++ei) {
            DEBUG(dbgs()<< *ei <<",");
          }
          assert(0 && "No edge calculated!");
        }

      }
    }
    if (ReadCount != Counters.size()) {
      errs() << "WARNING: profile information is inconsistent with "
             << "the current program!\n";
    }
    NumEdgesRead = ReadCount;
  }
#endif

  BlockInformation.clear();
  Counters64 = PIL.getRawBlockCounts();
  if (Counters64.size() > 0) {
    std::vector<uint64_t>& Counters = Counters64;
    ReadCount = 0;
    for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
      if (F->isDeclaration()) continue;
      for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
        if (ReadCount < Counters.size())
          // Here the data realm changes from the unsigned of the file to the
          // double of the ProfileInfo. This conversion is safe because we know
          // that everything that's representable in unsigned is also
          // representable in double.
          BlockInformation[F][BB] = (double)Counters[ReadCount++];
    }
    if (ReadCount != Counters.size()) {
      errs() << "WARNING: profile information is inconsistent with "
             << "the current program!\n";
    }
  }

  FunctionInformation.clear();
  std::vector<unsigned> Counters = PIL.getRawFunctionCounts();
  if (Counters.size() > 0) {
    ReadCount = 0;
    for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
      if (F->isDeclaration()) continue;
      if (ReadCount < Counters.size())
        // Here the data realm changes from the unsigned of the file to the
        // double of the ProfileInfo. This conversion is safe because we know
        // that everything that's representable in unsigned is also
        // representable in double.
        FunctionInformation[F] = (double)Counters[ReadCount++];
    }
    if (ReadCount != Counters.size()) {
      errs() << "WARNING: profile information is inconsistent with "
             << "the current program!\n";
    }
  }

  ValueInformation.clear();
  Counters = PIL.getRawValueCounts();
  if (Counters.size() > 0) {
    ReadCount = 0;
    for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
      if (F->isDeclaration()) continue;
      for (inst_iterator I = inst_begin(F), IE = inst_end(F); I != IE; ++I) {
        CallInst *Call = dyn_cast<CallInst>(&*I);
        if (!Call) continue;
        if (Call->getCalledValue()->getName() != "llvm_profiling_trap_value") continue;
        unsigned index = getTrapedIndex(Call);
        ValueCounts Ins;
        Ins.Nums = Counters[index];
        const std::vector<int>& content = PIL.getRawValueContent(index);
        Ins.flags = (ProfilingFlags)content.front();
        Ins.Contents.resize(content.size() - 1);
        copy(content.begin() + 1, content.end(), Ins.Contents.begin());
        // should NOT insert two values into one cell.
        ValueInformation[Call] = Ins;
      }
    }
  }

  SLGInformation.clear();
  Counters = PIL.getRawSLGCounts();
  if(Counters.size() > 0) {
     unsigned MaxStore = std::accumulate(Counters.begin(), Counters.end(), 0, sig_max);
     std::vector<const Instruction*> Cache(MaxStore+1);
     ReadCount = 0; 
     unsigned load_idx = 0, store_idx = 1;
     for(Module::iterator F = M.begin(), E = M.end(); F!=E; ++F){
        for(inst_iterator I = inst_begin(F), IE = inst_end(F); I!=IE; ++I){
           if(!isa<StoreInst>(&*I) && !isa<LoadInst>(&*I)) continue;
           if(lle::access_global_variable(&*I)){
              unsigned index = 0;
              Instruction* SLI = &*I;
              if(isa<StoreInst>(&*I)){
                 index = store_idx++;
              }else if(LoadInst* LI = dyn_cast<LoadInst>(&*I)){
                 SLGInformation[LI] = std::make_pair(load_idx, (Instruction*)NULL);
                 index = Counters[load_idx++];
                 if(index == 0 || index == ~0U/*unsigned -1*/){ 
                    continue;
                 }
              }
              if(index >= Cache.size()) continue;
              if(Cache[index]){
                 const Instruction* SLJ = Cache[index];
                 if(isa<StoreInst>(SLJ) && isa<LoadInst>(SLI)) SLGInformation[SLI].second = SLJ;
                 else if(isa<StoreInst>(SLI) && isa<LoadInst>(SLJ)) SLGInformation[SLJ].second = SLI;
                 else
                    assert(0 && "It shouldn't happen");
              }else
                 Cache[index] = &*I;
           }
        }
     }
  }

  MPInformation.clear();
  Counters = PIL.getRawMPICounts();
  if(Counters.size() > 0) {
     ReadCount = 0;
     for(auto F = M.begin(), E = M.end(); F!=E; ++F){
        for(auto I = inst_begin(F), IE = inst_end(F); I!=IE; ++I){
           CallInst* CI = dyn_cast<CallInst>(&*I);
           if(CI == NULL) continue;
           if(lle::get_mpi_count_idx(CI)){
              MPInformation[CI] = std::make_pair(ReadCount, Counters[ReadCount]);
              ++ReadCount;
           }
        }
     }
  }

  MPIFullInformation.clear();
  Counters = PIL.getRawMPIFullCounts();
  if(Counters.size() > 0) {
     ReadCount = 0;
     for(auto F = M.begin(), E = M.end(); F!=E; ++F){
        for(auto I = inst_begin(F), IE = inst_end(F); I!=IE; ++I){
           CallInst* CI = dyn_cast<CallInst>(&*I);
           if(CI == NULL) continue;
           if(lle::get_mpi_count_idx(CI)){
              MPIFullInformation[CI] = std::make_pair(ReadCount, Counters[ReadCount]);
              ++ReadCount;
           }
        }
     }
  }

  return false;
}
Example #23
//
// Method: runOnModule()
//
// Description:
//  Entry point for this LLVM pass.
//  Search for all call sites to casted functions.
//  Check if they only differ in an argument type
//  Cast the argument, and call the original function
//
// Inputs:
//  M - A reference to the LLVM module to transform
//
// Outputs:
//  M - The transformed LLVM module.
//
// Return value:
//  true  - The module was modified.
//  false - The module was not modified.
//
bool ArgCast::runOnModule(Module& M) {

  std::vector<CallInst*> worklist;
  for (Module::iterator I = M.begin(); I != M.end(); ++I) {
    if (I->mayBeOverridden())
      continue;
    // Find all uses of this function
    for(Value::user_iterator ui = I->user_begin(), ue = I->user_end(); ui != ue; ) {
      // check if is ever casted to a different function type
      ConstantExpr *CE = dyn_cast<ConstantExpr>(*ui++);
      if(!CE)
        continue;
      if (CE->getOpcode() != Instruction::BitCast)
        continue;
      if(CE->getOperand(0) != I)
        continue;
      const PointerType *PTy = dyn_cast<PointerType>(CE->getType());
      if (!PTy)
        continue;
      const Type *ETy = PTy->getElementType();
      const FunctionType *FTy  = dyn_cast<FunctionType>(ETy); 
      if(!FTy)
        continue;
      // The callee is cast either to a varargs function or to a function
      // with the same number of arguments but possibly different argument
      // types.
      
      if(FTy->getNumParams() != I->arg_size() && !FTy->isVarArg())
        continue;
      for(Value::user_iterator uii = CE->user_begin(),
          uee = CE->user_end(); uii != uee; ++uii) {
        // Find all uses of the casted value, and check if it is 
        // used in a Call Instruction
        if (CallInst* CI = dyn_cast<CallInst>(*uii)) {
          // Check that it is the called value, and not an argument
          if(CI->getCalledValue() != CE) 
            continue;
          // Check that the number of arguments passed, and expected
          // by the function are the same.
          if(!I->isVarArg()) {
            if(CI->getNumOperands() != I->arg_size() + 1)
              continue;
          } else {
            if(CI->getNumOperands() < I->arg_size() + 1)
              continue;
          }
          // If so, add to worklist
          worklist.push_back(CI);
        }
      }
    }
  }

  // Process the worklist of potential call sites to transform
  while(!worklist.empty()) {
    CallInst *CI = worklist.back();
    worklist.pop_back();
    // Get the called Function
    Function *F = cast<Function>(CI->getCalledValue()->stripPointerCasts());
    const FunctionType *FTy = F->getFunctionType();

    SmallVector<Value*, 8> Args;
    unsigned i = 0;
    for(i = 0; i < FTy->getNumParams(); ++i) {
      Type *ArgType = CI->getOperand(i+1)->getType();
      Type *FormalType = FTy->getParamType(i);
      // If the types for this argument match, just add it to the
      // parameter list. No cast needs to be inserted.
      if(ArgType == FormalType) {
        Args.push_back(CI->getOperand(i+1));
      }
      else if(ArgType->isPointerTy() && FormalType->isPointerTy()) {
        CastInst *CastI = CastInst::CreatePointerCast(CI->getOperand(i+1), 
                                                      FormalType, "", CI);
        Args.push_back(CastI);
      } else if (ArgType->isIntegerTy() && FormalType->isIntegerTy()) {
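        // Integer widths differ: truncate when the actual argument is wider
        // than the formal parameter, otherwise extend it, honoring the
        // callee's SExt/ZExt parameter attributes.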
        unsigned SrcBits = ArgType->getScalarSizeInBits();
        unsigned DstBits = FormalType->getScalarSizeInBits();
        if(SrcBits > DstBits) {
          CastInst *CastI = CastInst::CreateIntegerCast(CI->getOperand(i+1), 
                                                        FormalType, true, "", CI);
          Args.push_back(CastI);
        } else {
          if (F->getAttributes().hasAttribute(i+1, Attribute::SExt)) {
            CastInst *CastI = CastInst::CreateIntegerCast(CI->getOperand(i+1), 
                                                          FormalType, true, "", CI);
            Args.push_back(CastI);
          } else if (F->getAttributes().hasAttribute(i+1, Attribute::ZExt)) {
            CastInst *CastI = CastInst::CreateIntegerCast(CI->getOperand(i+1), 
                                                          FormalType, false, "", CI);
            Args.push_back(CastI);
          } else {
            // Default to ZExt (following InstCombine). The only reason this
            // should happen is a mismatched prototype, e.g. an integer
            // constant that was typed as i32 even though it is used as i64.
            // TODO: is this correct?
            CastInst *CastI = CastInst::CreateIntegerCast(CI->getOperand(i+1), 
                                                          FormalType, false, "", CI);
            Args.push_back(CastI);
          } 
        } 
      } else {
        DEBUG(ArgType->dump());
        DEBUG(FormalType->dump());
        break;
      }
    }

    // If we found an argument we could not cast, try the next instruction
    if(i != FTy->getNumParams()) {
      continue;
    }

    if(FTy->isVarArg()) {
      for(; i < CI->getNumOperands() - 1; i++) {
        Args.push_back(CI->getOperand(i+1));
      }
    }

    // Otherwise, build the replacement call instruction.
    CallInst *CINew = CallInst::Create(F, Args, "", CI);
    CINew->setCallingConv(CI->getCallingConv());
    CINew->setAttributes(CI->getAttributes());
    if(!CI->use_empty()) {
      CastInst *RetCast;
      if(CI->getType() != CINew->getType()) {
        if(CI->getType()->isPointerTy() && CINew->getType()->isPointerTy())
          RetCast = CastInst::CreatePointerCast(CINew, CI->getType(), "", CI);
        else if(CI->getType()->isIntOrIntVectorTy() && CINew->getType()->isIntOrIntVectorTy())
          RetCast = CastInst::CreateIntegerCast(CINew, CI->getType(), false, "", CI);
        else if(CI->getType()->isIntOrIntVectorTy() && CINew->getType()->isPointerTy())
          RetCast = CastInst::CreatePointerCast(CINew, CI->getType(), "", CI);
        else if(CI->getType()->isPointerTy() && CINew->getType()->isIntOrIntVectorTy()) 
          RetCast = new IntToPtrInst(CINew, CI->getType(), "", CI);
        else {
          // TODO: I'm not sure what the right behavior is here, but this case should be handled.
          llvm_unreachable("Unexpected type conversion in call!");
          abort();
        }
        CI->replaceAllUsesWith(RetCast);
      } else {
        CI->replaceAllUsesWith(CINew);
      }
    }

    // Debug printing
    DEBUG(errs() << "ARGCAST:");
    DEBUG(errs() << "ERASE:");
    DEBUG(CI->dump());
    DEBUG(errs() << "ARGCAST:");
    DEBUG(errs() << "ADDED:");
    DEBUG(CINew->dump());

    CI->eraseFromParent();
    numChanged++;
  }
  return true;
}
Exemplo n.º 24
LLVMValueRef LLVMGetCalledFunction(LLVMValueRef I)
{
  CallInst *CI = cast<CallInst>(unwrap(I));
  return wrap(CI->getCalledValue());
}
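/* A minimal usage sketch from the C side (assuming this binding is exported
 * next to the standard LLVM-C headers and that the instruction really is a
 * call):
 *
 *   LLVMValueRef Callee = LLVMGetCalledFunction(Inst);
 *   char *Text = LLVMPrintValueToString(Callee);
 *   ...
 *   LLVMDisposeMessage(Text);
 */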
Exemplo n.º 25
void MutationGen::genSTDCall(Instruction * inst, StringRef fname, int index){

	CallInst *call = cast<CallInst>(inst);

	Function *fun = call->getCalledFunction();

	// getCalledFunction() returns NULL for indirect calls; skip those as
	// well as calls into LLVM intrinsics.
	if(fun == NULL || fun->getName().startswith("llvm")){
		return;
	}
	
	Type *t = call->getCalledValue()->getType();

	FunctionType* ft = cast<FunctionType>(cast<PointerType>(t)->getElementType());

	Type *tt = ft->getReturnType();
	//tt->dump();
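	// Each record written to ofresult has the form
	//   STD:<fname>:<index>:<opcode>:<bits>:<value>  for integer returns, or
	//   STD:<fname>:<index>:<opcode>:0                for void returns.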
	
	if(tt->isIntegerTy(32)){
		//1. if the func returns an int32 val, mutate it to 0, 1 or -1 (a random value is commented out)
		//errs()<<"IT IS A 32 !!\n";
		std::stringstream ss;
		ss<<"STD:"<<std::string(fname)<<":"<<index<< ":"<<inst->getOpcode()
			<< ":"<<32<<":0\n";

		muts_num++;
		
		ss<<"STD:"<<std::string(fname)<<":"<<index<< ":"<<inst->getOpcode()
			<< ":"<<32<<":1\n";		

		muts_num++;
		
		//srand((int)time(0));
		//int random = rand();
		ss<<"STD:"<<std::string(fname)<<":"<<index<< ":"<<inst->getOpcode()
			<< ":"<<32<<":"<<-1<<"\n";

		muts_num++;
		
		ofresult<<ss.str();
		ofresult.flush();
	}else if(tt->isVoidTy()){
		//2. if the func returns void, substitute @llvm.donothing for the func
		//errs()<<"IT IS A VOID !!\n";
		std::stringstream ss;
		ss<<"STD:"<<std::string(fname)<<":"<<index<< ":"<<inst->getOpcode()
			<< ":"<<0<<'\n';
		ofresult<<ss.str();
		ofresult.flush();
		muts_num++;
	}else if(tt->isIntegerTy(64)){
		//3. if the func returns an int64 val, mutate it to 0, 1 or -1 (a random value is commented out)
		//errs()<<"IT IS A 64 !!\n";
		std::stringstream ss;
		ss<<"STD:"<<std::string(fname)<<":"<<index<< ":"<<inst->getOpcode()
			<< ":"<<64<<":0\n";

		muts_num++;

		ss<<"STD:"<<std::string(fname)<<":"<<index<< ":"<<inst->getOpcode()
			<< ":"<<64<<":1\n";

		muts_num++;
		
		//srand((int)time(0));
		//int random = rand();
		ss<<"STD:"<<std::string(fname)<<":"<<index<< ":"<<inst->getOpcode()
			<< ":"<<64<<":"<<-1<<"\n";

		muts_num++;
		
		ofresult<<ss.str();	
		ofresult.flush();
	}
	
}
Exemplo n.º 26
void TracingNoGiri::visitCallInst(CallInst &CI) {
  // Attempt to get the called function.
  Function *CalledFunc = CI.getCalledFunction();
  if (!CalledFunc)
    return;

  // Do not instrument calls to tracing run-time functions or debug functions.
  if (isTracerFunction(CalledFunc))
    return;

  if (CalledFunc->getName().startswith("llvm.dbg."))
    return;

  // Instrument external calls which can have invariants on their return values
  if (CalledFunc->isDeclaration() && CalledFunc->isIntrinsic()) {
     // Instrument special external calls which loads/stores
     // e.g. strlen(), strcpy(), memcpy() etc.
     visitSpecialCall(CI);
     return;
  }

  // If the called value is inline assembly code, then don't instrument it.
  if (isa<InlineAsm>(CI.getCalledValue()->stripPointerCasts()))
    return;

  instrumentLock(&CI);
  // Get the ID of the call instruction.
  Value *CallID = ConstantInt::get(Int32Type, lsNumPass->getID(&CI));
  // Get the called function value and cast it to a void pointer.
  Value *FP = castTo(CI.getCalledValue(), VoidPtrType, "", &CI);
  // Create the call to the run-time to record the call instruction.
  std::vector<Value *> args = make_vector<Value *>(CallID, FP, 0);
  // Do not add calls to the function call stack for external functions,
  // since return records won't be used/needed for them; call a special
  // record function instead.
  // FIXME: Do we still need this after adding separate return records?
  Instruction *RC;
  if (CalledFunc->isDeclaration())
    RC = CallInst::Create(RecordExtCall, args, "", &CI);
  else
    RC = CallInst::Create(RecordCall, args, "", &CI);
  instrumentUnlock(RC);

  // Create the call to the run-time to record the return of call instruction.
  CallInst *CallInst = CallInst::Create(RecordReturn, args, "", &CI);
  CI.moveBefore(CallInst);
  instrumentLock(CallInst);
  instrumentUnlock(CallInst);

  ++NumCalls; // Update statistics

  // The best way to handle external calls is to set a flag before calling an
  // external function and use it to determine whether an internal function is
  // being called from external code. The flag can be reset afterwards and
  // restored to its original value before returning to the external code.
  // FIXME!!!! LATER
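  // A rough sketch of that idea (hypothetical helper names, nothing here is
  // implemented in this pass):
  //   recordEnterExternal(CallID);  // set the "inside external code" flag
  //   ... the external call runs ...
  //   recordLeaveExternal(CallID);  // restore the flag's previous value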

#if 0
  if (CalledFunc->isDeclaration() &&
      CalledFunc->getName().str() == "pthread_create") {
    // If pthread_create is called then handle it specially as it calls
    // functions externally and add an extra call for the externally
    // called functions with the same id so that returns can match with it.
    // In addition to a function call to pthread_create.
    // Get the external function pointer operand and cast it to a void pointer
    Value *FP = castTo(CI.getOperand(2), VoidPtrType, "", &CI);
    // Create the call to the run-time to record the call instruction.
    std::vector<Value *> argsExt = make_vector<Value *>(CallID, FP, 0);
    CallInst = CallInst::Create(RecordCall, argsExt, "", &CI);
    CI.moveBefore(CallInst);

    // Update statistics
    ++Calls;

    // For, both external functions and internal/ext functions called from
    // external functions, return records are not useful as they won't be used.
    // Since, we won't create return records for them, simply update the call
    // stack to mark the end of function call.

    //args = make_vector<Value *>(CallID, FP, 0);
    //CallInst::Create(RecordExtCallRet, args.begin(), args.end(), "", &CI);

    // Create the call to the run-time to record the return of call instruction.
    CallInst::Create(RecordReturn, argsExt, "", &CI);
  }
#endif

  // Instrument special external calls which loads/stores
  // like strlen, strcpy, memcpy etc.
  visitSpecialCall(CI);
}