Example #1
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach this pass to propagate the !nontemporal metadata to
  // memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
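      // A clobber dependency that is a call (and not a memcpy, which the
      // memcpy-specific path handles) may be the very call that produced the
      // loaded bytes, making this a call-slot forwarding candidate.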
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
             I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        unsigned storeAlign = SI->getAlignment();
        if (!storeAlign)
          storeAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
        unsigned loadAlign = LI->getAlignment();
        if (!loadAlign)
          loadAlign = DL.getABITypeAlignment(LI->getType());

        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            std::min(storeAlign, loadAlign), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is representable as a repeating byte
  // pattern that memset can produce: "0" or "-1" of any width, as well as
  // things like 0xA0A0A0A0 and 0.0.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

  return false;
}
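A hedged sketch of the pattern the call-slot branch above rewrites; the IR below is illustrative and not taken from the pass or its tests:

  call void @produce(%struct.S* %tmp)    ; the clobbering call found by MemDep
  %v = load %struct.S, %struct.S* %tmp   ; LI: simple, sole use, same block
  store %struct.S %v, %struct.S* %dst    ; SI: simple, not !nontemporal

If nothing modifies %dst between the call and the store (the backwards AA scan above), performCallSlotOptzn may redirect the call to write into %dst directly, after which the load and store are erased.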
bool LowerEmSetjmp::runOnModule(Module &M) {
  TheModule = &M;

  Function *Setjmp = TheModule->getFunction("setjmp");
  Function *Longjmp = TheModule->getFunction("longjmp");
  if (!Setjmp && !Longjmp) return false;

  Type *i32 = Type::getInt32Ty(M.getContext());
  Type *Void = Type::getVoidTy(M.getContext());

  // Add functions

  Function *EmSetjmp = NULL;

  if (Setjmp) {
    SmallVector<Type*, 2> EmSetjmpTypes;
    EmSetjmpTypes.push_back(Setjmp->getFunctionType()->getParamType(0));
    EmSetjmpTypes.push_back(i32); // extra param that says which setjmp in the function it is
    FunctionType *EmSetjmpFunc = FunctionType::get(i32, EmSetjmpTypes, false);
    EmSetjmp = Function::Create(EmSetjmpFunc, GlobalValue::ExternalLinkage, "emscripten_setjmp", TheModule);
  }

  Function *EmLongjmp = Longjmp ? Function::Create(Longjmp->getFunctionType(), GlobalValue::ExternalLinkage, "emscripten_longjmp", TheModule) : NULL;

  SmallVector<Type*, 1> IntArgTypes;
  IntArgTypes.push_back(i32);
  FunctionType *IntIntFunc = FunctionType::get(i32, IntArgTypes, false);

  Function *CheckLongjmp = Function::Create(IntIntFunc, GlobalValue::ExternalLinkage, "emscripten_check_longjmp", TheModule); // gets control flow

  Function *GetLongjmpResult = Function::Create(IntIntFunc, GlobalValue::ExternalLinkage, "emscripten_get_longjmp_result", TheModule); // gets int value longjmp'd

  FunctionType *VoidFunc = FunctionType::get(Void, false);
  Function *PrepSetjmp = Function::Create(VoidFunc, GlobalValue::ExternalLinkage, "emscripten_prep_setjmp", TheModule);

  Function *CleanupSetjmp = Function::Create(VoidFunc, GlobalValue::ExternalLinkage, "emscripten_cleanup_setjmp", TheModule);

  Function *PreInvoke = TheModule->getFunction("emscripten_preinvoke");
  if (!PreInvoke) PreInvoke = Function::Create(VoidFunc, GlobalValue::ExternalLinkage, "emscripten_preinvoke", TheModule);

  FunctionType *IntFunc = FunctionType::get(i32, false);
  Function *PostInvoke = TheModule->getFunction("emscripten_postinvoke");
  if (!PostInvoke) PostInvoke = Function::Create(IntFunc, GlobalValue::ExternalLinkage, "emscripten_postinvoke", TheModule);

  // Process all callers of setjmp and longjmp. Start with setjmp.

  typedef std::vector<PHINode*> Phis;
  typedef std::map<Function*, Phis> FunctionPhisMap;
  FunctionPhisMap SetjmpOutputPhis;
  std::vector<Instruction*> ToErase;

  if (Setjmp) {
    for (Instruction::user_iterator UI = Setjmp->user_begin(), UE = Setjmp->user_end(); UI != UE; ++UI) {
      User *U = *UI;
      if (CallInst *CI = dyn_cast<CallInst>(U)) {
        BasicBlock *SJBB = CI->getParent();
        // The tail is everything right after the call; it is reached once when setjmp is
        // called, and again whenever a longjmp returns to this setjmp
        BasicBlock *Tail = SplitBlock(SJBB, CI->getNextNode());
        // Add a phi to the tail, which will be the output of setjmp, which indicates if this is the
        // first call or a longjmp back. The phi directly uses the right value based on where we
        // arrive from
        PHINode *SetjmpOutput = PHINode::Create(i32, 2, "", Tail->getFirstNonPHI());
        SetjmpOutput->addIncoming(ConstantInt::get(i32, 0), SJBB); // setjmp initial call returns 0
        CI->replaceAllUsesWith(SetjmpOutput); // The proper output is now this, not the setjmp call itself
        // longjmps that return to this setjmp will add incoming values to this phi
        Phis& P = SetjmpOutputPhis[SJBB->getParent()];
        P.push_back(SetjmpOutput);
        // fix call target
        SmallVector<Value *, 2> Args;
        Args.push_back(CI->getArgOperand(0));
        Args.push_back(ConstantInt::get(i32, P.size())); // the index passed is this setjmp's position in P plus 1 (0 is reserved for the initial return)
        CallInst::Create(EmSetjmp, Args, "", CI);
        ToErase.push_back(CI);
      } else {
        errs() << **UI << "\n";
        report_fatal_error("bad use of setjmp, should only call it");
      }
    }
  }

  // Update longjmp. FIXME: as an optimization, we could perhaps avoid throwing when longjmping back into the current function.

  if (Longjmp) Longjmp->replaceAllUsesWith(EmLongjmp);

  // Update all setjmping functions

  for (FunctionPhisMap::iterator I = SetjmpOutputPhis.begin(); I != SetjmpOutputPhis.end(); I++) {
    Function *F = I->first;
    Phis& P = I->second;

    CallInst::Create(PrepSetjmp, "", F->begin()->begin());

    // Update each call that can longjmp so it can return to a setjmp where relevant

    for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ) {
      BasicBlock *BB = BBI++;
      for (BasicBlock::iterator Iter = BB->begin(), E = BB->end(); Iter != E; ) {
        Instruction *I = Iter++;
        CallInst *CI;
        if ((CI = dyn_cast<CallInst>(I))) {
          Value *V = CI->getCalledValue();
          if (V == PrepSetjmp || V == EmSetjmp || V == CheckLongjmp || V == GetLongjmpResult || V == PreInvoke || V == PostInvoke) continue;
          if (Function *CF = dyn_cast<Function>(V)) if (CF->isIntrinsic()) continue;
          // TODO: proper analysis of what can actually longjmp. Currently we assume anything but setjmp can.
          // This may longjmp, so we need to check if it did. Split at that point, and
          // envelop the call in pre/post invoke, if we need to
          CallInst *After;
          Instruction *Check = NULL;
          if (Iter != E && (After = dyn_cast<CallInst>(Iter)) && After->getCalledValue() == PostInvoke) {
            // use the pre/postinvoke pair that exception lowering already made
            Check = Iter++;
          }
          BasicBlock *Tail = SplitBlock(BB, Iter); // Iter already points to the next instruction, as we need
          TerminatorInst *TI = BB->getTerminator();
          if (!Check) {
            // no existing pre|postinvoke, create our own
            CallInst::Create(PreInvoke, "", CI);
            Check = CallInst::Create(PostInvoke, "", TI); // inserted just before the terminator, at the end of the block

            // If we are calling a function that is noreturn, we must remove that attribute. The code we
            // insert here does expect it to return, after we catch the exception.
            if (CI->doesNotReturn()) {
              if (Function *F = dyn_cast<Function>(CI->getCalledValue())) {
                F->removeFnAttr(Attribute::NoReturn);
              }
              CI->setAttributes(CI->getAttributes().removeAttribute(TheModule->getContext(), AttributeSet::FunctionIndex, Attribute::NoReturn));
              assert(!CI->doesNotReturn());
            }
          }

          // SplitBlock made BB branch straight to Tail; we must replace that terminator
          // with a check for whether a longjmp occurred, going to the right setjmp-tail if so
          SmallVector<Value *, 1> Args;
          Args.push_back(Check);
          Instruction *LongjmpCheck = CallInst::Create(CheckLongjmp, Args, "", BB);
          Instruction *LongjmpResult = CallInst::Create(GetLongjmpResult, Args, "", BB);
          SwitchInst *SI = SwitchInst::Create(LongjmpCheck, Tail, 2, BB);
          // -1 means no longjmp happened, continue normally (will hit the default switch case). 0 means a longjmp
          // that is not ours to handle, needing a rethrow. Any other value is the index into P plus 1 (shifted to avoid 0).
          for (unsigned i = 0; i < P.size(); i++) {
            SI->addCase(cast<ConstantInt>(ConstantInt::get(i32, i+1)), P[i]->getParent());
            P[i]->addIncoming(LongjmpResult, BB);
          }
          ToErase.push_back(TI); // new terminator is now the switch

          // We just split the block, so there may be more calls to visit in its second
          // half; continue traversing in Tail
          BB = Tail;
          Iter = BB->begin();
          E = BB->end();
        } else if (InvokeInst *CI = dyn_cast<InvokeInst>(I)) { // XXX check if target is setjmp
          (void)CI;
          report_fatal_error("TODO: invoke inside setjmping functions");
        }
      }
    }

    // add a cleanup before each return
    for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ) {
      BasicBlock *BB = BBI++;
      TerminatorInst *TI = BB->getTerminator();
      if (isa<ReturnInst>(TI)) {
        CallInst::Create(CleanupSetjmp, "", TI);
      }
    }
  }

  for (unsigned i = 0; i < ToErase.size(); i++) {
    ToErase[i]->eraseFromParent();
  }

  // Finally, our modifications to the CFG can break dominance of SSA variables. For example,
  //   if (x()) { .. setjmp() .. }
  //   if (y()) { .. longjmp() .. }
  // We must split the longjmp block, and it can jump into the setjmp one. But that means that when
  // we split the setjmp block, its first part no longer dominates its second part - there is
  // a theoretically possible control flow path where x() is false, then y() is true and we
  // reach the second part of the setjmp block without ever reaching the first part. So
  // we demote registers to memory and re-promote them, rebuilding valid SSA form
  for (FunctionPhisMap::iterator I = SetjmpOutputPhis.begin(); I != SetjmpOutputPhis.end(); I++) {
    Function *F = I->first;
    doRegToMem(*F);
    doMemToReg(*F);
  }

  return true;
}
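A rough sketch, assumed rather than quoted from Emscripten, of the rewrite this pass performs on a setjmp caller (the function names match those created above; the label is hypothetical):

  // before
  int r = setjmp(buf);
  ...
  foo();                                   // may longjmp

  // after (conceptually)
  emscripten_prep_setjmp();
  emscripten_setjmp(buf, 1);               // registers setjmp site #1
  int r = 0;                               // phi: the initial call returns 0
  ...
  emscripten_preinvoke();
  foo();
  int threw = emscripten_postinvoke();
  switch (emscripten_check_longjmp(threw)) {
  case 1: r = emscripten_get_longjmp_result(threw); goto setjmp_tail_1;
  default: break;                          // no longjmp, fall through
  }

with emscripten_cleanup_setjmp() inserted before every return.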
Example #3
void AAAnalyzer::handle_inst(Instruction *inst, FunctionWrapper * parent_func) {
    //outs()<<*inst<<"\n"; outs().flush();
    switch (inst->getOpcode()) {
            // common/bitwise binary operations
            // Terminator instructions
        case Instruction::Ret:
        {
            ReturnInst* retInst = ((ReturnInst*) inst);
            if (retInst->getNumOperands() > 0 && !retInst->getOperandUse(0)->getType()->isVoidTy()) {
                parent_func->addRet(retInst->getOperandUse(0));
            }
        }
            break;
        case Instruction::Resume:
        {
            Value* resume = ((ResumeInst*) inst)->getOperand(0);
            parent_func->addResume(resume);
        }
            break;
        case Instruction::Switch:
        case Instruction::Br:
        case Instruction::IndirectBr:
        case Instruction::Unreachable:
            break;

            // vector operations
        case Instruction::ExtractElement:
        {
        }
            break;
        case Instruction::InsertElement:
        {
        }
            break;
        case Instruction::ShuffleVector:
        {
        }
            break;

            // aggregate operations
        case Instruction::ExtractValue:
        {
            Value * agg = ((ExtractValueInst*) inst)->getAggregateOperand();
            DyckVertex* aggV = wrapValue(agg);

            Type* aggTy = agg->getType();

            ArrayRef<unsigned> indices = ((ExtractValueInst*) inst)->getIndices();
            DyckVertex* currentStruct = aggV;

            for (unsigned int i = 0; i < indices.size(); i++) {
                if (isa<CompositeType>(aggTy) && aggTy->isSized()) {
                    if (!aggTy->isStructTy()) {
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);
#ifndef ARRAY_SIMPLIFIED
                        currentStruct = addPtrOffset(currentStruct, (int) indices[i] * dl.getTypeAllocSize(aggTy), dgraph);
#endif
                        if (i == indices.size() - 1) {
                            this->makeAlias(currentStruct, wrapValue(inst));
                        }
                    } else {
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);

                        if (i != indices.size() - 1) {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], NULL);
                        } else {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], wrapValue(inst));
                        }
                    }
                } else {
                    break;
                }
            }
        }
            break;
        case Instruction::InsertValue:
        {
            DyckVertex* resultV = wrapValue(inst);
            Value * agg = ((InsertValueInst*) inst)->getAggregateOperand();
            if (!isa<UndefValue>(agg)) {
                makeAlias(resultV, wrapValue(agg));
            }

            Value * val = ((InsertValueInst*) inst)->getInsertedValueOperand();
            DyckVertex* insertedVal = wrapValue(val);

            Type *aggTy = inst->getType();

            ArrayRef<unsigned> indices = ((InsertValueInst*) inst)->getIndices();

            DyckVertex* currentStruct = resultV;

            for (unsigned int i = 0; i < indices.size(); i++) {
                if (isa<CompositeType>(aggTy) && aggTy->isSized()) {
                    if (!aggTy->isStructTy()) {
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);
#ifndef ARRAY_SIMPLIFIED
                        currentStruct = addPtrOffset(currentStruct, (int) indices[i] * dl.getTypeAllocSize(aggTy), dgraph);
#endif
                        if (i == indices.size() - 1) {
                            this->makeAlias(currentStruct, insertedVal);
                        }
                    } else {
                        aggTy = ((CompositeType*) aggTy)->getTypeAtIndex(indices[i]);

                        if (i != indices.size() - 1) {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], NULL);
                        } else {
                            currentStruct = this->addField(currentStruct, -2 - (int) indices[i], insertedVal);
                        }
                    }
                } else {
                    break;
                }
            }
        }
            break;

            // memory accessing and addressing operations
        case Instruction::Alloca:
        {
        }
            break;
        case Instruction::Fence:
        {
        }
            break;
        case Instruction::AtomicCmpXchg:
        {
            Value * retXchg = inst;
            Value * ptrXchg = inst->getOperand(0);
            Value * newXchg = inst->getOperand(2);
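            // cmpxchg yields the old contents and may store the new value; model
            // both as pointed-to by the address so that old and new contents
            // conservatively alias (assumed intent, judging from the edges added)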
            addPtrTo(wrapValue(ptrXchg), wrapValue(retXchg));
            addPtrTo(wrapValue(ptrXchg), wrapValue(newXchg));
        }
            break;
        case Instruction::AtomicRMW:
        {
            Value * retRmw = inst;
            Value * ptrRmw = ((AtomicRMWInst*) inst)->getPointerOperand();
            addPtrTo(wrapValue(ptrRmw), wrapValue(retRmw));

            switch (((AtomicRMWInst*) inst)->getOperation()) {
                case AtomicRMWInst::Max:
                case AtomicRMWInst::Min:
                case AtomicRMWInst::UMax:
                case AtomicRMWInst::UMin:
                case AtomicRMWInst::Xchg:
                {
                    Value * newRmw = ((AtomicRMWInst*) inst)->getValOperand();
                    addPtrTo(wrapValue(ptrRmw), wrapValue(newRmw));
                }
                    break;
                default:
                    //others are binary ops like add/sub/...
                    ///@TODO
                    break;
            }
        }
            break;
        case Instruction::Load:
        {
            Value *lval = inst;
            Value *ladd = inst->getOperand(0);
            addPtrTo(wrapValue(ladd), wrapValue(lval));
        }
            break;
        case Instruction::Store:
        {
            Value * sval = inst->getOperand(0);
            Value * sadd = inst->getOperand(1);
            addPtrTo(wrapValue(sadd), wrapValue(sval));
        }
            break;
        case Instruction::GetElementPtr:
        {
            makeAlias(wrapValue(inst), handle_gep((GEPOperator*) inst));
        }
            break;

            // conversion operations
        case Instruction::Trunc:
        case Instruction::ZExt:
        case Instruction::SExt:
        case Instruction::FPTrunc:
        case Instruction::FPExt:
        case Instruction::FPToUI:
        case Instruction::FPToSI:
        case Instruction::UIToFP:
        case Instruction::SIToFP:
        case Instruction::BitCast:
        case Instruction::PtrToInt:
        case Instruction::IntToPtr:
        {
            Value * itpv = inst->getOperand(0);
            makeAlias(wrapValue(inst), wrapValue(itpv));
        }
            break;

            // other operations
        case Instruction::Invoke: // invoke is a terminal operation
        {
            InvokeInst * invoke = (InvokeInst*) inst;
            LandingPadInst* lpd = invoke->getLandingPadInst();
            parent_func->addLandingPad(invoke, lpd);

            Value * cv = invoke->getCalledValue();
            vector<Value*> args;
            for (unsigned i = 0; i < invoke->getNumArgOperands(); i++) {
                args.push_back(invoke->getArgOperand(i));
            }

            this->handle_invoke_call_inst(invoke, cv, &args, parent_func);
        }
            break;
        case Instruction::Call:
        {
            CallInst * callinst = (CallInst*) inst;

            if (callinst->isInlineAsm()) {
                break;
            }

            Value * cv = callinst->getCalledValue();
            vector<Value*> args;
            for (unsigned i = 0; i < callinst->getNumArgOperands(); i++) {
                args.push_back(callinst->getArgOperand(i));
            }

            this->handle_invoke_call_inst(callinst, cv, &args, parent_func);
        }
            break;
        case Instruction::PHI:
        {
            PHINode *phi = (PHINode *) inst;
            int nums = phi->getNumIncomingValues();
            for (int i = 0; i < nums; i++) {
                Value * p = phi->getIncomingValue(i);
                makeAlias(wrapValue(inst), wrapValue(p));
            }
        }
            break;
        case Instruction::Select:
        {
            Value *first = ((SelectInst*) inst)->getTrueValue();
            Value *second = ((SelectInst*) inst)->getFalseValue();
            makeAlias(wrapValue(inst), wrapValue(first));
            makeAlias(wrapValue(inst), wrapValue(second));
        }
            break;
        case Instruction::VAArg:
        {
            parent_func->addVAArg(inst);

            DyckVertex* vaarg = wrapValue(inst);
            Value * ptrVaarg = inst->getOperand(0);
            addPtrTo(wrapValue(ptrVaarg), vaarg);
        }
            break;
        case Instruction::LandingPad: // handled with invoke inst
        case Instruction::ICmp:
        case Instruction::FCmp:
        default:
            break;
    }
}
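Illustrative only (the IR names are invented): handle_inst encodes each instruction as Dyck-graph edges. For

  store i32 %x, i32* %p
  %y = load i32, i32* %p

the Store case records addPtrTo(wrapValue(%p), wrapValue(%x)) and the Load case records addPtrTo(wrapValue(%p), wrapValue(%y)), so %x and %y both appear as the pointed-to contents of %p; casts, phis, and selects instead call makeAlias to merge their vertices with their operands' vertices directly.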
Example #4
/// HandleURoRInvokes - Handle invokes of "_Unwind_Resume_or_Rethrow" calls. The
/// "unwind" part of these invokes jumps to a landing pad within the current
/// function. Such an invoke is a candidate for merging the selector associated
/// with it with the one from its landing pad.
bool DwarfEHPrepare::HandleURoRInvokes() {
  if (!EHCatchAllValue) {
    EHCatchAllValue =
      F->getParent()->getNamedGlobal("llvm.eh.catch.all.value");
    if (!EHCatchAllValue) return false;
  }

  if (!SelectorIntrinsic) {
    SelectorIntrinsic =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_selector);
    if (!SelectorIntrinsic) return false;
  }

  SmallPtrSet<IntrinsicInst*, 32> Sels;
  SmallPtrSet<IntrinsicInst*, 32> CatchAllSels;
  FindAllCleanupSelectors(Sels, CatchAllSels);

  if (!DT)
    // We require DominatorTree information.
    return CleanupSelectors(CatchAllSels);

  if (!URoR) {
    URoR = F->getParent()->getFunction("_Unwind_Resume_or_Rethrow");
    if (!URoR) return CleanupSelectors(CatchAllSels);
  }

  SmallPtrSet<InvokeInst*, 32> URoRInvokes;
  FindAllURoRInvokes(URoRInvokes);

  SmallPtrSet<IntrinsicInst*, 32> SelsToConvert;

  for (SmallPtrSet<IntrinsicInst*, 32>::iterator
         SI = Sels.begin(), SE = Sels.end(); SI != SE; ++SI) {
    const BasicBlock *SelBB = (*SI)->getParent();
    for (SmallPtrSet<InvokeInst*, 32>::iterator
           UI = URoRInvokes.begin(), UE = URoRInvokes.end(); UI != UE; ++UI) {
      const BasicBlock *URoRBB = (*UI)->getParent();
      if (DT->dominates(SelBB, URoRBB)) {
        SelsToConvert.insert(*SI);
        break;
      }
    }
  }

  bool Changed = false;

  if (Sels.size() != SelsToConvert.size()) {
    // If we haven't been able to convert all of the clean-up selectors, then
    // loop through the slow way to see if they still need to be converted.
    if (!ExceptionValueIntrinsic) {
      ExceptionValueIntrinsic =
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_exception);
      if (!ExceptionValueIntrinsic)
        return CleanupSelectors(CatchAllSels);
    }

    for (Value::use_iterator
           I = ExceptionValueIntrinsic->use_begin(),
           E = ExceptionValueIntrinsic->use_end(); I != E; ++I) {
      IntrinsicInst *EHPtr = dyn_cast<IntrinsicInst>(*I);
      if (!EHPtr || EHPtr->getParent()->getParent() != F) continue;

      Changed |= PromoteEHPtrStore(EHPtr);

      bool URoRInvoke = false;
      SmallPtrSet<IntrinsicInst*, 8> SelCalls;
      Changed |= FindSelectorAndURoR(EHPtr, URoRInvoke, SelCalls);

      if (URoRInvoke) {
        // This EH pointer is being used both by an invoke of a URoR call and by
        // an eh.selector intrinsic call. If the eh.selector is a 'clean-up', we
        // need to convert it to a 'catch-all'.
        for (SmallPtrSet<IntrinsicInst*, 8>::iterator
               SI = SelCalls.begin(), SE = SelCalls.end(); SI != SE; ++SI)
          if (!HasCatchAllInSelector(*SI))
              SelsToConvert.insert(*SI);
      }
    }
  }

  if (!SelsToConvert.empty()) {
    // Convert all clean-up eh.selectors, which are associated with "invokes" of
    // URoR calls, into catch-all eh.selectors.
    Changed = true;

    for (SmallPtrSet<IntrinsicInst*, 8>::iterator
           SI = SelsToConvert.begin(), SE = SelsToConvert.end();
         SI != SE; ++SI) {
      IntrinsicInst *II = *SI;

      // Use the exception object pointer and the personality function
      // from the original selector.
      CallSite CS(II);
      IntrinsicInst::op_iterator I = CS.arg_begin();
      IntrinsicInst::op_iterator E = CS.arg_end();
      IntrinsicInst::op_iterator B = prior(E);

      // Exclude last argument if it is an integer.
      if (isa<ConstantInt>(B)) E = B;

      // Add exception object pointer (front).
      // Add personality function (next).
      // Add in any filter IDs (rest).
      SmallVector<Value*, 8> Args(I, E);

      Args.push_back(EHCatchAllValue->getInitializer()); // Catch-all indicator.

      CallInst *NewSelector =
        CallInst::Create(SelectorIntrinsic, Args.begin(), Args.end(),
                         "eh.sel.catch.all", II);

      NewSelector->setTailCall(II->isTailCall());
      NewSelector->setAttributes(II->getAttributes());
      NewSelector->setCallingConv(II->getCallingConv());

      II->replaceAllUsesWith(NewSelector);
      II->eraseFromParent();
    }
  }

  Changed |= CleanupSelectors(CatchAllSels);
  return Changed;
}
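For context, a hedged sketch of the selector rewrite performed above, using the old pre-landingpad EH intrinsics; the exact argument shapes are assumptions:

  ; a clean-up selector (a trailing integer marks a cleanup)
  %sel = call i32 @llvm.eh.selector(i8* %exn, i8* %personality, i32 0)

  ; rebuilt as a catch-all: the trailing integer is dropped and the
  ; initializer of @llvm.eh.catch.all.value is appended instead
  %sel2 = call i32 @llvm.eh.selector(i8* %exn, i8* %personality, i8* %catchall)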
Example #5
//
// Method: runOnModule()
//
// Description:
//  Entry point for this LLVM pass.
//  If a function returns a struct, make it return
//  a pointer to the struct.
//
// Inputs:
//  M - A reference to the LLVM module to transform
//
// Outputs:
//  M - The transformed LLVM module.
//
// Return value:
//  true  - The module was modified.
//  false - The module was not modified.
//
bool StructRet::runOnModule(Module& M) {
  const llvm::DataLayout targetData(&M);

  std::vector<Function*> worklist;
  for (Module::iterator I = M.begin(); I != M.end(); ++I)
    if (!I->mayBeOverridden()) {
      if(I->hasAddressTaken())
        continue;
      if(I->getReturnType()->isStructTy()) {
        worklist.push_back(I);
      }
    }

  while(!worklist.empty()) {
    Function *F = worklist.back();
    worklist.pop_back();
    Type *NewArgType = F->getReturnType()->getPointerTo();

    // Construct the new Type
    std::vector<Type*>TP;
    TP.push_back(NewArgType);
    for (Function::arg_iterator ii = F->arg_begin(), ee = F->arg_end();
         ii != ee; ++ii) {
      TP.push_back(ii->getType());
    }

    FunctionType *NFTy = FunctionType::get(F->getReturnType(), TP, F->isVarArg());

    // Create the new function body and insert it into the module.
    Function *NF = Function::Create(NFTy, 
                                    F->getLinkage(),
                                    F->getName(), &M);
    ValueToValueMapTy ValueMap;
    Function::arg_iterator NI = NF->arg_begin();
    NI->setName("ret");
    ++NI;
    for (Function::arg_iterator II = F->arg_begin(); II != F->arg_end(); ++II, ++NI) {
      ValueMap[II] = NI;
      NI->setName(II->getName());
      AttributeSet attrs = F->getAttributes().getParamAttributes(II->getArgNo() + 1);
      if (!attrs.isEmpty())
        NI->addAttr(attrs);
    }
    // Perform the cloning.
    SmallVector<ReturnInst*,100> Returns;
    if (!F->isDeclaration())
      CloneFunctionInto(NF, F, ValueMap, false, Returns);
    std::vector<Value*> fargs;
    for(Function::arg_iterator ai = NF->arg_begin(), 
        ae= NF->arg_end(); ai != ae; ++ai) {
      fargs.push_back(ai);
    }
    NF->setAttributes(NF->getAttributes().addAttributes(
        M.getContext(), 0, F->getAttributes().getRetAttributes()));
    NF->setAttributes(NF->getAttributes().addAttributes(
        M.getContext(), ~0, F->getAttributes().getFnAttributes()));
    
    for (Function::iterator B = NF->begin(), FE = NF->end(); B != FE; ++B) {      
      for (BasicBlock::iterator I = B->begin(), BE = B->end(); I != BE;) {
        ReturnInst * RI = dyn_cast<ReturnInst>(I++);
        if(!RI)
          continue;
        LoadInst *LI = dyn_cast<LoadInst>(RI->getOperand(0));
        assert(LI && "Return should be preceded by a load instruction");
        IRBuilder<> Builder(RI);
        Builder.CreateMemCpy(fargs.at(0),
            LI->getPointerOperand(),
            targetData.getTypeStoreSize(LI->getType()),
            targetData.getPrefTypeAlignment(LI->getType()));
      }
    }

    for(Value::use_iterator ui = F->use_begin(), ue = F->use_end();
        ui != ue; ) {
      CallInst *CI = dyn_cast<CallInst>(*ui++);
      if(!CI)
        continue;
      if(CI->getCalledFunction() != F)
        continue;
      if(CI->hasByValArgument())
        continue;
      AllocaInst *AllocaNew = new AllocaInst(F->getReturnType(), 0, "", CI);
      SmallVector<Value*, 8> Args;

      //this should probably be done in a different manner
      AttributeSet NewCallPAL=AttributeSet();
      
      // Get the initial attributes of the call
      AttributeSet CallPAL = CI->getAttributes();
      AttributeSet RAttrs = CallPAL.getRetAttributes();
      AttributeSet FnAttrs = CallPAL.getFnAttributes();
      
      if (!RAttrs.isEmpty())
        NewCallPAL=NewCallPAL.addAttributes(F->getContext(),0, RAttrs);

      Args.push_back(AllocaNew);
      for(unsigned j = 0; j < CI->getNumOperands()-1; j++) {
        Args.push_back(CI->getOperand(j));
        // Attribute indices are 1-based (0 is the return value), and each
        // argument shifts one slot to the right in the new call, which is
        // exactly Args.size() after the push_back above.
        AttributeSet Attrs = CallPAL.getParamAttributes(j + 1);
        if (!Attrs.isEmpty())
          NewCallPAL=NewCallPAL.addAttributes(F->getContext(),Args.size(), Attrs);
      }
      // Create the new attributes vec.
      if (!FnAttrs.isEmpty())
        NewCallPAL=NewCallPAL.addAttributes(F->getContext(),~0, FnAttrs);

      CallInst *CallI = CallInst::Create(NF, Args, "", CI);
      CallI->setCallingConv(CI->getCallingConv());
      CallI->setAttributes(NewCallPAL);
      LoadInst *LI = new LoadInst(AllocaNew, "", CI);
      CI->replaceAllUsesWith(LI);
      CI->eraseFromParent();
    }
    if(F->use_empty())
      F->eraseFromParent();
  }
  return true;
}
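A sketch of the interface change, with illustrative names; note that NFTy above keeps the original struct return type, so the new function simply gains a leading sret-style pointer argument:

  ; before
  %struct.S @f(i32 %a)

  ; after
  %struct.S @f(%struct.S* %ret, i32 %a)

  ; and each direct call site becomes
  %tmp = alloca %struct.S
  %unused = call %struct.S @f(%struct.S* %tmp, i32 %a)
  %r = load %struct.S* %tmp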
Example #6
bool PathList::runOnModule(Module &M) {
	module = &M;
	
	llvm::dbgs() << "[runOnModule]: Moduel M has " << M.getFunctionList().size() << " Functions in all.\n";
	
	// for test
	Function *f1 = M.getFunction("fprintf");
	if (!f1)
		dbgs() << "[Test]: can not find function fprintf.\n";
	else
		dbgs() << "[Test]: find function fprintf.\n";
	  
	CallGraph &CG = getAnalysis<CallGraph>();
//	CG.dump();
	
	CallGraphNode *cgNode = CG.getRoot();
	cgNode->dump();
//	errs()<<node->getFunction()->getName()<<'\n';
	
	Function *startFunc;
	Function *endFunc;
	startFunc = M.getFunction("__user_main");
	
	//std::string fileName("/home/xqx/data/xqx/projects/benckmarks-klee/texinfo-4.8/build-shit/makeinfo/../../makeinfo/insertion.c");
	//int lineNo = 407;
	
	BB = getBB(fileName, lineNo);
	*targetBbpp = getBB(fileName, lineNo);
	if (BB) {
		endFunc = BB->getParent();
		if (!endFunc) {
			errs()<<"Error: get endFunc failed.\n";
			return false;
		}
		if (!startFunc) {
		  	errs()<<"Error: get startFunc failed.\n";
			return false;
		}
		errs()<<startFunc->getName()<<'\n';
	}
	else {
		errs()<<"Error: get BB failed.\n";
		return false;
	}
	
	
	
	//read start and end from xml files
//	defectList enStart, enEnd;
//	getEntryList("/tmp/entrys.xml", &enStart, "start");
//	getEntryList("/tmp/entrys.xml", &enEnd, "end");
//	getEntryList("/tmp/entrys.xml", &dl, "end");
//	dumpEntryList(&enStart);
//	dumpEntryList(&enEnd);
//	dumpEntryList(&dl);
	
	//read bug information from xml file
/*	for (defectList::iterator dit = dl.begin(); dit != dl.end(); dit++) {
		StringRef file(dit->first.c_str());
		std::vector<int> lines = dit->second;
		BasicBlock *BB = getBB(file, *(lines.begin()));
		if (BB) {
			endFunc = BB->getParent();
		}
	}
*/	
	//to store temporary path
	std::vector<BasicBlock*> p;
	// a counter
	int map_count = 0;
	
	for (Module::iterator i = M.begin(), e = M.end(); i != e; ++i) {
		Function *F = i;
		if (!F) {
			llvm::errs() << "***NULL Function***\n";
			continue;
		}
		cgNode = CG.getOrInsertFunction(F);
		F = cgNode->getFunction();
//		
		for (CallGraphNode::iterator I = cgNode->begin(), E = cgNode->end();
				I != E; ++I){
			CallGraphNode::CallRecord *cr = &*I;
//			llvm::errs() << "\tCS<" << cr->first << "> calls";
			// check that the CallInst exists
			if(cr->first){
				Instruction *TmpIns = dyn_cast<Instruction>(cr->first);
				if(TmpIns) {
//					errs() << "\t" << *TmpIns << "\n";
					//unsigned int l, c;
					//std::string cfi_path = getInstPath(TmpIns, l, c);
					//if (!cfi_path.empty()) {
					//	if (cfi_path.find("uclibc") != std::string::npos) {
					//		dbgs() << "[Filter Uclib]: find an instruction from uclibc.\n";
					//		continue;
					//	} else if (cfi_path.find("POSIX") != std::string::npos) {
					//		dbgs() << "[Filter Uclib]: find an instruction from POSIX.\n";
					//		continue;
					//	}
					//}
				} else
					continue;
			}
			// get the function pointer that is called by the current CallRecord cr
			Function *FI = cr->second->getFunction();
			if (!FI)
				continue;
			
			// create a new CalledFunctions element and push it into calledFunctionMap.
			calledFunctionMap[FI].push_back(std::make_pair(F, dyn_cast<Instruction>(cr->first)));
			// for debugging
			map_count++;			
		}

	}
	
	dbgs() << "[Count Number of calledFunctionMap]: "<< calledFunctionMap.size() <<'\n';
	
	// analyze the global function pointer table
	if(function_pointer_analysis()) {
		errs() << "[Analyze global function pointer table success]\n";
	} else {
		errs() << "[Analyze global function pointer table failed]\n";
	}
	
	dbgs() << "[Count Number of calledFunctionMap]: "<< calledFunctionMap.size() <<'\n';
	
	// filter the instructions from uclibc
	//filter_uclibc();

	llvm::errs() << "=================================hh\n";
	llvm::errs() << "get Function Path: " << endFunc->getName() 
		<< " to " << startFunc->getName() << " \n";
	
//	printCalledFuncAndCFGPath(endFunc, startFunc, BB, p);
		
	// modification by wh
	evo_paths = new entire_path;
	//filter_paths = new func_bbs_type;
	//BB_paths_map = new std::map<std::pair<Function*, BasicBlock*>, std::vector<BasicBlock*> >;
	std::vector<std::pair< Function*, Instruction*> > tmp_func_path;
//	std::vector<BasicBlock*> tmp_bb_path;
//	explore_function_paths(endFunc, startFunc, bug_Inst, &tmp_func_path);
	collect_funcitons(endFunc, startFunc, bug_Inst, &tmp_func_path);
//	dbgs() << "++++++Found " << evo_paths->size() << " function paths.\n";
	
//	for (entire_path::iterator ep_it = evo_paths->begin(); ep_it != evo_paths->end(); ep_it++) {
//		for (std::vector<std::pair< Function*, Instruction*> >::iterator pair_it = ep_it->begin(); pair_it != ep_it->end(); pair_it++) {
//			if (filter_paths->size() != 0) {
//				std::vector<Instruction*>::iterator inst_it = std::find((*filter_paths)[pair_it->first].begin(), (*filter_paths)[pair_it->first].end(), pair_it->second);
//				if (inst_it != (*filter_paths)[pair_it->first].end()) {
//					continue;
//				}
//			}
//			(*filter_paths)[pair_it->first].push_back(pair_it->second);
//		}
//	}
	dbgs() << "[filter_paths]: contain " << filter_paths->size() << " functions in all.\n";
	
	for (func_bbs_type::iterator fbs_it = filter_paths->begin(); fbs_it != filter_paths->end(); fbs_it++) {
		for (std::vector<Instruction*>::iterator bb_it2 = fbs_it->second.begin(); bb_it2 != fbs_it->second.end(); bb_it2++) {
			dbgs() << "^^^^^^ " << fbs_it->first->getName() << ": " << (*bb_it2)->getParent()->getName() << '\n';
			// to expand functions
			call_insts.push_back((*bb_it2));
			
			explore_basicblock_paths(fbs_it->first, (*bb_it2)->getParent(), &(*BB_paths_map)[std::make_pair(fbs_it->first, *bb_it2)]);
			dbgs() << "^^^^^^ found " << (*BB_paths_map)[std::make_pair(fbs_it->first, *bb_it2)].size() << " basicblocks.\n";
		}
	}
	
	llvm::dbgs() << "!!!!!!!! Found " << call_insts.size() << " call instructions.\n";
	llvm::dbgs() << "!!!!!!!! Found " << path_basicblocks.size() << " path basicblocks.\n";
	
	// expand functions
	for (std::vector<Instruction*>::iterator ci_it = call_insts.begin(); ci_it != call_insts.end(); ci_it++) {
		BasicBlock *call_bb = (*ci_it)->getParent();
		if (!call_bb) {
			continue;
		}
		for (BasicBlock::iterator inst = call_bb->begin(); inst != call_bb->end(); inst++) {
			if (&*inst == *ci_it) {
				break;
			}
			if (isa<CallInst>(&*inst)) {
				std::vector<Instruction*>::iterator ci = std::find(path_call_insts.begin(), path_call_insts.end(), &*inst);
				if (ci != path_call_insts.end())
					continue;
				path_call_insts.push_back(&*inst);
			}
		}
	}
	llvm::dbgs() << "@@@@@@@@ After search call_insts, found " << path_call_insts.size() << " call instructions.\n";
	for (std::vector<BasicBlock*>::iterator p_bb_it = path_basicblocks.begin(); p_bb_it != path_basicblocks.end(); p_bb_it++) {
		for (BasicBlock::iterator inst = (*p_bb_it)->begin(); inst != (*p_bb_it)->end(); inst++) {
			if (isa<CallInst>(&*inst)) {
				std::vector<Instruction*>::iterator ci = std::find(path_call_insts.begin(), path_call_insts.end(), &*inst);
				if (ci != path_call_insts.end())
					continue;
				path_call_insts.push_back(&*inst);
			}
		}
	}
	llvm::dbgs() << "@@@@@@@@ After search path_basicblocks, found " << path_call_insts.size() << " call instructions.\n";
	for (std::vector<Instruction*>::iterator iit = path_call_insts.begin(); iit != path_call_insts.end(); iit++) {
		CallInst *ci = dyn_cast<CallInst>(*iit);
		if (!ci)
			continue;
		Function *ff = ci->getCalledFunction();
		if (!ff) {
			//ci->dump();
			//dbgs() << "\t[called value] " << ci->getOperand(0)->getName() << '\n'; 
			
			continue;
		}
		std::vector<Function*>::iterator fit = std::find(otherCalledFuncs->begin(), otherCalledFuncs->end(), ff);
		if (fit == otherCalledFuncs->end())
			otherCalledFuncs->push_back(ff);
	}
	llvm::dbgs() << "((((((((( Found " << otherCalledFuncs->size() << " functions.\n";
	
	for (unsigned index = 0; index < otherCalledFuncs->size(); index++) {
		Function *f = otherCalledFuncs->at(index);
/*		if (!f) {
			//f->dump();
			llvm::dbgs() << "?????? index = " << index << " size = " << otherCalledFuncs->size()<< '\n';
			continue;
		}
*/		for (inst_iterator f_it = inst_begin(f); f_it != inst_end(f); f_it++) {
			CallInst *ci = dyn_cast<CallInst>(&*f_it);
			if (!ci)
				continue;
			if (!ci->getCalledFunction()) {
				//ci->dump();
				continue;
			}
			std::vector<Function*>::iterator fit = std::find(otherCalledFuncs->begin(), otherCalledFuncs->end(), ci->getCalledFunction());
			if (fit == otherCalledFuncs->end())
				otherCalledFuncs->push_back(ci->getCalledFunction());
		}
	}
	llvm::dbgs() << "((((((((( Found " << otherCalledFuncs->size() << " functions.\n";
	
	// This should be just for statistics.
	int tmp_funcNum_in_filter_notIn_other = 0;
	for (func_bbs_type::iterator fbs_it = filter_paths->begin(); fbs_it != filter_paths->end(); fbs_it++) {
		if (!fbs_it->first) {
			llvm::dbgs() << "[Warning]: Found a null Function pointer in filter_paths.\n";
			continue;
		}
		std::vector<Function*>::iterator fit = std::find(otherCalledFuncs->begin(), otherCalledFuncs->end(), fbs_it->first);
		if (fit == otherCalledFuncs->end())
			//otherCalledFuncs->push_back(fbs_it->first);
			tmp_funcNum_in_filter_notIn_other ++;
	}
	llvm::dbgs() << "<><><><> After searching filter_paths, found " << otherCalledFuncs->size() + tmp_funcNum_in_filter_notIn_other << " functions.\n";
/*	for (entire_path::iterator ep_it = evo_paths->begin(); ep_it != evo_paths->end(); ep_it++) {
		dbgs() << "Path length is: " << ep_it->size() << '\n';
		for (std::vector<std::pair< Function*, BasicBlock*> >::iterator pair_it = ep_it->begin(); pair_it != ep_it->end(); pair_it++) {
			 dbgs() << "^^^^^^ " << pair_it->first->getName() << ": " << pair_it->second->getName() << '\n';
			 explore_basicblock_paths(pair_it->first, pair_it->second, &(*BB_paths_map)[*pair_it]);
			 dbgs() << "^^^^^^ found " << (*BB_paths_map)[*pair_it].size() << " basicblocks.\n";
		}
	}
*/		
	llvm::errs() << "on-end\n";
	llvm::errs() << "=================================\n";
	
	// output all of the paths
/*	errs()<<"Find "<<paths_found->size()<<" paths in all.\n";
	for(paths::iterator ips = paths_found->begin();ips != paths_found->end();ips++) {
//		std::vector<BasicBlock*> *tmpP = dyn_cast<std::vector<BasicBlock*>*>(&*ips);
		dbgs() << "=========A Path Start============\n";
		for(std::vector<BasicBlock*>::iterator ps = ips->begin(), pe = ips->end(); ps != pe; ps++) {
			BasicBlock *tmpStr = *ps;
			errs()<<"\t"<<tmpStr->getParent()->getName()<<": "<<tmpStr->getName()<<" -> \n";
		}
		errs()<<"=================================\n";
	}
*/	
	return false;
}
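The reverse call map built near the top of runOnModule, judging from its use here (its declaration lives elsewhere in the pass), maps each callee to its callers and call sites:

  // assumed shape:
  // std::map<Function*, std::vector<std::pair<Function*, Instruction*> > > calledFunctionMap;
  calledFunctionMap[callee].push_back(std::make_pair(caller, callSite));

The backward search from endFunc toward startFunc then repeatedly looks up calledFunctionMap[current] to climb from a function to everything that calls it.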
Example #7
bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
  SmallVector<ReturnInst*,16> Returns;
  SmallVector<UnwindInst*,16> Unwinds;
  SmallVector<InvokeInst*,16> Invokes;

  // Look through the terminators of the basic blocks to find invokes, returns
  // and unwinds.
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
      // Remember all return instructions in case we insert an invoke into this
      // function.
      Returns.push_back(RI);
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Invokes.push_back(II);
    } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      Unwinds.push_back(UI);
    }
  }
  // If we don't have any invokes or unwinds, there's nothing to do.
  if (Unwinds.empty() && Invokes.empty()) return false;

  // Find the eh.selector.*, eh.exception and alloca calls.
  //
  // Remember any allocas() that aren't in the entry block, as the
  // jmpbuf saved SP will need to be updated for them.
  //
  // We'll use the first eh.selector to determine the right personality
  // function to use. For SJLJ, we always use the same personality for the
  // whole function, not on a per-selector basis.
  // FIXME: That's a bit ugly. Better way?
  SmallVector<CallInst*,16> EH_Selectors;
  SmallVector<CallInst*,16> EH_Exceptions;
  SmallVector<Instruction*,16> JmpbufUpdatePoints;
  // Note: Skip the entry block since there's nothing there that interests
  // us. eh.selector and eh.exception shouldn't ever be there, and we
  // want to disregard any allocas that are there.
  for (Function::iterator BB = F.begin(), E = F.end(); ++BB != E;) {
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
      if (CallInst *CI = dyn_cast<CallInst>(I)) {
        if (CI->getCalledFunction() == SelectorFn) {
          if (!PersonalityFn) PersonalityFn = CI->getArgOperand(1);
          EH_Selectors.push_back(CI);
        } else if (CI->getCalledFunction() == ExceptionFn) {
          EH_Exceptions.push_back(CI);
        } else if (CI->getCalledFunction() == StackRestoreFn) {
          JmpbufUpdatePoints.push_back(CI);
        }
      } else if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
        JmpbufUpdatePoints.push_back(AI);
      }
    }
  }
  // If we don't have any eh.selector calls, we can't determine the personality
  // function. Without a personality function, we can't process exceptions.
  if (!PersonalityFn) return false;

  NumInvokes += Invokes.size();
  NumUnwinds += Unwinds.size();

  if (!Invokes.empty()) {
    // We have invokes, so we need to add register/unregister calls to get
    // this function onto the global unwind stack.
    //
    // First thing we need to do is scan the whole function for values that are
    // live across unwind edges.  Each value that is live across an unwind edge
    // we spill into a stack location, guaranteeing that there is nothing live
    // across the unwind edge.  This process also splits all critical edges
    // coming out of invoke's.
    splitLiveRangesAcrossInvokes(Invokes);

    BasicBlock *EntryBB = F.begin();
    // Create an alloca for the incoming jump buffer ptr and the new jump buffer
    // that needs to be restored on all exits from the function.  This is an
    // alloca because the value needs to be added to the global context list.
    unsigned Align = 4; // FIXME: Should be a TLI check?
    AllocaInst *FunctionContext =
      new AllocaInst(FunctionContextTy, 0, Align,
                     "fcn_context", F.begin()->begin());

    Value *Idxs[2];
    const Type *Int32Ty = Type::getInt32Ty(F.getContext());
    Value *Zero = ConstantInt::get(Int32Ty, 0);
    // We need to also keep around a reference to the call_site field
    Idxs[0] = Zero;
    Idxs[1] = ConstantInt::get(Int32Ty, 1);
    CallSite = GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
                                         "call_site",
                                         EntryBB->getTerminator());

    // The exception selector comes back in context->data[1]
    Idxs[1] = ConstantInt::get(Int32Ty, 2);
    Value *FCData = GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
                                              "fc_data",
                                              EntryBB->getTerminator());
    Idxs[1] = ConstantInt::get(Int32Ty, 1);
    Value *SelectorAddr = GetElementPtrInst::Create(FCData, Idxs, Idxs+2,
                                                    "exc_selector_gep",
                                                    EntryBB->getTerminator());
    // The exception value comes back in context->data[0]
    Idxs[1] = Zero;
    Value *ExceptionAddr = GetElementPtrInst::Create(FCData, Idxs, Idxs+2,
                                                     "exception_gep",
                                                     EntryBB->getTerminator());

    // The result of the eh.selector call will be replaced with a
    // reference to the selector value returned in the function
    // context. We leave the selector itself so the EH analysis later
    // can use it.
    for (int i = 0, e = EH_Selectors.size(); i < e; ++i) {
      CallInst *I = EH_Selectors[i];
      Value *SelectorVal = new LoadInst(SelectorAddr, "select_val", true, I);
      I->replaceAllUsesWith(SelectorVal);
    }
    // eh.exception calls are replaced with references to the proper
    // location in the context. Unlike eh.selector, the eh.exception
    // calls are removed entirely.
    for (int i = 0, e = EH_Exceptions.size(); i < e; ++i) {
      CallInst *I = EH_Exceptions[i];
      // Possible for there to be duplicates, so check to make sure
      // the instruction hasn't already been removed.
      if (!I->getParent()) continue;
      Value *Val = new LoadInst(ExceptionAddr, "exception", true, I);
      const Type *Ty = Type::getInt8PtrTy(F.getContext());
      Val = CastInst::Create(Instruction::IntToPtr, Val, Ty, "", I);

      I->replaceAllUsesWith(Val);
      I->eraseFromParent();
    }

    // The entry block changes to have the eh.sjlj.setjmp, with a conditional
    // branch to a dispatch block for non-zero returns. If we return normally,
    // we're not handling an exception and just register the function context
    // and continue.

    // Create the dispatch block.  The dispatch block is basically a big switch
    // statement that goes to all of the invoke landing pads.
    BasicBlock *DispatchBlock =
            BasicBlock::Create(F.getContext(), "eh.sjlj.setjmp.catch", &F);

    // Insert a load in the Catch block, and a switch on its value.  By default,
    // we go to a block that just does an unwind (which is the correct action
    // for a standard call).
    BasicBlock *UnwindBlock =
      BasicBlock::Create(F.getContext(), "unwindbb", &F);
    Unwinds.push_back(new UnwindInst(F.getContext(), UnwindBlock));

    Value *DispatchLoad = new LoadInst(CallSite, "invoke.num", true,
                                       DispatchBlock);
    SwitchInst *DispatchSwitch =
      SwitchInst::Create(DispatchLoad, UnwindBlock, Invokes.size(),
                         DispatchBlock);
    // Split the entry block to insert the conditional branch for the setjmp.
    BasicBlock *ContBlock = EntryBB->splitBasicBlock(EntryBB->getTerminator(),
                                                     "eh.sjlj.setjmp.cont");

    // Populate the Function Context
    //   1. LSDA address
    //   2. Personality function address
    //   3. jmpbuf (save SP, FP and call eh.sjlj.setjmp)

    // LSDA address
    Idxs[0] = Zero;
    Idxs[1] = ConstantInt::get(Int32Ty, 4);
    Value *LSDAFieldPtr =
      GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
                                "lsda_gep",
                                EntryBB->getTerminator());
    Value *LSDA = CallInst::Create(LSDAAddrFn, "lsda_addr",
                                   EntryBB->getTerminator());
    new StoreInst(LSDA, LSDAFieldPtr, true, EntryBB->getTerminator());

    Idxs[1] = ConstantInt::get(Int32Ty, 3);
    Value *PersonalityFieldPtr =
      GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
                                "lsda_gep",
                                EntryBB->getTerminator());
    new StoreInst(PersonalityFn, PersonalityFieldPtr, true,
                  EntryBB->getTerminator());

    // Save the frame pointer.
    Idxs[1] = ConstantInt::get(Int32Ty, 5);
    Value *JBufPtr
      = GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
                                  "jbuf_gep",
                                  EntryBB->getTerminator());
    Idxs[1] = ConstantInt::get(Int32Ty, 0);
    Value *FramePtr =
      GetElementPtrInst::Create(JBufPtr, Idxs, Idxs+2, "jbuf_fp_gep",
                                EntryBB->getTerminator());

    Value *Val = CallInst::Create(FrameAddrFn,
                                  ConstantInt::get(Int32Ty, 0),
                                  "fp",
                                  EntryBB->getTerminator());
    new StoreInst(Val, FramePtr, true, EntryBB->getTerminator());

    // Save the stack pointer.
    Idxs[1] = ConstantInt::get(Int32Ty, 2);
    Value *StackPtr =
      GetElementPtrInst::Create(JBufPtr, Idxs, Idxs+2, "jbuf_sp_gep",
                                EntryBB->getTerminator());

    Val = CallInst::Create(StackAddrFn, "sp", EntryBB->getTerminator());
    new StoreInst(Val, StackPtr, true, EntryBB->getTerminator());

    // Call the setjmp intrinsic. It fills in the rest of the jmpbuf.
    Value *SetjmpArg =
      CastInst::Create(Instruction::BitCast, JBufPtr,
                       Type::getInt8PtrTy(F.getContext()), "",
                       EntryBB->getTerminator());
    Value *DispatchVal = CallInst::Create(BuiltinSetjmpFn, SetjmpArg,
                                          "dispatch",
                                          EntryBB->getTerminator());
    // check the return value of the setjmp. non-zero goes to dispatcher.
    Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
                                   ICmpInst::ICMP_EQ, DispatchVal, Zero,
                                   "notunwind");
    // Nuke the uncond branch.
    EntryBB->getTerminator()->eraseFromParent();

    // Put in a new condbranch in its place.
    BranchInst::Create(ContBlock, DispatchBlock, IsNormal, EntryBB);

    // Register the function context and make sure it's known to not throw
    CallInst *Register =
      CallInst::Create(RegisterFn, FunctionContext, "",
                       ContBlock->getTerminator());
    Register->setDoesNotThrow();

    // At this point, we are all set up, update the invoke instructions
    // to mark their call_site values, and fill in the dispatch switch
    // accordingly.
    for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
      markInvokeCallSite(Invokes[i], i+1, CallSite, DispatchSwitch);

    // Mark call instructions that aren't nounwind as no-action
    // (call_site == -1). Skip the entry block, as prior to then, no function
    // context has been created for this function and any unexpected exceptions
    // thrown will go directly to the caller's context, which is what we want
    // anyway, so no need to do anything here.
    for (Function::iterator BB = F.begin(), E = F.end(); ++BB != E;) {
      for (BasicBlock::iterator I = BB->begin(), end = BB->end(); I != end; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          // Ignore calls to the EH builtins (eh.selector, eh.exception)
          Constant *Callee = CI->getCalledFunction();
          if (Callee != SelectorFn && Callee != ExceptionFn
              && !CI->doesNotThrow())
            insertCallSiteStore(CI, -1, CallSite);
        }
    }

    // Replace all unwinds with a branch to the unwind handler.
    // ??? Should this ever happen with sjlj exceptions?
    for (unsigned i = 0, e = Unwinds.size(); i != e; ++i) {
      BranchInst::Create(UnwindBlock, Unwinds[i]);
      Unwinds[i]->eraseFromParent();
    }

    // Following any allocas not in the entry block, update the saved SP
    // in the jmpbuf to the new value.
    for (unsigned i = 0, e = JmpbufUpdatePoints.size(); i != e; ++i) {
      Instruction *AI = JmpbufUpdatePoints[i];
      Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
      StackAddr->insertAfter(AI);
      Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
      StoreStackAddr->insertAfter(StackAddr);
    }

    // Finally, for any returns from this function, if this function contains an
    // invoke, add a call to unregister the function context.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i)
      CallInst::Create(UnregisterFn, FunctionContext, "", Returns[i]);
  }

  return true;
}
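The GEP indices used above imply a function-context layout roughly like the following; this is reconstructed from the indices alone, not from any runtime header:

  // assumed layout of FunctionContextTy
  struct SjLjFunctionContext {
    void *link;        // 0: unwind-stack linkage (managed by RegisterFn)
    int   call_site;   // 1: which invoke we arrived from
    void *data[4];     // 2: data[0] = exception value, data[1] = selector
    void *personality; // 3: personality function
    void *lsda;        // 4: LSDA address
    void *jbuf[5];     // 5: jbuf[0] = FP, jbuf[2] = SP, rest filled by setjmp
  };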
Example #8
GraphNode* Graph::addInst(Value *v) {

        GraphNode *Op, *Var, *Operand;

        CallInst* CI = dyn_cast<CallInst> (v);
        bool hasVarNode = true;

        if (isValidInst(v)) { //If is a data manipulator instruction
                Var = this->findNode(v);

                /*
                 * If Var is NULL, the value hasn't been processed yet, so we must process it.
                 *
                 * However, if Var is a pointer, the memory node may already exist while the
                 * operation node isn't in the graph yet. In that case we must process it too.
                 */
                if (Var == NULL || findOpNode(v) == NULL) { //If it has not been processed yet

                        //If Var isn't NULL, we won't create another node for it
                        if (Var == NULL) {

                                if (CI) {
                                        hasVarNode = !CI->getType()->isVoidTy();
                                }

                                if (hasVarNode) {
                                        if (StoreInst* SI = dyn_cast<StoreInst>(v))
                                                Var = addInst(SI->getOperand(1)); // We do this here because we want to represent the store instructions as a flow of information of a data to a memory node
                                        else if ((!isa<Constant> (v)) && isMemoryPointer(v)) {
                                                Var = new MemNode(
                                                                USE_ALIAS_SETS ? AS->getValueSetKey(v) : 0, AS);
                                                memNodes[USE_ALIAS_SETS ? AS->getValueSetKey(v) : 0]
                                                                = Var;
                                        } else {
                                                Var = new VarNode(v);
                                                varNodes[v] = Var;
                                        }
                                        nodes.insert(Var);
                                }

                        }

                        if (isa<Instruction> (v)) {

                                if (CI) {
                                        Op = new CallNode(CI);
                                        callNodes[CI] = Op;
                                } else {
                                        Op = new OpNode(dyn_cast<Instruction> (v)->getOpcode(), v);
                                }
                                opNodes[v] = Op;

                                nodes.insert(Op);
                                if (hasVarNode)
                                        Op->connect(Var);

                                //Connect the operands to the OpNode
                                for (unsigned int i = 0; i < cast<User> (v)->getNumOperands(); i++) {

                                        if (isa<StoreInst> (v) && i == 1)
                                                continue; // Skip the pointer operand: the store is already modeled as data flowing into the memory node

                                        Value *v1 = cast<User> (v)->getOperand(i);
                                        Operand = this->addInst(v1);

                                        if (Operand != NULL)
                                                Operand->connect(Op);
                                }
                        }
                }

                return Var;
        }
        return NULL;
}
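
A hedged illustration of the store convention described in the comments above (toy code, not the pass's real classes): for store %val, %ptr, the value operand feeds the store's operation node, which in turn feeds the memory node of the pointer operand, and the pointer operand itself is deliberately skipped in the operand loop.

#include <cstdio>

// Toy sketch: the edges Graph::addInst builds for "store i32 %val, i32* %ptr".
int main() {
  const char *edges[][2] = {
      {"VarNode(%val)", "OpNode(store)"},  // operand 0 connects into the op
      {"OpNode(store)", "MemNode(%ptr)"},  // the op flows into the memory node
  };                                       // operand 1 (%ptr) is skipped on purpose
  for (const auto &e : edges)
    std::printf("%s -> %s\n", e[0], e[1]);
  return 0;
}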
Example no. 9
0
/// Attempt to merge an objc_release with a store, load, and objc_retain to form
/// an objc_storeStrong. This can be a little tricky because the instructions
/// don't always appear in order, and there may be unrelated intervening
/// instructions.
void ObjCARCContract::ContractRelease(Instruction *Release,
                                      inst_iterator &Iter) {
  LoadInst *Load = dyn_cast<LoadInst>(GetObjCArg(Release));
  if (!Load || !Load->isSimple()) return;

  // For now, require everything to be in one basic block.
  BasicBlock *BB = Release->getParent();
  if (Load->getParent() != BB) return;

  // Walk down to find the store and the release, which may be in either order.
  BasicBlock::iterator I = Load, End = BB->end();
  ++I;
  AliasAnalysis::Location Loc = AA->getLocation(Load);
  StoreInst *Store = 0;
  bool SawRelease = false;
  for (; !Store || !SawRelease; ++I) {
    if (I == End)
      return;

    Instruction *Inst = I;
    if (Inst == Release) {
      SawRelease = true;
      continue;
    }

    InstructionClass Class = GetBasicInstructionClass(Inst);

    // Unrelated retains are harmless.
    if (IsRetain(Class))
      continue;

    if (Store) {
      // The store is the point where we're going to put the objc_storeStrong,
      // so make sure there are no uses after it.
      if (CanUse(Inst, Load, PA, Class))
        return;
    } else if (AA->getModRefInfo(Inst, Loc) & AliasAnalysis::Mod) {
      // We are moving the load down to the store, so check for anything
      // else which writes to the memory between the load and the store.
      Store = dyn_cast<StoreInst>(Inst);
      if (!Store || !Store->isSimple()) return;
      if (Store->getPointerOperand() != Loc.Ptr) return;
    }
  }

  Value *New = StripPointerCastsAndObjCCalls(Store->getValueOperand());

  // Walk up to find the retain.
  I = Store;
  BasicBlock::iterator Begin = BB->begin();
  while (I != Begin && GetBasicInstructionClass(I) != IC_Retain)
    --I;
  Instruction *Retain = I;
  if (GetBasicInstructionClass(Retain) != IC_Retain) return;
  if (GetObjCArg(Retain) != New) return;

  Changed = true;
  ++NumStoreStrongs;

  LLVMContext &C = Release->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);

  Value *Args[] = { Load->getPointerOperand(), New };
  if (Args[0]->getType() != I8XX)
    Args[0] = new BitCastInst(Args[0], I8XX, "", Store);
  if (Args[1]->getType() != I8X)
    Args[1] = new BitCastInst(Args[1], I8X, "", Store);
  CallInst *StoreStrong =
    CallInst::Create(getStoreStrongCallee(BB->getParent()->getParent()),
                     Args, "", Store);
  StoreStrong->setDoesNotThrow();
  StoreStrong->setDebugLoc(Store->getDebugLoc());

  // We can't set the tail flag yet, because we haven't yet determined
  // whether there are any escaping allocas. Remember this call, so that
  // we can set the tail flag once we know it's safe.
  StoreStrongCalls.insert(StoreStrong);

  if (&*Iter == Store) ++Iter;
  Store->eraseFromParent();
  Release->eraseFromParent();
  EraseInstruction(Retain);
  if (Load->use_empty())
    Load->eraseFromParent();
}
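
Unlike the later revision of this routine (Example no. 14 below), this version does not document the IR pattern it contracts. As a hedged C++ model, with hypothetical retain()/release() helpers standing in for the Objective-C runtime calls:

#include <cstdio>

// Hypothetical stand-ins for objc_retain/objc_release.
static void retain(void *p)  { std::printf("retain  %p\n", p); }
static void release(void *p) { std::printf("release %p\n", p); }

// C++ model of objc_storeStrong(old_ptr, new_value): the sequence the pass
// recognizes as (1) load, (2) retain, (3) release, (4) store, in that order.
static void storeStrongModel(void **old_ptr, void *new_value) {
  void *old_value = *old_ptr; // (1) load the old value
  retain(new_value);          // (2) retain new first, in case releasing old
  release(old_value);         // (3)   would destroy new via a destructor
  *old_ptr = new_value;       // (4) store the new value
}

int main() {
  void *slot = nullptr;
  int x;
  storeStrongModel(&slot, &x);
  return 0;
}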
Example no. 10
0
void DSWP::insertConsume(Instruction *u, Instruction *v, DType dtype,
					     int channel, int uthread, int vthread) {
	Instruction *oldu = dyn_cast<Instruction>(newToOld[u]);
	Instruction *insPos = placeEquivalents[vthread][oldu];
	if (insPos == NULL) {
		insPos = dyn_cast<Instruction>(instMap[vthread][oldu]);
		if (insPos == NULL) {
			error("can't insert nowhere");
		}
	}

	// call sync_consume(channel)
	Function *fun = module->getFunction("sync_consume");
	vector<Value *> args;
	args.push_back(ConstantInt::get(Type::getInt32Ty(*context), channel));
	CallInst *call = CallInst::Create(fun, args, "c" + itoa(channel), insPos);

	if (dtype == REG) {
		CastInst *cast;
		string name = call->getName().str() + "_val";

		if (u->getType()->isIntegerTy()) {
			cast = new TruncInst(call, u->getType(), name);
		}
		else if (u->getType()->isFloatingPointTy()) {
			if (u->getType()->isFloatTy())
				error("cannot deal with double");
			cast = new BitCastInst(call, u->getType(), name);
		}
		else if (u->getType()->isPointerTy()){
			cast = new IntToPtrInst(call, u->getType(), name);
		} else {
			error("what's the hell type");
		}

		cast->insertBefore(insPos);

		// replace the uses
		for (Instruction::use_iterator ui = oldu->use_begin(),
									   ue = oldu->use_end();
				ui != ue; ++ui) {

			Instruction *user = dyn_cast<Instruction>(*ui);
			if (user == NULL) {
				error("used by a non-instruction?");
			}

			// make sure it's in the same function...
			if (user->getParent()->getParent() != v->getParent()->getParent()) {
				continue;
			}

			// call replaceUses so that it handles phi nodes
			map<Value *, Value *> reps;
			reps[oldu] = cast;
			replaceUses(user, reps);
		}

	} /* TODO: need to handle true memory dependences more than just syncing?
	else if (dtype == DTRUE) {	//READ after WRITE
		error("check mem dep!!");

		if (!isa<LoadInst>(v)) {
			error("not true dependency");
		}
		BitCastInst *cast = new BitCastInst(
			call, v->getType(), call->getName().str() + "_ptr");
		cast->insertBefore(v);

		// replace the v with 'cast' in v's thread:
		// (other threads will be handled using the dependences)
		for (Instruction::use_iterator ui = v->use_begin(), ue = v->use_end();
				ui != ue; ui++) {
			Instruction *user = dyn_cast<Instruction>(*ui);

			if (user == NULL) {
				error("how could it be NULL");
			}

		//	int userthread = this->getNewInstAssigned(user);
			if (user->getParent()->getParent() != v->getParent()->getParent()) {
				continue;
			}

			for (unsigned i = 0; i < user->getNumOperands(); i++) {
				Value * op = user->getOperand(i);
				if (op == v) {
					user->setOperand(i, cast);
				}
			}
		}
	} */ else {
		// nothing to do
	}
}
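
For context, a minimal single-threaded model of the channel this code targets. It assumes, as the casts above imply, that sync_produce/sync_consume move one word-sized payload per channel; the real runtime functions are external and synchronized, so this is only a sketch:

#include <cstdint>

// Toy word-sized channels; the real sync_produce/sync_consume block and
// synchronize between threads.
static uint64_t channel[16];
static void sync_produce(uint64_t v, int ch) { channel[ch] = v; }
static uint64_t sync_consume(int ch)         { return channel[ch]; }

int main() {
  int x = 42;
  // Producer side: a pointer is widened into the channel word.
  sync_produce((uint64_t)(uintptr_t)&x, 3);
  // Consumer side: the IntToPtr-style cast insertConsume emits for pointers.
  int *p = (int *)(uintptr_t)sync_consume(3);
  return (*p == 42) ? 0 : 1;
}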
// The fractional part of a float is enough to accurately represent up to
// a 24-bit signed integer.
Value* AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
                                            BinaryOperator &I,
                                            Value *Num, Value *Den,
                                            bool IsDiv, bool IsSigned) const {
  assert(Num->getType()->isIntegerTy(32));

  const DataLayout &DL = Mod->getDataLayout();
  unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
  if (LHSSignBits < 9)
    return nullptr;

  unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
  if (RHSSignBits < 9)
    return nullptr;

  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = 32 - SignBits;
  if (IsSigned)
    ++DivBits;

  Type *Ty = Num->getType();
  Type *I32Ty = Builder.getInt32Ty();
  Type *F32Ty = Builder.getFloatTy();
  ConstantInt *One = Builder.getInt32(1);
  Value *JQ = One;

  if (IsSigned) {
    // char|short jq = ia ^ ib;
    JQ = Builder.CreateXor(Num, Den);

    // jq = jq >> (bitsize - 2)
    JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));

    // jq = jq | 0x1
    JQ = Builder.CreateOr(JQ, One);
  }

  // int ia = (int)LHS;
  Value *IA = Num;

  // int ib = (int)RHS;
  Value *IB = Den;

  // float fa = (float)ia;
  Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
                       : Builder.CreateUIToFP(IA, F32Ty);

  // float fb = (float)ib;
  Value *FB = IsSigned ? Builder.CreateSIToFP(IB, F32Ty)
                       : Builder.CreateUIToFP(IB, F32Ty);

  Value *RCP = Builder.CreateFDiv(ConstantFP::get(F32Ty, 1.0), FB);
  Value *FQM = Builder.CreateFMul(FA, RCP);

  // fq = trunc(fqm);
  CallInst* FQ = Builder.CreateIntrinsic(Intrinsic::trunc, { FQM });
  FQ->copyFastMathFlags(Builder.getFastMathFlags());

  // float fqneg = -fq;
  Value *FQNeg = Builder.CreateFNeg(FQ);

  // float fr = mad(fqneg, fb, fa);
  Value *FR = Builder.CreateIntrinsic(Intrinsic::amdgcn_fmad_ftz,
                                      { FQNeg, FB, FA }, FQ);

  // int iq = (int)fq;
  Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
                       : Builder.CreateFPToUI(FQ, I32Ty);

  // fr = fabs(fr);
  FR = Builder.CreateIntrinsic(Intrinsic::fabs, { FR }, FQ);

  // fb = fabs(fb);
  FB = Builder.CreateIntrinsic(Intrinsic::fabs, { FB }, FQ);

  // int cv = fr >= fb;
  Value *CV = Builder.CreateFCmpOGE(FR, FB);

  // jq = (cv ? jq : 0);
  JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));

  // dst = iq + jq;
  Value *Div = Builder.CreateAdd(IQ, JQ);

  Value *Res = Div;
  if (!IsDiv) {
    // Rem needs compensation; it's easier to recompute it.
    Value *Rem = Builder.CreateMul(Div, Den);
    Res = Builder.CreateSub(Num, Rem);
  }

  // Truncate to number of bits this divide really is.
  if (IsSigned) {
    Res = Builder.CreateTrunc(Res, Builder.getIntNTy(DivBits));
    Res = Builder.CreateSExt(Res, Ty);
  } else {
    ConstantInt *TruncMask = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
    Res = Builder.CreateAnd(Res, TruncMask);
  }

  return Res;
}
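
A hedged scalar model of the signed sequence the builder emits, runnable stand-alone: fmaf stands in for the amdgcn_fmad_ftz intrinsic, and the final truncate/extend to DivBits is omitted.

#include <cmath>
#include <cstdint>

// Scalar sketch of the float-based 24-bit signed div/rem expansion above.
static int32_t divrem24(int32_t num, int32_t den, bool isDiv) {
  int32_t jq = ((num ^ den) >> 30) | 1;     // +-1: the sign of the true quotient
  float fa = (float)num, fb = (float)den;
  float fq = truncf(fa * (1.0f / fb));      // approximate quotient
  float fr = fmaf(-fq, fb, fa);             // residual of the approximation
  int32_t iq = (int32_t)fq;
  // If the residual reaches a whole divisor, the approximation was one off.
  int32_t div = iq + ((fabsf(fr) >= fabsf(fb)) ? jq : 0);
  return isDiv ? div : num - div * den;     // rem recomputed from div
}

int main() { return divrem24(-7, 2, /*isDiv=*/true) == -3 ? 0 : 1; }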
Example no. 12
0
LLVMValueRef LLVMGetCalledFunction(LLVMValueRef I)
{
  CallInst *CI = (CallInst*)unwrap(I);
  return wrap(CI->getCalledValue());
}
Example no. 13
0
bool ObjCARCContract::tryToPeepholeInstruction(
  Function &F, Instruction *Inst, inst_iterator &Iter,
  SmallPtrSetImpl<Instruction *> &DependingInsts,
  SmallPtrSetImpl<const BasicBlock *> &Visited,
  bool &TailOkForStoreStrongs,
  const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
    // Only these library routines return their argument. In particular,
    // objc_retainBlock does not necessarily return its argument.
  ARCInstKind Class = GetBasicARCInstKind(Inst);
    switch (Class) {
    case ARCInstKind::FusedRetainAutorelease:
    case ARCInstKind::FusedRetainAutoreleaseRV:
      return false;
    case ARCInstKind::Autorelease:
    case ARCInstKind::AutoreleaseRV:
      return contractAutorelease(F, Inst, Class, DependingInsts, Visited);
    case ARCInstKind::Retain:
      // Attempt to convert retains to retainrvs if they are next to function
      // calls.
      if (!optimizeRetainCall(F, Inst))
        return false;
      // If we succeed in our optimization, fall through.
      LLVM_FALLTHROUGH;
    case ARCInstKind::RetainRV:
    case ARCInstKind::ClaimRV: {
      // If we're compiling for a target which needs a special inline-asm
      // marker to do the return value optimization, insert it now.
      if (!RVInstMarker)
        return false;
      BasicBlock::iterator BBI = Inst->getIterator();
      BasicBlock *InstParent = Inst->getParent();

      // Step up to see if the call immediately precedes the RV call.
      // If it's an invoke, we have to cross a block boundary. And we have
      // to carefully dodge no-op instructions.
      do {
        if (BBI == InstParent->begin()) {
          BasicBlock *Pred = InstParent->getSinglePredecessor();
          if (!Pred)
            goto decline_rv_optimization;
          BBI = Pred->getTerminator()->getIterator();
          break;
        }
        --BBI;
      } while (IsNoopInstruction(&*BBI));

      if (&*BBI == GetArgRCIdentityRoot(Inst)) {
        LLVM_DEBUG(dbgs() << "Adding inline asm marker for the return value "
                             "optimization.\n");
        Changed = true;
        InlineAsm *IA = InlineAsm::get(
            FunctionType::get(Type::getVoidTy(Inst->getContext()),
                              /*isVarArg=*/false),
            RVInstMarker->getString(),
            /*Constraints=*/"", /*hasSideEffects=*/true);

        createCallInst(IA, None, "", Inst, BlockColors);
      }
    decline_rv_optimization:
      return false;
    }
    case ARCInstKind::InitWeak: {
      // objc_initWeak(p, null) => *p = null
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(1))) {
        Value *Null =
          ConstantPointerNull::get(cast<PointerType>(CI->getType()));
        Changed = true;
        new StoreInst(Null, CI->getArgOperand(0), CI);

        LLVM_DEBUG(dbgs() << "OBJCARCContract: Old = " << *CI << "\n"
                          << "                 New = " << *Null << "\n");

        CI->replaceAllUsesWith(Null);
        CI->eraseFromParent();
      }
      return true;
    }
    case ARCInstKind::Release:
      // Try to form an objc store strong from our release. If we fail, there is
      // nothing further to do below, so continue.
      tryToContractReleaseIntoStoreStrong(Inst, Iter, BlockColors);
      return true;
    case ARCInstKind::User:
      // Be conservative if the function has any alloca instructions.
      // Technically we only care about escaping alloca instructions,
      // but this is sufficient to handle some interesting cases.
      if (isa<AllocaInst>(Inst))
        TailOkForStoreStrongs = false;
      return true;
    case ARCInstKind::IntrinsicUser:
      // Remove calls to @llvm.objc.clang.arc.use(...).
      Inst->eraseFromParent();
      return true;
    default:
      return true;
    }
}
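
The InitWeak case above collapses a runtime call into a plain store. As a hedged source-level model of what "objc_initWeak(p, null) => *p = null" means:

// There is no weak bookkeeping to do when initializing with null, so the
// call degenerates to clearing the slot.
static void initWeakNull(void **p) {
  *p = nullptr; // the plain store the peephole substitutes for the call
}

int main() {
  void *slot = (void *)&slot; // some non-null garbage
  initWeakNull(&slot);
  return slot == nullptr ? 0 : 1;
}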
Example no. 14
0
/// Attempt to merge an objc_release with a store, load, and objc_retain to form
/// an objc_storeStrong. An objc_storeStrong:
///
///   objc_storeStrong(i8** %old_ptr, i8* new_value)
///
/// is equivalent to the following IR sequence:
///
///   ; Load old value.
///   %old_value = load i8** %old_ptr               (1)
///
///   ; Increment the new value and then release the old value. This must occur
///   ; in order in case old_value releases new_value in its destructor causing
///   ; us to potentially have a dangling ptr.
///   tail call i8* @objc_retain(i8* %new_value)    (2)
///   tail call void @objc_release(i8* %old_value)  (3)
///
///   ; Store the new_value into old_ptr
///   store i8* %new_value, i8** %old_ptr           (4)
///
/// The safety of this optimization is based around the following
/// considerations:
///
///  1. We are forming the store strong at the store. Thus to perform this
///     optimization it must be safe to move the retain, load, and release to
///     (4).
///  2. We need to make sure that any re-orderings of (1), (2), (3), (4) are
///     safe.
void ObjCARCContract::tryToContractReleaseIntoStoreStrong(
    Instruction *Release, inst_iterator &Iter,
    const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
  // See if we are releasing something that we just loaded.
  auto *Load = dyn_cast<LoadInst>(GetArgRCIdentityRoot(Release));
  if (!Load || !Load->isSimple())
    return;

  // For now, require everything to be in one basic block.
  BasicBlock *BB = Release->getParent();
  if (Load->getParent() != BB)
    return;

  // First scan down the BB from Load, looking for a store of the
  // RCIdentityRoot of Load's pointer operand.
  StoreInst *Store =
      findSafeStoreForStoreStrongContraction(Load, Release, PA, AA);
  // If we fail, bail.
  if (!Store)
    return;

  // Then find what new_value's RCIdentity Root is.
  Value *New = GetRCIdentityRoot(Store->getValueOperand());

  // Then walk up the BB and look for a retain on New without any intervening
  // instructions which conservatively might decrement ref counts.
  Instruction *Retain =
      findRetainForStoreStrongContraction(New, Store, Release, PA);

  // If we fail, bail.
  if (!Retain)
    return;

  Changed = true;
  ++NumStoreStrongs;

  LLVM_DEBUG(
      llvm::dbgs() << "    Contracting retain, release into objc_storeStrong.\n"
                   << "        Old:\n"
                   << "            Store:   " << *Store << "\n"
                   << "            Release: " << *Release << "\n"
                   << "            Retain:  " << *Retain << "\n"
                   << "            Load:    " << *Load << "\n");

  LLVMContext &C = Release->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);

  Value *Args[] = { Load->getPointerOperand(), New };
  if (Args[0]->getType() != I8XX)
    Args[0] = new BitCastInst(Args[0], I8XX, "", Store);
  if (Args[1]->getType() != I8X)
    Args[1] = new BitCastInst(Args[1], I8X, "", Store);
  Function *Decl = EP.get(ARCRuntimeEntryPointKind::StoreStrong);
  CallInst *StoreStrong = createCallInst(Decl, Args, "", Store, BlockColors);
  StoreStrong->setDoesNotThrow();
  StoreStrong->setDebugLoc(Store->getDebugLoc());

  // We can't set the tail flag yet, because we haven't yet determined
  // whether there are any escaping allocas. Remember this call, so that
  // we can set the tail flag once we know it's safe.
  StoreStrongCalls.insert(StoreStrong);

  LLVM_DEBUG(llvm::dbgs() << "        New Store Strong: " << *StoreStrong
                          << "\n");

  if (&*Iter == Retain) ++Iter;
  if (&*Iter == Store) ++Iter;
  Store->eraseFromParent();
  Release->eraseFromParent();
  EraseInstruction(Retain);
  if (Load->use_empty())
    Load->eraseFromParent();
}
Example no. 15
0
/// performLocalRetainMotion - Scan forward from the specified retain, moving it
/// later in the function if possible, over instructions that provably can't
/// release the object.  If we get to a release of the object, zap both.
///
/// NOTE: this handles both objc_retain and swift_retain.
///
static bool performLocalRetainMotion(CallInst &Retain, BasicBlock &BB,
                                     SwiftRCIdentity *RC) {
  // FIXME: Call classifier should identify the object for us.  Too bad C++
  // doesn't have nice Swift-style enums.
  Value *RetainedObject = RC->getSwiftRCIdentityRoot(Retain.getArgOperand(0));

  BasicBlock::iterator BBI = Retain.getIterator(),
                       BBE = BB.getTerminator()->getIterator();

  bool isObjCRetain = Retain.getCalledFunction()->getName() == "objc_retain";

  bool MadeProgress = false;

  // Scan until we get to the end of the block.
  for (++BBI; BBI != BBE; ++BBI) {
    Instruction &CurInst = *BBI;

    // Classify the instruction. The switch does a "break" when the retain can
    // be moved past the instruction (which counts as progress), and a
    // "continue" when it is a retain of the same pointer (no progress).
    switch (classifyInstruction(CurInst)) {
    // These instructions should not reach here based on the pass ordering.
    // i.e. LLVMARCOpt -> LLVMContractOpt.
    case RT_RetainN:
    case RT_UnknownRetainN:
    case RT_BridgeRetainN:
    case RT_ReleaseN:
    case RT_UnknownReleaseN:
    case RT_BridgeReleaseN:
        llvm_unreachable("These are only created by LLVMARCContract !");
    case RT_NoMemoryAccessed:
    case RT_AllocObject:
    case RT_CheckUnowned:
      // Skip over random instructions that don't touch memory.  They don't need
      // protection by retain/release.
      break;

    case RT_FixLifetime: // This only stops release motion. Retains can move over it.
      break;

    case RT_Retain:
    case RT_UnknownRetain:
    case RT_BridgeRetain:
    case RT_RetainUnowned:
    case RT_ObjCRetain: {  // swift_retain(obj)
      //CallInst &ThisRetain = cast<CallInst>(CurInst);
      //Value *ThisRetainedObject = ThisRetain.getArgOperand(0);

      // If we see a retain of the same object, we can skip over it, but we
      // can't count it as progress.  Just pushing a retain(x) past a retain(y)
      // doesn't change the program.
      continue;
    }

    case RT_UnknownRelease:
    case RT_BridgeRelease:
    case RT_ObjCRelease:
    case RT_Release: {
      // If we get to a release that is provably to this object, then we can zap
      // it and the retain.
      CallInst &ThisRelease = cast<CallInst>(CurInst);
      Value *ThisReleasedObject = ThisRelease.getArgOperand(0);
      ThisReleasedObject = RC->getSwiftRCIdentityRoot(ThisReleasedObject);
      if (ThisReleasedObject == RetainedObject) {
        Retain.eraseFromParent();
        ThisRelease.eraseFromParent();
        if (isObjCRetain) {
          ++NumObjCRetainReleasePairs;
        } else {
          ++NumRetainReleasePairs;
        }
        return true;
      }

      // Otherwise, if this is some other pointer, we can only ignore it if we
      // can prove that the two objects don't alias.
      // Retain.dump(); ThisRelease.dump(); BB.getParent()->dump();
      goto OutOfLoop;
    }

    case RT_Unknown:
      // Loads cannot affect the retain.
      if (isa<LoadInst>(CurInst))
        continue;

      // Load, store, memcpy etc can't do a release.
      if (isa<LoadInst>(CurInst) || isa<StoreInst>(CurInst) ||
          isa<MemIntrinsic>(CurInst))
        break;

      // CurInst->dump(); BBI->dump();
      // Otherwise, we get to something unknown/unhandled.  Bail out for now.
      goto OutOfLoop;
    }

    // If the switch did a break, we made some progress moving this retain.
    MadeProgress = true;
  }
OutOfLoop:

  // If we were able to move the retain down, move it now.
  // TODO: This is where we'd plug in some global algorithms someday.
  if (MadeProgress) {
    Retain.moveBefore(&*BBI);
    return true;
  }

  return false;
}
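
As a toy model of the pairing logic, with hypothetical helpers and a single object: a retain that slides down to a release of the same object cancels against it, leaving only the intervening non-releasing work.

#include <cassert>

static int refcount = 1;
static void retain()  { ++refcount; }
static void release() { --refcount; }
static void cannot_release() { /* e.g. a load: touches no refcounts */ }

int main() {
  // Original: retain(); cannot_release(); release();  -- net refcount change 0.
  // After moving the retain down and zapping the pair, only the middle remains:
  cannot_release();
  assert(refcount == 1); // observably the same as the original sequence
  return 0;
}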
Example no. 16
0
bool ObjCARCContract::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!Run)
    return false;

  Changed = false;
  AA = &getAnalysis<AliasAnalysis>();
  DT = &getAnalysis<DominatorTree>();

  PA.setAA(&getAnalysis<AliasAnalysis>());

  // Track whether it's ok to mark objc_storeStrong calls with the "tail"
  // keyword. Be conservative if the function has variadic arguments.
  // Functions which "return twice" are also unsafe for the "tail" keyword,
  // because they call setjmp and may need to return to an earlier stack
  // state.
  bool TailOkForStoreStrongs = !F.isVarArg() &&
                               !F.callsFunctionThatReturnsTwice();

  // For ObjC library calls which return their argument, replace uses of the
  // argument with uses of the call return value, if it dominates the use. This
  // reduces register pressure.
  SmallPtrSet<Instruction *, 4> DependingInstructions;
  SmallPtrSet<const BasicBlock *, 4> Visited;
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    DEBUG(dbgs() << "ObjCARCContract: Visiting: " << *Inst << "\n");

    // Only these library routines return their argument. In particular,
    // objc_retainBlock does not necessarily return its argument.
    InstructionClass Class = GetBasicInstructionClass(Inst);
    switch (Class) {
    case IC_Retain:
    case IC_FusedRetainAutorelease:
    case IC_FusedRetainAutoreleaseRV:
      break;
    case IC_Autorelease:
    case IC_AutoreleaseRV:
      if (ContractAutorelease(F, Inst, Class, DependingInstructions, Visited))
        continue;
      break;
    case IC_RetainRV: {
      // If we're compiling for a target which needs a special inline-asm
      // marker to do the retainAutoreleasedReturnValue optimization,
      // insert it now.
      if (!RetainRVMarker)
        break;
      BasicBlock::iterator BBI = Inst;
      BasicBlock *InstParent = Inst->getParent();

      // Step up to see if the call immediately precedes the RetainRV call.
      // If it's an invoke, we have to cross a block boundary. And we have
      // to carefully dodge no-op instructions.
      do {
        if (&*BBI == InstParent->begin()) {
          BasicBlock *Pred = InstParent->getSinglePredecessor();
          if (!Pred)
            goto decline_rv_optimization;
          BBI = Pred->getTerminator();
          break;
        }
        --BBI;
      } while (isNoopInstruction(BBI));

      if (&*BBI == GetObjCArg(Inst)) {
        DEBUG(dbgs() << "ObjCARCContract: Adding inline asm marker for "
                        "retainAutoreleasedReturnValue optimization.\n");
        Changed = true;
        InlineAsm *IA =
          InlineAsm::get(FunctionType::get(Type::getVoidTy(Inst->getContext()),
                                           /*isVarArg=*/false),
                         RetainRVMarker->getString(),
                         /*Constraints=*/"", /*hasSideEffects=*/true);
        CallInst::Create(IA, "", Inst);
      }
    decline_rv_optimization:
      break;
    }
    case IC_InitWeak: {
      // objc_initWeak(p, null) => *p = null
      CallInst *CI = cast<CallInst>(Inst);
      if (isNullOrUndef(CI->getArgOperand(1))) {
        Value *Null =
          ConstantPointerNull::get(cast<PointerType>(CI->getType()));
        Changed = true;
        new StoreInst(Null, CI->getArgOperand(0), CI);

        DEBUG(dbgs() << "OBJCARCContract: Old = " << *CI << "\n"
                     << "                 New = " << *Null << "\n");

        CI->replaceAllUsesWith(Null);
        CI->eraseFromParent();
      }
      continue;
    }
    case IC_Release:
      ContractRelease(Inst, I);
      continue;
    case IC_User:
      // Be conservative if the function has any alloca instructions.
      // Technically we only care about escaping alloca instructions,
      // but this is sufficient to handle some interesting cases.
      if (isa<AllocaInst>(Inst))
        TailOkForStoreStrongs = false;
      continue;
    default:
      continue;
    }

    DEBUG(dbgs() << "ObjCARCContract: Finished List.\n\n");

    // Don't use GetObjCArg because we don't want to look through bitcasts
    // and such; to do the replacement, the argument must have type i8*.
    const Value *Arg = cast<CallInst>(Inst)->getArgOperand(0);
    for (;;) {
      // If we're compiling bugpointed code, don't get in trouble.
      if (!isa<Instruction>(Arg) && !isa<Argument>(Arg))
        break;
      // Look through the uses of the pointer.
      for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
           UI != UE; ) {
        Use &U = UI.getUse();
        unsigned OperandNo = UI.getOperandNo();
        ++UI; // Increment UI now, because we may unlink its element.

        // If the call's return value dominates a use of the call's argument
        // value, rewrite the use to use the return value. We check for
        // reachability here because an unreachable call is considered to
        // trivially dominate itself, which would lead us to rewriting its
        // argument in terms of its return value, which would lead to
        // infinite loops in GetObjCArg.
        if (DT->isReachableFromEntry(U) && DT->dominates(Inst, U)) {
          Changed = true;
          Instruction *Replacement = Inst;
          Type *UseTy = U.get()->getType();
          if (PHINode *PHI = dyn_cast<PHINode>(U.getUser())) {
            // For PHI nodes, insert the bitcast in the predecessor block.
            unsigned ValNo = PHINode::getIncomingValueNumForOperand(OperandNo);
            BasicBlock *BB = PHI->getIncomingBlock(ValNo);
            if (Replacement->getType() != UseTy)
              Replacement = new BitCastInst(Replacement, UseTy, "",
                                            &BB->back());
            // While we're here, rewrite all edges for this PHI, rather
            // than just one use at a time, to minimize the number of
            // bitcasts we emit.
            for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i)
              if (PHI->getIncomingBlock(i) == BB) {
                // Keep the UI iterator valid.
                if (&PHI->getOperandUse(
                      PHINode::getOperandNumForIncomingValue(i)) ==
                    &UI.getUse())
                  ++UI;
                PHI->setIncomingValue(i, Replacement);
              }
          } else {
            if (Replacement->getType() != UseTy)
              Replacement = new BitCastInst(Replacement, UseTy, "",
                                            cast<Instruction>(U.getUser()));
            U.set(Replacement);
          }
        }
      }

      // If Arg is a no-op casted pointer, strip one level of casts and iterate.
      if (const BitCastInst *BI = dyn_cast<BitCastInst>(Arg))
        Arg = BI->getOperand(0);
      else if (isa<GEPOperator>(Arg) &&
               cast<GEPOperator>(Arg)->hasAllZeroIndices())
        Arg = cast<GEPOperator>(Arg)->getPointerOperand();
      else if (isa<GlobalAlias>(Arg) &&
               !cast<GlobalAlias>(Arg)->mayBeOverridden())
        Arg = cast<GlobalAlias>(Arg)->getAliasee();
      else
        break;
    }
  }

  // If this function has no escaping allocas or suspicious vararg usage,
  // objc_storeStrong calls can be marked with the "tail" keyword.
  if (TailOkForStoreStrongs)
    for (SmallPtrSet<CallInst *, 8>::iterator I = StoreStrongCalls.begin(),
         E = StoreStrongCalls.end(); I != E; ++I)
      (*I)->setTailCall();
  StoreStrongCalls.clear();

  return Changed;
}
Example no. 17
0
/// performStoreOnlyObjectElimination - Scan the graph of uses of the specified
/// object allocation.  If the object does not escape and is only stored to
/// (this happens when GVN and other optimizations forward-substitute all
/// stores to the object, eliminating all loads from it), then zap the
/// object and all accesses related to it.
static bool performStoreOnlyObjectElimination(CallInst &Allocation,
                                              BasicBlock::iterator &BBI) {
  DtorKind DtorInfo = analyzeDestructor(Allocation.getArgOperand(0));

  // We can't delete the object if its destructor has side effects.
  if (DtorInfo != DtorKind::NoSideEffects)
    return false;

  // Do a depth first search exploring all of the uses of the object pointer,
  // following through casts, pointer adjustments etc.  If we find any loads or
  // any escape sites of the object, we give up.  If we succeed in walking the
  // entire graph of uses, we can remove the resultant set.
  SmallSetVector<Instruction*, 16> InvolvedInstructions;
  SmallVector<Instruction*, 16> Worklist;
  Worklist.push_back(&Allocation);

  // Stores - Keep track of all of the store instructions we see.
  SmallVector<StoreInst*, 16> Stores;

  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();

    // Insert the instruction into our InvolvedInstructions set.  If we have
    // already seen it, then don't reprocess all of the uses.
    if (!InvolvedInstructions.insert(I)) continue;

    // Okay, this is the first time we've seen this instruction, proceed.
    switch (classifyInstruction(*I)) {
    // These instructions should not reach here based on the pass ordering.
    // i.e. LLVMARCOpt -> LLVMContractOpt.
    case RT_RetainN:
    case RT_UnknownRetainN:
    case RT_BridgeRetainN:
    case RT_ReleaseN:
    case RT_UnknownReleaseN:
    case RT_BridgeReleaseN:
      llvm_unreachable("These are only created by LLVMARCContract !");
    case RT_AllocObject:
      // If this is a different swift_allocObject than we started with, then
      // there is some computation feeding into a size or alignment computation
      // that we have to keep... unless we can delete *that* entire object as
      // well.
      break;

    case RT_NoMemoryAccessed:
      // If no memory is accessed, then something is being done with the
      // pointer: maybe it is bitcast or GEP'd. Since there are no side effects,
      // it is perfectly fine to delete this instruction if all uses of the
      // instruction are also eliminable.

      if (I->mayHaveSideEffects() || isa<TerminatorInst>(I))
        return false;
      break;

    case RT_Release:
    case RT_Retain:
    case RT_FixLifetime:
    case RT_CheckUnowned:
      // It is perfectly fine to eliminate various retains and releases of this
      // object: we are zapping all accesses or none.
      break;

    // If this is an unknown instruction, we have more interesting things to
    // consider.
    case RT_Unknown:
    case RT_ObjCRelease:
    case RT_ObjCRetain:
    case RT_UnknownRetain:
    case RT_UnknownRelease:
    case RT_BridgeRetain:
    case RT_BridgeRelease:
    case RT_RetainUnowned:

      // Otherwise, this really is some unhandled instruction.  Bail out.
      return false;
    }

    // Okay, if we got here, the instruction can be eaten so-long as all of its
    // uses can be.  Scan through the uses and add them to the worklist for
    // recursive processing.
    for (auto UI = I->user_begin(), E = I->user_end(); UI != E; ++UI) {
      Instruction *User = cast<Instruction>(*UI);

      // Handle stores as a special case here: we want to make sure that the
      // object is being stored *to*, not itself being stored (which would be an
      // escape point).  Since stores themselves don't have any uses, we can
      // short-cut the classification scheme above.
      if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
        // If this is a store *to* the object, we can zap it.
        if (UI.getUse().getOperandNo() == StoreInst::getPointerOperandIndex()) {
          InvolvedInstructions.insert(SI);
          continue;
        }
        // Otherwise, using the object as a source (or size) is an escape.
        return false;
      }
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        // If this is a memset/memcpy/memmove *to* the object, we can zap it.
        if (UI.getUse().getOperandNo() == 0) {
          InvolvedInstructions.insert(MI);
          continue;
        }
        // Otherwise, using the object as a source (or size) is an escape.
        return false;
      }

      // Otherwise, normal instructions just go on the worklist for processing.
      Worklist.push_back(User);
    }
  }

  // Ok, we succeeded!  This means we can zap all of the instructions that use
  // the object.  One thing we have to be careful of is to make sure that we
  // don't invalidate "BBI" (the iterator the outer walk of the optimization
  // pass is using, and indicates the next instruction to process).  This would
  // happen if we delete the instruction it is pointing to.  Advance the
  // iterator if that would happen.
  while (InvolvedInstructions.count(&*BBI))
    ++BBI;

  // Zap all of the instructions.
  for (auto I : InvolvedInstructions) {
    if (!I->use_empty())
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
    I->eraseFromParent();
  }

  ++NumStoreOnlyObjectsEliminated;
  return true;
}
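
A hedged C++ analogue of the pattern being zapped: the allocation is only ever stored to, never read, and never escapes, so the allocation, its stores, and any retain/release traffic can all be deleted together.

// Store-only object, C++ style. Imagine "new" as swift_allocObject with a
// destructor known to have no side effects.
static void storeOnly() {
  int *obj = new int[4];
  obj[0] = 1;   // store *to* the object: allowed
  obj[1] = 2;   // still no loads, and obj never escapes
  delete[] obj; // everything above is removable as a unit
}

int main() {
  storeOnly();
  return 0;
}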
Example no. 18
0
/// LowerUnwinds - Turn unwind instructions into calls to _Unwind_Resume,
/// rethrowing any previously caught exception.  This will crash horribly
/// at runtime if there is no such exception: using unwind to throw a new
/// exception is currently not supported.
bool DwarfEHPrepare::LowerUnwindsAndResumes() {
  SmallVector<Instruction*, 16> ResumeInsts;

  for (Function::iterator fi = F->begin(), fe = F->end(); fi != fe; ++fi) {
    for (BasicBlock::iterator bi = fi->begin(), be = fi->end(); bi != be; ++bi){
      if (isa<UnwindInst>(bi))
        ResumeInsts.push_back(bi);
      else if (CallInst *call = dyn_cast<CallInst>(bi))
        if (Function *fn = dyn_cast<Function>(call->getCalledValue()))
          if (fn->getName() == "llvm.eh.resume")
            ResumeInsts.push_back(bi);
    }
  }

  if (ResumeInsts.empty()) return false;

  // Find the rewind function if we didn't already.
  if (!RewindFunction) {
    LLVMContext &Ctx = ResumeInsts[0]->getContext();
    std::vector<const Type*>
      Params(1, Type::getInt8PtrTy(Ctx));
    FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
                                          Params, false);
    const char *RewindName = TLI->getLibcallName(RTLIB::UNWIND_RESUME);
    RewindFunction = F->getParent()->getOrInsertFunction(RewindName, FTy);
  }

  bool Changed = false;

  for (SmallVectorImpl<Instruction*>::iterator
         I = ResumeInsts.begin(), E = ResumeInsts.end(); I != E; ++I) {
    Instruction *RI = *I;

    // Replace the resuming instruction with a call to _Unwind_Resume (or the
    // appropriate target equivalent).

    llvm::Value *ExnValue;
    if (isa<UnwindInst>(RI))
      ExnValue = CreateExceptionValueCall(RI->getParent());
    else
      ExnValue = cast<CallInst>(RI)->getArgOperand(0);

    // Create the call...
    CallInst *CI = CallInst::Create(RewindFunction, ExnValue, "", RI);
    CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));

    // ...followed by an UnreachableInst, if it was an unwind.
    // Calls to llvm.eh.resume are typically already followed by this.
    if (isa<UnwindInst>(RI))
      new UnreachableInst(RI->getContext(), RI);

    if (isa<UnwindInst>(RI))
      ++NumUnwindsLowered;
    else
      ++NumResumesLowered;

    // Nuke the resume instruction.
    RI->eraseFromParent();

    Changed = true;
  }

  return Changed;
}
bool ReplaceNopCastsAndByteSwaps::processBasicBlock(BasicBlock& BB)
{
	bool Changed = false;
	
	/**
	 * First pass: replace nopCasts with bitcasts and bswap intrinsics
	 * with logic operations.
	 */
	for ( BasicBlock::iterator it = BB.begin(); it != BB.end(); )
	{
		Instruction * Inst = it++;
		
		if (isNopCast(Inst) )
		{
			assert( isa<CallInst>(Inst) );
			
			CallInst * call = cast<CallInst>(Inst);
			
			if ( TypeSupport::isClientType( call->getType()) )
			{
				llvm::errs() << "Cast of client type: " << *call << "\n";
				continue;
			}
			if ( TypeSupport::isClientType( call->getArgOperand(0)->getType()) )
			{
				llvm::errs() << "Cast of client type: " << *call->getArgOperand(0) << "\n";
				continue;
			}

			ReplaceInstWithInst( call,  BitCastInst::Create( Instruction::CastOps::BitCast, call->getArgOperand(0), call->getType() ) );

			Changed = true;
		}
		else if( IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst) )
		{
			if(II->getIntrinsicID() == Intrinsic::bswap)
			{
				IL->LowerIntrinsicCall(II);
				Changed = true;
			}
			else if(II->getIntrinsicID() == Intrinsic::cheerp_deallocate)
			{
				II->eraseFromParent();
				Changed = true;
			}
		}
	}
	
	/**
	 * Second pass: collapse bitcasts of bitcasts.
	 * 
	 * Note: this might leave some dead instructions around, but we don't care since bitcasts are inlined anyway.
	 */
	for ( BasicBlock::iterator it = BB.begin(); it != BB.end(); ++it )
	{
		if ( isa<BitCastInst>(it) ) 
		{
			while ( BitCastInst * src = dyn_cast<BitCastInst>(it->getOperand(0) ) )
			{
				it->setOperand(0, src->getOperand(0) );
				Changed = true;
			}
		}
	}

	return Changed;
}
Example no. 20
0
/// processMemCpy - perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult dep = MD.getDependency(M);
  if (!dep.isClobber())
    return false;
  if (!isa<MemCpyInst>(dep.getInst())) {
    if (CallInst *C = dyn_cast<CallInst>(dep.getInst()))
      return performCallSlotOptzn(M, C);
    return false;
  }
  
  MemCpyInst *MDep = cast<MemCpyInst>(dep.getInst());
  
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest())
    return false;
  
  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *C2 = dyn_cast<ConstantInt>(M->getLength());
  if (!C1 || !C2)
    return false;
  
  uint64_t DepSize = C1->getValue().getZExtValue();
  uint64_t CpySize = C2->getValue().getZExtValue();
  
  if (DepSize < CpySize)
    return false;
  
  // Finally, we have to make sure that the dest of the second does not
  // alias the source of the first
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
      AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
           AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
           != AliasAnalysis::NoAlias)
    return false;
  
  // If all checks passed, then we can transform these memcpy's
  const Type *ArgTys[3] = { M->getRawDest()->getType(),
                            MDep->getRawSource()->getType(),
                            M->getLength()->getType() };
  Function *MemCpyFun = Intrinsic::getDeclaration(
                                 M->getParent()->getParent()->getParent(),
                                 M->getIntrinsicID(), ArgTys, 3);
    
  Value *Args[5] = {
    M->getRawDest(), MDep->getRawSource(), M->getLength(),
    M->getAlignmentCst(), M->getVolatileCst()
  };
  
  CallInst *C = CallInst::Create(MemCpyFun, Args, Args+5, "", M);
  
  // If C and M don't interfere, then this is a valid transformation.  If they
  // did, this would mean that the two sources overlap, which would be bad.
  if (MD.getDependency(C) == dep) {
    MD.removeInstruction(M);
    M->eraseFromParent();
    ++NumMemCpyInstr;
    return true;
  }
  
  // Otherwise, there was no point in doing this, so we remove the call we
  // inserted and act like nothing happened.
  MD.removeInstruction(C);
  C->eraseFromParent();
  return false;
}
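
The memcpy-memcpy transform in source form, as a hedged sketch: when B reads exactly the bytes A wrote and the aliasing checks above hold, B can read from A's source instead, which leaves A exposed for dead-store elimination.

#include <cstddef>
#include <cstring>

// Before: B depends on A through Y.
static void before(char *X, char *Y, char *Z, std::size_t n) {
  std::memcpy(Y, X, n); // A: copies X to Y
  std::memcpy(Z, Y, n); // B: copies Y to Z
}

// After: B copies straight from X; A is now dead if Y has no other readers.
static void after(char *X, char *Y, char *Z, std::size_t n) {
  std::memcpy(Y, X, n); // A (candidate for DSE)
  std::memcpy(Z, X, n); // B rewritten to read from A's source
}

int main() {
  char x[4] = "abc", y[4], z[4];
  before(x, y, z, 4);
  after(x, y, z, 4);
  return z[0] == 'a' ? 0 : 1;
}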
Example no. 21
0
/// run - Start execution with the specified function and arguments.
///
GenericValue JIT::runFunction(Function *F,
                              const std::vector<GenericValue> &ArgValues) {
  assert(F && "Function *F was null at entry to run()");

  void *FPtr = getPointerToFunction(F);
  assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
  FunctionType *FTy = F->getFunctionType();
  Type *RetTy = FTy->getReturnType();

  assert((FTy->getNumParams() == ArgValues.size() ||
          (FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
         "Wrong number of arguments passed into function!");
  assert(FTy->getNumParams() == ArgValues.size() &&
         "This doesn't support passing arguments through varargs (yet)!");

  // Handle some common cases first.  These cases correspond to common `main'
  // prototypes.
  if (RetTy->isIntegerTy(32) || RetTy->isVoidTy()) {
    switch (ArgValues.size()) {
    case 3:
      if (FTy->getParamType(0)->isIntegerTy(32) &&
          FTy->getParamType(1)->isPointerTy() &&
          FTy->getParamType(2)->isPointerTy()) {
        int (*PF)(int, char **, const char **) =
          (int(*)(int, char **, const char **))(intptr_t)FPtr;

        // Call the function.
        GenericValue rv;
        rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
                                 (char **)GVTOP(ArgValues[1]),
                                 (const char **)GVTOP(ArgValues[2])));
        return rv;
      }
      break;
    case 2:
      if (FTy->getParamType(0)->isIntegerTy(32) &&
          FTy->getParamType(1)->isPointerTy()) {
        int (*PF)(int, char **) = (int(*)(int, char **))(intptr_t)FPtr;

        // Call the function.
        GenericValue rv;
        rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
                                 (char **)GVTOP(ArgValues[1])));
        return rv;
      }
      break;
    case 1:
      if (FTy->getParamType(0)->isIntegerTy(32)) {
        GenericValue rv;
        int (*PF)(int) = (int(*)(int))(intptr_t)FPtr;
        rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue()));
        return rv;
      }
      if (FTy->getParamType(0)->isPointerTy()) {
        GenericValue rv;
        int (*PF)(char *) = (int(*)(char *))(intptr_t)FPtr;
        rv.IntVal = APInt(32, PF((char*)GVTOP(ArgValues[0])));
        return rv;
      }
      break;
    }
  }

  // Handle cases where no arguments are passed first.
  if (ArgValues.empty()) {
    GenericValue rv;
    switch (RetTy->getTypeID()) {
    default: llvm_unreachable("Unknown return type for function call!");
    case Type::IntegerTyID: {
      unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
      if (BitWidth == 1)
        rv.IntVal = APInt(BitWidth, ((bool(*)())(intptr_t)FPtr)());
      else if (BitWidth <= 8)
        rv.IntVal = APInt(BitWidth, ((char(*)())(intptr_t)FPtr)());
      else if (BitWidth <= 16)
        rv.IntVal = APInt(BitWidth, ((short(*)())(intptr_t)FPtr)());
      else if (BitWidth <= 32)
        rv.IntVal = APInt(BitWidth, ((int(*)())(intptr_t)FPtr)());
      else if (BitWidth <= 64)
        rv.IntVal = APInt(BitWidth, ((int64_t(*)())(intptr_t)FPtr)());
      else
        llvm_unreachable("Integer types > 64 bits not supported");
      return rv;
    }
    case Type::VoidTyID:
      rv.IntVal = APInt(32, ((int(*)())(intptr_t)FPtr)());
      return rv;
    case Type::FloatTyID:
      rv.FloatVal = ((float(*)())(intptr_t)FPtr)();
      return rv;
    case Type::DoubleTyID:
      rv.DoubleVal = ((double(*)())(intptr_t)FPtr)();
      return rv;
    case Type::X86_FP80TyID:
    case Type::FP128TyID:
    case Type::PPC_FP128TyID:
      llvm_unreachable("long double not supported yet");
    case Type::PointerTyID:
      return PTOGV(((void*(*)())(intptr_t)FPtr)());
    }
  }

  // Okay, this is not one of our quick and easy cases.  Because we don't have a
  // full FFI, we have to codegen a nullary stub function that just calls the
  // function we are interested in, passing in constants for all of the
  // arguments.  Make this function and return.

  // First, create the function.
  FunctionType *STy=FunctionType::get(RetTy, false);
  Function *Stub = Function::Create(STy, Function::InternalLinkage, "",
                                    F->getParent());

  // Insert a basic block.
  BasicBlock *StubBB = BasicBlock::Create(F->getContext(), "", Stub);

  // Convert all of the GenericValue arguments over to constants.  Note that we
  // currently don't support varargs.
  SmallVector<Value*, 8> Args;
  for (unsigned i = 0, e = ArgValues.size(); i != e; ++i) {
    Constant *C = 0;
    Type *ArgTy = FTy->getParamType(i);
    const GenericValue &AV = ArgValues[i];
    switch (ArgTy->getTypeID()) {
    default: llvm_unreachable("Unknown argument type for function call!");
    case Type::IntegerTyID:
        C = ConstantInt::get(F->getContext(), AV.IntVal);
        break;
    case Type::FloatTyID:
        C = ConstantFP::get(F->getContext(), APFloat(AV.FloatVal));
        break;
    case Type::DoubleTyID:
        C = ConstantFP::get(F->getContext(), APFloat(AV.DoubleVal));
        break;
    case Type::PPC_FP128TyID:
    case Type::X86_FP80TyID:
    case Type::FP128TyID:
        C = ConstantFP::get(F->getContext(), APFloat(AV.IntVal));
        break;
    case Type::PointerTyID:
      void *ArgPtr = GVTOP(AV);
      if (sizeof(void*) == 4)
        C = ConstantInt::get(Type::getInt32Ty(F->getContext()),
                             (int)(intptr_t)ArgPtr);
      else
        C = ConstantInt::get(Type::getInt64Ty(F->getContext()),
                             (intptr_t)ArgPtr);
      // Cast the integer to pointer
      C = ConstantExpr::getIntToPtr(C, ArgTy);
      break;
    }
    Args.push_back(C);
  }

  CallInst *TheCall = CallInst::Create(F, Args, "", StubBB);
  TheCall->setCallingConv(F->getCallingConv());
  TheCall->setTailCall();
  if (!TheCall->getType()->isVoidTy())
    // Return result of the call.
    ReturnInst::Create(F->getContext(), TheCall, StubBB);
  else
    ReturnInst::Create(F->getContext(), StubBB);           // Just return void.

  // Finally, call our nullary stub function.
  GenericValue Result = runFunction(Stub, std::vector<GenericValue>());
  // Erase it, since no other function can have a reference to it.
  Stub->eraseFromParent();
  // And return the result.
  return Result;
}
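
The stub trick in source form, hedged and with hypothetical names: for a call such as target(42, buf), the JIT emits the moral equivalent of a zero-argument wrapper with the arguments baked in as constants, then invokes it through the easy nullary paths above.

static char buf[8];

// Hypothetical target with a non-trivial signature.
static int target(int a, char *p) {
  p[0] = (char)a;
  return a;
}

// What the generated stub amounts to: nullary, constants baked in.
static int stub() { return target(42, buf); }

int main() { return stub() == 42 ? 0 : 1; }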
Example no. 22
0
void OptimizeFastMemoryChecks::visitCallInst(CallInst &CI) {
  CheckInfoType *Info = MSCI->getCheckInfo(CI.getCalledFunction());
  if (Info && Info->isFastMemoryCheck())
    FastCheckCalls.push_back(&CI);
}
Example no. 23
0
/// run - Start execution with the specified function and arguments.
///
GenericValue JIT::runFunction(Function *F,
                              const std::vector<GenericValue> &ArgValues) {
  assert(F && "Function *F was null at entry to run()");

  void *FPtr = getPointerToFunction(F);
  assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
  const FunctionType *FTy = F->getFunctionType();
  const Type *RetTy = FTy->getReturnType();

  assert((FTy->getNumParams() <= ArgValues.size() || FTy->isVarArg()) &&
         "Too many arguments passed into function!");
  assert(FTy->getNumParams() == ArgValues.size() &&
         "This doesn't support passing arguments through varargs (yet)!");

  //// Brooks
  //// X86 Opcode 0x0f39 called here
  cout<<"****** SWITCHING TO SIMULATION MODE FROM NATIVE MODE ***** \n";
  ptlcall_switch_to_sim();
  cout<<"____ SIWTCH DONE ____ \n";

  int * a=(int*)0x85df65d;
  cout<<"Value: "<<std::hex<<*(a)<<" "<<*(a+1)<<"\n";
  asm(".byte 0x0f; .byte 0x39;");

  // Handle some common cases first.  These cases correspond to common `main'
  // prototypes.
  if (RetTy == Type::Int32Ty || RetTy == Type::VoidTy) {
    switch (ArgValues.size()) {
    case 3:
      if (FTy->getParamType(0) == Type::Int32Ty &&
          isa<PointerType>(FTy->getParamType(1)) &&
          isa<PointerType>(FTy->getParamType(2))) {
        int (*PF)(int, char **, const char **) =
          (int(*)(int, char **, const char **))(intptr_t)FPtr;

        // Call the function.
        GenericValue rv;
        rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(), 
                                 (char **)GVTOP(ArgValues[1]),
                                 (const char **)GVTOP(ArgValues[2])));
        return rv;
      }
      break;
    case 2:
      if (FTy->getParamType(0) == Type::Int32Ty &&
          isa<PointerType>(FTy->getParamType(1))) {
        int (*PF)(int, char **) = (int(*)(int, char **))(intptr_t)FPtr;

        // Call the function.
        GenericValue rv;
        rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(), 
                                 (char **)GVTOP(ArgValues[1])));
        return rv;
      }
      break;
    case 1:
      if (FTy->getNumParams() == 1 &&
          FTy->getParamType(0) == Type::Int32Ty) {
        GenericValue rv;
        int (*PF)(int) = (int(*)(int))(intptr_t)FPtr;
        rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue()));
        return rv;
      }
      break;
    }
  }

  // Handle cases where no arguments are passed first.
  if (ArgValues.empty()) {
    GenericValue rv;
    switch (RetTy->getTypeID()) {
    default: assert(0 && "Unknown return type for function call!");
    case Type::IntegerTyID: {
      unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
      if (BitWidth == 1)
        rv.IntVal = APInt(BitWidth, ((bool(*)())(intptr_t)FPtr)());
      else if (BitWidth <= 8)
        rv.IntVal = APInt(BitWidth, ((char(*)())(intptr_t)FPtr)());
      else if (BitWidth <= 16)
        rv.IntVal = APInt(BitWidth, ((short(*)())(intptr_t)FPtr)());
      else if (BitWidth <= 32)
        rv.IntVal = APInt(BitWidth, ((int(*)())(intptr_t)FPtr)());
      else if (BitWidth <= 64)
        rv.IntVal = APInt(BitWidth, ((int64_t(*)())(intptr_t)FPtr)());
      else 
        assert(0 && "Integer types > 64 bits not supported");
      return rv;
    }
    case Type::VoidTyID:
      rv.IntVal = APInt(32, ((int(*)())(intptr_t)FPtr)());
      return rv;
    case Type::FloatTyID:
      rv.FloatVal = ((float(*)())(intptr_t)FPtr)();
      return rv;
    case Type::DoubleTyID:
      rv.DoubleVal = ((double(*)())(intptr_t)FPtr)();
      return rv;
    case Type::X86_FP80TyID:
    case Type::FP128TyID:
    case Type::PPC_FP128TyID:
      assert(0 && "long double not supported yet");
      return rv;
    case Type::PointerTyID:
      return PTOGV(((void*(*)())(intptr_t)FPtr)());
    }
  }

  // Okay, this is not one of our quick and easy cases.  Because we don't have a
  // full FFI, we have to codegen a nullary stub function that just calls the
  // function we are interested in, passing in constants for all of the
  // arguments.  Make this function and return.

  // First, create the function.
  FunctionType *STy=FunctionType::get(RetTy, std::vector<const Type*>(), false);
  Function *Stub = Function::Create(STy, Function::InternalLinkage, "",
                                    F->getParent());

  // Insert a basic block.
  BasicBlock *StubBB = BasicBlock::Create("", Stub);

  // Convert all of the GenericValue arguments over to constants.  Note that we
  // currently don't support varargs.
  SmallVector<Value*, 8> Args;
  for (unsigned i = 0, e = ArgValues.size(); i != e; ++i) {
    Constant *C = 0;
    const Type *ArgTy = FTy->getParamType(i);
    const GenericValue &AV = ArgValues[i];
    switch (ArgTy->getTypeID()) {
    default: assert(0 && "Unknown argument type for function call!");
    case Type::IntegerTyID:
        C = ConstantInt::get(AV.IntVal);
        break;
    case Type::FloatTyID:
        C = ConstantFP::get(APFloat(AV.FloatVal));
        break;
    case Type::DoubleTyID:
        C = ConstantFP::get(APFloat(AV.DoubleVal));
        break;
    case Type::PPC_FP128TyID:
    case Type::X86_FP80TyID:
    case Type::FP128TyID:
        C = ConstantFP::get(APFloat(AV.IntVal));
        break;
    case Type::PointerTyID:
      void *ArgPtr = GVTOP(AV);
      if (sizeof(void*) == 4)
        C = ConstantInt::get(Type::Int32Ty, (int)(intptr_t)ArgPtr);
      else
        C = ConstantInt::get(Type::Int64Ty, (intptr_t)ArgPtr);
      C = ConstantExpr::getIntToPtr(C, ArgTy);  // Cast the integer to pointer
      break;
    }
    Args.push_back(C);
  }

  CallInst *TheCall = CallInst::Create(F, Args.begin(), Args.end(), "", StubBB);
  TheCall->setTailCall();
  if (TheCall->getType() != Type::VoidTy)
    ReturnInst::Create(TheCall, StubBB);             // Return result of the call.
  else
    ReturnInst::Create(StubBB);                      // Just return void.

  // Finally, return the value returned by our nullary stub function.
  return runFunction(Stub, std::vector<GenericValue>());
}
Example no. 24
0
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI,
                                           Intrinsic::ID ID) {
  unsigned Op = 0;
  switch (ID) {
  default: return false;
  case Intrinsic::uadd_with_overflow: Op = TargetOpcode::G_UADDE; break;
  case Intrinsic::sadd_with_overflow: Op = TargetOpcode::G_SADDO; break;
  case Intrinsic::usub_with_overflow: Op = TargetOpcode::G_USUBE; break;
  case Intrinsic::ssub_with_overflow: Op = TargetOpcode::G_SSUBO; break;
  case Intrinsic::umul_with_overflow: Op = TargetOpcode::G_UMULO; break;
  case Intrinsic::smul_with_overflow: Op = TargetOpcode::G_SMULO; break;
  case Intrinsic::memcpy:
    return translateMemcpy(CI);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MIRBuilder.getMF().getMMI().getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI));
    return true;
  case Intrinsic::stackprotector: {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL};
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF.getMachineMemOperand(
            MachinePointerInfo::getFixedStack(MF, getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }

  LLT Ty{*CI.getOperand(0)->getType(), *DL};
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = MRI->createGenericVirtualRegister(s1);
    EntryBuilder.buildConstant(Zero, 0);
    MIB.addUse(Zero);
  }

  MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
  return true;
}
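
For the arithmetic intrinsics handled by the opcode table above, here is a plain C++ sketch (illustrative only, not GlobalISel code) of the {result, overflow} pair that llvm.uadd.with.overflow computes; the translator expands it to G_UADDE and, as the end of the function shows, appends an explicit zero carry-in:

#include <cstdint>
#include <utility>

// Unsigned add with overflow: the sum wraps modulo 2^32, and the overflow
// bit is set exactly when the wrapped sum is smaller than an operand.
static std::pair<uint32_t, bool> uaddWithOverflow(uint32_t A, uint32_t B) {
  uint32_t Sum = A + B;
  return {Sum, Sum < A};
}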
Example no. 25
/// setupEntryBlockAndCallSites - Setup the entry block by creating and filling
/// the function context and marking the call sites with the appropriate
/// values. These values are used by the DWARF EH emitter.
bool SjLjEHPrepare::setupEntryBlockAndCallSites(Function &F) {
  SmallVector<ReturnInst *, 16> Returns;
  SmallVector<InvokeInst *, 16> Invokes;
  SmallSetVector<LandingPadInst *, 16> LPads;

  // Look through the terminators of the basic blocks to find invokes.
  for (BasicBlock &BB : F)
    if (auto *II = dyn_cast<InvokeInst>(BB.getTerminator())) {
      if (Function *Callee = II->getCalledFunction())
        if (Callee->getIntrinsicID() == Intrinsic::donothing) {
          // Remove the NOP invoke.
          BranchInst::Create(II->getNormalDest(), II);
          II->eraseFromParent();
          continue;
        }

      Invokes.push_back(II);
      LPads.insert(II->getUnwindDest()->getLandingPadInst());
    } else if (auto *RI = dyn_cast<ReturnInst>(BB.getTerminator())) {
      Returns.push_back(RI);
    }

  if (Invokes.empty())
    return false;

  NumInvokes += Invokes.size();

  lowerIncomingArguments(F);
  lowerAcrossUnwindEdges(F, Invokes);

  Value *FuncCtx =
      setupFunctionContext(F, makeArrayRef(LPads.begin(), LPads.end()));
  BasicBlock *EntryBB = &F.front();
  IRBuilder<> Builder(EntryBB->getTerminator());

  // Get a reference to the jump buffer.
  Value *JBufPtr =
      Builder.CreateConstGEP2_32(FunctionContextTy, FuncCtx, 0, 5, "jbuf_gep");

  // Save the frame pointer.
  Value *FramePtr = Builder.CreateConstGEP2_32(doubleUnderJBufTy, JBufPtr, 0, 0,
                                               "jbuf_fp_gep");

  Value *Val = Builder.CreateCall(FrameAddrFn, Builder.getInt32(0), "fp");
  Builder.CreateStore(Val, FramePtr, /*isVolatile=*/true);

  // Save the stack pointer.
  Value *StackPtr = Builder.CreateConstGEP2_32(doubleUnderJBufTy, JBufPtr, 0, 2,
                                               "jbuf_sp_gep");

  Val = Builder.CreateCall(StackAddrFn, {}, "sp");
  Builder.CreateStore(Val, StackPtr, /*isVolatile=*/true);

  // Call the setup_dispatch intrinsic. It fills in the rest of the jmpbuf.
  Builder.CreateCall(BuiltinSetupDispatchFn, {});

  // Store a pointer to the function context so that the back-end will know
  // where to look for it.
  Value *FuncCtxArg = Builder.CreateBitCast(FuncCtx, Builder.getInt8PtrTy());
  Builder.CreateCall(FuncCtxFn, FuncCtxArg);

  // At this point we are all set up; update the invoke instructions to mark
  // their call_site values.
  for (unsigned I = 0, E = Invokes.size(); I != E; ++I) {
    insertCallSiteStore(Invokes[I], I + 1);

    ConstantInt *CallSiteNum =
        ConstantInt::get(Type::getInt32Ty(F.getContext()), I + 1);

    // Record the call site value for the back end so it stays associated with
    // the invoke.
    CallInst::Create(CallSiteFn, CallSiteNum, "", Invokes[I]);
  }

  // Mark call instructions that aren't nounwind as no-action (call_site ==
  // -1). Skip the entry block: no function context exists until it has run,
  // so any unexpected exception thrown there unwinds directly to the caller's
  // context, which is what we want anyway; no need to do anything here.
  for (BasicBlock &BB : F) {
    if (&BB == &F.front())
      continue;
    for (Instruction &I : BB)
      if (I.mayThrow())
        insertCallSiteStore(&I, -1);
  }

  // Register the function context and make sure it's known not to throw.
  CallInst *Register =
      CallInst::Create(RegisterFn, FuncCtx, "", EntryBB->getTerminator());
  Register->setDoesNotThrow();

  // Following any alloca (or stackrestore call) not in the entry block,
  // update the saved SP in the jmpbuf to the new value.
  for (BasicBlock &BB : F) {
    if (&BB == &F.front())
      continue;
    for (Instruction &I : BB) {
      if (auto *CI = dyn_cast<CallInst>(&I)) {
        if (CI->getCalledFunction() != StackRestoreFn)
          continue;
      } else if (!isa<AllocaInst>(&I)) {
        continue;
      }
      Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
      StackAddr->insertAfter(&I);
      Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
      StoreStackAddr->insertAfter(StackAddr);
    }
  }

  // Finally, for any returns from this function, if this function contains an
  // invoke, add a call to unregister the function context.
  for (ReturnInst *Return : Returns)
    CallInst::Create(UnregisterFn, FuncCtx, "", Return);

  return true;
}
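
To see what the pass is arranging, here is a toy C++ analogy (a sketch of the runtime shape, not what the compiled output literally looks like): the function context plays the role of a jump buffer plus a call_site slot, and the landing-pad dispatcher reads the slot to learn which invoke was active when the exception unwound.

#include <csetjmp>
#include <cstdio>

static jmp_buf JBuf;
static volatile int CallSite = -1;  // -1 means "no action", as in the pass

static void mayThrow() { longjmp(JBuf, 1); }  // stand-in for the unwinder

int main() {
  if (setjmp(JBuf) == 0) {
    CallSite = 1;   // analogous to insertCallSiteStore(Invokes[0], 1)
    mayThrow();     // the "invoke"
  } else {
    std::printf("dispatch to landing pad for call_site %d\n", CallSite);
  }
  return 0;
}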
Example no. 26
//
// Function: insertPoolFrees()
//
// Description:
//  This function takes a list of alloca instructions and inserts code to
//  unregister them at every unwind and return instruction.
//
// Inputs:
//  PoolRegisters - The list of calls to poolregister() inserted for stack
//                  objects.
//  ExitPoints    - The list of instructions that can cause the function to
//                  return.
//  Context       - The LLVM Context in which to insert instructions.
//
void
RegisterStackObjPass::insertPoolFrees
  (const std::vector<CallInst *> & PoolRegisters,
   const std::vector<Instruction *> & ExitPoints,
   LLVMContext * Context) {
  // List of alloca instructions we create to store the pointers to be
  // deregistered.
  std::vector<AllocaInst *> PtrList;

  // List of pool handles; this is a parallel array to PtrList
  std::vector<Value *> PHList;

  // The infamous void pointer type
  PointerType * VoidPtrTy = getVoidPtrType(*Context);

  //
  // Create alloca instructions for every registered alloca.  These will hold
  // a pointer to the registered stack objects and will be referenced by
  // poolunregister().
  //
  for (unsigned index = 0; index < PoolRegisters.size(); ++index) {
    //
    // Take the first element off of the worklist.
    //
    CallInst * CI = PoolRegisters[index];
    CallSite CS(CI);

    //
    // Get the pool handle and allocated pointer from the poolregister() call.
    //
    Value * PH  = CS.getArgument(0);
    Value * Ptr = CS.getArgument(1);

    //
    // Create a place to store the pointer returned from alloca.  Initialize it
    // with a null pointer.
    //
    BasicBlock & EntryBB = CI->getParent()->getParent()->getEntryBlock();
    Instruction * InsertPt = &(EntryBB.front());
    AllocaInst * PtrLoc = new AllocaInst (VoidPtrTy,
                                          Ptr->getName() + ".st",
                                          InsertPt);
    Value * NullPointer = ConstantPointerNull::get(VoidPtrTy);
    new StoreInst (NullPointer, PtrLoc, InsertPt);

    //
    // Store the registered pointer into the memory we allocated in the entry
    // block.
    //
    new StoreInst (Ptr, PtrLoc, CI);

    //
    // Record the alloca that stores the pointer to deregister.
    // Record the pool handle with it.
    //
    PtrList.push_back (PtrLoc);
    PHList.push_back (PH);
  }

  //
  // For each point where the function can exit, insert code to deregister all
  // stack objects.
  //
  for (unsigned index = 0; index < ExitPoints.size(); ++index) {
    //
    // Take the first element off of the worklist.
    //
    Instruction * Return = ExitPoints[index];

    //
    // Deregister each registered stack object.
    //
    for (unsigned i = 0; i < PtrList.size(); ++i) {
      //
      // Get the location holding the pointer and the pool handle associated
      // with it.
      //
      AllocaInst * PtrLoc = PtrList[i];
      Value * PH = PHList[i];

      //
      // Generate a load instruction to get the registered pointer.
      //
      LoadInst * Ptr = new LoadInst (PtrLoc, "", Return);

      //
      // Create the call to poolunregister().
      //
      std::vector<Value *> args;
      args.push_back (PH);
      args.push_back (Ptr);
      CallInst::Create (StackFree, args, "", Return);
    }
  }

  //
  // Lastly, promote the allocas we created into LLVM virtual registers.
  //
  PromoteMemToReg(PtrList, *DT);
}
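
At the C level, the net effect of insertPoolFrees() is to pair every poolregister() with a poolunregister() on every path out of the function. A hypothetical sketch with the runtime stubbed out (the poolregister/poolunregister bodies here are placeholders, not SAFECode's runtime):

#include <cstdio>

static void poolregister(void *PH, void *Ptr) {
  (void)PH; std::printf("register   %p\n", Ptr);
}
static void poolunregister(void *PH, void *Ptr) {
  (void)PH; std::printf("unregister %p\n", Ptr);
}

int work(bool Early, void *PH) {
  int Obj = 0;               // the registered stack object
  poolregister(PH, &Obj);
  if (Early) {               // exit point 1
    poolunregister(PH, &Obj);
    return 0;
  }
  poolunregister(PH, &Obj);  // exit point 2
  return Obj;
}

The pass spills each registered pointer to an entry-block alloca first because a registration may execute conditionally; the final PromoteMemToReg call then folds those spill slots back into SSA registers.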
Example no. 27
/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
///
/// =>
///
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
///
bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
  if (!TLI)
    return false;

  Value *V = RI->getReturnValue();
  PHINode *PN = V ? dyn_cast<PHINode>(V) : NULL;
  if (V && !PN)
    return false;

  BasicBlock *BB = RI->getParent();
  if (PN && PN->getParent() != BB)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  // See llvm::isInTailCallPosition().
  const Function *F = BB->getParent();
  Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Make sure there are no instructions between the PHI and return, or that the
  // return is the first instruction in the block.
  if (PN) {
    BasicBlock::iterator BI = BB->begin();
    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
    if (&*BI != RI)
      return false;
  } else {
    BasicBlock::iterator BI = BB->begin();
    while (isa<DbgInfoIntrinsic>(BI)) ++BI;
    if (&*BI != RI)
      return false;
  }

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  /// call.
  SmallVector<CallInst*, 4> TailCalls;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI))
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
    Attributes CalleeRetAttr = CS.getAttributes().getRetAttributes();
    if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}
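
The C-level shape that produces the bb0/bb1/bb2 pattern in the doc comment is simply a call in each branch whose results merge before a single return; duplicating the return into each predecessor is what lets every call be emitted as a tail call. Illustrative source (f0/f1/f2 are placeholders):

static int f0() { return 0; }
static int f1() { return 1; }
static int f2() { return 2; }

int dispatch(int K) {
  int R;
  if (K == 0)      R = f0();   // becomes: tail call + ret
  else if (K == 1) R = f1();   // becomes: tail call + ret
  else             R = f2();   // becomes: tail call + ret
  return R;                    // the shared return the pass duplicates away
}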
Example no. 28
/// performLocalReleaseMotion - Scan backwards from the specified release,
/// moving it earlier in the function if possible, over instructions that do not
/// access the released object.  If we get to a retain or allocation of the
/// object, zap both.
static bool performLocalReleaseMotion(CallInst &Release, BasicBlock &BB,
                                      SwiftRCIdentity *RC) {
  // FIXME: Call classifier should identify the object for us.  Too bad C++
  // doesn't have nice Swift-style enums.
  Value *ReleasedObject = RC->getSwiftRCIdentityRoot(Release.getArgOperand(0));

  BasicBlock::iterator BBI = Release.getIterator();

  // Scan until we get to the top of the block.
  while (BBI != BB.begin()) {
    --BBI;

    // Don't analyze PHI nodes.  We can't move retains before them and they
    // aren't "interesting".
    if (isa<PHINode>(BBI) ||
        // If we found the instruction that defines the value we're releasing,
        // don't push the release past it.
        &*BBI == Release.getArgOperand(0)) {
      ++BBI;
      goto OutOfLoop;
    }

    switch (classifyInstruction(*BBI)) {
    // These instructions should not reach here based on the pass ordering.
    // i.e. LLVMARCOpt -> LLVMContractOpt.
    case RT_UnknownRetainN:
    case RT_BridgeRetainN:
    case RT_RetainN:
    case RT_UnknownReleaseN:
    case RT_BridgeReleaseN:
    case RT_ReleaseN:
      llvm_unreachable("These are only created by LLVMARCContract!");
    case RT_NoMemoryAccessed:
      // Skip over random instructions that don't touch memory.  They don't need
      // protection by retain/release.
      continue;

    case RT_UnknownRelease:
    case RT_BridgeRelease:
    case RT_ObjCRelease:
    case RT_Release: {
      // If we get to a release, we can generally ignore it and scan past it.
      // However, if we get to a release of obviously the same object, we stop
      // scanning here because it should already have been moved as early as
      // possible, so there is no reason to move its friend to the same place.
      //
      // NOTE: If this occurs frequently, maybe we can have a release(Obj, N)
      // API to drop multiple retain counts at once.
      CallInst &ThisRelease = cast<CallInst>(*BBI);
      Value *ThisReleasedObject = ThisRelease.getArgOperand(0);
      ThisReleasedObject = RC->getSwiftRCIdentityRoot(ThisReleasedObject);
      if (ThisReleasedObject == ReleasedObject) {
        //Release.dump(); ThisRelease.dump(); BB.getParent()->dump();
        ++BBI;
        goto OutOfLoop;
      }
      continue;
    }

    case RT_UnknownRetain:
    case RT_BridgeRetain:
    case RT_ObjCRetain:
    case RT_Retain: {  // swift_retain(obj)
      CallInst &Retain = cast<CallInst>(*BBI);
      Value *RetainedObject = Retain.getArgOperand(0);
      RetainedObject = RC->getSwiftRCIdentityRoot(RetainedObject);

      // Since we canonicalized earlier, we know that if our retain has any
      // uses, they were replaced already. This assertion documents this
      // assumption.
      assert(Retain.use_empty() && "Retain should have been canonicalized to "
             "have no uses.");

      // If the retain and release are to obviously pointer-equal objects, then
      // we can delete both of them.  We have proven that they do not protect
      // anything of value.
      if (RetainedObject == ReleasedObject) {
        Retain.eraseFromParent();
        Release.eraseFromParent();
        ++NumRetainReleasePairs;
        return true;
      }

      // Otherwise, this is a retain of an object that is not statically known
      // to be the same object.  It may still be dynamically the same object
      // though.  In this case, we can't move the release past it.
      // TODO: Strengthen analysis.
      //Release.dump(); ThisRelease.dump(); BB.getParent()->dump();
      ++BBI;
      goto OutOfLoop;
    }

    case RT_AllocObject: {   // %obj = swift_alloc(...)
      CallInst &Allocation = cast<CallInst>(*BBI);

      // If this is an allocation of an unrelated object, just ignore it.
      // TODO: This is not safe without proving the object being released is not
      // related to the allocated object.  Consider something silly like this:
      //   A = allocate()
      //   B = bitcast A to object
      //   release(B)
      if (ReleasedObject != &Allocation) {
        // Release.dump(); BB.getParent()->dump();
        ++BBI;
        goto OutOfLoop;
      }

      // If this is a release right after an allocation of the object, then we
      // can zap both.
      Allocation.replaceAllUsesWith(UndefValue::get(Allocation.getType()));
      Allocation.eraseFromParent();
      Release.eraseFromParent();
      ++NumAllocateReleasePairs;
      return true;
    }

    case RT_FixLifetime:
    case RT_RetainUnowned:
    case RT_CheckUnowned:
    case RT_Unknown:
      // Otherwise, we have reached something that we do not understand. Do not
      // attempt to shorten the lifetime of this object beyond this point so we
      // are conservative.
      ++BBI;
      goto OutOfLoop;
    }
  }
OutOfLoop:

  // If we got to the top of the block (and if the release didn't already
  // start there), move the release to the top of the block.
  // TODO: This is where we'd plug in some global algorithms someday.
  if (&*BBI != &Release) {
    Release.moveBefore(&*BBI);
    return true;
  }

  return false;
}
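
A toy C++ illustration of the pairing this function exploits (a sketch only; the real pass matches swift_retain/swift_release calls in IR, not source): once a release has been moved up across instructions that cannot touch the released object, an adjacent retain of the same object proves the pair protects nothing, so both calls can be deleted.

#include <atomic>

struct Obj { std::atomic<int> RC{1}; };

static void swift_retain(Obj *O)  { O->RC.fetch_add(1, std::memory_order_relaxed); }
static void swift_release(Obj *O) { O->RC.fetch_sub(1, std::memory_order_acq_rel); }

int example(Obj *O, int X) {
  swift_retain(O);   // rc: n -> n+1
  int Y = X * 2;     // touches nothing reachable from O, so the release
  swift_release(O);  // below can move up to the retain and the pair cancels
  return Y;
}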
Example no. 29
/// \brief Recursively handle the condition leading to a loop
Value *SIAnnotateControlFlow::handleLoopCondition(Value *Cond, PHINode *Broken,
                                             llvm::Loop *L, BranchInst *Term) {

  // Only search through PHI nodes which are inside the loop.  If we try this
  // with PHI nodes that are outside of the loop, we end up inserting new PHI
  // nodes outside of the loop which depend on values defined inside the loop.
  // This will break the module with
  // 'Instruction does not dominate all users!' errors.
  PHINode *Phi = nullptr;
  if ((Phi = dyn_cast<PHINode>(Cond)) && L->contains(Phi)) {

    BasicBlock *Parent = Phi->getParent();
    PHINode *NewPhi = PHINode::Create(Int64, 0, "", &Parent->front());
    Value *Ret = NewPhi;

    // Handle all non-constant incoming values first
    for (unsigned i = 0, e = Phi->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = Phi->getIncomingValue(i);
      BasicBlock *From = Phi->getIncomingBlock(i);
      if (isa<ConstantInt>(Incoming)) {
        NewPhi->addIncoming(Broken, From);
        continue;
      }

      Phi->setIncomingValue(i, BoolFalse);
      Value *PhiArg = handleLoopCondition(Incoming, Broken, L, Term);
      NewPhi->addIncoming(PhiArg, From);
    }

    BasicBlock *IDom = DT->getNode(Parent)->getIDom()->getBlock();

    for (unsigned i = 0, e = Phi->getNumIncomingValues(); i != e; ++i) {

      Value *Incoming = Phi->getIncomingValue(i);
      if (Incoming != BoolTrue)
        continue;

      BasicBlock *From = Phi->getIncomingBlock(i);
      if (From == IDom) {
        // We're in the following situation:
        //   IDom/From
        //      |   \
        //      |   If-block
        //      |   /
        //     Parent
        // where we want to break out of the loop if the If-block is not taken.
        // Due to the depth-first traversal, there should be an end.cf
        // intrinsic in Parent, and we insert an else.break before it.
        //
        // Note that the end.cf need not be the first non-phi instruction
        // of parent, particularly when we're dealing with a multi-level
        // break, but it should occur within a group of intrinsic calls
        // at the beginning of the block.
        CallInst *OldEnd = dyn_cast<CallInst>(Parent->getFirstInsertionPt());
        while (OldEnd && OldEnd->getCalledFunction() != EndCf)
          OldEnd = dyn_cast<CallInst>(OldEnd->getNextNode());
        if (OldEnd && OldEnd->getCalledFunction() == EndCf) {
          Value *Args[] = { OldEnd->getArgOperand(0), NewPhi };
          Ret = CallInst::Create(ElseBreak, Args, "", OldEnd);
          continue;
        }
      }
      TerminatorInst *Insert = From->getTerminator();
      Value *PhiArg = CallInst::Create(Break, Broken, "", Insert);
      NewPhi->setIncomingValue(i, PhiArg);
    }
    eraseIfUnused(Phi);
    return Ret;

  } else if (Instruction *Inst = dyn_cast<Instruction>(Cond)) {
    BasicBlock *Parent = Inst->getParent();
    Instruction *Insert;
    if (L->contains(Inst)) {
      Insert = Parent->getTerminator();
    } else {
      Insert = L->getHeader()->getFirstNonPHIOrDbgOrLifetime();
    }
    Value *Args[] = { Cond, Broken };
    return CallInst::Create(IfBreak, Args, "", Insert);

  // Insert IfBreak before TERM for constant COND.
  } else if (isa<ConstantInt>(Cond)) {
    Value *Args[] = { Cond, Broken };
    return CallInst::Create(IfBreak, Args, "", Term);

  } else {
    llvm_unreachable("Unhandled loop condition!");
  }
  return nullptr;
}
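
The Broken value threaded through handleLoopCondition is a 64-bit mask (note the Int64 PHI the function creates): each SIMD lane sets its bit when it wants to leave the loop, and the loop truly ends only once every active lane has broken. A scalar sketch of the per-lane update (the name and signature are illustrative, not the AMDGPU intrinsic's implementation):

#include <cstdint>

// One lane's view of si.if.break: fold this lane's exit decision
// into the shared break mask.
static uint64_t ifBreak(bool Cond, uint64_t Broken, uint64_t LaneBit) {
  return Cond ? (Broken | LaneBit) : Broken;
}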
Example no. 30
/// InlineHalfPowrs - Inline a sequence of adjacent half_powr calls, rearranging
/// their control flow to better facilitate subsequent optimization.
Instruction *
SimplifyHalfPowrLibCalls::
InlineHalfPowrs(const std::vector<Instruction *> &HalfPowrs,
                Instruction *InsertPt) {
  std::vector<BasicBlock *> Bodies;
  BasicBlock *NewBlock = 0;

  for (unsigned i = 0, e = HalfPowrs.size(); i != e; ++i) {
    CallInst *Call = cast<CallInst>(HalfPowrs[i]);
    Function *Callee = Call->getCalledFunction();

    // Minimally sanity-check the CFG of half_powr to ensure that it contains
    // the kind of code we expect.  If we're running this pass, we have
    // reason to believe it will be what we expect.
    Function::iterator I = Callee->begin();
    BasicBlock *Prologue = I++;
    if (I == Callee->end()) break;
    BasicBlock *SubnormalHandling = I++;
    if (I == Callee->end()) break;
    BasicBlock *Body = I++;
    if (I != Callee->end()) break;
    if (SubnormalHandling->getSinglePredecessor() != Prologue)
      break;
    BranchInst *PBI = dyn_cast<BranchInst>(Prologue->getTerminator());
    if (!PBI || !PBI->isConditional())
      break;
    BranchInst *SNBI = dyn_cast<BranchInst>(SubnormalHandling->getTerminator());
    if (!SNBI || SNBI->isConditional())
      break;
    if (!isa<ReturnInst>(Body->getTerminator()))
      break;

    Instruction *NextInst = llvm::next(BasicBlock::iterator(Call));

    // Inline the call, taking care of what code ends up where.
    NewBlock = SplitBlock(NextInst->getParent(), NextInst, this);

    InlineFunctionInfo IFI(0, TD);
    bool B = InlineFunction(Call, IFI);
    assert(B && "half_powr didn't inline?"); (void)B; // silence release-build warning

    BasicBlock *NewBody = NewBlock->getSinglePredecessor();
    assert(NewBody);
    Bodies.push_back(NewBody);
  }

  if (!NewBlock)
    return InsertPt;

  // Put the code for all the bodies into one block, to facilitate
  // subsequent optimization.
  (void)SplitEdge(NewBlock->getSinglePredecessor(), NewBlock, this);
  for (unsigned i = 0, e = Bodies.size(); i != e; ++i) {
    BasicBlock *Body = Bodies[i];
    Instruction *FNP = Body->getFirstNonPHI();
    // Splice the insts from body into NewBlock.
    NewBlock->getInstList().splice(NewBlock->begin(), Body->getInstList(),
                                   FNP, Body->getTerminator());
  }

  return NewBlock->begin();
}
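
For context, the three-block CFG the sanity check above insists on corresponds to a half_powr written roughly like this (a sketch; the clamp and the use of std::pow are illustrative, not OpenCL's exact half_powr definition):

#include <cmath>

static float halfPowrSketch(float X, float Y) {
  const float MinNormal = 1.17549435e-38f;  // ~2^-126
  if (X < MinNormal)        // Prologue: the conditional branch
    X = MinNormal;          // SubnormalHandling: single-predecessor block
  return std::pow(X, Y);    // Body: ends in the lone ReturnInst
}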