Example 1
bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    const Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
    CI->replaceAllUsesWith(RetVal);
    CI->eraseFromParent();
    return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;
  
  // We'll need TargetData from here on out.
  const TargetData *TD = TLI ? TLI->getTargetData() : 0;
  if (!TD) return false;
  
  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize.  Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD);
}
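
The fold in Example 1 hinges on the constant chosen for the "don't know" case: 0 when the intrinsic's min flag (its second argument) is 1, the all-ones value otherwise. Below is a minimal standalone sketch of just that decision in plain C++ rather than LLVM API; the helper name foldUnknownObjectSize is made up for illustration.

#include <cassert>
#include <cstdint>

// Hypothetical helper mirroring the fold above: when the object size cannot
// be determined, llvm.objectsize yields 0 if the "min" flag is set and the
// all-ones value (-1) otherwise.
std::uint64_t foldUnknownObjectSize(bool Min) {
  return Min ? 0 : -1ULL;
}

int main() {
  assert(foldUnknownObjectSize(true) == 0);           // min: smallest safe answer
  assert(foldUnknownObjectSize(false) == UINT64_MAX); // max: "could be anything"
  return 0;
}
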
Example 2
bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
    BasicBlock *BB = CI->getParent();

    // Lower inline assembly if we can.
    // If we found an inline asm expression, and if the target knows how to
    // lower it to normal LLVM code, do so now.
    if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
        if (TLI->ExpandInlineAsm(CI)) {
            // Avoid invalidating the iterator.
            CurInstIterator = BB->begin();
            // Avoid processing instructions out of order, which could cause
            // reuse before a value is defined.
            SunkAddrs.clear();
            return true;
        }
        // Sink address computing for memory operands into the block.
        if (OptimizeInlineAsmInst(CI))
            return true;
    }

    // Lower all uses of llvm.objectsize.*
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
    if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
        bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
        Type *ReturnTy = CI->getType();
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

        // Substituting this can cause recursive simplifications, which can
        // invalidate our iterator.  Use a WeakVH to hold onto it in case this
        // happens.
        WeakVH IterHandle(CurInstIterator);

        ReplaceAndSimplifyAllUses(CI, RetVal, TLI ? TLI->getTargetData() : 0,
                                  TLInfo, ModifiedDT ? 0 : DT);

        // If the iterator instruction was recursively deleted, start over at the
        // start of the block.
        if (IterHandle != CurInstIterator) {
            CurInstIterator = BB->begin();
            SunkAddrs.clear();
        }
        return true;
    }

    // From here on out we're working with named functions.
    if (CI->getCalledFunction() == 0) return false;

    // We'll need TargetData from here on out.
    const TargetData *TD = TLI ? TLI->getTargetData() : 0;
    if (!TD) return false;

    // Lower all default uses of _chk calls.  This is very similar
    // to what InstCombineCalls does, but here we are only lowering calls
    // that have the default "don't know" as the objectsize.  Anything else
    // should be left alone.
    CodeGenPrepareFortifiedLibCalls Simplifier;
    return Simplifier.fold(CI, TD);
}
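
Example 2 additionally guards against iterator invalidation: ReplaceAndSimplifyAllUses may recursively delete the very instruction that CurInstIterator points at, so a WeakVH watches it and, if it disappears, scanning restarts at BB->begin(). Below is a rough standalone analogue of that pattern using std::weak_ptr in place of LLVM's WeakVH; it is illustrative only, not LLVM code.

#include <cstdio>
#include <memory>
#include <vector>

int main() {
  // A toy "block" of elements we are iterating over.
  std::vector<std::shared_ptr<int>> block;
  for (int i = 0; i < 3; ++i)
    block.push_back(std::make_shared<int>(i));

  std::size_t cur = 1;
  std::weak_ptr<int> handle = block[cur];  // plays the role of WeakVH(CurInstIterator)

  // Simulate a recursive simplification that deletes the element under the
  // iterator while we are still "visiting" it.
  block.erase(block.begin() + cur);

  if (handle.expired()) {
    // The watched element is gone: restart from the beginning of the block,
    // just as the pass resets CurInstIterator to BB->begin() and clears
    // SunkAddrs above.
    cur = 0;
  }
  std::printf("resume at index %zu\n", cur);
  return 0;
}
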